aboutsummaryrefslogtreecommitdiff
path: root/gcc
diff options
context:
space:
mode:
authorThomas Schwinge <tschwinge@baylibre.com>2024-03-19 16:45:27 +0100
committerThomas Schwinge <tschwinge@baylibre.com>2024-03-19 16:45:27 +0100
commitd374f52a692c1a717328280b254b4ed860195b89 (patch)
tree7a2d89b25c9e1e83d402e4cce5fd5aa9c0831710 /gcc
parentf4530cab2cb400dcb68ec2c016717b9f62eed0c8 (diff)
parente7d6c277fa28c0b9b621d23c471e0388d2912644 (diff)
downloadgcc-d374f52a692c1a717328280b254b4ed860195b89.zip
gcc-d374f52a692c1a717328280b254b4ed860195b89.tar.gz
gcc-d374f52a692c1a717328280b254b4ed860195b89.tar.bz2
Merge commit '8fc4e6c397e1ce64bec6f9fed148950821cc79e7^' into HEAD
Diffstat (limited to 'gcc')
-rw-r--r--gcc/ChangeLog2767
-rw-r--r--gcc/DATESTAMP2
-rw-r--r--gcc/Makefile.in15
-rw-r--r--gcc/ada/ChangeLog281
-rw-r--r--gcc/ada/adaint.c17
-rw-r--r--gcc/ada/checks.adb68
-rw-r--r--gcc/ada/checks.ads13
-rw-r--r--gcc/ada/contracts.adb163
-rw-r--r--gcc/ada/doc/gnat_rm/the_implementation_of_standard_i_o.rst5
-rw-r--r--gcc/ada/einfo-utils.adb5
-rw-r--r--gcc/ada/einfo-utils.ads2
-rw-r--r--gcc/ada/exp_aggr.adb12
-rw-r--r--gcc/ada/exp_attr.adb77
-rw-r--r--gcc/ada/exp_ch4.adb41
-rw-r--r--gcc/ada/exp_ch4.ads2
-rw-r--r--gcc/ada/exp_ch5.adb4
-rw-r--r--gcc/ada/exp_ch6.adb148
-rw-r--r--gcc/ada/exp_ch7.adb149
-rw-r--r--gcc/ada/exp_ch7.ads12
-rw-r--r--gcc/ada/exp_put_image.adb2
-rw-r--r--gcc/ada/exp_unst.adb4
-rw-r--r--gcc/ada/exp_util.adb102
-rw-r--r--gcc/ada/exp_util.ads13
-rw-r--r--gcc/ada/expect.c1
-rw-r--r--gcc/ada/freeze.adb245
-rw-r--r--gcc/ada/gcc-interface/decl.cc1
-rw-r--r--gcc/ada/gcc-interface/gigi.h2
-rw-r--r--gcc/ada/gcc-interface/misc.cc7
-rw-r--r--gcc/ada/gcc-interface/trans.cc18
-rw-r--r--gcc/ada/gcc-interface/utils.cc81
-rw-r--r--gcc/ada/gcc-interface/utils2.cc5
-rw-r--r--gcc/ada/gnat_rm.texi7
-rw-r--r--gcc/ada/inline.adb2
-rw-r--r--gcc/ada/libgnarl/a-rttiev.ads6
-rw-r--r--gcc/ada/libgnat/i-cstrin.adb24
-rw-r--r--gcc/ada/libgnat/s-imgboo.adb2
-rw-r--r--gcc/ada/libgnat/s-imgboo.ads5
-rw-r--r--gcc/ada/libgnat/s-valboo.ads34
-rw-r--r--gcc/ada/libgnat/s-valspe.ads36
-rw-r--r--gcc/ada/sem_aggr.adb274
-rw-r--r--gcc/ada/sem_attr.adb20
-rw-r--r--gcc/ada/sem_ch12.adb54
-rw-r--r--gcc/ada/sem_ch13.adb4
-rw-r--r--gcc/ada/sem_ch3.adb61
-rw-r--r--gcc/ada/sem_ch4.adb18
-rw-r--r--gcc/ada/sem_ch5.adb8
-rw-r--r--gcc/ada/sem_ch6.adb8
-rw-r--r--gcc/ada/sem_ch8.adb340
-rw-r--r--gcc/ada/sem_disp.adb1
-rw-r--r--gcc/ada/sem_prag.adb102
-rw-r--r--gcc/ada/sem_res.adb97
-rw-r--r--gcc/ada/sem_scil.adb1
-rw-r--r--gcc/ada/sem_util.adb166
-rw-r--r--gcc/ada/sem_util.ads21
-rw-r--r--gcc/ada/terminals.c8
-rw-r--r--gcc/analyzer/ChangeLog40
-rw-r--r--gcc/analyzer/analyzer.h1
-rw-r--r--gcc/analyzer/bounds-checking.cc130
-rw-r--r--gcc/analyzer/call-details.cc8
-rw-r--r--gcc/analyzer/diagnostic-manager.cc53
-rw-r--r--gcc/analyzer/diagnostic-manager.h2
-rw-r--r--gcc/analyzer/engine.cc15
-rw-r--r--gcc/analyzer/infinite-loop.cc9
-rw-r--r--gcc/analyzer/infinite-recursion.cc9
-rw-r--r--gcc/analyzer/kf-analyzer.cc4
-rw-r--r--gcc/analyzer/kf.cc32
-rw-r--r--gcc/analyzer/pending-diagnostic.cc45
-rw-r--r--gcc/analyzer/pending-diagnostic.h56
-rw-r--r--gcc/analyzer/region-model.cc123
-rw-r--r--gcc/analyzer/region.cc1
-rw-r--r--gcc/analyzer/sm-fd.cc75
-rw-r--r--gcc/analyzer/sm-file.cc23
-rw-r--r--gcc/analyzer/sm-malloc.cc118
-rw-r--r--gcc/analyzer/sm-pattern-test.cc8
-rw-r--r--gcc/analyzer/sm-sensitive.cc12
-rw-r--r--gcc/analyzer/sm-signal.cc11
-rw-r--r--gcc/analyzer/sm-taint.cc212
-rw-r--r--gcc/analyzer/store.cc1
-rw-r--r--gcc/analyzer/varargs.cc39
-rw-r--r--gcc/asan.cc2
-rw-r--r--gcc/attribs.cc307
-rw-r--r--gcc/attribs.h12
-rw-r--r--gcc/btfout.cc42
-rw-r--r--gcc/builtins.cc421
-rw-r--r--gcc/builtins.def4
-rw-r--r--gcc/c-family/ChangeLog77
-rw-r--r--gcc/c-family/c-attribs.cc199
-rw-r--r--gcc/c-family/c-common.cc30
-rw-r--r--gcc/c-family/c-common.h22
-rw-r--r--gcc/c-family/c-cppbuiltin.cc3
-rw-r--r--gcc/c-family/c-opts.cc5
-rw-r--r--gcc/c-family/c.opt8
-rw-r--r--gcc/c/ChangeLog109
-rw-r--r--gcc/c/c-convert.cc14
-rw-r--r--gcc/c/c-decl.cc93
-rw-r--r--gcc/c/c-objc-common.h14
-rw-r--r--gcc/c/c-tree.h2
-rw-r--r--gcc/c/c-typeck.cc197
-rw-r--r--gcc/c/gimple-parser.cc8
-rw-r--r--gcc/calls.cc64
-rw-r--r--gcc/cfgexpand.cc37
-rw-r--r--gcc/cfgloopmanip.h1
-rw-r--r--gcc/cfgrtl.cc2
-rw-r--r--gcc/cgraph.h2
-rw-r--r--gcc/cgraphunit.cc5
-rw-r--r--gcc/common.opt63
-rw-r--r--gcc/common/config/i386/cpuinfo.h4
-rw-r--r--gcc/common/config/riscv/riscv-common.cc89
-rw-r--r--gcc/config.gcc8
-rw-r--r--gcc/config/aarch64/aarch64-builtins.cc218
-rw-r--r--gcc/config/aarch64/aarch64-c.cc59
-rw-r--r--gcc/config/aarch64/aarch64-cores.def1
-rw-r--r--gcc/config/aarch64/aarch64-cost-tables.h107
-rw-r--r--gcc/config/aarch64/aarch64-isa-modes.def40
-rw-r--r--gcc/config/aarch64/aarch64-modes.def24
-rw-r--r--gcc/config/aarch64/aarch64-option-extensions.def8
-rw-r--r--gcc/config/aarch64/aarch64-passes.def1
-rw-r--r--gcc/config/aarch64/aarch64-protos.h31
-rw-r--r--gcc/config/aarch64/aarch64-simd.md48
-rw-r--r--gcc/config/aarch64/aarch64-sme.md1984
-rw-r--r--gcc/config/aarch64/aarch64-sve-builtins-base.cc192
-rw-r--r--gcc/config/aarch64/aarch64-sve-builtins-base.def206
-rw-r--r--gcc/config/aarch64/aarch64-sve-builtins-functions.h258
-rw-r--r--gcc/config/aarch64/aarch64-sve-builtins-shapes.cc1312
-rw-r--r--gcc/config/aarch64/aarch64-sve-builtins-shapes.h51
-rw-r--r--gcc/config/aarch64/aarch64-sve-builtins-sme.cc579
-rw-r--r--gcc/config/aarch64/aarch64-sve-builtins-sme.def198
-rw-r--r--gcc/config/aarch64/aarch64-sve-builtins-sme.h83
-rw-r--r--gcc/config/aarch64/aarch64-sve-builtins-sve2.cc115
-rw-r--r--gcc/config/aarch64/aarch64-sve-builtins-sve2.def147
-rw-r--r--gcc/config/aarch64/aarch64-sve-builtins-sve2.h21
-rw-r--r--gcc/config/aarch64/aarch64-sve-builtins.cc1551
-rw-r--r--gcc/config/aarch64/aarch64-sve-builtins.def62
-rw-r--r--gcc/config/aarch64/aarch64-sve-builtins.h278
-rw-r--r--gcc/config/aarch64/aarch64-sve.md252
-rw-r--r--gcc/config/aarch64/aarch64-sve2.md717
-rw-r--r--gcc/config/aarch64/aarch64-sys-regs.def1064
-rw-r--r--gcc/config/aarch64/aarch64-tune.md2
-rw-r--r--gcc/config/aarch64/aarch64-tuning-flags.def2
-rw-r--r--gcc/config/aarch64/aarch64.cc4085
-rw-r--r--gcc/config/aarch64/aarch64.h247
-rw-r--r--gcc/config/aarch64/aarch64.md216
-rw-r--r--gcc/config/aarch64/arm_acle.h30
-rw-r--r--gcc/config/aarch64/arm_sme.h45
-rw-r--r--gcc/config/aarch64/atomics.md2
-rw-r--r--gcc/config/aarch64/constraints.md51
-rw-r--r--gcc/config/aarch64/iterators.md489
-rw-r--r--gcc/config/aarch64/predicates.md39
-rw-r--r--gcc/config/aarch64/t-aarch6427
-rw-r--r--gcc/config/aarch64/tuning_models/ampere1.h2
-rw-r--r--gcc/config/aarch64/tuning_models/ampere1a.h4
-rw-r--r--gcc/config/aarch64/tuning_models/ampere1b.h115
-rw-r--r--gcc/config/alpha/alpha.cc7
-rw-r--r--gcc/config/arc/arc.cc74
-rw-r--r--gcc/config/arc/arc.md353
-rw-r--r--gcc/config/arm/aarch-common-protos.h2
-rw-r--r--gcc/config/arm/aarch-common.cc3
-rw-r--r--gcc/config/arm/arm.cc20
-rw-r--r--gcc/config/avr/avr.cc8
-rw-r--r--gcc/config/bfin/bfin.cc7
-rw-r--r--gcc/config/bpf/bpf.cc80
-rw-r--r--gcc/config/bpf/bpf.h7
-rw-r--r--gcc/config/bpf/core-builtins.cc5
-rw-r--r--gcc/config/cris/cris.cc6
-rw-r--r--gcc/config/csky/csky.cc7
-rw-r--r--gcc/config/epiphany/epiphany.cc7
-rw-r--r--gcc/config/frv/frv.h2
-rw-r--r--gcc/config/gcn/driver-gcn.cc32
-rw-r--r--gcc/config/gcn/gcn-builtins.def2
-rw-r--r--gcc/config/gcn/gcn-hsa.h8
-rw-r--r--gcc/config/gcn/gcn.cc24
-rw-r--r--gcc/config/gcn/gcn.opt4
-rw-r--r--gcc/config/gcn/t-gcn-hsa4
-rw-r--r--gcc/config/h8300/h8300.cc7
-rw-r--r--gcc/config/i386/gnu.h6
-rw-r--r--gcc/config/i386/gnu64.h6
-rw-r--r--gcc/config/i386/i386-features.cc9
-rw-r--r--gcc/config/i386/i386-options.cc10
-rw-r--r--gcc/config/i386/i386-options.h2
-rw-r--r--gcc/config/i386/i386-passes.def2
-rw-r--r--gcc/config/i386/i386.cc73
-rw-r--r--gcc/config/i386/i386.md17
-rw-r--r--gcc/config/i386/sse.md233
-rw-r--r--gcc/config/i386/t-gnu6438
-rw-r--r--gcc/config/ia64/ia64.cc15
-rw-r--r--gcc/config/linux.h4
-rw-r--r--gcc/config/loongarch/lasx.md283
-rw-r--r--gcc/config/loongarch/loongarch-builtins.cc52
-rw-r--r--gcc/config/loongarch/loongarch.cc41
-rw-r--r--gcc/config/loongarch/loongarch.h9
-rw-r--r--gcc/config/loongarch/loongarch.md39
-rw-r--r--gcc/config/loongarch/lsx.md293
-rw-r--r--gcc/config/loongarch/simd.md286
-rw-r--r--gcc/config/m32c/m32c.cc7
-rw-r--r--gcc/config/m32r/m32r.cc7
-rw-r--r--gcc/config/m68k/m68k.cc7
-rw-r--r--gcc/config/mcore/mcore.cc7
-rw-r--r--gcc/config/microblaze/microblaze.cc7
-rw-r--r--gcc/config/microblaze/microblaze.md4
-rw-r--r--gcc/config/mips/mips.cc7
-rw-r--r--gcc/config/mn10300/mn10300.cc3
-rw-r--r--gcc/config/msp430/msp430.cc8
-rw-r--r--gcc/config/nds32/nds32.cc13
-rw-r--r--gcc/config/nvptx/nvptx.cc11
-rw-r--r--gcc/config/pdp11/pdp11.cc6
-rw-r--r--gcc/config/riscv/autovec.md40
-rw-r--r--gcc/config/riscv/constraints.md23
-rw-r--r--gcc/config/riscv/riscv-cores.def1
-rw-r--r--gcc/config/riscv/riscv-opts.h18
-rw-r--r--gcc/config/riscv/riscv-protos.h1
-rw-r--r--gcc/config/riscv/riscv-string.cc106
-rw-r--r--gcc/config/riscv/riscv-subset.h1
-rw-r--r--gcc/config/riscv/riscv-v.cc50
-rw-r--r--gcc/config/riscv/riscv-vsetvl.cc134
-rw-r--r--gcc/config/riscv/riscv.cc82
-rw-r--r--gcc/config/riscv/riscv.md66
-rw-r--r--gcc/config/riscv/riscv.opt20
-rw-r--r--gcc/config/riscv/sfb.md37
-rw-r--r--gcc/config/riscv/vector.md461
-rw-r--r--gcc/config/rl78/rl78.cc7
-rw-r--r--gcc/config/rs6000/rs6000.cc19
-rw-r--r--gcc/config/rs6000/rs6000.md20
-rw-r--r--gcc/config/rx/rx.cc7
-rw-r--r--gcc/config/s390/s390.cc19
-rw-r--r--gcc/config/sh/sh.cc7
-rw-r--r--gcc/config/sparc/sparc.cc7
-rw-r--r--gcc/config/stormy16/stormy16.cc7
-rw-r--r--gcc/config/v850/v850.cc7
-rw-r--r--gcc/config/vax/vax.cc4
-rw-r--r--gcc/config/visium/visium.cc12
-rw-r--r--gcc/cp/ChangeLog267
-rw-r--r--gcc/cp/call.cc4
-rw-r--r--gcc/cp/class.cc2
-rw-r--r--gcc/cp/constexpr.cc15
-rw-r--r--gcc/cp/constraint.cc10
-rw-r--r--gcc/cp/contracts.cc6
-rw-r--r--gcc/cp/cp-gimplify.cc348
-rw-r--r--gcc/cp/cp-objcp-common.cc2
-rw-r--r--gcc/cp/cp-objcp-common.h15
-rw-r--r--gcc/cp/cp-tree.def3
-rw-r--r--gcc/cp/cp-tree.h65
-rw-r--r--gcc/cp/decl.cc82
-rw-r--r--gcc/cp/decl2.cc16
-rw-r--r--gcc/cp/error.cc2
-rw-r--r--gcc/cp/lambda.cc38
-rw-r--r--gcc/cp/mangle.cc379
-rw-r--r--gcc/cp/module.cc4
-rw-r--r--gcc/cp/name-lookup.cc373
-rw-r--r--gcc/cp/parser.cc114
-rw-r--r--gcc/cp/pt.cc109
-rw-r--r--gcc/cp/ptree.cc2
-rw-r--r--gcc/cp/search.cc17
-rw-r--r--gcc/cp/semantics.cc82
-rw-r--r--gcc/cp/tree.cc19
-rw-r--r--gcc/cp/typeck.cc39
-rw-r--r--gcc/d/ChangeLog23
-rw-r--r--gcc/d/d-attribs.cc35
-rw-r--r--gcc/d/d-lang.cc8
-rw-r--r--gcc/d/d-tree.h4
-rw-r--r--gcc/df-scan.cc10
-rw-r--r--gcc/diagnostic-core.h6
-rw-r--r--gcc/diagnostic-format-sarif.cc22
-rw-r--r--gcc/diagnostic-metadata.h10
-rw-r--r--gcc/diagnostic-show-locus.cc26
-rw-r--r--gcc/diagnostic.cc47
-rw-r--r--gcc/diagnostic.h2
-rw-r--r--gcc/doc/extend.texi2102
-rw-r--r--gcc/doc/install.texi5
-rw-r--r--gcc/doc/invoke.texi202
-rw-r--r--gcc/doc/sourcebuild.texi44
-rw-r--r--gcc/doc/tm.texi149
-rw-r--r--gcc/doc/tm.texi.in37
-rw-r--r--gcc/except.cc20
-rw-r--r--gcc/expr.cc396
-rw-r--r--gcc/expr.h9
-rw-r--r--gcc/flag-types.h11
-rw-r--r--gcc/fold-const.cc112
-rw-r--r--gcc/fold-mem-offsets.cc28
-rw-r--r--gcc/fortran/ChangeLog58
-rw-r--r--gcc/fortran/f95-lang.cc14
-rw-r--r--gcc/fortran/gfortran.h3
-rw-r--r--gcc/fortran/primary.cc16
-rw-r--r--gcc/fortran/trans-array.cc9
-rw-r--r--gcc/fortran/trans-expr.cc70
-rw-r--r--gcc/fortran/trans-types.cc13
-rw-r--r--gcc/function.cc65
-rw-r--r--gcc/function.h11
-rw-r--r--gcc/gcc.cc1
-rw-r--r--gcc/gengtype-lex.l3
-rw-r--r--gcc/genhooks.cc7
-rw-r--r--gcc/gimple-lower-bitint.cc107
-rw-r--r--gcc/gimple-match-exports.cc25
-rw-r--r--gcc/gimple-predicate-analysis.cc78
-rw-r--r--gcc/gimple-range-fold.h12
-rw-r--r--gcc/gimple-range.cc34
-rw-r--r--gcc/gimple-range.h1
-rw-r--r--gcc/gimplify.cc8
-rw-r--r--gcc/go/gofrontend/MERGE2
-rw-r--r--gcc/hooks.cc5
-rw-r--r--gcc/hooks.h1
-rw-r--r--gcc/internal-fn.cc8
-rw-r--r--gcc/ipa-cp.cc3
-rw-r--r--gcc/ipa-icf.cc4
-rw-r--r--gcc/ipa-inline.cc6
-rw-r--r--gcc/ipa-split.cc7
-rw-r--r--gcc/ipa-strub.cc3573
-rw-r--r--gcc/ipa-strub.h45
-rw-r--r--gcc/ira.cc2
-rw-r--r--gcc/jit/ChangeLog19
-rw-r--r--gcc/jit/dummy-frontend.cc32
-rw-r--r--gcc/langhooks-def.h8
-rw-r--r--gcc/langhooks.h4
-rw-r--r--gcc/lra-assigns.cc1
-rw-r--r--gcc/lra-coalesce.cc4
-rw-r--r--gcc/lra-constraints.cc13
-rw-r--r--gcc/lra-int.h19
-rw-r--r--gcc/lra-lives.cc4
-rw-r--r--gcc/lra-remat.cc2
-rw-r--r--gcc/lra.cc31
-rw-r--r--gcc/lra.h2
-rw-r--r--gcc/lto/ChangeLog19
-rw-r--r--gcc/lto/lto-lang.cc30
-rw-r--r--gcc/m2/ChangeLog20
-rw-r--r--gcc/m2/gm2-compiler/M2Quads.mod4
-rw-r--r--gcc/m2/lang.opt4
-rw-r--r--gcc/m2/mc/mc.flex6
-rw-r--r--gcc/m2/tools-src/makeSystem6
-rw-r--r--gcc/match.pd29
-rw-r--r--gcc/objc/ChangeLog10
-rw-r--r--gcc/objcp/ChangeLog6
-rw-r--r--gcc/passes.cc5
-rw-r--r--gcc/passes.def5
-rw-r--r--gcc/plugin.h3
-rw-r--r--gcc/range-op-mixed.h54
-rw-r--r--gcc/range-op.cc51
-rw-r--r--gcc/range-op.h5
-rw-r--r--gcc/recog.cc20
-rw-r--r--gcc/target-def.h14
-rw-r--r--gcc/target.def129
-rw-r--r--gcc/targhooks.cc10
-rw-r--r--gcc/targhooks.h5
-rw-r--r--gcc/testsuite/ChangeLog3452
-rw-r--r--gcc/testsuite/c-c++-common/analyzer/null-deref-pr108251-smp_fetch_ssl_fc_has_early-O2.c2
-rw-r--r--gcc/testsuite/c-c++-common/analyzer/null-deref-pr108251-smp_fetch_ssl_fc_has_early.c2
-rw-r--r--gcc/testsuite/c-c++-common/array-lit.c3
-rw-r--r--gcc/testsuite/c-c++-common/fhardened-1.c2
-rw-r--r--gcc/testsuite/c-c++-common/fhardened-2.c2
-rw-r--r--gcc/testsuite/c-c++-common/pr77624-1.c4
-rw-r--r--gcc/testsuite/c-c++-common/spellcheck-reserved.c4
-rw-r--r--gcc/testsuite/c-c++-common/strub-O0.c14
-rw-r--r--gcc/testsuite/c-c++-common/strub-O1.c15
-rw-r--r--gcc/testsuite/c-c++-common/strub-O2.c16
-rw-r--r--gcc/testsuite/c-c++-common/strub-O2fni.c15
-rw-r--r--gcc/testsuite/c-c++-common/strub-O3.c12
-rw-r--r--gcc/testsuite/c-c++-common/strub-O3fni.c15
-rw-r--r--gcc/testsuite/c-c++-common/strub-Og.c16
-rw-r--r--gcc/testsuite/c-c++-common/strub-Os.c18
-rw-r--r--gcc/testsuite/c-c++-common/strub-all1.c32
-rw-r--r--gcc/testsuite/c-c++-common/strub-all2.c24
-rw-r--r--gcc/testsuite/c-c++-common/strub-apply1.c15
-rw-r--r--gcc/testsuite/c-c++-common/strub-apply2.c12
-rw-r--r--gcc/testsuite/c-c++-common/strub-apply3.c8
-rw-r--r--gcc/testsuite/c-c++-common/strub-apply4.c21
-rw-r--r--gcc/testsuite/c-c++-common/strub-at-calls1.c30
-rw-r--r--gcc/testsuite/c-c++-common/strub-at-calls2.c23
-rw-r--r--gcc/testsuite/c-c++-common/strub-defer-O1.c7
-rw-r--r--gcc/testsuite/c-c++-common/strub-defer-O2.c8
-rw-r--r--gcc/testsuite/c-c++-common/strub-defer-O3.c110
-rw-r--r--gcc/testsuite/c-c++-common/strub-defer-Os.c7
-rw-r--r--gcc/testsuite/c-c++-common/strub-internal1.c31
-rw-r--r--gcc/testsuite/c-c++-common/strub-internal2.c21
-rw-r--r--gcc/testsuite/c-c++-common/strub-parms1.c48
-rw-r--r--gcc/testsuite/c-c++-common/strub-parms2.c36
-rw-r--r--gcc/testsuite/c-c++-common/strub-parms3.c58
-rw-r--r--gcc/testsuite/c-c++-common/strub-relaxed1.c18
-rw-r--r--gcc/testsuite/c-c++-common/strub-relaxed2.c14
-rw-r--r--gcc/testsuite/c-c++-common/strub-short-O0-exc.c10
-rw-r--r--gcc/testsuite/c-c++-common/strub-short-O0.c10
-rw-r--r--gcc/testsuite/c-c++-common/strub-short-O1.c10
-rw-r--r--gcc/testsuite/c-c++-common/strub-short-O2.c10
-rw-r--r--gcc/testsuite/c-c++-common/strub-short-O3.c12
-rw-r--r--gcc/testsuite/c-c++-common/strub-short-Os.c12
-rw-r--r--gcc/testsuite/c-c++-common/strub-strict1.c36
-rw-r--r--gcc/testsuite/c-c++-common/strub-strict2.c25
-rw-r--r--gcc/testsuite/c-c++-common/strub-tail-O1.c8
-rw-r--r--gcc/testsuite/c-c++-common/strub-tail-O2.c14
-rw-r--r--gcc/testsuite/c-c++-common/strub-var1.c24
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-bret-always.c3
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-bret-except.c17
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-bret-never.c3
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-bret-no-xthrow.c3
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noopt.c3
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noret.c3
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-bret-nothrow.c3
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-bret-retcl.c3
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-bret.c23
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-callable1.c9
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-callable2.c264
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-const1.c23
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-const2.c25
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-const3.c16
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-const4.c20
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-data1.c13
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-data2.c14
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-data3.c14
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-data4.c14
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-data5.c15
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-indcall1.c14
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-indcall2.c14
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-indcall3.c14
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-inlinable1.c16
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-inlinable2.c7
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-ptrfn1.c10
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-ptrfn2.c55
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-ptrfn3.c50
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-ptrfn4.c43
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-pure1.c18
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-pure2.c22
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-pure3.c13
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-pure4.c17
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-run1.c95
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-run2.c84
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-run3.c80
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-run4.c106
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-run4c.c5
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-run4d.c7
-rw-r--r--gcc/testsuite/c-c++-common/torture/strub-run4i.c5
-rw-r--r--gcc/testsuite/g++.dg/DRs/dr2262.C16
-rw-r--r--gcc/testsuite/g++.dg/abi/mangle-concepts1.C88
-rw-r--r--gcc/testsuite/g++.dg/abi/mangle-ttp1.C27
-rw-r--r--gcc/testsuite/g++.dg/abi/mangle10.C2
-rw-r--r--gcc/testsuite/g++.dg/abi/mangle52.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/constexpr-array-ptr6.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/constexpr-noreturn1.C12
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/constexpr-ref12.C4
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/constexpr-ref13.C41
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/constexpr-ref2.C4
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/gen-attrs-76.C8
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/noexcept34.C8
-rw-r--r--gcc/testsuite/g++.dg/cpp1y/lambda-generic-const10.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp1z/array-prvalue1.C7
-rw-r--r--gcc/testsuite/g++.dg/cpp1z/constexpr-ref1.C26
-rw-r--r--gcc/testsuite/g++.dg/cpp1z/constexpr-ref2.C23
-rw-r--r--gcc/testsuite/g++.dg/cpp23/consteval-if10.C7
-rw-r--r--gcc/testsuite/g++.dg/cpp23/consteval-if2.C14
-rw-r--r--gcc/testsuite/g++.dg/cpp23/feat-cxx2b.C4
-rw-r--r--gcc/testsuite/g++.dg/cpp26/feat-cxx26.C14
-rw-r--r--gcc/testsuite/g++.dg/cpp26/name-independent-decl1.C194
-rw-r--r--gcc/testsuite/g++.dg/cpp26/name-independent-decl2.C171
-rw-r--r--gcc/testsuite/g++.dg/cpp26/name-independent-decl3.C12
-rw-r--r--gcc/testsuite/g++.dg/cpp26/name-independent-decl4.C12
-rw-r--r--gcc/testsuite/g++.dg/cpp26/name-independent-decl5.C92
-rw-r--r--gcc/testsuite/g++.dg/cpp26/name-independent-decl6.C135
-rw-r--r--gcc/testsuite/g++.dg/cpp26/static_assert1.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/class-deduction-alias3.C5
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/class-deduction-alias8.C5
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval-memfn1.C3
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval-prop1.C169
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval-prop10.C41
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval-prop11.C49
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval-prop12.C30
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval-prop13.C23
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval-prop14.C78
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval-prop15.C107
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval-prop16.C73
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval-prop17.C17
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval-prop18.C20
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval-prop19.C7
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval-prop2.C90
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval-prop20.C21
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval-prop3.C27
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval-prop4.C30
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval-prop5.C27
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval-prop6.C59
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval-prop7.C76
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval-prop8.C82
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval-prop9.C67
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval11.C18
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval3.C4
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval34.C8
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval36.C26
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/consteval9.C2
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/constexpr-ref1.C54
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/feat-cxx2a.C4
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/nontype-class4.C3
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/nontype-class4a.C18
-rw-r--r--gcc/testsuite/g++.dg/cpp2a/spaceship-synth9.C2
-rw-r--r--gcc/testsuite/g++.dg/ext/complit17.C4
-rw-r--r--gcc/testsuite/g++.dg/ext/has-feature.C6
-rw-r--r--gcc/testsuite/g++.dg/ext/unroll-2.C3
-rw-r--r--gcc/testsuite/g++.dg/ext/unroll-3.C3
-rw-r--r--gcc/testsuite/g++.dg/ext/unroll-5.C36
-rw-r--r--gcc/testsuite/g++.dg/ext/unroll-6.C85
-rw-r--r--gcc/testsuite/g++.dg/ext/unroll-7.C45
-rw-r--r--gcc/testsuite/g++.dg/ext/unroll-8.C86
-rw-r--r--gcc/testsuite/g++.dg/gomp/attrs-11.C4
-rw-r--r--gcc/testsuite/g++.dg/lookup/scoped11.C14
-rw-r--r--gcc/testsuite/g++.dg/lookup/scoped12.C14
-rw-r--r--gcc/testsuite/g++.dg/lookup/scoped13.C14
-rw-r--r--gcc/testsuite/g++.dg/lookup/scoped14.C14
-rw-r--r--gcc/testsuite/g++.dg/lookup/scoped15.C21
-rw-r--r--gcc/testsuite/g++.dg/opt/devirt2.C4
-rw-r--r--gcc/testsuite/g++.dg/opt/pr111601.C86
-rw-r--r--gcc/testsuite/g++.dg/strub-run1.C19
-rw-r--r--gcc/testsuite/g++.dg/template/partial-order4.C17
-rw-r--r--gcc/testsuite/g++.dg/template/spec26.C10
-rw-r--r--gcc/testsuite/g++.dg/torture/strub-init1.C13
-rw-r--r--gcc/testsuite/g++.dg/torture/strub-init2.C14
-rw-r--r--gcc/testsuite/g++.dg/torture/strub-init3.C13
-rw-r--r--gcc/testsuite/g++.dg/torture/uninit-pr112766.C17
-rw-r--r--gcc/testsuite/g++.dg/tree-ssa/pr112711.C31
-rw-r--r--gcc/testsuite/g++.dg/warn/Wparentheses-33.C25
-rw-r--r--gcc/testsuite/g++.dg/warn/Wuse-after-free3.C4
-rw-r--r--gcc/testsuite/g++.target/aarch64/sme/aarch64-sme-acle-asm.exp82
-rw-r--r--gcc/testsuite/g++.target/aarch64/sme/aarch64-sme.exp46
-rw-r--r--gcc/testsuite/g++.target/aarch64/sme/exceptions_1.C189
-rw-r--r--gcc/testsuite/g++.target/aarch64/sme/exceptions_2.C148
-rw-r--r--gcc/testsuite/g++.target/aarch64/sme/keyword_macros_1.C10
-rw-r--r--gcc/testsuite/g++.target/aarch64/sme/streaming_mode_1.C142
-rw-r--r--gcc/testsuite/g++.target/aarch64/sme/streaming_mode_2.C25
-rw-r--r--gcc/testsuite/g++.target/aarch64/sme2/aarch64-sme2-acle-asm.exp82
-rw-r--r--gcc/testsuite/g++.target/aarch64/sve/aarch64-ssve.exp308
-rw-r--r--gcc/testsuite/g++.target/aarch64/sve/acle/aarch64-sve-acle-asm.exp1
-rw-r--r--gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/func_redef_4.c3
-rw-r--r--gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/func_redef_5.c1
-rw-r--r--gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/func_redef_7.c1
-rw-r--r--gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_1.C2
-rw-r--r--gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_2.C2
-rw-r--r--gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/svcount_1.C10
-rw-r--r--gcc/testsuite/g++.target/aarch64/sve2/acle/aarch64-sve2-acle-asm.exp1
-rw-r--r--gcc/testsuite/g++.target/riscv/rvv/autovec/bug-1.C (renamed from gcc/testsuite/g++.target/riscv/rvv/autovec/bug-01.C)2
-rw-r--r--gcc/testsuite/g++.target/riscv/rvv/autovec/bug-2.C26
-rw-r--r--gcc/testsuite/gcc.c-torture/execute/comp-goto-1.c2
-rw-r--r--gcc/testsuite/gcc.c-torture/execute/pr65369.c2
-rw-r--r--gcc/testsuite/gcc.dg/20030906-1.c2
-rw-r--r--gcc/testsuite/gcc.dg/20030906-1a.c21
-rw-r--r--gcc/testsuite/gcc.dg/20030906-2.c2
-rw-r--r--gcc/testsuite/gcc.dg/20030906-2a.c21
-rw-r--r--gcc/testsuite/gcc.dg/Wimplicit-function-declaration-c99-2.c7
-rw-r--r--gcc/testsuite/gcc.dg/Wimplicit-function-declaration-c99.c2
-rw-r--r--gcc/testsuite/gcc.dg/Wimplicit-int-1.c2
-rw-r--r--gcc/testsuite/gcc.dg/Wimplicit-int-1a.c11
-rw-r--r--gcc/testsuite/gcc.dg/Wimplicit-int-4.c2
-rw-r--r--gcc/testsuite/gcc.dg/Wimplicit-int-4a.c11
-rw-r--r--gcc/testsuite/gcc.dg/Wincompatible-pointer-types-2.c2
-rw-r--r--gcc/testsuite/gcc.dg/Wincompatible-pointer-types-5.c10
-rw-r--r--gcc/testsuite/gcc.dg/Wint-conversion-2.c2
-rw-r--r--gcc/testsuite/gcc.dg/Wint-conversion-3.c2
-rw-r--r--gcc/testsuite/gcc.dg/Wint-conversion-4.c14
-rw-r--r--gcc/testsuite/gcc.dg/Wnonnull-4.c1
-rw-r--r--gcc/testsuite/gcc.dg/Wreturn-mismatch-1.c2
-rw-r--r--gcc/testsuite/gcc.dg/Wreturn-mismatch-1a.c40
-rw-r--r--gcc/testsuite/gcc.dg/Wreturn-mismatch-2.c2
-rw-r--r--gcc/testsuite/gcc.dg/Wreturn-mismatch-2a.c41
-rw-r--r--gcc/testsuite/gcc.dg/analyzer/fd-accept.c2
-rw-r--r--gcc/testsuite/gcc.dg/analyzer/fd-bind.c4
-rw-r--r--gcc/testsuite/gcc.dg/analyzer/fd-socket-misuse.c2
-rw-r--r--gcc/testsuite/gcc.dg/anon-struct-11.c5
-rw-r--r--gcc/testsuite/gcc.dg/anon-struct-11a.c111
-rw-r--r--gcc/testsuite/gcc.dg/anon-struct-13.c2
-rw-r--r--gcc/testsuite/gcc.dg/anon-struct-13a.c76
-rw-r--r--gcc/testsuite/gcc.dg/assign-warn-1.c2
-rw-r--r--gcc/testsuite/gcc.dg/assign-warn-4.c21
-rw-r--r--gcc/testsuite/gcc.dg/bitint-41.c2
-rw-r--r--gcc/testsuite/gcc.dg/bitint-43.c19
-rw-r--r--gcc/testsuite/gcc.dg/bitint-44.c10
-rw-r--r--gcc/testsuite/gcc.dg/bitint-45.c11
-rw-r--r--gcc/testsuite/gcc.dg/bitint-46.c32
-rw-r--r--gcc/testsuite/gcc.dg/bitint-47.c13
-rw-r--r--gcc/testsuite/gcc.dg/bitint-48.c23
-rw-r--r--gcc/testsuite/gcc.dg/builtin-arith-overflow-4.c2
-rw-r--r--gcc/testsuite/gcc.dg/builtin-arith-overflow-4a.c43
-rw-r--r--gcc/testsuite/gcc.dg/c23-qual-4.c6
-rw-r--r--gcc/testsuite/gcc.dg/cpp/expr.c22
-rw-r--r--gcc/testsuite/gcc.dg/debug/btf/btf-datasec-3.c28
-rw-r--r--gcc/testsuite/gcc.dg/debug/btf/btf-enum-small.c28
-rw-r--r--gcc/testsuite/gcc.dg/debug/btf/btf-function-6.c4
-rw-r--r--gcc/testsuite/gcc.dg/debug/btf/btf-function-7.c19
-rw-r--r--gcc/testsuite/gcc.dg/dfp/composite-type-2.c58
-rw-r--r--gcc/testsuite/gcc.dg/dfp/composite-type.c2
-rw-r--r--gcc/testsuite/gcc.dg/diag-aka-1.c2
-rw-r--r--gcc/testsuite/gcc.dg/diag-aka-1a.c29
-rw-r--r--gcc/testsuite/gcc.dg/diagnostic-range-bad-return-2.c52
-rw-r--r--gcc/testsuite/gcc.dg/diagnostic-range-bad-return.c2
-rw-r--r--gcc/testsuite/gcc.dg/diagnostic-types-1.c2
-rw-r--r--gcc/testsuite/gcc.dg/diagnostic-types-2.c24
-rw-r--r--gcc/testsuite/gcc.dg/enum-compat-1.c2
-rw-r--r--gcc/testsuite/gcc.dg/enum-compat-2.c32
-rw-r--r--gcc/testsuite/gcc.dg/func-ptr-conv-1.c2
-rw-r--r--gcc/testsuite/gcc.dg/func-ptr-conv-2.c56
-rw-r--r--gcc/testsuite/gcc.dg/gnu23-attr-syntax-2.c2
-rw-r--r--gcc/testsuite/gcc.dg/gnu23-attr-syntax-3.c17
-rw-r--r--gcc/testsuite/gcc.dg/gnu23-builtins-no-dfp-1.c12
-rw-r--r--gcc/testsuite/gcc.dg/gomp/pr35738-2.c18
-rw-r--r--gcc/testsuite/gcc.dg/gomp/pr35738.c2
-rw-r--r--gcc/testsuite/gcc.dg/graphite/pr83126.c2
-rw-r--r--gcc/testsuite/gcc.dg/graphite/pr83255.c2
-rw-r--r--gcc/testsuite/gcc.dg/hardbool-err.c31
-rw-r--r--gcc/testsuite/gcc.dg/hardbool-trap.c13
-rw-r--r--gcc/testsuite/gcc.dg/init-bad-7.c2
-rw-r--r--gcc/testsuite/gcc.dg/init-bad-7a.c12
-rw-r--r--gcc/testsuite/gcc.dg/init-excess-3.c4
-rw-r--r--gcc/testsuite/gcc.dg/missing-header-fixit-1.c2
-rw-r--r--gcc/testsuite/gcc.dg/missing-header-fixit-1a.c37
-rw-r--r--gcc/testsuite/gcc.dg/missing-header-fixit-2.c2
-rw-r--r--gcc/testsuite/gcc.dg/missing-header-fixit-2a.c31
-rw-r--r--gcc/testsuite/gcc.dg/missing-header-fixit-4.c2
-rw-r--r--gcc/testsuite/gcc.dg/missing-header-fixit-4a.c27
-rw-r--r--gcc/testsuite/gcc.dg/missing-header-fixit-5.c2
-rw-r--r--gcc/testsuite/gcc.dg/missing-header-fixit-5a.c42
-rw-r--r--gcc/testsuite/gcc.dg/noncompile/incomplete-3.c2
-rw-r--r--gcc/testsuite/gcc.dg/noncompile/pr79758-2.c6
-rw-r--r--gcc/testsuite/gcc.dg/noncompile/pr79758.c1
-rw-r--r--gcc/testsuite/gcc.dg/overflow-warn-1.c4
-rw-r--r--gcc/testsuite/gcc.dg/overflow-warn-3.c4
-rw-r--r--gcc/testsuite/gcc.dg/param-type-mismatch-2.c187
-rw-r--r--gcc/testsuite/gcc.dg/param-type-mismatch.c2
-rw-r--r--gcc/testsuite/gcc.dg/pch/rwsr-pch.c7
-rw-r--r--gcc/testsuite/gcc.dg/pch/rwsr-pch.hs10
-rw-r--r--gcc/testsuite/gcc.dg/permerror-default.c85
-rw-r--r--gcc/testsuite/gcc.dg/permerror-fpermissive-nowarning.c6
-rw-r--r--gcc/testsuite/gcc.dg/permerror-fpermissive.c85
-rw-r--r--gcc/testsuite/gcc.dg/permerror-gnu89-nopermissive.c85
-rw-r--r--gcc/testsuite/gcc.dg/permerror-gnu89-pedantic.c85
-rw-r--r--gcc/testsuite/gcc.dg/permerror-gnu89.c85
-rw-r--r--gcc/testsuite/gcc.dg/permerror-noerror.c85
-rw-r--r--gcc/testsuite/gcc.dg/permerror-nowarning.c5
-rw-r--r--gcc/testsuite/gcc.dg/permerror-pedantic.c85
-rw-r--r--gcc/testsuite/gcc.dg/permerror-system.c45
-rw-r--r--gcc/testsuite/gcc.dg/plugin/analyzer_cpython_plugin.c10
-rw-r--r--gcc/testsuite/gcc.dg/plugin/analyzer_gil_plugin.c28
-rw-r--r--gcc/testsuite/gcc.dg/pointer-array-atomic-2.c60
-rw-r--r--gcc/testsuite/gcc.dg/pointer-array-atomic.c2
-rw-r--r--gcc/testsuite/gcc.dg/pointer-array-quals-1.c6
-rw-r--r--gcc/testsuite/gcc.dg/pr105635-2.c11
-rw-r--r--gcc/testsuite/gcc.dg/pr105635.c2
-rw-r--r--gcc/testsuite/gcc.dg/pr111409.c4
-rw-r--r--gcc/testsuite/gcc.dg/pr111922.c29
-rw-r--r--gcc/testsuite/gcc.dg/pr112719.c18
-rw-r--r--gcc/testsuite/gcc.dg/pr112733.c16
-rw-r--r--gcc/testsuite/gcc.dg/pr112760.c22
-rw-r--r--gcc/testsuite/gcc.dg/pr112837.c11
-rw-r--r--gcc/testsuite/gcc.dg/pr112845.c9
-rw-r--r--gcc/testsuite/gcc.dg/pr23075-2.c14
-rw-r--r--gcc/testsuite/gcc.dg/pr23075.c2
-rw-r--r--gcc/testsuite/gcc.dg/pr29521-a.c15
-rw-r--r--gcc/testsuite/gcc.dg/pr29521.c2
-rw-r--r--gcc/testsuite/gcc.dg/pr61162-2.c2
-rw-r--r--gcc/testsuite/gcc.dg/pr61162-3.c13
-rw-r--r--gcc/testsuite/gcc.dg/pr61852.c4
-rw-r--r--gcc/testsuite/gcc.dg/pr67730-a.c11
-rw-r--r--gcc/testsuite/gcc.dg/pr67730.c2
-rw-r--r--gcc/testsuite/gcc.dg/spec-barrier-3.c2
-rw-r--r--gcc/testsuite/gcc.dg/spec-barrier-3a.c13
-rw-r--r--gcc/testsuite/gcc.dg/spellcheck-identifiers-1a.c136
-rw-r--r--gcc/testsuite/gcc.dg/spellcheck-identifiers-2.c2
-rw-r--r--gcc/testsuite/gcc.dg/spellcheck-identifiers-2a.c33
-rw-r--r--gcc/testsuite/gcc.dg/spellcheck-identifiers-3.c2
-rw-r--r--gcc/testsuite/gcc.dg/spellcheck-identifiers-3a.c45
-rw-r--r--gcc/testsuite/gcc.dg/spellcheck-identifiers-4.c2
-rw-r--r--gcc/testsuite/gcc.dg/spellcheck-identifiers-4a.c10
-rw-r--r--gcc/testsuite/gcc.dg/spellcheck-identifiers.c2
-rw-r--r--gcc/testsuite/gcc.dg/torture/bitint-43.c2
-rw-r--r--gcc/testsuite/gcc.dg/torture/bitint-44.c2
-rw-r--r--gcc/testsuite/gcc.dg/torture/bitint-45.c32
-rw-r--r--gcc/testsuite/gcc.dg/torture/hardbool-5a.c6
-rw-r--r--gcc/testsuite/gcc.dg/torture/hardbool-i-5a.c6
-rw-r--r--gcc/testsuite/gcc.dg/torture/hardbool-i.c5
-rw-r--r--gcc/testsuite/gcc.dg/torture/hardbool-ll-5a.c6
-rw-r--r--gcc/testsuite/gcc.dg/torture/hardbool-ll.c5
-rw-r--r--gcc/testsuite/gcc.dg/torture/hardbool-s-5a.c6
-rw-r--r--gcc/testsuite/gcc.dg/torture/hardbool-s.c5
-rw-r--r--gcc/testsuite/gcc.dg/torture/hardbool-ul-5a.c6
-rw-r--r--gcc/testsuite/gcc.dg/torture/hardbool-ul.c5
-rw-r--r--gcc/testsuite/gcc.dg/torture/hardbool-us-5a.c6
-rw-r--r--gcc/testsuite/gcc.dg/torture/hardbool-us.c5
-rw-r--r--gcc/testsuite/gcc.dg/torture/hardbool.c118
-rw-r--r--gcc/testsuite/gcc.dg/torture/inline-mem-cmp-1.c7
-rw-r--r--gcc/testsuite/gcc.dg/torture/inline-mem-cpy-1.c8
-rw-r--r--gcc/testsuite/gcc.dg/torture/inline-mem-cpy-cmp-1.c11
-rw-r--r--gcc/testsuite/gcc.dg/torture/inline-mem-move-1.c8
-rw-r--r--gcc/testsuite/gcc.dg/torture/inline-mem-set-1.c84
-rw-r--r--gcc/testsuite/gcc.dg/torture/pr109689.c34
-rw-r--r--gcc/testsuite/gcc.dg/torture/pr112827-1.c14
-rw-r--r--gcc/testsuite/gcc.dg/torture/pr112827-2.c18
-rw-r--r--gcc/testsuite/gcc.dg/torture/pr112856.c18
-rw-r--r--gcc/testsuite/gcc.dg/transparent-union-1.c2
-rw-r--r--gcc/testsuite/gcc.dg/transparent-union-1a.c85
-rw-r--r--gcc/testsuite/gcc.dg/tree-prof/time-profiler-3.c2
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/pr112721.c26
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/pr112767.c18
-rw-r--r--gcc/testsuite/gcc.dg/ubsan/pr112741.c21
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr111754.c14
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr112818.c34
-rw-r--r--gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-1.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-2.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-3.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-4.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-64x1_1.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/aapcs64/ice_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/acle/memtag_2.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/acle/memtag_2a.c71
-rw-r--r--gcc/testsuite/gcc.target/aarch64/acle/rwsr-1.c28
-rw-r--r--gcc/testsuite/gcc.target/aarch64/acle/rwsr-2.c25
-rw-r--r--gcc/testsuite/gcc.target/aarch64/acle/rwsr-3.c18
-rw-r--r--gcc/testsuite/gcc.target/aarch64/acle/rwsr.c144
-rw-r--r--gcc/testsuite/gcc.target/aarch64/auto-init-1.c3
-rw-r--r--gcc/testsuite/gcc.target/aarch64/csinc-3.c10
-rw-r--r--gcc/testsuite/gcc.target/aarch64/eh_return-2.c9
-rw-r--r--gcc/testsuite/gcc.target/aarch64/eh_return-3.c32
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movdf_2.c51
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movdi_3.c59
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movhf_2.c53
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movhi_2.c61
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movqi_2.c59
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movsf_2.c51
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movsi_2.c59
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movtf_3.c81
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movtf_4.c78
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movti_3.c86
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movti_4.c83
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv16qi_4.c82
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv16qi_5.c79
-rw-r--r--gcc/testsuite/gcc.target/aarch64/movv8qi_4.c55
-rw-r--r--gcc/testsuite/gcc.target/aarch64/pr112406.c8
-rw-r--r--gcc/testsuite/gcc.target/aarch64/pragma_cpp_predefs_4.c38
-rw-r--r--gcc/testsuite/gcc.target/aarch64/return_address_sign_1.c13
-rw-r--r--gcc/testsuite/gcc.target/aarch64/return_address_sign_2.c17
-rw-r--r--gcc/testsuite/gcc.target/aarch64/return_address_sign_b_1.c11
-rw-r--r--gcc/testsuite/gcc.target/aarch64/return_address_sign_b_2.c17
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/aarch64-sme-acle-asm.exp81
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/aarch64-sme.exp46
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/addha_za32.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/addha_za64.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/addva_za32.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/addva_za64.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/arm_has_sme_sc.c25
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/arm_in_streaming_mode_ns.c11
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/arm_in_streaming_mode_s.c11
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/arm_in_streaming_mode_sc.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_s16.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_s32.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_s64.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_s8.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_u16.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_u32.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_u64.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_u8.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsb_s.c310
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsb_sc.c12
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsd_s.c277
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsd_sc.c13
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsh_s.c279
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsh_sc.c13
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsw_s.c278
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsw_sc.c13
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za128.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za16.c123
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za32.c123
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za64.c112
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za8.c112
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_za128.c83
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_za16.c126
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_za32.c125
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_za64.c105
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_za8.c95
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za128.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za16.c123
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za32.c123
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za64.c112
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za8.c112
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_za128.c83
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_za16.c126
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_za32.c125
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_za64.c105
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_za8.c95
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ldr_vnum_za_s.c147
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ldr_vnum_za_sc.c148
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ldr_za_s.c124
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ldr_za_sc.c71
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/mopa_za32.c102
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/mopa_za64.c70
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/mops_za32.c102
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/mops_za64.c70
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_hor_za128.c435
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_hor_za16.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_hor_za32.c196
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_hor_za64.c186
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_hor_za8.c125
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_ver_za128.c435
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_ver_za16.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_ver_za32.c196
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_ver_za64.c186
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_ver_za8.c125
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_bf16.c76
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_f16.c76
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_f32.c76
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_f64.c76
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_s16.c76
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_s32.c76
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_s64.c76
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_s8.c76
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_u16.c76
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_u32.c76
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_u64.c76
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_u8.c76
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za128.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za16.c123
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za32.c123
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za64.c112
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za8.c112
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_za128.c83
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_za16.c126
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_za32.c125
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_za64.c105
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_za8.c95
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za128.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za16.c123
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za32.c123
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za64.c112
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za8.c112
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_za128.c83
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_za16.c126
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_za32.c125
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_za64.c105
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_za8.c95
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/str_vnum_za_s.c147
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/str_vnum_za_sc.c148
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/str_za_s.c124
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/str_za_sc.c71
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/sumopa_za32.c30
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/sumopa_za64.c32
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/sumops_za32.c30
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/sumops_za64.c32
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/test_sme_acle.h62
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/undef_za.c33
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/usmopa_za32.c30
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/usmopa_za64.c32
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/usmops_za32.c30
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/usmops_za64.c32
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_hor_za128.c193
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_hor_za16.c133
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_hor_za32.c143
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_hor_za64.c133
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_hor_za8.c93
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_ver_za128.c193
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_ver_za16.c133
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_ver_za32.c143
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_ver_za64.c133
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_ver_za8.c93
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/zero_mask_za.c130
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/acle-asm/zero_za.c11
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/arm_neon_1.c13
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/arm_neon_2.c11
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/arm_neon_3.c11
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_1.c233
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_10.c37
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_2.c43
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_3.c166
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_4.c43
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_5.c318
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_6.c45
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_7.c516
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_8.c87
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_9.c103
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/clamp_1.c38
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/clamp_2.c32
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/clamp_3.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/clamp_4.c20
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/inlining_1.c47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/inlining_10.c57
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/inlining_11.c57
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/inlining_12.c15
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/inlining_13.c15
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/inlining_14.c15
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/inlining_15.c27
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/inlining_2.c47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/inlining_3.c47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/inlining_4.c47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/inlining_5.c47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/inlining_6.c31
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/inlining_7.c31
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/inlining_8.c31
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/inlining_9.c55
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/keyword_macros_1.c10
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/locally_streaming_1.c466
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/locally_streaming_2.c177
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/locally_streaming_3.c273
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/locally_streaming_4.c145
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_1.c58
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_2.c44
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_3.c46
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_4.c25
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_5.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_6.c31
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_7.c25
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/sibcall_1.c45
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/sibcall_2.c45
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/sibcall_3.c45
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/sibcall_4.c45
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/sibcall_5.c45
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/sibcall_6.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/sibcall_7.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/sibcall_8.c19
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/streaming_mode_1.c130
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/streaming_mode_2.c25
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/streaming_mode_3.c63
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/streaming_mode_4.c22
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/za_state_1.c154
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/za_state_2.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/za_state_3.c31
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/za_state_4.c585
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/za_state_5.c595
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/za_state_6.c23
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/zt0_state_1.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/zt0_state_2.c31
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/zt0_state_3.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/zt0_state_4.c53
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/zt0_state_5.c260
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme/zt0_state_6.c54
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/aarch64-sme2-acle-asm.exp81
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s16_x2.c115
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s16_x4.c125
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s32_x2.c115
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s32_x4.c125
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s64_x2.c115
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s64_x4.c125
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s8_x2.c115
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s8_x4.c125
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u16_x2.c115
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u16_x4.c125
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u32_x2.c115
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u32_x4.c125
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u64_x2.c115
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u64_x4.c125
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u8_x2.c115
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u8_x4.c125
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za32_s32_vg1x2.c180
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za32_s32_vg1x4.c172
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za32_u32_vg1x2.c180
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za32_u32_vg1x4.c172
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za64_s64_vg1x2.c182
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za64_s64_vg1x4.c174
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za64_u64_vg1x2.c182
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za64_u64_vg1x4.c174
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_f32_vg1x2.c122
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_f32_vg1x4.c137
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_s32_vg1x2.c122
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_s32_vg1x4.c137
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_u32_vg1x2.c122
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_u32_vg1x4.c137
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_f64_vg1x2.c126
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_f64_vg1x4.c141
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_s64_vg1x2.c124
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_s64_vg1x4.c139
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_u64_vg1x2.c124
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_u64_vg1x4.c139
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bfmlslb_f32.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bfmlslb_lane_f32.c84
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bfmlslt_f32.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bfmlslt_lane_f32.c84
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bmopa_za32.c30
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bmops_za32.c30
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f16.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f16_x2.c94
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f16_x4.c104
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f32.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f32_x2.c94
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f32_x4.c104
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f64.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f64_x2.c94
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f64_x4.c104
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s16_x2.c94
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s16_x4.c104
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s32_x2.c94
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s32_x4.c104
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s64_x2.c94
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s64_x4.c104
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s8_x2.c94
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s8_x4.c104
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u16_x2.c94
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u16_x4.c104
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u32_x2.c94
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u32_x4.c104
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u64_x2.c94
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u64_x4.c104
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u8_x2.c94
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u8_x4.c104
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cntp_c16.c39
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cntp_c32.c39
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cntp_c64.c39
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cntp_c8.c39
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_bf16_f32_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_f16_f32_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_f32_s32_x2.c43
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_f32_s32_x4.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_f32_u32_x2.c43
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_f32_u32_x4.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_s32_f32_x2.c43
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_s32_f32_x4.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_u32_f32_x2.c43
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_u32_f32_x4.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvtn_bf16_f32_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvtn_f16_f32_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_f32.c44
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_f32.c93
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_s32.c93
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_u32.c93
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_bf16_vg1x2.c102
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_bf16_vg1x4.c108
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_f16_vg1x2.c102
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_f16_vg1x4.c108
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_s16_vg1x2.c102
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_s16_vg1x4.c108
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_s8_vg1x2.c102
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_s8_vg1x4.c108
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_u16_vg1x2.c102
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_u16_vg1x4.c108
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_u8_vg1x2.c102
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_u8_vg1x4.c108
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za64_s16_vg1x2.c104
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za64_s16_vg1x4.c110
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za64_u16_vg1x2.c104
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za64_u16_vg1x4.c110
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_s32.c44
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_u32.c44
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_bf16_vg1x2.c243
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_bf16_vg1x4.c254
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_f16_vg1x2.c243
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_f16_vg1x4.c254
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_s16_vg1x2.c243
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_s16_vg1x4.c254
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_s8_vg1x2.c243
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_s8_vg1x4.c254
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_u16_vg1x2.c243
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_u16_vg1x4.c254
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_u8_vg1x2.c243
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_u8_vg1x4.c254
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za64_s16_vg1x2.c245
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za64_s16_vg1x4.c256
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za64_u16_vg1x2.c245
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za64_u16_vg1x4.c256
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_bf16_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_bf16_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f16_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f16_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f32_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f32_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f64_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f64_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s16_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s16_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s32_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s32_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s64_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s64_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s8_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s8_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u16_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u16_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u32_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u32_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u64_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u64_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u8_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u8_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_bf16_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_bf16_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f16_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f16_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f32_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f32_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f64_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f64_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s16_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s16_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s32_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s32_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s64_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s64_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s8_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s8_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u16_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u16_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u32_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u32_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u64_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u64_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u8_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u8_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldr_zt.c36
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_bf16.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_bf16_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_bf16_x4.c56
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f16.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f16_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f16_x4.c56
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f32.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f32_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f32_x4.c56
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s16.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s16_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s16_x4.c56
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s32.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s32_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s32_x4.c56
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s8.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s8_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s8_x4.c56
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u16.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u16_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u16_x4.c56
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u32.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u32_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u32_x4.c56
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u8.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u8_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u8_x4.c56
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_bf16.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_bf16_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_bf16_x4.c56
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f16.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f16_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f16_x4.c56
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f32.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f32_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f32_x4.c56
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s16.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s16_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s16_x4.c56
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s32.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s32_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s32_x4.c56
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s8.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s8_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u16.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u16_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u16_x4.c56
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u32.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u32_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u32_x4.c56
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u8.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u8_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f16_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f16_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f32_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f32_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f64_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f64_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s16_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s16_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s32_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s32_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s64_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s64_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s8_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s8_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u16_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u16_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u32_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u32_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u64_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u64_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u8_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u8_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f16_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f16_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f32_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f32_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f64_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f64_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f16_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f16_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f32_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f32_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f64_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f64_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s16_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s16_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s32_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s32_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s64_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s64_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s8_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s8_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u16_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u16_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u32_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u32_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u64_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u64_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u8_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u8_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f16_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f16_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f32_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f32_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f64_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f64_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_bf16_vg2x1.c148
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_bf16_vg2x2.c112
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_bf16_vg2x4.c118
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f16_vg2x1.c148
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f16_vg2x2.c112
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f16_vg2x4.c118
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f32_vg1x2.c102
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f32_vg1x4.c108
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s16_vg2x1.c148
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s16_vg2x2.c112
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s16_vg2x4.c118
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s8_vg4x1.c150
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s8_vg4x2.c122
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s8_vg4x4.c128
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u16_vg2x1.c148
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u16_vg2x2.c112
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u16_vg2x4.c118
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u8_vg4x1.c150
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u8_vg4x2.c122
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u8_vg4x4.c128
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_f64_vg1x2.c104
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_f64_vg1x4.c110
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_s16_vg4x1.c152
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_s16_vg4x2.c124
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_s16_vg4x4.c130
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_u16_vg4x1.c152
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_u16_vg4x2.c124
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_u16_vg4x4.c130
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_bf16_vg2x1.c148
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_bf16_vg2x2.c247
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_bf16_vg2x4.c258
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_f16_vg2x1.c148
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_f16_vg2x2.c247
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_f16_vg2x4.c258
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_f32_vg1x2.c180
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_f32_vg1x4.c172
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s16_vg2x1.c148
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s16_vg2x2.c247
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s16_vg2x4.c258
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s8_vg4x1.c149
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s8_vg4x2.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s8_vg4x4.c260
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u16_vg2x1.c148
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u16_vg2x2.c247
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u16_vg2x4.c258
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u8_vg4x1.c149
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u8_vg4x2.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u8_vg4x4.c260
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_f64_vg1x2.c182
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_f64_vg1x4.c174
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_s16_vg4x1.c151
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_s16_vg4x2.c251
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_s16_vg4x4.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_u16_vg4x1.c151
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_u16_vg4x2.c251
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_u16_vg4x4.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_bf16_vg2x1.c148
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_bf16_vg2x2.c112
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_bf16_vg2x4.c118
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f16_vg2x1.c148
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f16_vg2x2.c112
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f16_vg2x4.c118
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f32_vg1x2.c102
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f32_vg1x4.c108
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s16_vg2x1.c148
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s16_vg2x2.c112
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s16_vg2x4.c118
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s8_vg4x1.c150
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s8_vg4x2.c122
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s8_vg4x4.c128
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u16_vg2x1.c148
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u16_vg2x2.c112
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u16_vg2x4.c118
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u8_vg4x1.c150
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u8_vg4x2.c122
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u8_vg4x4.c128
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_f64_vg1x2.c104
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_f64_vg1x4.c110
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_s16_vg4x1.c152
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_s16_vg4x2.c124
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_s16_vg4x4.c130
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_u16_vg4x1.c152
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_u16_vg4x2.c124
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_u16_vg4x4.c130
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_bf16_vg2x1.c148
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_bf16_vg2x2.c247
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_bf16_vg2x4.c258
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_f16_vg2x1.c148
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_f16_vg2x2.c247
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_f16_vg2x4.c258
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_f32_vg1x2.c180
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_f32_vg1x4.c172
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s16_vg2x1.c148
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s16_vg2x2.c247
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s16_vg2x4.c258
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s8_vg4x1.c149
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s8_vg4x2.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s8_vg4x4.c260
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u16_vg2x1.c148
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u16_vg2x2.c247
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u16_vg2x4.c258
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u8_vg4x1.c149
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u8_vg4x2.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u8_vg4x4.c260
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_f64_vg1x2.c182
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_f64_vg1x4.c174
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_s16_vg4x1.c151
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_s16_vg4x2.c251
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_s16_vg4x4.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_u16_vg4x1.c151
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_u16_vg4x2.c251
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_u16_vg4x4.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mopa_za32.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mops_za32.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c16.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c16_x2.c54
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c32.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c32_x2.c54
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c64.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c64_x2.c54
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c8.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c8_x2.c54
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pfalse_c.c39
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_b16.c89
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_b32.c89
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_b64.c80
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_b8.c89
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_c16.c89
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_c32.c89
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_c64.c80
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_c8.c89
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ptrue_c16.c41
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ptrue_c32.c41
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ptrue_c64.c41
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ptrue_c8.c41
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_s16_s32_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_s16_s64_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_s8_s32_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u16_s32_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u16_s64_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u16_u32_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u16_u64_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u8_s32_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u8_u32_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_s16_s32_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_s16_s64_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_s8_s32_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u16_s32_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u16_s64_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u16_u32_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u16_u64_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u8_s32_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u8_u32_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s16_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s16_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s32_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s32_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s64_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s64_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s8_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s8_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_s16_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_s16_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_s8_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_u16_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_u16_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_u8_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_s16_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_s16_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_s8_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_u16_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_u16_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_u8_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshru_u16_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshru_u16_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshru_u8_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrun_u16_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrun_u16_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrun_u8_x4.c65
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za16_vg2.c140
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za16_vg4.c138
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za32_vg2.c112
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za32_vg4.c129
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za64_vg2.c113
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za64_vg4.c129
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za8_vg2.c140
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za8_vg4.c156
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za16_vg2.c140
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za16_vg4.c138
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za32_vg2.c112
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za32_vg4.c129
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za64_vg2.c113
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za64_vg4.c129
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za8_vg2.c140
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za8_vg4.c156
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za16_vg1x2.c122
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za16_vg1x4.c137
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za32_vg1x2.c122
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za32_vg1x4.c137
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za64_vg1x2.c122
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za64_vg1x4.c137
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za8_vg1x2.c122
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za8_vg1x4.c137
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rinta_s32_x2.c61
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rinta_s32_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintm_u32_x2.c61
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintm_u32_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintn_u32_x2.c61
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintn_u32_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintp_u32_x2.c61
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintp_u32_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s16_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s16_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s32_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s32_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s64_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s64_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s8_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s8_x4.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u16_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u16_x4.c228
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u32_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u32_x4.c228
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u64_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u64_x4.c228
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u8_x2.c207
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u8_x4.c228
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_bf16_x2.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_bf16_x4.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f16_x2.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f16_x4.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f32_x2.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f32_x4.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f64_x2.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f64_x4.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s16_x2.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s16_x4.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s32_x2.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s32_x4.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s64_x2.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s64_x4.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s8_x2.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s8_x4.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u16_x2.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u16_x4.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u32_x2.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u32_x4.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u64_x2.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u64_x4.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u8_x2.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u8_x4.c92
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_bf16_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_bf16_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f16_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f16_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f32_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f32_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f64_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f64_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s16_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s16_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s32_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s32_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s64_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s64_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s8_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s8_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u16_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u16_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u32_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u32_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u64_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u64_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u8_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u8_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_bf16_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_bf16_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f16_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f16_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f32_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f32_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f64_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f64_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s16_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s16_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s32_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s32_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s64_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s64_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s8_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s8_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u16_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u16_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u32_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u32_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u64_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u64_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u8_x2.c262
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u8_x4.c354
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/str_zt.c36
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za32_s32_vg1x2.c180
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za32_s32_vg1x4.c172
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za32_u32_vg1x2.c180
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za32_u32_vg1x4.c172
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za64_s64_vg1x2.c182
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za64_s64_vg1x4.c174
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za64_u64_vg1x2.c182
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za64_u64_vg1x4.c174
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_f32_vg1x2.c122
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_f32_vg1x4.c137
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_s32_vg1x2.c122
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_s32_vg1x4.c137
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_u32_vg1x2.c122
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_u32_vg1x4.c137
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_f64_vg1x2.c126
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_f64_vg1x4.c141
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_s64_vg1x2.c124
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_s64_vg1x4.c139
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_u64_vg1x2.c124
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_u64_vg1x4.c139
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sudot_lane_za32_s8_vg1x2.c102
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sudot_lane_za32_s8_vg1x4.c108
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sudot_za32_s8_vg1x2.c243
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sudot_za32_s8_vg1x4.c254
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/suvdot_lane_za32_s8_vg1x4.c108
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/test_sme2_acle.h124
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s16_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s16_x4.c76
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s32_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s32_x4.c76
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s8_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s8_x4.c76
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u16_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u16_x4.c76
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u32_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u32_x4.c76
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u8_x2.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u8_x4.c76
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/usdot_lane_za32_u8_vg1x2.c102
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/usdot_lane_za32_u8_vg1x4.c108
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/usdot_za32_u8_vg1x2.c243
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/usdot_za32_u8_vg1x4.c254
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/usvdot_lane_za32_u8_vg1x4.c108
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_bf16_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_bf16_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f16_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f16_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f32_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f32_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f64_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f64_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s16_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s16_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s32_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s32_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s64_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s64_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s8_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s8_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u16_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u16_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u32_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u32_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u64_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u64_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u8_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u8_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_bf16_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_bf16_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f16_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f16_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f32_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f32_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f64_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f64_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s16_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s16_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s32_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s32_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s64_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s64_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s8_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s8_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u16_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u16_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u32_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u32_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u64_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u64_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u8_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u8_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_bf16_vg1x2.c102
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_f16_vg1x2.c102
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_s16_vg1x2.c102
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_s8_vg1x4.c108
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_u16_vg1x2.c102
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_u8_vg1x4.c108
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za64_s16_vg1x4.c110
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za64_u16_vg1x4.c110
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_b16.c119
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_b32.c119
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_b64.c119
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_b8.c119
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_c16.c117
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_c32.c117
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_c64.c117
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_c8.c117
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_b16.c119
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_b32.c119
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_b64.c119
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_b8.c119
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_c16.c117
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_c32.c117
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_c64.c117
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_c8.c117
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_b16.c119
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_b32.c119
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_b64.c119
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_b8.c119
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_c16.c117
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_c32.c117
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_c64.c117
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_c8.c117
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_b16.c119
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_b32.c119
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_b64.c119
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_b8.c119
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_c16.c117
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_c32.c117
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_c64.c117
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_c8.c117
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za16_vg2.c140
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za16_vg4.c138
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za32_vg2.c112
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za32_vg4.c129
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za64_vg2.c113
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za64_vg4.c129
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za8_vg2.c140
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za8_vg4.c156
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za16_vg2.c140
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za16_vg4.c138
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za32_vg2.c112
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za32_vg4.c129
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za64_vg2.c113
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za64_vg4.c129
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za8_vg2.c140
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za8_vg4.c156
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za16_vg1x2.c122
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za16_vg1x4.c137
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za32_vg1x2.c122
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za32_vg1x4.c137
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za64_vg1x2.c122
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za64_vg1x4.c137
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za8_vg1x2.c122
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za8_vg1x4.c137
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zero_zt.c12
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_bf16_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_bf16_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f16_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f16_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f32_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f32_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f64_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f64_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s16_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s16_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s32_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s32_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s64_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s64_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s8_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s8_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u16_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u16_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u32_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u32_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u64_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u64_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u8_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u8_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_bf16_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_bf16_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f16_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f16_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f32_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f32_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f64_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f64_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s16_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s16_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s32_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s32_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s64_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s64_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s8_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s8_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u16_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u16_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u32_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u32_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u64_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u64_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u8_x2.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u8_x4.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/aarch64-sve-acle-asm.exp1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adda_f16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adda_f32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adda_f64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adrb.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adrd.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adrh.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adrw.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/bfmmla_f32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/cntb.c71
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/cntd.c12
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/cnth.c20
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/cntw.c16
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_f32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_f64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/create2_1.c18
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/expa_f16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/expa_f32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/expa_f64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/get2_b.c55
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_f32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_f64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_bf16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_f16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_f32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_f64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_s16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_s8.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_u16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_u8.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sb_gather_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sb_gather_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sb_gather_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sb_gather_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sh_gather_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sh_gather_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sh_gather_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sh_gather_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sw_gather_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sw_gather_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ub_gather_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ub_gather_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ub_gather_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ub_gather_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uh_gather_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uh_gather_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uh_gather_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uh_gather_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uw_gather_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uw_gather_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_bf16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_f16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_f32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_f64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_f32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_f64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_s16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_s8.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_u16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_u8.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_s16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_u16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sw_gather_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sw_gather_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sw_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sw_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_s16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_u16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uw_gather_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uw_gather_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uw_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uw_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_bf16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_f16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_f32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_f64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_s16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_s8.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_u16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_u8.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_s16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_u16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sh_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sh_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sh_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sh_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sw_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sw_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_s16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_u16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uh_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uh_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uh_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uh_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uw_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uw_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/mmla_f32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/mmla_f64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/mmla_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/mmla_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfb.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfb_gather.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfd.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfd_gather.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfh.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfh_gather.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfw.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfw_gather.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/rdffr_1.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_b.c20
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_bf16.c62
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f16.c62
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f32.c62
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f64.c62
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s16.c62
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s32.c62
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s64.c62
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s8.c62
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u16.c62
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u32.c62
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u64.c62
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u8.c62
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/set2_b.c41
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_f32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_f64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1b_scatter_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1b_scatter_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1b_scatter_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1b_scatter_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1h_scatter_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1h_scatter_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1h_scatter_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1h_scatter_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1w_scatter_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1w_scatter_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/test_sve_acle.h317
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tmad_f16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tmad_f32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tmad_f64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tsmul_f16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tsmul_f32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tsmul_f64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tssel_f16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tssel_f32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tssel_f64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/asm/usmmla_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/adr_index_1.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/adr_offset_1.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_1.c8
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_int_opt_n.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_int_opt_single_n_1.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_int_opt_single_n_2.c36
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_lane_1.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_long_lane_1.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_long_opt_n_1.c10
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_n_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_narrowb_opt_n_1.c10
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_narrowt_opt_n_1.c12
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_opt_n_2.c16
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_opt_n_3.c18
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_opt_single_n_1.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_opt_single_n_2.c38
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_rotate_1.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_single_1.c34
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_to_uint_1.c8
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint64_n_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint64_opt_n_2.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint_n_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint_opt_n_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_wide_1.c8
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_wide_opt_n_1.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_int_m_1.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_m_1.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_m_2.c11
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_int_opt_single_1.c61
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_lane_1.c73
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_lane_2.c78
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_lane_3.c78
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_lane_4.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_opt_single_1.c76
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_opt_single_2.c29
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_opt_single_3.c16
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_uint_opt_single_1.c61
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_uint_m_1.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binaryxn_1.c23
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binaryxn_2.c33
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/clamp_1.c30
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/clast_1.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/compare_1.c18
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/compare_opt_n_1.c16
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/compare_scalar_count_1.c55
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/compare_wide_opt_n_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/count_vector_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/create_1.c12
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/create_3.c10
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/create_5.c10
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/dot_za_slice_int_lane_1.c59
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/dot_za_slice_lane_1.c83
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/dot_za_slice_lane_2.c83
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/dot_za_slice_uint_lane_1.c59
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/fold_left_1.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/func_redef_4.c3
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/func_redef_5.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/inc_dec_pred_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ld1sh_gather_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_1.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_2.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_3.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_index_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_index_restricted_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_1.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_2.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_3.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_4.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_5.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_1.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_2.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_3.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_4.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/mmla_1.c24
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_offset_2.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/read_za_m_1.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/reduction_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/reduction_wide_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/set_1.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/set_3.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/set_5.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowb_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowb_to_uint_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowt_1.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowt_to_uint_1.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowxn_1.c89
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/sizeless-1.c8
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/sizeless-2.c8
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_1.c12
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_2.c14
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_index_1.c8
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_index_restricted_1.c8
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_offset_1.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_offset_2.c8
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_offset_restricted_1.c8
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/storexn_1.c33
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/svboolx2_1.c135
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/svcount_1.c10
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_bfloat16_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_bfloat16_lane_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_bfloat16_lanex2_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_bfloat16_opt_n_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_intq_uintq_lane_1.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_intq_uintq_opt_n_1.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_lane_1.c10
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_lane_rotate_1.c10
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_long_lane_1.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_long_opt_n_1.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_opt_n_1.c32
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_lane_1.c36
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_lane_rotate_1.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_opt_n_2.c16
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_or_011_lane_1.c33
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_rotate_1.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_rotate_1.c10
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_shift_right_imm_1.c10
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_uint_1.c12
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_uintq_intq_1.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_uintq_intq_lane_1.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_uintq_intq_opt_n_1.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/tmad_1.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_1.c10
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_2.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convert_1.c10
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convert_2.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convert_narrowt_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convertxn_1.c28
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_narrowb_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_narrowb_to_uint_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_narrowt_1.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_narrowt_to_uint_1.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_to_int_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_to_uint_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_to_uint_2.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_to_uint_3.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_uint_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_widen_1.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_za_m_1.c49
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_za_slice_1.c54
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_za_slice_2.c27
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_za_slice_3.c16
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unaryxn_1.c15
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/undeclared_2.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/write_za_1.c50
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/write_za_m_1.c48
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/write_za_slice_1.c38
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general/attributes_7.c29
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general/pr106326_1.c378
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/loop_add_4.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/pcs/annotate_1.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/pcs/annotate_2.c4
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/pcs/args_12.c214
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_1.c3
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/pcs/struct_3_128.c6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/pr112278.c15
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/slp_3.c15
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/slp_4.c19
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/aarch64-sve2-acle-asm.exp1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesd_u8.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aese_u8.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesimc_u8.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesmc_u8.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bdep_u16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bdep_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bdep_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bdep_u8.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bext_u16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bext_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bext_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bext_u8.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bgrp_u16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bgrp_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bgrp_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bgrp_u8.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histcnt_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histcnt_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histcnt_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histcnt_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histseg_s8.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histseg_u8.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_f32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_f64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sw_gather_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sw_gather_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uw_gather_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uw_gather_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/match_s16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/match_s8.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/match_u16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/match_u8.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/nmatch_s16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/nmatch_s8.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/nmatch_u16.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/nmatch_u8.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmullb_pair_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmullt_pair_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/rax1_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/rax1_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/sm4e_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/sm4ekey_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_f32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_f64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_s32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_u32.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1w_scatter_s64.c1
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1w_scatter_u64.c1
-rw-r--r--gcc/testsuite/gcc.target/arc/jli-1.c4
-rw-r--r--gcc/testsuite/gcc.target/arc/jli-2.c2
-rw-r--r--gcc/testsuite/gcc.target/arc/lra-1.c12
-rw-r--r--gcc/testsuite/gcc.target/arc/naked-1.c8
-rw-r--r--gcc/testsuite/gcc.target/arc/naked-2.c6
-rw-r--r--gcc/testsuite/gcc.target/arc/pic-1.c3
-rw-r--r--gcc/testsuite/gcc.target/arc/pr9001191897.c3
-rw-r--r--gcc/testsuite/gcc.target/arc/pr9001195952.c2
-rw-r--r--gcc/testsuite/gcc.target/arc/tmac-1.c4
-rw-r--r--gcc/testsuite/gcc.target/arc/tmac-2.c4
-rw-r--r--gcc/testsuite/gcc.target/arm/bfloat16_vector_typecheck_1.c4
-rw-r--r--gcc/testsuite/gcc.target/arm/bfloat16_vector_typecheck_2.c2
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/pr112337.c4
-rw-r--r--gcc/testsuite/gcc.target/avr/pr112830.c12
-rw-r--r--gcc/testsuite/gcc.target/avr/pr86869.c9
-rw-r--r--gcc/testsuite/gcc.target/avr/pr89270.c7
-rw-r--r--gcc/testsuite/gcc.target/bpf/core-builtin-enumvalue-opt.c8
-rw-r--r--gcc/testsuite/gcc.target/bpf/core-builtin-enumvalue.c12
-rw-r--r--gcc/testsuite/gcc.target/bpf/core-builtin-type-based.c8
-rw-r--r--gcc/testsuite/gcc.target/bpf/core-builtin-type-id.c6
-rw-r--r--gcc/testsuite/gcc.target/bpf/divmod-libcall-1.c19
-rw-r--r--gcc/testsuite/gcc.target/bpf/divmod-libcall-2.c16
-rw-r--r--gcc/testsuite/gcc.target/bpf/section-name-quoting-1.c20
-rw-r--r--gcc/testsuite/gcc.target/gcn/avgpr-mem-double.c1
-rw-r--r--gcc/testsuite/gcc.target/gcn/avgpr-mem-int.c1
-rw-r--r--gcc/testsuite/gcc.target/gcn/avgpr-mem-long.c1
-rw-r--r--gcc/testsuite/gcc.target/gcn/avgpr-mem-short.c1
-rw-r--r--gcc/testsuite/gcc.target/gcn/avgpr-spill-double.c1
-rw-r--r--gcc/testsuite/gcc.target/gcn/avgpr-spill-int.c1
-rw-r--r--gcc/testsuite/gcc.target/gcn/avgpr-spill-long.c1
-rw-r--r--gcc/testsuite/gcc.target/gcn/avgpr-spill-short.c1
-rw-r--r--gcc/testsuite/gcc.target/h8300/pr17306-2.c2
-rw-r--r--gcc/testsuite/gcc.target/h8300/pr58400.c2
-rw-r--r--gcc/testsuite/gcc.target/i386/apx-interrupt-1.c5
-rw-r--r--gcc/testsuite/gcc.target/i386/apx-push2pop2-1.c5
-rw-r--r--gcc/testsuite/gcc.target/i386/apx-push2pop2_force_drap-1.c5
-rw-r--r--gcc/testsuite/gcc.target/i386/libcall-1.c9
-rw-r--r--gcc/testsuite/gcc.target/i386/pr112445.c22
-rw-r--r--gcc/testsuite/gcc.target/i386/pr112816.c27
-rw-r--r--gcc/testsuite/gcc.target/i386/pr112830.c9
-rw-r--r--gcc/testsuite/gcc.target/i386/sdotprodint8_emulate.c15
-rw-r--r--gcc/testsuite/gcc.target/i386/sse2-bfloat16-scalar-typecheck.c4
-rw-r--r--gcc/testsuite/gcc.target/i386/sse2-pr112816.c16
-rw-r--r--gcc/testsuite/gcc.target/i386/udotprodint8_emulate.c15
-rw-r--r--gcc/testsuite/gcc.target/i386/user_msr-1.c9
-rw-r--r--gcc/testsuite/gcc.target/i386/vect-bfloat16-typecheck_1.c4
-rw-r--r--gcc/testsuite/gcc.target/i386/vect-bfloat16-typecheck_2.c4
-rw-r--r--gcc/testsuite/gcc.target/loongarch/lasx-extract-even_odd-opt.c54
-rw-r--r--gcc/testsuite/gcc.target/loongarch/popcnt.c41
-rw-r--r--gcc/testsuite/gcc.target/loongarch/popcount.c17
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vect-frint-no-inexact.c48
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vect-frint-scalar-no-inexact.c23
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vect-frint-scalar.c43
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vect-frint.c85
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vect-ftint-no-inexact.c44
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vect-ftint.c83
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vect-muh.c36
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vect-rotr.c36
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadd.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadda.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddi.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvand.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandi.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandn.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitset.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclo.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclz.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextrins.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffinth.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftintl.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvh.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvl.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvld.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldi.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmadd.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsub.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmul.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvneg.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnor.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnori.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvor.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvori.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvorn.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackev.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackod.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickev.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickod.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvprem.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotr.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotri.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseq.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseqi.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsll.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslli.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsra.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrai.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsran.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrani.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrar.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrari.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrl.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrli.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrln.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssran.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrani.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrln.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvst.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsub.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubi.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxor.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxori.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/loongarch-vector.exp23
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadd.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadda.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddi.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vand.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandi.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandn.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclr.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclri.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrev.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitsel.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseli.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitset.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseti.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsll.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsrl.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclo.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclz.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextrins.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-3.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstp.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-3.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-4.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvh.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvl.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vld.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vldi.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmadd.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskgez.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskltz.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsknz.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsub.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmul.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vneg.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnor.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnori.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vor.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vori.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vorn.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackev.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackod.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpcnt.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickev.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickod.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpremi.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplve.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplvei.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotr.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotri.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseq.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseqi.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsigncov.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsll.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslli.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsra.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrai.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsran.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrani.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrar.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrari.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarn.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarni.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrl.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrli.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrln.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlni.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlr.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlri.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssran.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrani.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarn.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarni.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrln.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlni.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vst.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsub.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubi.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxor.c1
-rw-r--r--gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxori.c1
-rw-r--r--gcc/testsuite/gcc.target/nios2/cdx-ldstwm-1.c2
-rw-r--r--gcc/testsuite/gcc.target/nios2/cdx-ldstwm-2.c3
-rw-r--r--gcc/testsuite/gcc.target/powerpc/conditional-return.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/arch-29.c7
-rw-r--r--gcc/testsuite/gcc.target/riscv/arch-30.c7
-rw-r--r--gcc/testsuite/gcc.target/riscv/mcpu-sifive-x280.c20
-rw-r--r--gcc/testsuite/gcc.target/riscv/predef-13.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/copysign-zvfh-run.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vadd-zvfh-run.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-zvfh-run.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vmax-zvfh-run.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vmin-zvfh-run.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vmul-zvfh-run.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_copysign-zvfh-run.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112552.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112694-2.c35
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112694-3.c37
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112801.c36
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112851.c21
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112852.c87
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112854.c12
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112855.c26
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112872.c16
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/struct/struct_vect_run-10.c7
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/struct/struct_vect_run-6.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/abs-zvfh-run.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/vneg-zvfh-run.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/consecutive-1.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/consecutive-2.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mod-1.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-1.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-10.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-11.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-12.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-13.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-14.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-15.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-16.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-17.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-3.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-5.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-7.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-8.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-9.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-1.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-2.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-3.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-5.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-6.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-1.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-10.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-11.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-12.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-2.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-3.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-5.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-6.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-7.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-8.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/autovec/zve32f-1.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-1.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-2.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-3.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-4.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-5.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-1.c104
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-10.c104
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-11.c68
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-12.c51
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-13.c188
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-14.c119
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-15.c86
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-16.c68
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-17.c51
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-18.c51
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-19.c103
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-2.c68
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-20.c103
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-21.c106
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-22.c188
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-23.c119
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-24.c86
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-25.c104
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-26.c68
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-27.c51
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-28.c104
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-29.c68
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-3.c51
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-30.c51
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-31.c68
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-32.c51
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-33.c51
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-34.c101
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-35.c107
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-36.c107
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-37.c103
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-38.c82
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-4.c104
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-5.c68
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-6.c51
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-7.c106
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-8.c68
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-9.c51
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112743-1.c16
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/pr112743-2.c52
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/base/unop_v_constraint-2.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-84.c6
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr111037-3.c2
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr112713-1.c24
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr112713-2.c47
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr112776.c36
-rw-r--r--gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr112813-1.c32
-rw-r--r--gcc/testsuite/gcc.target/riscv/zicond-sfb-primitiveSemantics.c50
-rw-r--r--gcc/testsuite/gcc.target/riscv/zvkn-1.c8
-rw-r--r--gcc/testsuite/gcc.target/riscv/zvkn.c4
-rw-r--r--gcc/testsuite/gcc.target/riscv/zvknc-1.c8
-rw-r--r--gcc/testsuite/gcc.target/riscv/zvknc-2.c4
-rw-r--r--gcc/testsuite/gcc.target/riscv/zvknc.c4
-rw-r--r--gcc/testsuite/gcc.target/riscv/zvkng-1.c8
-rw-r--r--gcc/testsuite/gcc.target/riscv/zvkng-2.c4
-rw-r--r--gcc/testsuite/gcc.target/riscv/zvkng.c4
-rw-r--r--gcc/testsuite/gcc.target/riscv/zvks-1.c8
-rw-r--r--gcc/testsuite/gcc.target/riscv/zvks.c4
-rw-r--r--gcc/testsuite/gcc.target/riscv/zvksc-1.c8
-rw-r--r--gcc/testsuite/gcc.target/riscv/zvksc-2.c4
-rw-r--r--gcc/testsuite/gcc.target/riscv/zvksc.c4
-rw-r--r--gcc/testsuite/gcc.target/riscv/zvksg-1.c8
-rw-r--r--gcc/testsuite/gcc.target/riscv/zvksg-2.c4
-rw-r--r--gcc/testsuite/gcc.target/riscv/zvksg.c4
-rw-r--r--gcc/testsuite/gcc.target/s390/pr112753.c8
-rw-r--r--gcc/testsuite/gcc.target/x86_64/abi/avx512fp16/m256h/test_passing_m256.c2
-rw-r--r--gcc/testsuite/gcc.target/x86_64/abi/avx512fp16/m512h/test_passing_m512.c2
-rwxr-xr-xgcc/testsuite/gfortran.dg/asan/pr110415-2.f9045
-rwxr-xr-xgcc/testsuite/gfortran.dg/asan/pr110415-3.f9049
-rw-r--r--gcc/testsuite/gfortran.dg/associate_62.f9025
-rw-r--r--gcc/testsuite/gfortran.dg/coarray_poly_6.f902
-rw-r--r--gcc/testsuite/gfortran.dg/coarray_poly_7.f902
-rw-r--r--gcc/testsuite/gfortran.dg/coarray_poly_8.f902
-rw-r--r--gcc/testsuite/gfortran.dg/missing_optional_dummy_6a.f902
-rw-r--r--gcc/testsuite/gfortran.dg/missing_optional_dummy_7.f9064
-rw-r--r--gcc/testsuite/gfortran.dg/optional_deferred_char_1.f90100
-rw-r--r--gcc/testsuite/gfortran.dg/pr100988.f9061
-rw-r--r--gcc/testsuite/gfortran.dg/pr110415.f9020
-rw-r--r--gcc/testsuite/gm2/link/externalscaffold/pass/scaffold.c1
-rw-r--r--gcc/testsuite/gnat.dg/strub_access.adb21
-rw-r--r--gcc/testsuite/gnat.dg/strub_access1.adb16
-rw-r--r--gcc/testsuite/gnat.dg/strub_attr.adb37
-rw-r--r--gcc/testsuite/gnat.dg/strub_attr.ads12
-rw-r--r--gcc/testsuite/gnat.dg/strub_disp.adb64
-rw-r--r--gcc/testsuite/gnat.dg/strub_disp1.adb79
-rw-r--r--gcc/testsuite/gnat.dg/strub_ind.adb33
-rw-r--r--gcc/testsuite/gnat.dg/strub_ind.ads17
-rw-r--r--gcc/testsuite/gnat.dg/strub_ind1.adb41
-rw-r--r--gcc/testsuite/gnat.dg/strub_ind1.ads17
-rw-r--r--gcc/testsuite/gnat.dg/strub_ind2.adb34
-rw-r--r--gcc/testsuite/gnat.dg/strub_ind2.ads17
-rw-r--r--gcc/testsuite/gnat.dg/strub_intf.adb93
-rw-r--r--gcc/testsuite/gnat.dg/strub_intf1.adb86
-rw-r--r--gcc/testsuite/gnat.dg/strub_intf2.adb55
-rw-r--r--gcc/testsuite/gnat.dg/strub_renm.adb21
-rw-r--r--gcc/testsuite/gnat.dg/strub_renm1.adb32
-rw-r--r--gcc/testsuite/gnat.dg/strub_renm2.adb32
-rw-r--r--gcc/testsuite/gnat.dg/strub_var.adb16
-rw-r--r--gcc/testsuite/gnat.dg/strub_var1.adb20
-rw-r--r--gcc/testsuite/lib/scanasm.exp10
-rw-r--r--gcc/testsuite/lib/scanoffload.exp21
-rw-r--r--gcc/testsuite/lib/target-supports.exp91
-rw-r--r--gcc/tree-cfg.cc19
-rw-r--r--gcc/tree-inline.cc9
-rw-r--r--gcc/tree-pass.h7
-rw-r--r--gcc/tree-scalar-evolution.cc10
-rw-r--r--gcc/tree-sra.cc40
-rw-r--r--gcc/tree-ssa-ccp.cc4
-rw-r--r--gcc/tree-ssa-dce.cc3
-rw-r--r--gcc/tree-ssa-loop-ch.cc9
-rw-r--r--gcc/tree-ssa-loop-ivcanon.cc8
-rw-r--r--gcc/tree-ssa-operands.cc3
-rw-r--r--gcc/tree-vect-loop.cc6
-rw-r--r--gcc/tree-vect-stmts.cc51
-rw-r--r--gcc/tree-vectorizer.h8
-rw-r--r--gcc/tree.cc2
-rw-r--r--gcc/tsystem.h8
-rw-r--r--gcc/value-query.h3
-rw-r--r--gcc/value-range.h11
-rw-r--r--gcc/varasm.cc24
-rw-r--r--gcc/wide-int.cc35
2791 files changed, 188209 insertions, 6998 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index aad4c29..23c65f4 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,2770 @@
+2023-12-06 Alexandre Oliva <oliva@adacore.com>
+
+ * Makefile.in (OBJS): Add ipa-strub.o.
+ (GTFILES): Add ipa-strub.cc.
+ * builtins.def (BUILT_IN_STACK_ADDRESS): New.
+ (BUILT_IN___STRUB_ENTER): New.
+ (BUILT_IN___STRUB_UPDATE): New.
+ (BUILT_IN___STRUB_LEAVE): New.
+ * builtins.cc: Include ipa-strub.h.
+ (STACK_STOPS, STACK_UNSIGNED): Define.
+ (expand_builtin_stack_address): New.
+ (expand_builtin_strub_enter): New.
+ (expand_builtin_strub_update): New.
+ (expand_builtin_strub_leave): New.
+ (expand_builtin): Call them.
+ * common.opt (fstrub=*): New options.
+ * doc/extend.texi (strub): New type attribute.
+ (__builtin_stack_address): New function.
+ (Stack Scrubbing): New section.
+ * doc/invoke.texi (-fstrub=*): New options.
+ (-fdump-ipa-*): New passes.
+ * gengtype-lex.l: Ignore multi-line pp-directives.
+ * ipa-inline.cc: Include ipa-strub.h.
+ (can_inline_edge_p): Test strub_inlinable_to_p.
+ * ipa-split.cc: Include ipa-strub.h.
+ (execute_split_functions): Test strub_splittable_p.
+ * ipa-strub.cc, ipa-strub.h: New.
+ * passes.def: Add strub_mode and strub passes.
+ * tree-cfg.cc (gimple_verify_flow_info): Note on debug stmts.
+ * tree-pass.h (make_pass_ipa_strub_mode): Declare.
+ (make_pass_ipa_strub): Declare.
+ (make_pass_ipa_function_and_variable_visibility): Fix
+ formatting.
+ * tree-ssa-ccp.cc (optimize_stack_restore): Keep restores
+ before strub leave.
+ * attribs.cc: Include ipa-strub.h.
+ (decl_attributes): Support applying attributes to function
+ type, rather than pointer type, at handler's request.
+ (comp_type_attributes): Combine strub_comptypes and target
+ comp_type results.
+ * doc/tm.texi.in (TARGET_STRUB_USE_DYNAMIC_ARRAY): New.
+ (TARGET_STRUB_MAY_USE_MEMSET): New.
+ * doc/tm.texi: Rebuilt.
+ * cgraph.h (symtab_node::reset): Add preserve_comdat_group
+ param, with a default.
+ * cgraphunit.cc (symtab_node::reset): Use it.
+
+2023-12-05 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112851
+ PR target/112852
+ * config/riscv/riscv-v.cc (vls_mode_valid_p): Block VLSmodes according
+ to TARGET_MAX_LMUL and BITS_PER_RISCV_VECTOR.
+
+2023-12-05 David Faust <david.faust@oracle.com>
+
+ PR debug/112849
+ * btfout.cc (btf_collect_datasec): Avoid incorrectly creating an
+ entry in a BTF_KIND_DATASEC record for extern variable decls without
+ a known section.
+
+2023-12-05 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/112606
+ * config/rs6000/rs6000.md (copysign<mode>3): Change predicate
+ of the last argument from gpc_reg_operand to any_operand. If
+ operands[2] is CONST_DOUBLE, emit abs or neg abs depending on
+ its sign, otherwise if it doesn't satisfy gpc_reg_operand,
+ force it to REG using copy_to_mode_reg.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * attribs.cc (handle_ignored_attributes_option): Add extra
+ braces to work around PR 16333 in older compilers.
+ * config/aarch64/aarch64.cc (aarch64_gnu_attribute_table): Likewise.
+ (aarch64_arm_attribute_table): Likewise.
+ * config/arm/arm.cc (arm_gnu_attribute_table): Likewise.
+ * config/i386/i386-options.cc (ix86_gnu_attribute_table): Likewise.
+ * config/ia64/ia64.cc (ia64_gnu_attribute_table): Likewise.
+ * config/rs6000/rs6000.cc (rs6000_gnu_attribute_table): Likewise.
+ * target-def.h (TARGET_GNU_ATTRIBUTES): Likewise.
+ * genhooks.cc (emit_init_macros): Likewise, when emitting the
+ instantiation of TARGET_ATTRIBUTE_TABLE.
+ * langhooks-def.h (LANG_HOOKS_INITIALIZER): Likewise, when
+ instantiating LANG_HOOKS_ATTRIBUTE_TABLE.
+ (LANG_HOOKS_ATTRIBUTE_TABLE): Define to be empty by default.
+ * target.def (attribute_table): Likewise.
+
+2023-12-05 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/112860
+ * passes.cc (should_skip_pass_p): Do not skip ISEL.
+
+2023-12-05 Richard Biener <rguenther@suse.de>
+
+ PR sanitizer/111736
+ * asan.cc (asan_protect_global): Do not protect globals
+ in non-generic address-space.
+
+2023-12-05 Richard Biener <rguenther@suse.de>
+
+ PR ipa/92606
+ * ipa-icf.cc (sem_variable::equals_wpa): Compare address-spaces.
+
+2023-12-05 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/112830
+ * gimplify.cc (gimplify_modify_expr): Avoid turning aggregate
+ copy of non-generic address-spaces to memcpy.
+ (gimplify_modify_expr_to_memcpy): Assert we are dealing with
+ a copy inside the generic address-space.
+ (gimplify_modify_expr_to_memset): Likewise.
+ * tree-cfg.cc (verify_gimple_assign_single): Allow
+ WITH_SIZE_EXPR as part of the RHS of an assignment.
+ * builtins.cc (get_memory_address): Assert we are dealing
+ with the generic address-space.
+ * tree-ssa-dce.cc (ref_may_be_aliased): Handle WITH_SIZE_EXPR.
+
+2023-12-05 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/109689
+ PR tree-optimization/112856
+ * cfgloopmanip.h (unloop_loops): Adjust API.
+ * tree-ssa-loop-ivcanon.cc (unloop_loops): Take edges_to_remove
+ as parameter.
+ (canonicalize_induction_variables): Adjust.
+ (tree_unroll_loops_completely): Likewise.
+ * tree-ssa-loop-ch.cc (ch_base::copy_headers): Rewrite into
+ LC SSA if we unlooped some loops and we are in LC SSA.
+
+2023-12-05 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/112845
+ * config/i386/i386.md (movabsq $(i32 << shift), r64 peephole2): FAIL
+ if the new immediate is ix86_endbr_immediate_operand.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64.h (TARGET_STREAMING_SME2): New macro.
+ (P_ALIASES): Likewise.
+ (REGISTER_NAMES): Add pn aliases of the predicate registers.
+ (W8_W11_REGNUM_P): New macro.
+ (W8_W11_REGS): New register class.
+ (REG_CLASS_NAMES, REG_CLASS_CONTENTS): Update accordingly.
+ * config/aarch64/aarch64.cc (aarch64_print_operand): Add support
+ for %K, which prints a predicate as a counter. Handle tuples of
+ predicates.
+ (aarch64_regno_regclass): Handle W8_W11_REGS.
+ (aarch64_class_max_nregs): Likewise.
+ * config/aarch64/constraints.md (Uci, Uw2, Uw4): New constraints.
+ (x, y): Move further up file.
+ (Uph): Redefine as the high predicate registers, renaming the old
+ constraint to...
+ (Uih): ...this.
+ * config/aarch64/predicates.md (const_0_to_7_operand): New predicate.
+ (const_0_to_4_step_4_operand, const_0_to_6_step_2_operand): Likewise.
+ (const_0_to_12_step_4_operand, const_0_to_14_step_2_operand): Likewise.
+ (aarch64_simd_shift_imm_qi): Use const_0_to_7_operand.
+ * config/aarch64/iterators.md (VNx16SI_ONLY, VNx8SI_ONLY)
+ (VNx8DI_ONLY, SVE_FULL_BHSIx2, SVE_FULL_HF, SVE_FULL_SIx2_SDIx4)
+ (SVE_FULL_BHS, SVE_FULLx24, SVE_DIx24, SVE_BHSx24, SVE_Ix24)
+ (SVE_Fx24, SVE_SFx24, SME_ZA_BIx24, SME_ZA_BHIx124, SME_ZA_BHIx24)
+ (SME_ZA_HFx124, SME_ZA_HFx24, SME_ZA_HIx124, SME_ZA_HIx24)
+ (SME_ZA_SDIx24, SME_ZA_SDFx24): New mode iterators.
+ (UNSPEC_REVD, UNSPEC_CNTP_C, UNSPEC_PEXT, UNSPEC_PEXTx2): New unspecs.
+ (UNSPEC_PSEL, UNSPEC_PTRUE_C, UNSPEC_SQRSHR, UNSPEC_SQRSHRN)
+ (UNSPEC_SQRSHRU, UNSPEC_SQRSHRUN, UNSPEC_UQRSHR, UNSPEC_UQRSHRN)
+ (UNSPEC_UZP, UNSPEC_UZPQ, UNSPEC_ZIP, UNSPEC_ZIPQ, UNSPEC_BFMLSLB)
+ (UNSPEC_BFMLSLT, UNSPEC_FCVTN, UNSPEC_FDOT, UNSPEC_SQCVT): Likewise.
+ (UNSPEC_SQCVTN, UNSPEC_SQCVTU, UNSPEC_SQCVTUN, UNSPEC_UQCVT): Likewise.
+ (UNSPEC_SME_ADD, UNSPEC_SME_ADD_WRITE, UNSPEC_SME_BMOPA): Likewise.
+ (UNSPEC_SME_BMOPS, UNSPEC_SME_FADD, UNSPEC_SME_FDOT, UNSPEC_SME_FVDOT)
+ (UNSPEC_SME_FMLA, UNSPEC_SME_FMLS, UNSPEC_SME_FSUB, UNSPEC_SME_READ)
+ (UNSPEC_SME_SDOT, UNSPEC_SME_SVDOT, UNSPEC_SME_SMLA, UNSPEC_SME_SMLS)
+ (UNSPEC_SME_SUB, UNSPEC_SME_SUB_WRITE, UNSPEC_SME_SUDOT): Likewise.
+ (UNSPEC_SME_SUVDOT, UNSPEC_SME_UDOT, UNSPEC_SME_UVDOT): Likewise.
+ (UNSPEC_SME_UMLA, UNSPEC_SME_UMLS, UNSPEC_SME_USDOT): Likewise.
+ (UNSPEC_SME_USVDOT, UNSPEC_SME_WRITE): Likewise.
+ (Vetype, VNARROW, V2XWIDE, Ventype, V_INT_EQUIV, v_int_equiv)
+ (VSINGLE, vsingle, b): Add tuple modes.
+ (v2xwide, za32_offset_range, za64_offset_range, za32_long)
+ (za32_last_offset, vg_modifier, z_suffix, aligned_operand)
+ (aligned_fpr): New mode attributes.
+ (SVE_INT_BINARY_MULTI, SVE_INT_BINARY_SINGLE, SVE_INT_BINARY_MULTI)
+ (SVE_FP_BINARY_MULTI): New int iterators.
+ (SVE_BFLOAT_TERNARY_LONG): Add UNSPEC_BFMLSLB and UNSPEC_BFMLSLT.
+ (SVE_BFLOAT_TERNARY_LONG_LANE): Likewise.
+ (SVE_WHILE_ORDER, SVE2_INT_SHIFT_IMM_NARROWxN, SVE_QCVTxN)
+ (SVE2_SFx24_UNARY, SVE2_x24_PERMUTE, SVE2_x24_PERMUTEQ)
+ (UNSPEC_REVD_ONLY, SME2_INT_MOP, SME2_BMOP, SME_BINARY_SLICE_SDI)
+ (SME_BINARY_SLICE_SDF, SME_BINARY_WRITE_SLICE_SDI, SME_INT_DOTPROD)
+ (SME_INT_DOTPROD_LANE, SME_FP_DOTPROD, SME_FP_DOTPROD_LANE)
+ (SME_INT_TERNARY_SLICE, SME_FP_TERNARY_SLICE, BHSD_BITS)
+ (LUTI_BITS): New int iterators.
+ (optab, sve_int_op): Handle the new unspecs.
+ (sme_int_op, has_16bit_form): New int attributes.
+ (bits_etype): Handle 64.
+ * config/aarch64/aarch64.md (UNSPEC_LD1_SVE_COUNT): New unspec.
+ (UNSPEC_ST1_SVE_COUNT, UNSPEC_LDNT1_SVE_COUNT): Likewise.
+ (UNSPEC_STNT1_SVE_COUNT): Likewise.
+ * config/aarch64/atomics.md (cas_short_expected_imm): Use Uhi
+ rather than Uph for HImode immediates.
+ * config/aarch64/aarch64-sve.md (@aarch64_ld1<SVE_FULLx24:mode>)
+ (@aarch64_ldnt1<SVE_FULLx24:mode>, @aarch64_st1<SVE_FULLx24:mode>)
+ (@aarch64_stnt1<SVE_FULLx24:mode>): New patterns.
+ (@aarch64_<sur>dot_prod_lane<vsi2qi>): Extend to...
+ (@aarch64_<sur>dot_prod_lane<SVE_FULL_SDI:mode><SVE_FULL_BHI:mode>)
+ (@aarch64_<sur>dot_prod_lane<VNx4SI_ONLY:mode><VNx16QI_ONLY:mode>):
+ ...these new patterns.
+ (SVE_WHILE_B, SVE_WHILE_B_X2, SVE_WHILE_C): New constants. Add
+ SVE_WHILE_B to existing while patterns.
+ * config/aarch64/aarch64-sve2.md (@aarch64_sve_ptrue_c<BHSD_BITS>)
+ (@aarch64_sve_pext<BHSD_BITS>, @aarch64_sve_pext<BHSD_BITS>x2)
+ (@aarch64_sve_psel<BHSD_BITS>, *aarch64_sve_psel<BHSD_BITS>_plus)
+ (@aarch64_sve_cntp_c<BHSD_BITS>, <frint_pattern><mode>2)
+ (<optab><mode>3, *<optab><mode>3, @aarch64_sve_single_<optab><mode>)
+ (@aarch64_sve_<sve_int_op><mode>): New patterns.
+ (@aarch64_sve_single_<sve_int_op><mode>, @aarch64_sve_<su>clamp<mode>)
+ (*aarch64_sve_<su>clamp<mode>_x, @aarch64_sve_<su>clamp_single<mode>)
+ (@aarch64_sve_fclamp<mode>, *aarch64_sve_fclamp<mode>_x)
+ (@aarch64_sve_fclamp_single<mode>, <optab><mode><v2xwide>2)
+ (@aarch64_sve_<sur>dotvnx4sivnx8hi): New patterns.
+ (@aarch64_sve_<maxmin_uns_op><mode>): Likewise.
+ (*aarch64_sve_<maxmin_uns_op><mode>): Likewise.
+ (@aarch64_sve_single_<maxmin_uns_op><mode>): Likewise.
+ (aarch64_sve_fdotvnx4sfvnx8hf): Likewise.
+ (aarch64_fdot_prod_lanevnx4sfvnx8hf): Likewise.
+ (@aarch64_sve_<optab><VNx16QI_ONLY:mode><VNx16SI_ONLY:mode>): Likewise.
+ (@aarch64_sve_<optab><VNx8HI_ONLY:mode><VNx8SI_ONLY:mode>): Likewise.
+ (@aarch64_sve_<optab><VNx8HI_ONLY:mode><VNx8DI_ONLY:mode>): Likewise.
+ (truncvnx8sf<mode>2, @aarch64_sve_cvtn<mode>): Likewise.
+ (<optab><v_int_equiv><mode>2, <optab><mode><v_int_equiv>2): Likewise.
+ (@aarch64_sve_sel<mode>): Likewise.
+ (@aarch64_sve_while<while_optab_cmp>_b<BHSD_BITS>_x2): Likewise.
+ (@aarch64_sve_while<while_optab_cmp>_c<BHSD_BITS>): Likewise.
+ (@aarch64_pred_<optab><mode>, @cond_<optab><mode>): Likewise.
+ (@aarch64_sve_<optab><mode>): Likewise.
+ * config/aarch64/aarch64-sme.md (@aarch64_sme_<optab><mode><mode>)
+ (*aarch64_sme_<optab><mode><mode>_plus, @aarch64_sme_read<mode>)
+ (*aarch64_sme_read<mode>_plus, @aarch64_sme_write<mode>): New patterns.
+ (*aarch64_sme_write<mode>_plus, aarch64_sme_zero_zt0): Likewise.
+ (@aarch64_sme_<optab><mode>, *aarch64_sme_<optab><mode>_plus)
+ (@aarch64_sme_single_<optab><mode>): Likewise.
+ (*aarch64_sme_single_<optab><mode>_plus): Likewise.
+ (@aarch64_sme_<optab><SME_ZA_SDI:mode><SME_ZA_BHIx24:mode>)
+ (*aarch64_sme_<optab><SME_ZA_SDI:mode><SME_ZA_BHIx24:mode>_plus)
+ (@aarch64_sme_single_<optab><SME_ZA_SDI:mode><SME_ZA_BHIx24:mode>)
+ (*aarch64_sme_single_<optab><SME_ZA_SDI:mode><SME_ZA_BHIx24:mode>_plus)
+ (@aarch64_sme_single_sudot<VNx4SI_ONLY:mode><SME_ZA_BIx24:mode>)
+ (*aarch64_sme_single_sudot<VNx4SI_ONLY:mode><SME_ZA_BIx24:mode>_plus)
+ (@aarch64_sme_lane_<optab><SME_ZA_SDI:mode><SME_ZA_BHIx24:mode>)
+ (*aarch64_sme_lane_<optab><SME_ZA_SDI:mode><SME_ZA_BHIx24:mode>_plus)
+ (@aarch64_sme_<optab><VNx4SI_ONLY:mode><SVE_FULL_BHI:mode>)
+ (*aarch64_sme_<optab><VNx4SI_ONLY:mode><SVE_FULL_BHI:mode>_plus)
+ (@aarch64_sme_<optab><VNx4SI_ONLY:mode><SME_ZA_BHIx24:mode>)
+ (*aarch64_sme_<optab><VNx4SI_ONLY:mode><SME_ZA_BHIx24:mode>_plus)
+ (@aarch64_sme_single_<optab><VNx4SI_ONLY:mode><SME_ZA_BHIx24:mode>)
+ (*aarch64_sme_single_<optab><VNx4SI_ONLY:mode><SME_ZA_BHIx24:mode>_plus)
+ (@aarch64_sme_lane_<optab><VNx4SI_ONLY:mode><SME_ZA_BHIx124:mode>)
+ (*aarch64_sme_lane_<optab><VNx4SI_ONLY:mode><SME_ZA_BHIx124:mode>)
+ (@aarch64_sme_<optab><VNx2DI_ONLY:mode><VNx8HI_ONLY:mode>)
+ (*aarch64_sme_<optab><VNx2DI_ONLY:mode><VNx8HI_ONLY:mode>_plus)
+ (@aarch64_sme_<optab><VNx2DI_ONLY:mode><SME_ZA_HIx24:mode>)
+ (*aarch64_sme_<optab><VNx2DI_ONLY:mode><SME_ZA_HIx24:mode>_plus)
+ (@aarch64_sme_single_<optab><VNx2DI_ONLY:mode><SME_ZA_HIx24:mode>)
+ (*aarch64_sme_single_<optab><VNx2DI_ONLY:mode><SME_ZA_HIx24:mode>_plus)
+ (@aarch64_sme_lane_<optab><VNx2DI_ONLY:mode><SME_ZA_HIx124:mode>)
+ (*aarch64_sme_lane_<optab><VNx2DI_ONLY:mode><SME_ZA_HIx124:mode>)
+ (@aarch64_sme_<optab><VNx4SI_ONLY:mode><VNx8HI_ONLY:mode>)
+ (@aarch64_sme_<optab><VNx4SI_ONLY:mode><VNx4SI_ONLY:mode>)
+ (@aarch64_sme_<optab><VNx4SI_ONLY:mode><SME_ZA_HFx24:mode>)
+ (*aarch64_sme_<optab><VNx4SI_ONLY:mode><SME_ZA_HFx24:mode>_plus)
+ (@aarch64_sme_single_<optab><VNx4SI_ONLY:mode><SME_ZA_HFx24:mode>)
+ (*aarch64_sme_single_<optab><VNx4SI_ONLY:mode><SME_ZA_HFx24:mode>_plus)
+ (@aarch64_sme_lane_<optab><VNx4SI_ONLY:mode><SME_ZA_HFx24:mode>)
+ (*aarch64_sme_lane_<optab><VNx4SI_ONLY:mode><SME_ZA_HFx24:mode>_plus)
+ (@aarch64_sme_<optab><SME_ZA_SDF_I:mode><SME_ZA_SDFx24:mode>)
+ (*aarch64_sme_<optab><SME_ZA_SDF_I:mode><SME_ZA_SDFx24:mode>_plus)
+ (@aarch64_sme_single_<optab><SME_ZA_SDF_I:mode><SME_ZA_SDFx24:mode>)
+ (*aarch64_sme_single_<optab><SME_ZA_SDF_I:mode><SME_ZA_SDFx24:mode>_plus)
+ (@aarch64_sme_lane_<optab><SME_ZA_SDF_I:mode><SME_ZA_SDFx24:mode>)
+ (*aarch64_sme_lane_<optab><SME_ZA_SDF_I:mode><SME_ZA_SDFx24:mode>)
+ (@aarch64_sme_<optab><VNx4SI_ONLY:mode><SVE_FULL_HF:mode>)
+ (*aarch64_sme_<optab><VNx4SI_ONLY:mode><SVE_FULL_HF:mode>_plus)
+ (@aarch64_sme_lane_<optab><VNx4SI_ONLY:mode><SME_ZA_HFx124:mode>)
+ (*aarch64_sme_lane_<optab><VNx4SI_ONLY:mode><SME_ZA_HFx124:mode>)
+ (@aarch64_sme_lut<LUTI_BITS><mode>): Likewise.
+ (UNSPEC_SME_LUTI): New unspec.
+ * config/aarch64/aarch64-sve-builtins.def (single): New mode suffix.
+ (c8, c16, c32, c64): New type suffixes.
+ (vg1x2, vg1x4, vg2, vg2x1, vg2x2, vg2x4, vg4, vg4x1, vg4x2)
+ (vg4x4): New group suffixes.
+ * config/aarch64/aarch64-sve-builtins.h (CP_READ_ZT0)
+ (CP_WRITE_ZT0): New constants.
+ (get_svbool_t): Delete.
+ (function_resolver::report_mismatched_num_vectors): New member
+ function.
+ (function_resolver::resolve_conversion): Likewise.
+ (function_resolver::infer_predicate_type): Likewise.
+ (function_resolver::infer_64bit_scalar_integer_pair): Likewise.
+ (function_resolver::require_matching_predicate_type): Likewise.
+ (function_resolver::require_nonscalar_type): Likewise.
+ (function_resolver::finish_opt_single_resolution): Likewise.
+ (function_resolver::require_derived_vector_type): Add an
+ expected_num_vectors parameter.
+ (function_expander::map_to_rtx_codes): Add an extra parameter
+ for unconditional FP unspecs.
+ (function_instance::gp_type_index): New member function.
+ (function_instance::gp_type): Likewise.
+ (function_instance::gp_mode): Handle multi-vector operations.
+ * config/aarch64/aarch64-sve-builtins.cc (TYPES_all_count)
+ (TYPES_all_pred_count, TYPES_c, TYPES_bhs_data, TYPES_bhs_widen)
+ (TYPES_hs_data, TYPES_cvt_h_s_float, TYPES_cvt_s_s, TYPES_qcvt_x2)
+ (TYPES_qcvt_x4, TYPES_qrshr_x2, TYPES_qrshru_x2, TYPES_qrshr_x4)
+ (TYPES_qrshru_x4, TYPES_while_x, TYPES_while_x_c, TYPES_s_narrow_fsu)
+ (TYPES_za_s_b_signed, TYPES_za_s_b_unsigned, TYPES_za_s_b_integer)
+ (TYPES_za_s_h_integer, TYPES_za_s_h_data, TYPES_za_s_unsigned)
+ (TYPES_za_s_float, TYPES_za_s_data, TYPES_za_d_h_integer): New type
+ macros.
+ (groups_x2, groups_x12, groups_x4, groups_x24, groups_x124)
+ (groups_vg1x2, groups_vg1x4, groups_vg1x24, groups_vg2, groups_vg4)
+ (groups_vg24): New group arrays.
+ (function_instance::reads_global_state_p): Handle CP_READ_ZT0.
+ (function_instance::modifies_global_state_p): Handle CP_WRITE_ZT0.
+ (add_shared_state_attribute): Handle zt0 state.
+ (function_builder::add_overloaded_functions): Skip MODE_single
+ for non-tuple groups.
+ (function_resolver::report_mismatched_num_vectors): New function.
+ (function_resolver::resolve_to): Add a fallback error message for
+ the general two-type case.
+ (function_resolver::resolve_conversion): New function.
+ (function_resolver::infer_predicate_type): Likewise.
+ (function_resolver::infer_64bit_scalar_integer_pair): Likewise.
+ (function_resolver::require_matching_predicate_type): Likewise.
+ (function_resolver::require_matching_vector_type): Specifically
+ diagnose mismatched vector counts.
+ (function_resolver::require_derived_vector_type): Add an
+ expected_num_vectors parameter. Extend to handle cases where
+ tuples are expected.
+ (function_resolver::require_nonscalar_type): New function.
+ (function_resolver::check_gp_argument): Use gp_type_index rather
+ than hard-coding VECTOR_TYPE_svbool_t.
+ (function_resolver::finish_opt_single_resolution): New function.
+ (function_checker::require_immediate_either_or): Remove hard-coded
+ constants.
+ (function_expander::direct_optab_handler): New function.
+ (function_expander::use_pred_x_insn): Only add a strictness flag
+ if the insn has an operand for it.
+ (function_expander::map_to_rtx_codes): Take an unconditional
+ FP unspec as an extra parameter. Handle tuples and MODE_single.
+ (function_expander::map_to_unspecs): Handle tuples and MODE_single.
+ * config/aarch64/aarch64-sve-builtins-functions.h (read_zt0)
+ (write_zt0): New typedefs.
+ (full_width_access::memory_vector): Use the function's
+ vectors_per_tuple.
+ (rtx_code_function_base): Add an optional unconditional FP unspec.
+ (rtx_code_function::expand): Update accordingly.
+ (rtx_code_function_rotated::expand): Likewise.
+ (unspec_based_function_exact_insn::expand): Use tuple_mode instead
+ of vector_mode.
+ (unspec_based_uncond_function): New typedef.
+ (cond_or_uncond_unspec_function): New class.
+ (sme_1mode_function::expand): Handle single forms.
+ (sme_2mode_function_t): Likewise, adding a template parameter for them.
+ (sme_2mode_function): Update accordingly.
+ (sme_2mode_lane_function): New typedef.
+ (multireg_permute): New class.
+ (class integer_conversion): Likewise.
+ (while_comparison::expand): Handle svcount_t and svboolx2_t results.
+ * config/aarch64/aarch64-sve-builtins-shapes.h
+ (binary_int_opt_single_n, binary_opt_single_n, binary_single)
+ (binary_za_slice_lane, binary_za_slice_int_opt_single)
+ (binary_za_slice_opt_single, binary_za_slice_uint_opt_single)
+ (binaryx, clamp, compare_scalar_count, count_pred_c)
+ (dot_za_slice_int_lane, dot_za_slice_lane, dot_za_slice_uint_lane)
+ (extract_pred, inherent_zt, ldr_zt, read_za, read_za_slice)
+ (select_pred, shift_right_imm_narrowxn, storexn, str_zt)
+ (unary_convertxn, unary_za_slice, unaryxn, write_za)
+ (write_za_slice): Declare.
+ * config/aarch64/aarch64-sve-builtins-shapes.cc
+ (za_group_is_pure_overload): New function.
+ (apply_predication): Use the function's gp_type for the predicate,
+ instead of hard-coding the use of svbool_t.
+ (parse_element_type): Add support for "c" (svcount_t).
+ (parse_type): Add support for "c0" and "c1" (conversion destination
+ and source types).
+ (binary_za_slice_lane_base): New class.
+ (binary_za_slice_opt_single_base): Likewise.
+ (load_contiguous_base::resolve): Pass the group suffix to r.resolve.
+ (luti_lane_zt_base): New class.
+ (binary_int_opt_single_n, binary_opt_single_n, binary_single)
+ (binary_za_slice_lane, binary_za_slice_int_opt_single)
+ (binary_za_slice_opt_single, binary_za_slice_uint_opt_single)
+ (binaryx, clamp): New shapes.
+ (compare_scalar_def::build): Allow the return type to be a tuple.
+ (compare_scalar_def::expand): Pass the group suffix to r.resolve.
+ (compare_scalar_count, count_pred_c, dot_za_slice_int_lane)
+ (dot_za_slice_lane, dot_za_slice_uint_lane, extract_pred, inherent_zt)
+ (ldr_zt, read_za, read_za_slice, select_pred, shift_right_imm_narrowxn)
+ (storexn, str_zt): New shapes.
+ (ternary_qq_lane_def, ternary_qq_opt_n_def): Replace with...
+ (ternary_qq_or_011_lane_def, ternary_qq_opt_n_or_011_def): ...these
+ new classes. Allow a second suffix that specifies the type of the
+ second vector argument, and that is used to derive the third.
+ (unary_def::build): Extend to handle tuple types.
+ (unary_convert_def::build): Use the new c0 and c1 format specifiers.
+ (unary_convertxn, unary_za_slice, unaryxn, write_za): New shapes.
+ (write_za_slice): Likewise.
+ * config/aarch64/aarch64-sve-builtins-base.cc (svbic_impl::expand)
+ (svext_bhw_impl::expand): Update call to map_to_rtx_codes.
+ (svcntp_impl::expand): Handle svcount_t variants.
+ (svcvt_impl::expand): Handle unpredicated conversions separately,
+ dealing with tuples.
+ (svdot_impl::expand): Handle 2-way dot products.
+ (svdotprod_lane_impl::expand): Likewise.
+ (svld1_impl::fold): Punt on tuple loads.
+ (svld1_impl::expand): Handle tuple loads.
+ (svldnt1_impl::expand): Likewise.
+ (svpfalse_impl::fold): Punt on svcount_t forms.
+ (svptrue_impl::fold): Likewise.
+ (svptrue_impl::expand): Handle svcount_t forms.
+ (svrint_impl): New class.
+ (svsel_impl::fold): Punt on tuple forms.
+ (svsel_impl::expand): Handle tuple forms.
+ (svst1_impl::fold): Punt on tuple loads.
+ (svst1_impl::expand): Handle tuple loads.
+ (svstnt1_impl::expand): Likewise.
+ (svwhilelx_impl::fold): Punt on tuple forms.
+ (svdot_lane): Use UNSPEC_FDOT.
+ (svmax, svmaxnm, svmin, svminnm): Add unconditional FP unspecs.
+ (rinta, rinti, rintm, rintn, rintp, rintx, rintz): Use svrint_impl.
+ * config/aarch64/aarch64-sve-builtins-base.def (svcreate2, svget2)
+ (svset2, svundef2): Add _b variants.
+ (svcvt): Use unary_convertxn.
+ (svdot): Use ternary_qq_opt_n_or_011.
+ (svdot_lane): Use ternary_qq_or_011_lane.
+ (svmax, svmaxnm, svmin, svminnm): Use binary_opt_single_n.
+ (svpfalse): Add a form that returns svcount_t results.
+ (svrinta, svrintm, svrintn, svrintp): Use unaryxn.
+ (svsel): Use binaryxn.
+ (svst1, svstnt1): Use storexn.
+ * config/aarch64/aarch64-sve-builtins-sme.h
+ (svadd_za, svadd_write_za, svbmopa_za, svbmops_za, svdot_za)
+ (svdot_lane_za, svldr_zt, svluti2_lane_zt, svluti4_lane_zt)
+ (svmla_za, svmla_lane_za, svmls_za, svmls_lane_za, svread_za)
+ (svstr_zt, svsub_za, svsub_write_za, svsudot_za, svsudot_lane_za)
+ (svsuvdot_lane_za, svusdot_za, svusdot_lane_za, svusvdot_lane_za)
+ (svvdot_lane_za, svwrite_za, svzero_zt): Declare.
+ * config/aarch64/aarch64-sve-builtins-sme.cc (load_store_za_base):
+ Rename to...
+ (load_store_za_zt0_base): ...this and extend to tuples.
+ (load_za_base, store_za_base): Update accordingly.
+ (expand_ldr_str_zt0): New function.
+ (svldr_zt_impl, svluti_lane_zt_impl, svread_za_impl, svstr_zt_impl)
+ (svsudot_za_impl, svwrite_za_impl, svzero_zt_impl): New classes.
+ (svadd_za, svadd_write_za, svbmopa_za, svbmops_za, svdot_za)
+ (svdot_lane_za, svldr_zt, svluti2_lane_zt, svluti4_lane_zt)
+ (svmla_za, svmla_lane_za, svmls_za, svmls_lane_za, svread_za)
+ (svstr_zt, svsub_za, svsub_write_za, svsudot_za, svsudot_lane_za)
+ (svsuvdot_lane_za, svusdot_za, svusdot_lane_za, svusvdot_lane_za)
+ (svvdot_lane_za, svwrite_za, svzero_zt): New functions.
+ * config/aarch64/aarch64-sve-builtins-sme.def: Add SME2 intrinsics.
+ * config/aarch64/aarch64-sve-builtins-sve2.h
+ (svbfmlslb, svbfmlslb_lane, svbfmlslt, svbfmlslt_lane, svclamp)
+ (svcvtn, svpext, svpsel, svqcvt, svqcvtn, svqrshr, svqrshrn)
+ (svqrshru, svqrshrun, svrevd, svunpk, svuzp, svuzpq, svzip)
+ (svzipq): Declare.
+ * config/aarch64/aarch64-sve-builtins-sve2.cc (svclamp_impl)
+ (svcvtn_impl, svpext_impl, svpsel_impl): New classes.
+ (svqrshl_impl::fold): Update for change to svrshl shape.
+ (svrshl_impl::fold): Punt on tuple forms.
+ (svsqadd_impl::expand): Update call to map_to_rtx_codes.
+ (svunpk_impl): New class.
+ (svbfmlslb, svbfmlslb_lane, svbfmlslt, svbfmlslt_lane, svclamp)
+ (svcvtn, svpext, svpsel, svqcvt, svqcvtn, svqrshr, svqrshrn)
+ (svqrshru, svqrshrun, svrevd, svunpk, svuzp, svuzpq, svzip)
+ (svzipq): New functions.
+ * config/aarch64/aarch64-sve-builtins-sve2.def: Add SME2 intrinsics.
+ * config/aarch64/aarch64-c.cc (aarch64_update_cpp_builtins): Define
+ or undefine __ARM_FEATURE_SME2.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64.md (ZT0_REGNUM): New constant.
+ (LAST_FAKE_REGNUM): Bump to include it.
+ * config/aarch64/aarch64.h (FIXED_REGISTERS): Add an entry for ZT0.
+ (CALL_REALLY_USED_REGISTERS, REGISTER_NAMES): Likewise.
+ (REG_CLASS_CONTENTS): Likewise.
+ (machine_function): Add zt0_save_buffer.
+ (CUMULATIVE_ARGS): Add shared_zt0_flags.
+ * config/aarch64/aarch64.cc (aarch64_check_state_string): Handle zt0.
+ (aarch64_fntype_pstate_za, aarch64_fndecl_pstate_za): Likewise.
+ (aarch64_function_arg): Add the shared ZT0 flags as an extra
+ limb of the parallel.
+ (aarch64_init_cumulative_args): Initialize shared_zt0_flags.
+ (aarch64_extra_live_on_entry): Handle ZT0_REGNUM.
+ (aarch64_epilogue_uses): Likewise.
+ (aarch64_get_zt0_save_buffer, aarch64_save_zt0): New functions.
+ (aarch64_restore_zt0): Likewise.
+ (aarch64_start_call_args): Reject calls to functions that share
+ ZT0 from functions that have no ZT0 state. Save ZT0 around shared-ZA
+ calls that do not share ZT0.
+ (aarch64_expand_call): Handle ZT0. Reject calls to functions that
+ share ZT0 but not ZA from functions with ZA state.
+ (aarch64_end_call_args): Restore ZT0 after calls to shared-ZA functions
+ that do not share ZT0.
+ (aarch64_set_current_function): Require +sme2 for functions that
+ have ZT0 state.
+ (aarch64_function_attribute_inlinable_p): Don't allow functions to
+ be inlined if they have local zt0 state.
+ (AARCH64_IPA_CLOBBERS_ZT0): New constant.
+ (aarch64_update_ipa_fn_target_info): Record asms that clobber ZT0.
+ (aarch64_can_inline_p): Don't inline callees that clobber ZT0
+ into functions that have ZT0 state.
+ (aarch64_comp_type_attributes): Check for compatible ZT0 sharing.
+ (aarch64_optimize_mode_switching): Use mode switching if the
+ function has ZT0 state.
+ (aarch64_mode_emit_local_sme_state): Save and restore ZT0 around
+ calls to private-ZA functions.
+ (aarch64_mode_needed_local_sme_state): Require ZA to be active
+ for instructions that access ZT0.
+ (aarch64_mode_entry): Mark ZA as dead on entry if the function
+ only shares state other than "za" itself.
+ (aarch64_mode_exit): Likewise mark ZA as dead on return.
+ (aarch64_md_asm_adjust): Extend handling of ZA clobbers to ZT0.
+ * config/aarch64/aarch64-c.cc (aarch64_define_unconditional_macros):
+ Define __ARM_STATE_ZT0.
+ * config/aarch64/aarch64-sme.md (UNSPECV_ASM_UPDATE_ZT0): New unspecv.
+ (aarch64_asm_update_zt0): New insn.
+ (UNSPEC_RESTORE_ZT0): New unspec.
+ (aarch64_sme_ldr_zt0, aarch64_restore_zt0): New insns.
+ (aarch64_sme_str_zt0): Likewise.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64-modes.def (VNx32BI): New mode.
+ * config/aarch64/aarch64-protos.h (aarch64_split_double_move): Declare.
+ * config/aarch64/aarch64-sve-builtins.cc
+ (register_tuple_type): Handle tuples of predicates.
+ (handle_arm_sve_h): Define svboolx2_t as a pair of two svbool_ts.
+ * config/aarch64/aarch64-sve.md (movvnx32bi): New insn.
+ * config/aarch64/aarch64.cc
+ (pure_scalable_type_info::piece::get_rtx): Use VNx32BI for pairs
+ of predicates.
+ (pure_scalable_type_info::add_piece): Don't try to form pairs of
+ predicates.
+ (VEC_STRUCT): Generalize comment.
+ (aarch64_classify_vector_mode): Handle VNx32BI.
+ (aarch64_array_mode): Likewise. Return BLKmode for arrays of
+ predicates that have no associated mode, rather than allowing
+ an integer mode to be chosen.
+ (aarch64_hard_regno_nregs): Handle VNx32BI.
+ (aarch64_hard_regno_mode_ok): Likewise.
+ (aarch64_split_double_move): New function, split out from...
+ (aarch64_split_128bit_move): ...here.
+ (aarch64_ptrue_reg): Tighten assert to aarch64_sve_pred_mode_p.
+ (aarch64_pfalse_reg): Likewise.
+ (aarch64_sve_same_pred_for_ptest_p): Likewise.
+ (aarch64_sme_mode_switch_regs::add_reg): Handle VNx32BI.
+ (aarch64_expand_mov_immediate): Restrict handling of boolean vector
+ constants to single-predicate modes.
+ (aarch64_classify_address): Handle VNx32BI, ensuring that both halves
+ can be addressed.
+ (aarch64_class_max_nregs): Handle VNx32BI.
+ (aarch64_member_type_forces_blk): Don't force BLKmode for svboolx2_t.
+ (aarch64_simd_valid_immediate): Allow all-zeros and all-ones for
+ VNx32BI.
+ (aarch64_mov_operand_p): Restrict predicate constant canonicalization
+ to single-predicate modes.
+ (aarch64_evpc_ext): Generalize exclusion to all predicate modes.
+ (aarch64_evpc_rev_local, aarch64_evpc_dup): Likewise.
+ * config/aarch64/constraints.md (PR_REGS): New predicate.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64-sve-builtins-base.cc
+ (svreinterpret_impl::fold): Handle reinterprets between svbool_t
+ and svcount_t.
+ (svreinterpret_impl::expand): Likewise.
+ * config/aarch64/aarch64-sve-builtins-base.def (svreinterpret): Add
+ b<->c forms.
+ * config/aarch64/aarch64-sve-builtins.cc (TYPES_reinterpret_b): New
+ type suffix list.
+ (wrap_type_in_struct, register_type_decl): New functions, split out
+ from...
+ (register_tuple_type): ...here.
+ (register_builtin_types): Handle svcount_t.
+ (handle_arm_sve_h): Don't create tuples of svcount_t.
+ * config/aarch64/aarch64-sve-builtins.def (svcount_t): New type.
+ (c): New type suffix.
+ * config/aarch64/aarch64-sve-builtins.h (TYPE_count): New type class.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * doc/invoke.texi: Document +sme2.
+ * doc/sourcebuild.texi: Document aarch64_sme2.
+ * config/aarch64/aarch64-option-extensions.def (AARCH64_OPT_EXTENSION):
+ Add sme2.
+ * config/aarch64/aarch64.h (AARCH64_ISA_SME2, TARGET_SME2): New macros.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64.cc (aarch64_function_ok_for_sibcall):
+ Enforce PSTATE.SM and PSTATE.ZA restrictions.
+ (aarch64_expand_epilogue): Save and restore the arguments
+ to a sibcall around any change to PSTATE.SM.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64.cc: Include symbol-summary.h, ipa-prop.h,
+ and ipa-fnsummary.h
+ (aarch64_function_attribute_inlinable_p): New function.
+ (AARCH64_IPA_SM_FIXED, AARCH64_IPA_CLOBBERS_ZA): New constants.
+ (aarch64_need_ipa_fn_target_info): New function.
+ (aarch64_update_ipa_fn_target_info): Likewise.
+ (aarch64_can_inline_p): Restrict the previous ISA flag checks
+ to non-modal features. Prevent callees that require a particular
+ PSTATE.SM state from being inlined into callers that can't guarantee
+ that state. Also prevent callees that have ZA state from being
+ inlined into callers that don't. Finally, prevent callees that
+ clobber ZA from being inlined into callers that have ZA state.
+ (TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P): Define.
+ (TARGET_NEED_IPA_FN_TARGET_INFO): Likewise.
+ (TARGET_UPDATE_IPA_FN_TARGET_INFO): Likewise.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64.cc: Include except.h
+ (aarch64_sme_mode_switch_regs::add_call_preserved_reg): New function.
+ (aarch64_sme_mode_switch_regs::add_call_preserved_regs): Likewise.
+ (aarch64_need_old_pstate_sm): Return true if the function has
+ a nonlocal-goto or exception receiver.
+ (aarch64_switch_pstate_sm_for_landing_pad): New function.
+ (aarch64_switch_pstate_sm_for_jump): Likewise.
+ (pass_switch_pstate_sm::gate): Enable the pass for all
+ streaming and streaming-compatible functions.
+ (pass_switch_pstate_sm::execute): Handle non-local gotos and their
+ receivers. Handle exception handler entry points.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64.cc (aarch64_arm_attribute_table): Add
+ arm::locally_streaming.
+ (aarch64_fndecl_is_locally_streaming): New function.
+ (aarch64_fndecl_sm_state): Handle locally-streaming functions.
+ (aarch64_cfun_enables_pstate_sm): New function.
+ (aarch64_add_offset): Add an argument that specifies whether
+ the streaming vector length should be used instead of the
+ prevailing one.
+ (aarch64_split_add_offset, aarch64_add_sp, aarch64_sub_sp): Likewise.
+ (aarch64_allocate_and_probe_stack_space): Likewise.
+ (aarch64_expand_mov_immediate): Update calls accordingly.
+ (aarch64_need_old_pstate_sm): Return true for locally-streaming
+ streaming-compatible functions.
+ (aarch64_layout_frame): Force all call-preserved Z and P registers
+ to be saved and restored if the function switches PSTATE.SM in the
+ prologue.
+ (aarch64_get_separate_components): Disable shrink-wrapping of
+ such Z and P saves and restores.
+ (aarch64_use_late_prologue_epilogue): New function.
+ (aarch64_expand_prologue): Measure SVE lengths in the streaming
+ vector length for locally-streaming functions, then emit code
+ to enable streaming mode.
+ (aarch64_expand_epilogue): Likewise in reverse.
+ (TARGET_USE_LATE_PROLOGUE_EPILOGUE): Define.
+ * config/aarch64/aarch64-c.cc (aarch64_define_unconditional_macros):
+ Define __arm_locally_streaming.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * doc/invoke.texi: Document +sme-i16i64 and +sme-f64f64.
+ * config.gcc (aarch64*-*-*): Add arm_sme.h to the list of headers
+ to install and aarch64-sve-builtins-sme.o to the list of objects
+ to build.
+ * config/aarch64/aarch64-c.cc (aarch64_update_cpp_builtins): Define
+ or undefine TARGET_SME, TARGET_SME_I16I64 and TARGET_SME_F64F64.
+ (aarch64_pragma_aarch64): Handle arm_sme.h.
+ * config/aarch64/aarch64-option-extensions.def (sme-i16i64)
+ (sme-f64f64): New extensions.
+ * config/aarch64/aarch64-protos.h (aarch64_sme_vq_immediate)
+ (aarch64_addsvl_addspl_immediate_p, aarch64_output_addsvl_addspl)
+ (aarch64_output_sme_zero_za): Declare.
+ (aarch64_output_move_struct): Delete.
+ (aarch64_sme_ldr_vnum_offset): Declare.
+ (aarch64_sve::handle_arm_sme_h): Likewise.
+ * config/aarch64/aarch64.h (AARCH64_ISA_SM_ON): New macro.
+ (AARCH64_ISA_SME_I16I64, AARCH64_ISA_SME_F64F64): Likewise.
+ (TARGET_STREAMING, TARGET_STREAMING_SME): Likewise.
+ (TARGET_SME_I16I64, TARGET_SME_F64F64): Likewise.
+ * config/aarch64/aarch64.cc (aarch64_sve_rdvl_factor_p): Rename to...
+ (aarch64_sve_rdvl_addvl_factor_p): ...this.
+ (aarch64_sve_rdvl_immediate_p): Update accordingly.
+ (aarch64_rdsvl_immediate_p, aarch64_add_offset): Likewise.
+ (aarch64_sme_vq_immediate): Likewise. Make public.
+ (aarch64_sve_addpl_factor_p): New function.
+ (aarch64_sve_addvl_addpl_immediate_p): Use
+ aarch64_sve_rdvl_addvl_factor_p and aarch64_sve_addpl_factor_p.
+ (aarch64_addsvl_addspl_immediate_p): New function.
+ (aarch64_output_addsvl_addspl): Likewise.
+ (aarch64_cannot_force_const_mem): Return true for RDSVL immediates.
+ (aarch64_classify_index): Handle .Q scaling for VNx1TImode.
+ (aarch64_classify_address): Likewise for vnum offsets.
+ (aarch64_output_sme_zero_za): New function.
+ (aarch64_sme_ldr_vnum_offset_p): Likewise.
+ * config/aarch64/predicates.md (aarch64_addsvl_addspl_immediate):
+ New predicate.
+ (aarch64_pluslong_operand): Include it for SME.
+ * config/aarch64/constraints.md (Ucj, Uav): New constraints.
+ * config/aarch64/iterators.md (VNx1TI_ONLY): New mode iterator.
+ (SME_ZA_I, SME_ZA_SDI, SME_ZA_SDF_I, SME_MOP_BHI): Likewise.
+ (SME_MOP_HSDF): Likewise.
+ (UNSPEC_SME_ADDHA, UNSPEC_SME_ADDVA, UNSPEC_SME_FMOPA)
+ (UNSPEC_SME_FMOPS, UNSPEC_SME_LD1_HOR, UNSPEC_SME_LD1_VER)
+ (UNSPEC_SME_READ_HOR, UNSPEC_SME_READ_VER, UNSPEC_SME_SMOPA)
+ (UNSPEC_SME_SMOPS, UNSPEC_SME_ST1_HOR, UNSPEC_SME_ST1_VER)
+ (UNSPEC_SME_SUMOPA, UNSPEC_SME_SUMOPS, UNSPEC_SME_UMOPA)
+ (UNSPEC_SME_UMOPS, UNSPEC_SME_USMOPA, UNSPEC_SME_USMOPS)
+ (UNSPEC_SME_WRITE_HOR, UNSPEC_SME_WRITE_VER): New unspecs.
+ (elem_bits): Handle x2 and x4 structure modes, plus VNx1TI.
+ (Vetype, Vesize, VPRED): Handle VNx1TI.
+ (b): New mode attribute.
+ (SME_LD1, SME_READ, SME_ST1, SME_WRITE, SME_BINARY_SDI, SME_INT_MOP)
+ (SME_FP_MOP): New int iterators.
+ (optab): Handle SME unspecs.
+ (hv): New int attribute.
+ * config/aarch64/aarch64.md (*add<mode>3_aarch64): Handle ADDSVL
+ and ADDSPL.
+ * config/aarch64/aarch64-sme.md (UNSPEC_SME_LDR): New unspec.
+ (@aarch64_sme_<optab><mode>, @aarch64_sme_<optab><mode>_plus)
+ (aarch64_sme_ldr0, @aarch64_sme_ldrn<mode>): New patterns.
+ (UNSPEC_SME_STR): New unspec.
+ (@aarch64_sme_<optab><mode>, @aarch64_sme_<optab><mode>_plus)
+ (aarch64_sme_str0, @aarch64_sme_strn<mode>): New patterns.
+ (@aarch64_sme_<optab><v_int_container><mode>): Likewise.
+ (*aarch64_sme_<optab><v_int_container><mode>_plus): Likewise.
+ (@aarch64_sme_<optab><VNx1TI_ONLY:mode><SVE_FULL:mode>): Likewise.
+ (@aarch64_sme_<optab><v_int_container><mode>): Likewise.
+ (*aarch64_sme_<optab><v_int_container><mode>_plus): Likewise.
+ (@aarch64_sme_<optab><VNx1TI_ONLY:mode><SVE_FULL:mode>): Likewise.
+ (UNSPEC_SME_ZERO): New unspec.
+ (aarch64_sme_zero): New pattern.
+ (@aarch64_sme_<SME_BINARY_SDI:optab><mode>): Likewise.
+ (@aarch64_sme_<SME_INT_MOP:optab><mode>): Likewise.
+ (@aarch64_sme_<SME_FP_MOP:optab><mode>): Likewise.
+ * config/aarch64/aarch64-sve-builtins.def: Add ZA type suffixes.
+ Include aarch64-sve-builtins-sme.def.
+ (DEF_SME_ZA_FUNCTION): New macro.
+ * config/aarch64/aarch64-sve-builtins.h (CP_READ_ZA): New call
+ property.
+ (CP_WRITE_ZA): Likewise.
+ (PRED_za_m): New predication type.
+ (type_suffix_index): Handle DEF_SME_ZA_SUFFIX.
+ (type_suffix_info): Add vector_p and za_p fields.
+ (function_instance::num_za_tiles): New member function.
+ (function_builder::get_attributes): Add an aarch64_feature_flags
+ argument.
+ (function_expander::get_contiguous_base): Take a base argument
+ number, a vnum argument number, and an argument that indicates
+ whether the vnum parameter is a factor of the SME vector length
+ or the prevailing vector length.
+ (function_expander::add_integer_operand): Take a poly_int64.
+ (sve_switcher::sve_switcher): Take a base set of flags.
+ (sme_switcher): New class.
+ (scalar_types): Add a null entry for NUM_VECTOR_TYPES.
+ * config/aarch64/aarch64-sve-builtins.cc: Include
+ aarch64-sve-builtins-sme.h.
+ (pred_suffixes): Add an entry for PRED_za_m.
+ (type_suffixes): Initialize vector_p and za_p. Handle ZA suffixes.
+ (TYPES_all_za, TYPES_d_za, TYPES_za_bhsd_data, TYPES_za_all_data)
+ (TYPES_za_s_integer, TYPES_za_d_integer, TYPES_mop_base)
+ (TYPES_mop_base_signed, TYPES_mop_base_unsigned, TYPES_mop_i16i64)
+ (TYPES_mop_i16i64_signed, TYPES_mop_i16i64_unsigned, TYPES_za): New
+ type suffix macros.
+ (preds_m, preds_za_m): New predication lists.
+ (function_groups): Handle DEF_SME_ZA_FUNCTION.
+ (scalar_types): Add an entry for NUM_VECTOR_TYPES.
+ (find_type_suffix_for_scalar_type): Check positively for vectors
+ rather than negatively for predicates.
+ (check_required_extensions): Handle PSTATE.SM and PSTATE.ZA
+ requirements.
+ (report_out_of_range): Handle the case where the minimum and
+ maximum are the same.
+ (function_instance::reads_global_state_p): Return true for functions
+ that read ZA.
+ (function_instance::modifies_global_state_p): Return true for functions
+ that write to ZA.
+ (sve_switcher::sve_switcher): Add a base flags argument.
+ (function_builder::get_name): Handle "__arm_" prefixes.
+ (add_attribute): Add an overload that takes a namespace.
+ (add_shared_state_attribute): New function.
+ (function_builder::get_attributes): Take the required feature flags
+ as argument. Add streaming and ZA attributes where appropriate.
+ (function_builder::add_unique_function): Update calls accordingly.
+ (function_resolver::check_gp_argument): Assert that the predication
+ isn't ZA _m predication.
+ (function_checker::function_checker): Don't bias the argument
+ number for ZA _m predication.
+ (function_expander::get_contiguous_base): Add arguments that
+ specify the base argument number, the vnum argument number,
+ and an argument that indicates whether the vnum parameter is
+ a factor of the SME vector length or the prevailing vector length.
+ Handle the SME case.
+ (function_expander::add_input_operand): Handle pmode_register_operand.
+ (function_expander::add_integer_operand): Take a poly_int64.
+ (init_builtins): Call handle_arm_sme_h for LTO.
+ (handle_arm_sve_h): Skip SME intrinsics.
+ (handle_arm_sme_h): New function.
+ * config/aarch64/aarch64-sve-builtins-functions.h
+ (read_write_za, write_za): New classes.
+ (unspec_based_sme_function, za_arith_function): New using aliases.
+ (quiet_za_arith_function): Likewise.
+ * config/aarch64/aarch64-sve-builtins-shapes.h
+ (binary_za_int_m, binary_za_m, binary_za_uint_m, bool_inherent)
+ (inherent_za, inherent_mask_za, ldr_za, load_za, read_za_m, store_za)
+ (str_za, unary_za_m, write_za_m): Declare.
+ * config/aarch64/aarch64-sve-builtins-shapes.cc (apply_predication):
+ Expect za_m functions to have an existing governing predicate.
+ (binary_za_m_base, binary_za_int_m_def, binary_za_m_def): New classes.
+ (binary_za_uint_m_def, bool_inherent_def, inherent_za_def): Likewise.
+ (inherent_mask_za_def, ldr_za_def, load_za_def, read_za_m_def)
+ (store_za_def, str_za_def, unary_za_m_def, write_za_m_def): Likewise.
+ * config/aarch64/arm_sme.h: New file.
+ * config/aarch64/aarch64-sve-builtins-sme.h: Likewise.
+ * config/aarch64/aarch64-sve-builtins-sme.cc: Likewise.
+ * config/aarch64/aarch64-sve-builtins-sme.def: Likewise.
+ * config/aarch64/t-aarch64 (aarch64-sve-builtins.o): Depend on
+ aarch64-sve-builtins-sme.def and aarch64-sve-builtins-sme.h.
+ (aarch64-sve-builtins-sme.o): New rule.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64-sve-builtins.h
+ (function_shape::has_merge_argument_p): New member function.
+ * config/aarch64/aarch64-sve-builtins.cc:
+ (function_resolver::check_gp_argument): Use it.
+ (function_expander::get_fallback_value): Likewise.
+ * config/aarch64/aarch64-sve-builtins-shapes.cc
+ (apply_predication): Likewise.
+ (unary_convert_narrowt_def::has_merge_argument_p): New function.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64-sve-builtins-functions.h
+ (unspec_based_function_base): Allow type suffix 1 to determine
+ the mode of the operation.
+ (unspec_based_function): Update accordingly.
+ (unspec_based_fused_function): Likewise.
+ (unspec_based_fused_lane_function): Likewise.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64-modes.def: Add VNx1TI.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64.h (W12_W15_REGNUM_P): New macro.
+ (W12_W15_REGS): New register class.
+ (REG_CLASS_NAMES, REG_CLASS_CONTENTS): Add entries for it.
+ * config/aarch64/aarch64.cc (aarch64_regno_regclass)
+ (aarch64_class_max_nregs, aarch64_register_move_cost): Handle
+ W12_W15_REGS.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64-isa-modes.def (ZA_ON): New ISA mode.
+ * config/aarch64/aarch64-protos.h (aarch64_rdsvl_immediate_p)
+ (aarch64_output_rdsvl, aarch64_optimize_mode_switching)
+ (aarch64_restore_za): Declare.
+ * config/aarch64/constraints.md (UsR): New constraint.
+ * config/aarch64/aarch64.md (LOWERING_REGNUM, TPIDR_BLOCK_REGNUM)
+ (SME_STATE_REGNUM, TPIDR2_SETUP_REGNUM, ZA_FREE_REGNUM)
+ (ZA_SAVED_REGNUM, ZA_REGNUM, FIRST_FAKE_REGNUM): New constants.
+ (LAST_FAKE_REGNUM): Likewise.
+ (UNSPEC_SAVE_NZCV, UNSPEC_RESTORE_NZCV, UNSPEC_SME_VQ): New unspecs.
+ (arches): Add sme.
+ (arch_enabled): Handle it.
+ (*cb<optab><mode>1): Rename to...
+ (aarch64_cb<optab><mode>1): ...this.
+ (*movsi_aarch64): Add an alternative for RDSVL.
+ (*movdi_aarch64): Likewise.
+ (aarch64_save_nzcv, aarch64_restore_nzcv): New insns.
+ * config/aarch64/aarch64-sme.md (UNSPEC_SMSTOP_ZA)
+ (UNSPEC_INITIAL_ZERO_ZA, UNSPEC_TPIDR2_SAVE, UNSPEC_TPIDR2_RESTORE)
+ (UNSPEC_READ_TPIDR2, UNSPEC_WRITE_TPIDR2, UNSPEC_SETUP_LOCAL_TPIDR2)
+ (UNSPEC_RESTORE_ZA, UNSPEC_START_PRIVATE_ZA_CALL): New unspecs.
+ (UNSPEC_END_PRIVATE_ZA_CALL, UNSPEC_COMMIT_LAZY_SAVE): Likewise.
+ (UNSPECV_ASM_UPDATE_ZA): New unspecv.
+ (aarch64_tpidr2_save, aarch64_smstart_za, aarch64_smstop_za)
+ (aarch64_initial_zero_za, aarch64_setup_local_tpidr2)
+ (aarch64_clear_tpidr2, aarch64_write_tpidr2, aarch64_read_tpidr2)
+ (aarch64_tpidr2_restore, aarch64_restore_za, aarch64_asm_update_za)
+ (aarch64_start_private_za_call, aarch64_end_private_za_call)
+ (aarch64_commit_lazy_save): New patterns.
+ * config/aarch64/aarch64.h (AARCH64_ISA_ZA_ON, TARGET_ZA): New macros.
+ (FIXED_REGISTERS, REGISTER_NAMES): Add the new fake ZA registers.
+ (CALL_USED_REGISTERS): Replace with...
+ (CALL_REALLY_USED_REGISTERS): ...this and add the fake ZA registers.
+ (FIRST_PSEUDO_REGISTER): Bump to include the fake ZA registers.
+ (FAKE_REGS): New register class.
+ (REG_CLASS_NAMES): Update accordingly.
+ (REG_CLASS_CONTENTS): Likewise.
+ (machine_function::tpidr2_block): New member variable.
+ (machine_function::tpidr2_block_ptr): Likewise.
+ (machine_function::za_save_buffer): Likewise.
+ (machine_function::next_asm_update_za_id): Likewise.
+ (CUMULATIVE_ARGS::shared_za_flags): Likewise.
+ (aarch64_mode_entity, aarch64_local_sme_state): New enums.
+ (aarch64_tristate_mode): Likewise.
+ (OPTIMIZE_MODE_SWITCHING, NUM_MODES_FOR_MODE_SWITCHING): Define.
+ * config/aarch64/aarch64.cc (AARCH64_STATE_SHARED, AARCH64_STATE_IN)
+ (AARCH64_STATE_OUT): New constants.
+ (aarch64_attribute_shared_state_flags): New function.
+ (aarch64_lookup_shared_state_flags, aarch64_fndecl_has_new_state)
+ (aarch64_check_state_string, cmp_string_csts): Likewise.
+ (aarch64_merge_string_arguments, aarch64_check_arm_new_against_type)
+ (handle_arm_new, handle_arm_shared): Likewise.
+ (handle_arm_new_za_attribute): New.
+ (aarch64_arm_attribute_table): Add new, preserves, in, out, and inout.
+ (aarch64_hard_regno_nregs): Handle FAKE_REGS.
+ (aarch64_hard_regno_mode_ok): Likewise.
+ (aarch64_fntype_shared_flags, aarch64_fntype_pstate_za): New functions.
+ (aarch64_fntype_isa_mode): Include aarch64_fntype_pstate_za.
+ (aarch64_fndecl_has_state, aarch64_fndecl_pstate_za): New functions.
+ (aarch64_fndecl_isa_mode): Include aarch64_fndecl_pstate_za.
+ (aarch64_cfun_incoming_pstate_za, aarch64_cfun_shared_flags)
+ (aarch64_cfun_has_new_state, aarch64_cfun_has_state): New functions.
+ (aarch64_sme_vq_immediate, aarch64_sme_vq_unspec_p): Likewise.
+ (aarch64_rdsvl_immediate_p, aarch64_output_rdsvl): Likewise.
+ (aarch64_expand_mov_immediate): Handle RDSVL immediates.
+ (aarch64_function_arg): Add the ZA sharing flags as a third limb
+ of the PARALLEL.
+ (aarch64_init_cumulative_args): Record the ZA sharing flags.
+ (aarch64_extra_live_on_entry): New function. Handle the new
+ ZA-related fake registers.
+ (aarch64_epilogue_uses): Handle the new ZA-related fake registers.
+ (aarch64_cannot_force_const_mem): Handle UNSPEC_SME_VQ constants.
+ (aarch64_get_tpidr2_block, aarch64_get_tpidr2_ptr): New functions.
+ (aarch64_init_tpidr2_block, aarch64_restore_za): Likewise.
+ (aarch64_layout_frame): Check whether the current function creates
+ new ZA state. Record that it clobbers LR if so.
+ (aarch64_expand_prologue): Handle functions that create new ZA state.
+ (aarch64_expand_epilogue): Likewise.
+ (aarch64_create_tpidr2_block): New function.
+ (aarch64_restore_za): Likewise.
+ (aarch64_start_call_args): Disallow calls to shared-ZA functions
+ from functions that have no ZA state. Emit a marker instruction
+ before calls to private-ZA functions from functions that have
+ SME state.
+ (aarch64_expand_call): Add return registers for state that is
+ managed via attributes. Record the use and clobber information
+ for the ZA registers.
+ (aarch64_end_call_args): New function.
+ (aarch64_regno_regclass): Handle FAKE_REGS.
+ (aarch64_class_max_nregs): Likewise.
+ (aarch64_override_options_internal): Require TARGET_SME for
+ functions that have ZA state.
+ (aarch64_conditional_register_usage): Handle FAKE_REGS.
+ (aarch64_mov_operand_p): Handle RDSVL immediates.
+ (aarch64_comp_type_attributes): Check that the ZA sharing flags
+ are equal.
+ (aarch64_merge_decl_attributes): New function.
+ (aarch64_optimize_mode_switching, aarch64_mode_emit_za_save_buffer)
+ (aarch64_mode_emit_local_sme_state, aarch64_mode_emit): Likewise.
+ (aarch64_insn_references_sme_state_p): Likewise.
+ (aarch64_mode_needed_local_sme_state): Likewise.
+ (aarch64_mode_needed_za_save_buffer, aarch64_mode_needed): Likewise.
+ (aarch64_mode_after_local_sme_state, aarch64_mode_after): Likewise.
+ (aarch64_local_sme_confluence, aarch64_mode_confluence): Likewise.
+ (aarch64_one_shot_backprop, aarch64_local_sme_backprop): Likewise.
+ (aarch64_mode_backprop, aarch64_mode_entry): Likewise.
+ (aarch64_mode_exit, aarch64_mode_eh_handler): Likewise.
+ (aarch64_mode_priority, aarch64_md_asm_adjust): Likewise.
+ (TARGET_END_CALL_ARGS, TARGET_MERGE_DECL_ATTRIBUTES): Define.
+ (TARGET_MODE_EMIT, TARGET_MODE_NEEDED, TARGET_MODE_AFTER): Likewise.
+ (TARGET_MODE_CONFLUENCE, TARGET_MODE_BACKPROP): Likewise.
+ (TARGET_MODE_ENTRY, TARGET_MODE_EXIT): Likewise.
+ (TARGET_MODE_EH_HANDLER, TARGET_MODE_PRIORITY): Likewise.
+ (TARGET_EXTRA_LIVE_ON_ENTRY): Likewise.
+ (TARGET_MD_ASM_ADJUST): Use aarch64_md_asm_adjust.
+ * config/aarch64/aarch64-c.cc (aarch64_define_unconditional_macros):
+ Define __arm_new, __arm_preserves,__arm_in, __arm_out, and __arm_inout.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64-passes.def
+ (pass_late_thread_prologue_and_epilogue): New pass.
+ * config/aarch64/aarch64-sme.md: New file.
+ * config/aarch64/aarch64.md: Include it.
+ (*tb<optab><mode>1): Rename to...
+ (@aarch64_tb<optab><mode>): ...this.
+ (call, call_value, sibcall, sibcall_value): Don't require operand 2
+ to be a CONST_INT.
+ * config/aarch64/aarch64-protos.h (aarch64_emit_call_insn): Return
+ the insn.
+ (make_pass_switch_sm_state): Declare.
+ * config/aarch64/aarch64.h (TARGET_STREAMING_COMPATIBLE): New macro.
+ (CALL_USED_REGISTER): Mark VG as call-preserved.
+ (aarch64_frame::old_svcr_offset): New member variable.
+ (machine_function::call_switches_sm_state): Likewise.
+ (CUMULATIVE_ARGS::num_sme_mode_switch_args): Likewise.
+ (CUMULATIVE_ARGS::sme_mode_switch_args): Likewise.
+ * config/aarch64/aarch64.cc: Include tree-pass.h and cfgbuild.h.
+ (aarch64_cfun_incoming_pstate_sm): New function.
+ (aarch64_call_switches_pstate_sm): Likewise.
+ (aarch64_reg_save_mode): Return DImode for VG_REGNUM.
+ (aarch64_callee_isa_mode): New function.
+ (aarch64_insn_callee_isa_mode): Likewise.
+ (aarch64_guard_switch_pstate_sm): Likewise.
+ (aarch64_switch_pstate_sm): Likewise.
+ (aarch64_sme_mode_switch_regs): New class.
+ (aarch64_record_sme_mode_switch_args): New function.
+ (aarch64_finish_sme_mode_switch_args): Likewise.
+ (aarch64_function_arg): Handle the end marker by returning a
+ PARALLEL that contains the ABI cookie that we used previously
+ alongside the result of aarch64_finish_sme_mode_switch_args.
+ (aarch64_init_cumulative_args): Initialize num_sme_mode_switch_args.
+ (aarch64_function_arg_advance): If a call would switch SM state,
+ record all argument registers that would need to be saved around
+ the mode switch.
+ (aarch64_need_old_pstate_sm): New function.
+ (aarch64_layout_frame): Decide whether the frame needs to store the
+ incoming value of PSTATE.SM and allocate a save slot for it if so.
+ If a function switches SME state, arrange to save the old value
+ of the DWARF VG register. Handle the case where this is the only
+ register save slot above the FP.
+ (aarch64_save_callee_saves): Handles saves of the DWARF VG register.
+ (aarch64_get_separate_components): Prevent such saves from being
+ shrink-wrapped.
+ (aarch64_old_svcr_mem): New function.
+ (aarch64_read_old_svcr): Likewise.
+ (aarch64_guard_switch_pstate_sm): Likewise.
+ (aarch64_expand_prologue): Handle saves of the DWARF VG register.
+ Initialize any SVCR save slot.
+ (aarch64_expand_call): Allow the cookie to be PARALLEL that contains
+ both the UNSPEC_CALLEE_ABI value and a list of registers that need
+ to be preserved across a change to PSTATE.SM. If the call does
+ involve such a change to PSTATE.SM, record the registers that
+ would be clobbered by this process. Also emit an instruction
+ to mark the temporary change in VG. Update call_switches_pstate_sm.
+ (aarch64_emit_call_insn): Return the emitted instruction.
+ (aarch64_frame_pointer_required): New function.
+ (aarch64_conditional_register_usage): Prevent VG_REGNUM from being
+ treated as a register operand.
+ (aarch64_switch_pstate_sm_for_call): New function.
+ (pass_data_switch_pstate_sm): New pass variable.
+ (pass_switch_pstate_sm): New pass class.
+ (make_pass_switch_pstate_sm): New function.
+ (TARGET_FRAME_POINTER_REQUIRED): Define.
+ * config/aarch64/t-aarch64 (s-check-sve-md): Add aarch64-sme.md.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64.h (TARGET_NON_STREAMING): New macro.
+ (TARGET_SVE2_AES, TARGET_SVE2_BITPERM): Use it.
+ (TARGET_SVE2_SHA3, TARGET_SVE2_SM4): Likewise.
+ * config/aarch64/aarch64-sve-builtins-base.def: Separate out
+ the functions that require PSTATE.SM to be 0 and guard them
+ with AARCH64_FL_SM_OFF.
+ * config/aarch64/aarch64-sve-builtins-sve2.def: Likewise.
+ * config/aarch64/aarch64-sve-builtins.cc (check_required_extensions):
+ Enforce AARCH64_FL_SM_OFF requirements.
+ * config/aarch64/aarch64-sve.md (aarch64_wrffr): Require
+ TARGET_NON_STREAMING.
+ (aarch64_rdffr, aarch64_rdffr_z, *aarch64_rdffr_z_ptest): Likewise.
+ (*aarch64_rdffr_ptest, *aarch64_rdffr_z_cc, *aarch64_rdffr_cc)
+ (@aarch64_ld<fn>f1<mode>): Likewise.
+ (@aarch64_ld<fn>f1_<ANY_EXTEND:optab><SVE_HSDI:mode><SVE_PARTIAL_I:mode>)
+ (gather_load<mode><v_int_container>): Likewise
+ (mask_gather_load<mode><v_int_container>): Likewise.
+ (mask_gather_load<mode><v_int_container>): Likewise.
+ (*mask_gather_load<mode><v_int_container>_<su>xtw_unpacked): Likewise.
+ (*mask_gather_load<mode><v_int_container>_sxtw): Likewise.
+ (*mask_gather_load<mode><v_int_container>_uxtw): Likewise.
+ (@aarch64_gather_load_<ANY_EXTEND:optab><SVE_4HSI:mode><SVE_4BHI:mode>)
+ (@aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode>
+ <SVE_2BHSI:mode>): Likewise.
+ (*aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode>
+ <SVE_2BHSI:mode>_<ANY_EXTEND2:su>xtw_unpacked)
+ (*aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode>
+ <SVE_2BHSI:mode>_sxtw): Likewise.
+ (*aarch64_gather_load_<ANY_EXTEND:optab><SVE_2HSDI:mode>
+ <SVE_2BHSI:mode>_uxtw): Likewise.
+ (@aarch64_ldff1_gather<mode>, @aarch64_ldff1_gather<mode>): Likewise.
+ (*aarch64_ldff1_gather<mode>_sxtw): Likewise.
+ (*aarch64_ldff1_gather<mode>_uxtw): Likewise.
+ (@aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx4_WIDE:mode>
+ <VNx4_NARROW:mode>): Likewise.
+ (@aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx2_WIDE:mode>
+ <VNx2_NARROW:mode>): Likewise.
+ (*aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx2_WIDE:mode>
+ <VNx2_NARROW:mode>_sxtw): Likewise.
+ (*aarch64_ldff1_gather_<ANY_EXTEND:optab><VNx2_WIDE:mode>
+ <VNx2_NARROW:mode>_uxtw): Likewise.
+ (@aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx4SI_ONLY:mode>)
+ (@aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx2DI_ONLY:mode>)
+ (*aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx2DI_ONLY:mode>_sxtw)
+ (*aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx2DI_ONLY:mode>_uxtw)
+ (scatter_store<mode><v_int_container>): Likewise.
+ (mask_scatter_store<mode><v_int_container>): Likewise.
+ (*mask_scatter_store<mode><v_int_container>_<su>xtw_unpacked)
+ (*mask_scatter_store<mode><v_int_container>_sxtw): Likewise.
+ (*mask_scatter_store<mode><v_int_container>_uxtw): Likewise.
+ (@aarch64_scatter_store_trunc<VNx4_NARROW:mode><VNx4_WIDE:mode>)
+ (@aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>)
+ (*aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>_sxtw)
+ (*aarch64_scatter_store_trunc<VNx2_NARROW:mode><VNx2_WIDE:mode>_uxtw)
+ (@aarch64_sve_ld1ro<mode>, @aarch64_adr<mode>): Likewise.
+ (*aarch64_adr_sxtw, *aarch64_adr_uxtw_unspec): Likewise.
+ (*aarch64_adr_uxtw_and, @aarch64_adr<mode>_shift): Likewise.
+ (*aarch64_adr<mode>_shift, *aarch64_adr_shift_sxtw): Likewise.
+ (*aarch64_adr_shift_uxtw, @aarch64_sve_add_<optab><vsi2qi>): Likewise.
+ (@aarch64_sve_<sve_fp_op><mode>, fold_left_plus_<mode>): Likewise.
+ (mask_fold_left_plus_<mode>, @aarch64_sve_compact<mode>): Likewise.
+ * config/aarch64/aarch64-sve2.md (@aarch64_gather_ldnt<mode>)
+ (@aarch64_gather_ldnt_<ANY_EXTEND:optab><SVE_FULL_SDI:mode>
+ <SVE_PARTIAL_I:mode>): Likewise.
+ (@aarch64_sve2_histcnt<mode>, @aarch64_sve2_histseg<mode>): Likewise.
+ (@aarch64_pred_<SVE2_MATCH:sve_int_op><mode>): Likewise.
+ (*aarch64_pred_<SVE2_MATCH:sve_int_op><mode>_cc): Likewise.
+ (*aarch64_pred_<SVE2_MATCH:sve_int_op><mode>_ptest): Likewise.
+ * config/aarch64/iterators.md (SVE_FP_UNARY_INT): Make FEXPA
+ depend on TARGET_NON_STREAMING.
+ (SVE_BFLOAT_TERNARY_LONG): Likewise BFMMLA.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64.h (TARGET_BASE_SIMD): New macro.
+ (TARGET_SIMD): Require PSTATE.SM to be 0.
+ (AARCH64_ISA_SM_OFF): New macro.
+ * config/aarch64/aarch64.cc (aarch64_array_mode_supported_p):
+ Allow Advanced SIMD structure modes for TARGET_BASE_SIMD.
+ (aarch64_print_operand): Support '%Z'.
+ (aarch64_secondary_reload): Expect SVE moves to be used for
+ Advanced SIMD modes if SVE is enabled and non-streaming
+ Advanced SIMD isn't.
+ (aarch64_register_move_cost): Likewise.
+ (aarch64_simd_container_mode): Extend Advanced SIMD mode
+ handling to TARGET_BASE_SIMD.
+ (aarch64_expand_cpymem): Expand commentary.
+ * config/aarch64/aarch64.md (arches): Add base_simd and nobase_simd.
+ (arch_enabled): Handle it.
+ (*mov<mode>_aarch64): Extend UMOV alternative to TARGET_BASE_SIMD.
+ (*movti_aarch64): Use an SVE move instruction if non-streaming
+ SIMD isn't available.
+ (*mov<TFD:mode>_aarch64): Likewise.
+ (load_pair_dw_tftf): Extend to TARGET_BASE_SIMD.
+ (store_pair_dw_tftf): Likewise.
+ (loadwb_pair<TX:mode>_<P:mode>): Likewise.
+ (storewb_pair<TX:mode>_<P:mode>): Likewise.
+ * config/aarch64/aarch64-simd.md (*aarch64_simd_mov<VDMOV:mode>):
+ Allow UMOV in streaming mode.
+ (*aarch64_simd_mov<VQMOV:mode>): Use an SVE move instruction
+ if non-streaming SIMD isn't available.
+ (aarch64_store_lane0<mode>): Depend on TARGET_FLOAT rather than
+ TARGET_SIMD.
+ (aarch64_simd_mov_from_<mode>low): Likewise. Use fmov if
+ Advanced SIMD is completely disabled.
+ (aarch64_simd_mov_from_<mode>high): Use SVE EXT instructions if
+ non-streaming SIMD isn't available.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * doc/invoke.texi: Document SME.
+ * doc/sourcebuild.texi: Document aarch64_sve.
+ * config/aarch64/aarch64-option-extensions.def (sme): Define.
+ * config/aarch64/aarch64.h (AARCH64_ISA_SME): New macro.
+ (TARGET_SME): Likewise.
+ * config/aarch64/aarch64.cc (aarch64_override_options_internal):
+ Ensure that SME is present when compiling streaming code.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64-isa-modes.def: New file.
+ * config/aarch64/aarch64.h: Include it in the feature enumerations.
+ (AARCH64_FL_SM_STATE, AARCH64_FL_ISA_MODES): New constants.
+ (AARCH64_FL_DEFAULT_ISA_MODE): Likewise.
+ (AARCH64_ISA_MODE): New macro.
+ (CUMULATIVE_ARGS): Add an isa_mode field.
+ * config/aarch64/aarch64-protos.h (aarch64_gen_callee_cookie): Declare.
+ (aarch64_tlsdesc_abi_id): Return an arm_pcs.
+ * config/aarch64/aarch64.cc (attr_streaming_exclusions)
+ (aarch64_gnu_attributes, aarch64_gnu_attribute_table)
+ (aarch64_arm_attributes, aarch64_arm_attribute_table): New tables.
+ (aarch64_attribute_table): Redefine to include the gnu and arm
+ attributes.
+ (aarch64_fntype_pstate_sm, aarch64_fntype_isa_mode): New functions.
+ (aarch64_fndecl_pstate_sm, aarch64_fndecl_isa_mode): Likewise.
+ (aarch64_gen_callee_cookie, aarch64_callee_abi): Likewise.
+ (aarch64_insn_callee_cookie, aarch64_insn_callee_abi): Use them.
+ (aarch64_function_arg, aarch64_output_mi_thunk): Likewise.
+ (aarch64_init_cumulative_args): Initialize the isa_mode field.
+ (aarch64_output_mi_thunk): Use aarch64_gen_callee_cookie to get
+ the ABI cookie.
+ (aarch64_override_options): Add the ISA mode to the feature set.
+ (aarch64_temporary_target::copy_from_fndecl): Likewise.
+ (aarch64_fndecl_options, aarch64_handle_attr_arch): Likewise.
+ (aarch64_set_current_function): Maintain the correct ISA mode.
+ (aarch64_tlsdesc_abi_id): Return an arm_pcs.
+ (aarch64_comp_type_attributes): Handle arm::streaming and
+ arm::streaming_compatible.
+ * config/aarch64/aarch64-c.cc (aarch64_define_unconditional_macros):
+ Define __arm_streaming and __arm_streaming_compatible.
+ * config/aarch64/aarch64.md (tlsdesc_small_<mode>): Use
+ aarch64_gen_callee_cookie to get the ABI cookie.
+ * config/aarch64/t-aarch64 (TM_H): Add all feature-related .def files.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64-sve-builtins-base.cc
+ (svreinterpret_impl::fold): Punt on tuple forms.
+ (svreinterpret_impl::expand): Use tuple_mode instead of vector_mode.
+ * config/aarch64/aarch64-sve-builtins-base.def (svreinterpret):
+ Extend to x1234 groups.
+ * config/aarch64/aarch64-sve-builtins-functions.h
+ (multi_vector_function::vectors_per_tuple): If the function has
+ a group suffix, get the number of vectors from there.
+ * config/aarch64/aarch64-sve-builtins-shapes.h (reinterpret): Declare.
+ * config/aarch64/aarch64-sve-builtins-shapes.cc (reinterpret_def)
+ (reinterpret): New function shape.
+ * config/aarch64/aarch64-sve-builtins.cc (function_groups): Handle
+ DEF_SVE_FUNCTION_GS.
+ * config/aarch64/aarch64-sve-builtins.def (DEF_SVE_FUNCTION_GS): New
+ macro.
+ (DEF_SVE_FUNCTION): Forward to DEF_SVE_FUNCTION_GS by default.
+ * config/aarch64/aarch64-sve-builtins.h
+ (function_instance::tuple_mode): New member function.
+ (function_base::vectors_per_tuple): Take the function instance
+ as argument and get the number from the group suffix.
+ (function_instance::vectors_per_tuple): Update accordingly.
+ * config/aarch64/iterators.md (SVE_FULLx2, SVE_FULLx3, SVE_FULLx4)
+ (SVE_ALL_STRUCT): New mode iterators.
+ (SVE_STRUCT): Redefine in terms of SVE_FULL*.
+ * config/aarch64/aarch64-sve.md (@aarch64_sve_reinterpret<mode>)
+ (*aarch64_sve_reinterpret<mode>): Extend to SVE structure modes.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64-sve-builtins.cc
+ (function_resolver::require_derived_vector_type): Add a specific
+ error message for the case in which the caller wants a single
+ vector whose element type matches a previous tuple argument.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64-sve-builtins.h
+ (function_resolver::lookup_form): Add an overload that takes
+ an sve_type rather than type and group suffixes.
+ (function_resolver::resolve_to): Likewise.
+ (function_resolver::infer_vector_or_tuple_type): Return an sve_type.
+ (function_resolver::infer_tuple_type): Likewise.
+ (function_resolver::require_matching_vector_type): Take an sve_type
+ rather than a type_suffix_index.
+ (function_resolver::require_derived_vector_type): Likewise.
+ * config/aarch64/aarch64-sve-builtins.cc (num_vectors_to_group):
+ New function.
+ (function_resolver::lookup_form): Add an overload that takes
+ an sve_type rather than type and group suffixes.
+ (function_resolver::resolve_to): Likewise.
+ (function_resolver::infer_vector_or_tuple_type): Return an sve_type.
+ (function_resolver::infer_tuple_type): Likewise.
+ (function_resolver::infer_vector_type): Update accordingly.
+ (function_resolver::require_matching_vector_type): Take an sve_type
+ rather than a type_suffix_index.
+ (function_resolver::require_derived_vector_type): Likewise.
+ * config/aarch64/aarch64-sve-builtins-shapes.cc (get_def::resolve)
+ (set_def::resolve, store_def::resolve, tbl_tuple_def::resolve): Update
+ calls accordingly.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64-sve-builtins.h
+ (function_resolver::require_matching_vector_type): Add a parameter
+ that specifies the number of the earlier argument that is being
+ matched against.
+ * config/aarch64/aarch64-sve-builtins.cc
+ (function_resolver::require_matching_vector_type): Likewise.
+ (require_derived_vector_type): Update calls accordingly.
+ (function_resolver::resolve_unary): Likewise.
+ (function_resolver::resolve_uniform): Likewise.
+ (function_resolver::resolve_uniform_opt_n): Likewise.
+ * config/aarch64/aarch64-sve-builtins-shapes.cc
+ (binary_long_lane_def::resolve): Likewise.
+ (clast_def::resolve, ternary_uint_def::resolve): Likewise.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64-sve-builtins.h
+ (function_resolver::infer_sve_type): New member function.
+ (function_resolver::report_incorrect_num_vectors): Likewise.
+ * config/aarch64/aarch64-sve-builtins.cc
+	(function_resolver::infer_sve_type): New function.
+ (function_resolver::report_incorrect_num_vectors): New function,
+ split out from...
+ (function_resolver::infer_vector_or_tuple_type): ...here. Use
+ infer_sve_type.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64-sve-builtins.h (sve_type): New struct.
+ (sve_type::operator==): New function.
+ (function_resolver::get_vector_type): Delete.
+ (function_resolver::report_no_such_form): Take an sve_type rather
+ than a type_suffix_index.
+ * config/aarch64/aarch64-sve-builtins.cc (get_vector_type): New
+ function.
+ (function_resolver::get_vector_type): Delete.
+ (function_resolver::report_no_such_form): Take an sve_type rather
+ than a type_suffix_index.
+ (find_sve_type): New function, split out from...
+ (function_resolver::infer_vector_or_tuple_type): ...here.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64-sve-builtins-shapes.cc (build_one): Take
+ a group suffix index parameter.
+ (build_32_64, build_all): Update accordingly. Iterate over all
+ group suffixes.
+ * config/aarch64/aarch64-sve-builtins-sve2.cc (svqrshl_impl::fold)
+ (svqshl_impl::fold, svrshl_impl::fold): Update function_instance
+ constructors.
+ * config/aarch64/aarch64-sve-builtins.cc (group_suffixes): New array.
+ (groups_none): New constant.
+ (function_groups): Initialize the groups field.
+ (function_instance::hash): Hash the group index.
+ (function_builder::get_name): Add the group suffix.
+ (function_builder::add_overloaded_functions): Iterate over all
+ group suffixes.
+ (function_resolver::lookup_form): Take a group suffix parameter.
+ (function_resolver::resolve_to): Likewise.
+ * config/aarch64/aarch64-sve-builtins.def (DEF_SVE_GROUP_SUFFIX): New
+ macro.
+ (x2, x3, x4): New group suffixes.
+ * config/aarch64/aarch64-sve-builtins.h (group_suffix_index): New enum.
+ (group_suffix_info): New structure.
+ (function_group_info::groups): New member variable.
+ (function_instance::group_suffix_id): Likewise.
+ (group_suffixes): New array.
+ (function_instance::operator==): Compare the group suffixes.
+ (function_instance::group_suffix): New function.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64-sve-builtins.cc (function_groups): Remove
+ implied requirement on SVE.
+ * config/aarch64/aarch64-sve-builtins-base.def: Explicitly require SVE.
+ * config/aarch64/aarch64-sve-builtins-sve2.def: Likewise.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64-protos.h (aarch64_sve_rdvl_immediate_p)
+ (aarch64_output_sve_rdvl): Declare.
+ * config/aarch64/aarch64.cc (aarch64_sve_cnt_factor_p): New
+ function, split out from...
+ (aarch64_sve_cnt_immediate_p): ...here.
+ (aarch64_sve_rdvl_factor_p): New function.
+ (aarch64_sve_rdvl_immediate_p): Likewise.
+ (aarch64_output_sve_rdvl): Likewise.
+ (aarch64_offset_temporaries): Rewrite the SVE handling to use RDVL
+ for some cases.
+ (aarch64_expand_mov_immediate): Handle RDVL immediates.
+ (aarch64_mov_operand_p): Likewise.
+ * config/aarch64/constraints.md (Usr): New constraint.
+ * config/aarch64/aarch64.md (*mov<SHORT:mode>_aarch64): Add an RDVL
+ alternative.
+ (*movsi_aarch64, *movdi_aarch64): Likewise.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64-sve-builtins.h:
+ (function_checker::require_immediate_lane_index): Add an argument
+ for the index of the indexed vector argument.
+ * config/aarch64/aarch64-sve-builtins.cc
+ (function_checker::require_immediate_lane_index): Likewise.
+ * config/aarch64/aarch64-sve-builtins-shapes.cc
+ (ternary_bfloat_lane_base::check): Update accordingly.
+ (ternary_qq_lane_base::check): Likewise.
+ (binary_lane_def::check): Likewise.
+ (binary_long_lane_def::check): Likewise.
+ (ternary_lane_def::check): Likewise.
+ (ternary_lane_rotate_def::check): Likewise.
+ (ternary_long_lane_def::check): Likewise.
+ (ternary_qq_lane_rotate_def::check): Likewise.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * target.def (md_asm_adjust): Add a uses parameter.
+ * doc/tm.texi: Regenerate.
+ * cfgexpand.cc (expand_asm_loc): Update call to md_asm_adjust.
+ Handle any USEs created by the target.
+ (expand_asm_stmt): Likewise.
+ * recog.cc (asm_noperands): Handle asms with USEs.
+ (decode_asm_operands): Likewise.
+ * config/arm/aarch-common-protos.h (arm_md_asm_adjust): Add uses
+ parameter.
+ * config/arm/aarch-common.cc (arm_md_asm_adjust): Likewise.
+ * config/arm/arm.cc (thumb1_md_asm_adjust): Likewise.
+ * config/avr/avr.cc (avr_md_asm_adjust): Likewise.
+ * config/cris/cris.cc (cris_md_asm_adjust): Likewise.
+ * config/i386/i386.cc (ix86_md_asm_adjust): Likewise.
+ * config/mn10300/mn10300.cc (mn10300_md_asm_adjust): Likewise.
+ * config/nds32/nds32.cc (nds32_md_asm_adjust): Likewise.
+ * config/pdp11/pdp11.cc (pdp11_md_asm_adjust): Likewise.
+ * config/rs6000/rs6000.cc (rs6000_md_asm_adjust): Likewise.
+ * config/s390/s390.cc (s390_md_asm_adjust): Likewise.
+ * config/vax/vax.cc (vax_md_asm_adjust): Likewise.
+ * config/visium/visium.cc (visium_md_asm_adjust): Likewise.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * doc/tm.texi.in: Add TARGET_START_CALL_ARGS.
+ * doc/tm.texi: Regenerate.
+ * target.def (start_call_args): New hook.
+ (call_args, end_call_args): Add a parameter for the cumulative
+ argument information.
+ * hooks.h (hook_void_rtx_tree): Delete.
+ * hooks.cc (hook_void_rtx_tree): Likewise.
+ * targhooks.h (hook_void_CUMULATIVE_ARGS): Declare.
+ (hook_void_CUMULATIVE_ARGS_rtx_tree): Likewise.
+ * targhooks.cc (hook_void_CUMULATIVE_ARGS): New function.
+ (hook_void_CUMULATIVE_ARGS_rtx_tree): Likewise.
+ * calls.cc (expand_call): Call start_call_args before computing
+ and storing stack parameters. Pass the cumulative argument
+ information to call_args and end_call_args.
+ (emit_library_call_value_1): Likewise.
+ * config/nvptx/nvptx.cc (nvptx_call_args): Add a cumulative
+ argument parameter.
+ (nvptx_end_call_args): Likewise.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * doc/tm.texi.in: Add TARGET_EMIT_EPILOGUE_FOR_SIBCALL.
+ * doc/tm.texi: Regenerate.
+ * target.def (emit_epilogue_for_sibcall): New hook.
+ * calls.cc (can_implement_as_sibling_call_p): Use it.
+ * function.cc (thread_prologue_and_epilogue_insns): Likewise.
+ (reposition_prologue_and_epilogue_notes): Likewise.
+ * config/aarch64/aarch64-protos.h (aarch64_expand_epilogue): Take
+ an rtx_call_insn * rather than a bool.
+ * config/aarch64/aarch64.cc (aarch64_expand_epilogue): Likewise.
+ (TARGET_EMIT_EPILOGUE_FOR_SIBCALL): Define.
+ * config/aarch64/aarch64.md (epilogue): Update call.
+ (sibcall_epilogue): Delete.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * target.def (use_late_prologue_epilogue): New hook.
+ * doc/tm.texi.in: Add TARGET_USE_LATE_PROLOGUE_EPILOGUE.
+ * doc/tm.texi: Regenerate.
+ * passes.def (pass_late_thread_prologue_and_epilogue): New pass.
+ * tree-pass.h (make_pass_late_thread_prologue_and_epilogue): Declare.
+ * function.cc (pass_thread_prologue_and_epilogue::gate): New function.
+ (pass_data_late_thread_prologue_and_epilogue): New pass variable.
+ (pass_late_thread_prologue_and_epilogue): New pass class.
+ (make_pass_late_thread_prologue_and_epilogue): New function.
+
+2023-12-05 Kito Cheng <kito.cheng@sifive.com>
+
+ * common/config/riscv/riscv-common.cc
+ (riscv_subset_list::check_conflict_ext): Check zcd conflicts
+ with zcmt and zcmp.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ PR rtl-optimization/112278
+ * lra-int.h (lra_update_biggest_mode): New function.
+ * lra-coalesce.cc (merge_pseudos): Use it.
+ * lra-lives.cc (process_bb_lives): Likewise.
+ * lra.cc (new_insn_reg): Likewise.
+
+2023-12-05 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/112843
+ * gimple-lower-bitint.cc (gimple_lower_bitint): Change lhs of stmt
+ to lhs2 before building and inserting lhs = (cast) lhs2; assignment.
+ Adjust stmt operands before adjusting lhs.
+
+2023-12-05 xuli <xuli1@eswincomputing.com>
+
+ * config/riscv/riscv-v.cc (sew64_scalar_helper): Bugfix.
+
+2023-12-05 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/112816
+ * config/i386/sse.md ((eq (eq (lshiftrt x elt_bits-1) 0) 0)): New
+ splitter to turn psrld $31; pcmpeq; pcmpeq into psrad $31.
+
+2023-12-05 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * config/riscv/autovec.md: Add blocker.
+ * config/riscv/riscv-protos.h (gather_scatter_valid_offset_p): New function.
+ * config/riscv/riscv-v.cc (gather_scatter_valid_offset_p): Ditto.
+
+2023-12-05 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/112827
+ PR tree-optimization/112848
+ * tree-scalar-evolution.cc (final_value_replacement_loop):
+ Compute the insert location for each insert.
+
+2023-12-05 liuhongt <hongtao.liu@intel.com>
+
+ * config/i386/i386.cc (ix86_vector_costs::add_stmt_cost):
+ Count sse_reg/gpr_regs for components not loaded from memory.
+	(ix86_vector_costs::ix86_vector_costs): New constructor.
+	(ix86_vector_costs::m_num_gpr_needed[3]): New private member.
+ (ix86_vector_costs::m_num_sse_needed[3]): Ditto.
+ (ix86_vector_costs::finish_cost): Estimate overall register
+ pressure cost.
+ (ix86_vector_costs::ix86_vect_estimate_reg_pressure): New
+ function.
+
+2023-12-05 liuhongt <hongtao.liu@intel.com>
+
+ * config/i386/sse.md (udot_prodv64qi): New expander.
+ (udot_prod<mode>): Emulates with VEC_UNPACKU_EXPR +
+ DOT_PROD (short, int).
+
+2023-12-05 Marek Polacek <polacek@redhat.com>
+
+ PR c++/107687
+ PR c++/110997
+ * doc/invoke.texi: Document -fno-immediate-escalation.
+
+2023-12-04 Andrew Pinski <quic_apinski@quicinc.com>
+
+ * match.pd (zero_one_valued_p): For convert
+ make sure type is not a signed 1-bit integer.
+
+2023-12-04 Jeff Law <jlaw@ventanamicro.com>
+
+ * config/microblaze/microblaze.md (movhi): Use %i for half-word
+ loads to properly select between lhu/lhui.
+
+2023-12-04 Robin Dapp <rdapp@ventanamicro.com>
+
+ * config/riscv/riscv-string.cc (expand_rawmemchr): Increment
+ source address by vl * element_size.
+
+2023-12-04 Robin Dapp <rdapp@ventanamicro.com>
+
+ * config/riscv/riscv-opts.h (enum riscv_stringop_strategy_enum):
+ Rename...
+ (enum stringop_strategy_enum): ... to this.
+ * config/riscv/riscv-string.cc (riscv_expand_block_move): New
+ wrapper expander handling the strategies and delegation.
+ (riscv_expand_block_move_scalar): Rename function and make
+ static.
+ (expand_block_move): Remove strategy handling.
+ * config/riscv/riscv.md: Call expander wrapper.
+ * config/riscv/riscv.opt: Rename.
+
+2023-12-04 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/112785
+ * function.h (get_new_clique): New inline function handling
+ last_clique overflow.
+ * cfgrtl.cc (duplicate_insn_chain): Use it.
+ * tree-cfg.cc (gimple_duplicate_bb): Likewise.
+ * tree-inline.cc (remap_dependence_clique): Likewise.
+
+2023-12-04 Christoph Müllner <christoph.muellner@vrull.eu>
+
+ PR target/112650
+ * doc/invoke.texi: Document riscv-strcmp-inline-limit.
+
+2023-12-04 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112431
+ * config/riscv/vector.md: Fix incorrect overlap in v0.
+
+2023-12-04 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112431
+ * config/riscv/vector.md: Add highest-number overlap support.
+
+2023-12-04 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/112818
+ * tree-vect-stmts.cc (vectorizable_bswap): Check input and
+ output vector types have the same size.
+
+2023-12-04 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/112827
+ * tree-scalar-evolution.cc (final_value_replacement_loop):
+ Do not release SSA name but keep a dead initialization around.
+
+2023-12-04 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112431
+ * config/riscv/vector.md: Remove earlyclobber from widen reduction.
+
+2023-12-04 Indu Bhagat <indu.bhagat@oracle.com>
+
+ PR debug/112656
+ * btfout.cc (btf_asm_type): Fixup ctti_name for all
+ BTF types of kind BTF_KIND_FUNC_PROTO.
+
+2023-12-04 Indu Bhagat <indu.bhagat@oracle.com>
+
+ PR debug/112768
+ * btfout.cc (get_btf_type_name): New definition.
+ (btf_collect_datasec): Update dtd_name to the original type name
+ string.
+ (btf_asm_type_ref): Use the new get_btf_type_name function
+ instead.
+ (btf_asm_type): Likewise.
+ (btf_asm_func_type): Likewise.
+
+2023-12-04 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/112837
+ * config/i386/i386.cc (ix86_elim_entry_set_got): Before checking
+ for UNSPEC_SET_GOT check that SET_SRC is UNSPEC. Use SET_SRC and
+ SET_DEST macros instead of XEXP, rename vec variable to set.
+
+2023-12-04 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/112816
+ * config/i386/sse.md (signbit<mode>2): Force operands[1] into a REG.
+
+2023-12-04 Feng Wang <wangfeng@eswincomputing.com>
+
+ * common/config/riscv/riscv-common.cc: Add zvkb ISA info.
+ * config/riscv/riscv.opt: Add Mask(ZVKB)
+
+2023-12-04 Fei Gao <gaofei@eswincomputing.com>
+ Xiao Zeng <zengxiao@eswincomputing.com>
+
+	* config/riscv/riscv.md (*mov<GPR:mode><X:mode>cc): Move to sfb.md.
+ * config/riscv/sfb.md: New file.
+
+2023-12-04 Kito Cheng <kito.cheng@sifive.com>
+
+ * config/riscv/riscv-cores.def: Add sifive-x280.
+ * doc/invoke.texi (RISC-V Options): Add sifive-x280
+
+2023-12-04 Kito Cheng <kito.cheng@sifive.com>
+
+ * common/config/riscv/riscv-common.cc (riscv_implied_predicator_t): New.
+ (riscv_implied_info_t::riscv_implied_info_t): New.
+ (riscv_implied_info_t::match): New.
+ (riscv_implied_info): New entry for zcf.
+ (riscv_subset_list::handle_implied_ext): Use
+ riscv_implied_info_t::match.
+ (riscv_subset_list::check_implied_ext): Ditto.
+ (riscv_subset_list::handle_combine_ext): Ditto.
+ (riscv_subset_list::parse): Move zcf implication handling to
+ riscv_implied_infos.
+
+2023-12-04 Kito Cheng <kito.cheng@sifive.com>
+
+ * common/config/riscv/riscv-common.cc
+ (riscv_subset_list::check_conflict_ext): New.
+ (riscv_subset_list::parse): Move checking conflict ext. to
+ check_conflict_ext.
+ * config/riscv/riscv-subset.h:
+ Add riscv_subset_list::check_conflict_ext.
+
+2023-12-04 Hu, Lin1 <lin1.hu@intel.com>
+
+ * common/config/i386/cpuinfo.h (get_available_features): Move USER_MSR
+ to the correct location.
+
+2023-12-04 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * config/riscv/riscv.md: Rostify the constraints.
+
+2023-12-04 chenxiaolong <chenxiaolong@loongson.cn>
+
+ * doc/extend.texi: Add information about the intrinsic function of the vector
+ instruction.
+
+2023-12-03 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/112807
+ * gimple-lower-bitint.cc (bitint_large_huge::lower_addsub_overflow):
+ When choosing type0 and type1 types, if prec3 has small/middle bitint
+ kind, use maximum of type0 and type1's precision instead of prec3.
+
+2023-12-03 Jeff Law <jlaw@ventanamicro.com>
+
+ * config/frv/frv.h (TRANSFER_FROM_TRAMPOLINE): Add prototype for exit.
+
+2023-12-02 Richard Sandiford <richard.sandiford@arm.com>
+
+ * attribs.cc (comp_type_attributes): Pass the full TREE_PURPOSE
+ to lookup_attribute_spec, rather than just the name.
+ (remove_attributes_matching): Likewise.
+
+2023-12-02 Richard Sandiford <richard.sandiford@arm.com>
+
+ * attribs.cc (find_same_attribute): New function.
+ (decl_attributes, comp_type_attributes): Use it when looking
+ up one list's attributes in another list.
+
+2023-12-02 Richard Sandiford <richard.sandiford@arm.com>
+
+ * Makefile.in (GTFILES): Add attribs.cc.
+ * attribs.cc (gnu_namespace_cache): New variable.
+ (get_gnu_namespace): New function.
+ (lookup_attribute_spec): Use it instead of get_identifier ("gnu").
+ (get_attribute_namespace, attribs_cc_tests): Likewise.
+
+2023-12-02 Richard Sandiford <richard.sandiford@arm.com>
+
+ * attribs.h (scoped_attribute_specs): New structure.
+ (register_scoped_attributes): Take a reference to a
+ scoped_attribute_specs instead of separate namespace and array
+ parameters.
+ * plugin.h (register_scoped_attributes): Likewise.
+ * attribs.cc (register_scoped_attributes): Likewise.
+ (attribute_tables): Change into an array of scoped_attribute_specs
+ pointers. Reduce to 1 element for frontends and 1 element for targets.
+ (empty_attribute_table): Delete.
+ (check_attribute_tables): Update for changes to attribute_tables.
+ Use a hash_set to identify duplicates.
+ (handle_ignored_attributes_option): Update for above changes.
+ (init_attributes): Likewise.
+ (excl_pair): Delete.
+ (test_attribute_exclusions): Update for above changes. Don't
+ enforce symmetry for standard attributes in the top-level namespace.
+ * langhooks-def.h (LANG_HOOKS_COMMON_ATTRIBUTE_TABLE): Delete.
+ (LANG_HOOKS_FORMAT_ATTRIBUTE_TABLE): Likewise.
+ (LANG_HOOKS_INITIALIZER): Update accordingly.
+ (LANG_HOOKS_ATTRIBUTE_TABLE): Define to an empty constructor.
+ * langhooks.h (lang_hooks::common_attribute_table): Delete.
+ (lang_hooks::format_attribute_table): Likewise.
+ (lang_hooks::attribute_table): Redefine to an array of
+ scoped_attribute_specs pointers.
+ * target-def.h (TARGET_GNU_ATTRIBUTES): New macro.
+ * target.def (attribute_spec): Redefine to return an array of
+ scoped_attribute_specs pointers.
+ * tree-inline.cc (function_attribute_inlinable_p): Update accordingly.
+ * doc/tm.texi: Regenerate.
+ * config/aarch64/aarch64.cc (aarch64_attribute_table): Define using
+ TARGET_GNU_ATTRIBUTES.
+ * config/alpha/alpha.cc (vms_attribute_table): Likewise.
+ * config/avr/avr.cc (avr_attribute_table): Likewise.
+ * config/bfin/bfin.cc (bfin_attribute_table): Likewise.
+ * config/bpf/bpf.cc (bpf_attribute_table): Likewise.
+ * config/csky/csky.cc (csky_attribute_table): Likewise.
+ * config/epiphany/epiphany.cc (epiphany_attribute_table): Likewise.
+ * config/gcn/gcn.cc (gcn_attribute_table): Likewise.
+ * config/h8300/h8300.cc (h8300_attribute_table): Likewise.
+ * config/loongarch/loongarch.cc (loongarch_attribute_table): Likewise.
+ * config/m32c/m32c.cc (m32c_attribute_table): Likewise.
+ * config/m32r/m32r.cc (m32r_attribute_table): Likewise.
+ * config/m68k/m68k.cc (m68k_attribute_table): Likewise.
+ * config/mcore/mcore.cc (mcore_attribute_table): Likewise.
+ * config/microblaze/microblaze.cc (microblaze_attribute_table):
+ Likewise.
+ * config/mips/mips.cc (mips_attribute_table): Likewise.
+ * config/msp430/msp430.cc (msp430_attribute_table): Likewise.
+ * config/nds32/nds32.cc (nds32_attribute_table): Likewise.
+ * config/nvptx/nvptx.cc (nvptx_attribute_table): Likewise.
+ * config/riscv/riscv.cc (riscv_attribute_table): Likewise.
+ * config/rl78/rl78.cc (rl78_attribute_table): Likewise.
+ * config/rx/rx.cc (rx_attribute_table): Likewise.
+ * config/s390/s390.cc (s390_attribute_table): Likewise.
+ * config/sh/sh.cc (sh_attribute_table): Likewise.
+ * config/sparc/sparc.cc (sparc_attribute_table): Likewise.
+ * config/stormy16/stormy16.cc (xstormy16_attribute_table): Likewise.
+ * config/v850/v850.cc (v850_attribute_table): Likewise.
+ * config/visium/visium.cc (visium_attribute_table): Likewise.
+ * config/arc/arc.cc (arc_attribute_table): Likewise. Move further
+ down file.
+ * config/arm/arm.cc (arm_attribute_table): Update for above changes,
+ using...
+ (arm_gnu_attributes, arm_gnu_attribute_table): ...these new globals.
+ * config/i386/i386-options.h (ix86_attribute_table): Delete.
+ (ix86_gnu_attribute_table): Declare.
+ * config/i386/i386-options.cc (ix86_attribute_table): Replace with...
+ (ix86_gnu_attributes, ix86_gnu_attribute_table): ...these two globals.
+ * config/i386/i386.cc (ix86_attribute_table): Define as an array of
+ scoped_attribute_specs pointers.
+ * config/ia64/ia64.cc (ia64_attribute_table): Update for above changes,
+ using...
+ (ia64_gnu_attributes, ia64_gnu_attribute_table): ...these new globals.
+ * config/rs6000/rs6000.cc (rs6000_attribute_table): Update for above
+ changes, using...
+ (rs6000_gnu_attributes, rs6000_gnu_attribute_table): ...these new
+ globals.
+
+2023-12-02 Roger Sayle <roger@nextmovesoftware.com>
+
+ * config/riscv/riscv-vsetvl.cc (csetvl_info::parse_insn): Rename
+ local variable from demand_flags to dflags, to avoid conflicting
+ with (enumeration) type of the same name.
+
+2023-12-02 Li Wei <liwei@loongson.cn>
+
+ * config/loongarch/loongarch.cc (loongarch_is_odd_extraction):
+ Supplementary function prototype.
+ (loongarch_is_even_extraction): Adjust.
+ (loongarch_try_expand_lsx_vshuf_const): Adjust.
+ (loongarch_is_extraction_permutation): Adjust.
+ (loongarch_expand_vec_perm_const_2): Adjust.
+
+2023-12-02 Li Wei <liwei@loongson.cn>
+
+ * config/loongarch/loongarch.md (v2di): Used to simplify the
+ following templates.
+ (popcount<mode>2): New.
+
+2023-12-02 Li Wei <liwei@loongson.cn>
+
+ * config/loongarch/loongarch.h (CTZ_DEFINED_VALUE_AT_ZERO): Add
+ description.
+ (CLZ_DEFINED_VALUE_AT_ZERO): Remove duplicate definition.
+
+2023-12-02 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112801
+ * config/riscv/vector.md: Add !TARGET_64BIT.
+
+2023-12-02 Pan Li <pan2.li@intel.com>
+
+ PR target/112743
+ * config/riscv/riscv.cc (riscv_legitimize_move): Take the
+ exist (U *mode) and handle DFmode like DImode when EEW is
+ 32bits for ZVE32F.
+
+2023-12-01 Andrew MacLeod <amacleod@redhat.com>
+
+ * gimple-range-fold.h (range_compatible_p): Relocate.
+ * value-range.h (range_compatible_p): Here.
+ * range-op-mixed.h (operand_equal::operand_check_p): Call
+ range_compatible_p rather than comparing precision.
+ (operand_not_equal::operand_check_p): Ditto.
+ (operand_not_lt::operand_check_p): Ditto.
+ (operand_not_le::operand_check_p): Ditto.
+ (operand_not_gt::operand_check_p): Ditto.
+ (operand_not_ge::operand_check_p): Ditto.
+ (operand_plus::operand_check_p): Ditto.
+ (operand_abs::operand_check_p): Ditto.
+ (operand_minus::operand_check_p): Ditto.
+ (operand_negate::operand_check_p): Ditto.
+ (operand_mult::operand_check_p): Ditto.
+ (operand_bitwise_not::operand_check_p): Ditto.
+ (operand_bitwise_xor::operand_check_p): Ditto.
+ (operand_bitwise_and::operand_check_p): Ditto.
+ (operand_bitwise_or::operand_check_p): Ditto.
+ (operand_min::operand_check_p): Ditto.
+ (operand_max::operand_check_p): Ditto.
+ * range-op.cc (operand_lshift::operand_check_p): Ditto.
+ (operand_rshift::operand_check_p): Ditto.
+ (operand_logical_and::operand_check_p): Ditto.
+ (operand_logical_or::operand_check_p): Ditto.
+ (operand_logical_not::operand_check_p): Ditto.
+
+2023-12-01 Vladimir N. Makarov <vmakarov@redhat.com>
+
+ PR target/112445
+ * lra.h (lra): Add one more arg.
+ * lra-int.h (lra_verbose, lra_dump_insns): New externals.
+ (lra_dump_insns_if_possible): Ditto.
+ * lra.cc (lra_dump_insns): Dump all insns.
+ (lra_dump_insns_if_possible): Dump all insns for lra_verbose >= 7.
+ (lra_verbose): New global.
+ (lra): Add new arg. Setup lra_verbose from its value.
+ * lra-assigns.cc (lra_split_hard_reg_for): Dump insns if rtl
+ was changed.
+ * lra-remat.cc (lra_remat): Dump insns if rtl was changed.
+ * lra-constraints.cc (lra_inheritance): Dump insns.
+ (lra_constraints, lra_undo_inheritance): Dump insns if rtl
+ was changed.
+ (remove_inheritance_pseudos): Use restore reg if it is set up.
+ * ira.cc: (lra): Pass internal_flag_ira_verbose.
+
+2023-12-01 Jakub Jelinek <jakub@redhat.com>
+
+ * doc/extend.texi (__builtin_addc, __builtin_addcl, __builtin_addcll,
+ __builtin_subc, __builtin_subcl, __builtin_subcll,
+ __builtin_stdc_bit_width, __builtin_stdc_count_ones,
+ __builtin_stdc_count_zeros, __builtin_stdc_first_leading_one,
+ __builtin_stdc_first_leading_zero, __builtin_stdc_first_trailing_one,
+ __builtin_stdc_first_trailing_zero, __builtin_stdc_has_single_bit,
+ __builtin_stdc_leading_ones, __builtin_stdc_leading_zeros,
+ __builtin_stdc_trailing_ones, __builtin_stdc_trailing_zeros,
+ __builtin_nvptx_brev, __builtin_nvptx_brevll, __builtin_darn,
+ __builtin_darn_raw, __builtin_ia32_vec_ext_v2di,
+ __builtin_ia32_crc32qi, __builtin_ia32_crc32hi,
+ __builtin_ia32_crc32si, __builtin_ia32_crc32di): Put {}s around
+ return type with spaces in it.
+ (__builtin_rx_mvfachi, __builtin_rx_mvfacmi): Remove superfluous
+ whitespace.
+
+2023-12-01 David Malcolm <dmalcolm@redhat.com>
+
+ * diagnostic-core.h (emit_diagnostic_valist): New overload decl.
+ * diagnostic-format-sarif.cc (sarif_builder::make_result_object):
+ When we have metadata, call its maybe_add_sarif_properties vfunc.
+ * diagnostic-metadata.h (class sarif_object): Forward decl.
+ (diagnostic_metadata::~diagnostic_metadata): New.
+ (diagnostic_metadata::maybe_add_sarif_properties): New vfunc.
+ * diagnostic.cc (emit_diagnostic_valist): New overload.
+
+2023-12-01 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/103533
+ * doc/extend.texi: Remove stray reference to
+ -fanalyzer-checker=taint.
+
+2023-12-01 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112431
+ * config/riscv/vector.md: Support highpart overlap for vx/vf.
+
+2023-12-01 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112431
+ * config/riscv/vector.md: Support highpart overlap for indexed load.
+
+2023-12-01 Richard Biener <rguenther@suse.de>
+
+ * tree-vectorizer.h (vect_get_vec_defs): Re-order arguments.
+ * tree-vect-stmts.cc (vect_get_vec_defs): Likewise.
+ (vectorizable_condition): Update caller.
+ (vectorizable_comparison_1): Likewise.
+ (vectorizable_conversion): Specify the vector type to be
+ used for invariant/external defs.
+ * tree-vect-loop.cc (vect_transform_reduction): Update caller.
+
+2023-12-01 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/112770
+ * gimple-lower-bitint.cc (gimple_lower_bitint): When adjusting
+ lhs of middle _BitInt setter which ends bb, insert cast on
+ the fallthru edge rather than after stmt.
+
+2023-12-01 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/112771
+ * gimple-lower-bitint.cc (bitint_large_huge::handle_operand_addr):
+ Use mp = 1 if it is zero.
+
+2023-12-01 Jose E. Marchesi <jose.marchesi@oracle.com>
+
+ * config/bpf/bpf.cc (bpf_asm_named_section): New function.
+ (TARGET_ASM_NAMED_SECTION): Set to bpf_asm_named_section.
+
+2023-12-01 Di Zhao <dizhao@os.amperecomputing.com>
+
+ * config/aarch64/aarch64-tuning-flags.def
+ (AARCH64_EXTRA_TUNING_OPTION): New tuning option to avoid
+ cross-loop FMA.
+ * config/aarch64/aarch64.cc
+ (aarch64_override_options_internal): Set
+ param_avoid_fma_max_bits according to tuning option.
+ * config/aarch64/tuning_models/ampere1.h (ampere1_tunings):
+ Modify tunings related with FMA.
+ * config/aarch64/tuning_models/ampere1a.h (ampere1a_tunings):
+ Likewise.
+ * config/aarch64/tuning_models/ampere1b.h (ampere1b_tunings):
+ Likewise.
+
+2023-12-01 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64-sve-builtins.h
+ (function_expander::result_mode): New member function.
+ * config/aarch64/aarch64-sve-builtins-base.cc
+ (svld234_impl::expand): Use it.
+ * config/aarch64/aarch64-sve-builtins.cc
+ (function_expander::get_reg_target): Likewise.
+
+2023-12-01 Jakub Jelinek <jakub@redhat.com>
+
+ * gimple-lower-bitint.cc (range_to_prec): Don't return -1 for
+ signed types.
+ (bitint_large_huge::lower_addsub_overflow): Fix up computation of
+ prec2.
+ (bitint_large_huge::lower_mul_overflow): Likewise.
+
+2023-12-01 Jakub Jelinek <jakub@redhat.com>
+
+ * gimple-lower-bitint.cc (bitint_large_huge::finish_arith_overflow):
+ When replacing use_stmt which is gsi_stmt (m_gsi), update m_gsi to
+ the new statement.
+
+2023-12-01 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/112750
+ * gimple-lower-bitint.cc (bitint_large_huge::lower_addsub_overflow):
+ Use NE_EXPR rather than EQ_EXPR for g2 if !single_comparison and
+ adjust probabilities.
+
+2023-12-01 Xi Ruoyao <xry111@xry111.site>
+
+ * doc/install.texi: Deem srcdir == objdir broken, but objdir
+ as a subdirectory of srcdir fine.
+
+2023-12-01 Juergen Christ <jchrist@linux.ibm.com>
+
+ PR target/112753
+ * config/s390/s390.cc (s390_md_asm_adjust): Return after dealing
+ with the outputs, if no further processing of long doubles is
+ required.
+
+2023-12-01 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/112725
+ * config/s390/s390.cc (s390_invalid_arg_for_unprototyped_fn): Return
+ NULL for __builtin_classify_type calls with vector arguments.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ * doc/invoke.texi (Warning Options): Document
+ -Wdeclaration-missing-parameter-type.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ * doc/invoke.texi (Warning Options): Document changes.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ * doc/invoke.texi (Warning Options): Document that
+ -Wreturn-mismatch is a permerror in C99 and later.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ PR c/91093
+ PR c/96284
+ * doc/invoke.texi (Warning Options): Document changes.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ * doc/invoke.texi (Warning Options): Document changes.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ * doc/invoke.texi (Warning Options): Document changes.
+
+2023-12-01 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112776
+ * config/riscv/riscv-vsetvl.cc (pre_vsetvl::pre_global_vsetvl_info): Fix ratio.
+
+2023-11-30 Wilco Dijkstra <wilco.dijkstra@arm.com>
+
+ PR target/111404
+ * config/aarch64/aarch64.cc (aarch64_split_compare_and_swap):
+ For 128-bit store the loaded value and loop if needed.
+
+2023-11-30 Wilco Dijkstra <wilco.dijkstra@arm.com>
+
+ PR target/103100
+ * config/aarch64/aarch64.md (cpymemdi): Remove pattern condition.
+ (setmemdi): Likewise.
+ * config/aarch64/aarch64.cc (aarch64_expand_cpymem): Support
+ strict-align. Cleanup condition for using MOPS.
+ (aarch64_expand_setmem): Likewise.
+
+2023-11-30 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/112767
+ * tree-scalar-evolution.cc (final_value_replacement_loop):
+ Propagate constants to immediate uses immediately.
+
+2023-11-30 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/112766
+ * gimple-predicate-analysis.cc (find_var_cmp_const):
+ Support continuing the iteration and report every candidate.
+ (uninit_analysis::overlap): Iterate over all flag var
+ candidates.
+
+2023-11-30 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112431
+ * config/riscv/vector.md: Add widening overlap of vf2/vf4.
+
+2023-11-30 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112431
+ * config/riscv/vector.md: Remove earlyclobber for wx/wf instructions.
+
+2023-11-30 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/112733
+ * wide-int.cc (wi::mul_internal): Don't allocate twice as much
+ space for u, v and r as needed.
+ (divmod_internal_2): Change return type from void to int, for n == 1
+ return 1, otherwise before writing b_dividend into b_remainder set
+ n to MIN (n, m) and at the end return it.
+ (wi::divmod_internal): Don't allocate 4 times as much space for
+ b_quotient, b_remainder, b_dividend and b_divisor. Set n to
+ result of divmod_internal_2.
+ (wide_int_cc_tests): Add test for unsigned widest_int
+ wi::multiple_of_p of 1 and -128.
+
+2023-11-30 liuhongt <hongtao.liu@intel.com>
+
+ * config/i386/sse.md (sdot_prodv64qi): New expander.
+ (sseunpackmodelower): New mode attr.
+ (sdot_prod<mode>): Emulate sdot_prodv*qi with sdot_prodv*hi
+ when TARGET_VNNIINT8 is not available.
+
+2023-11-30 liuhongt <hongtao.liu@intel.com>
+
+ * config/i386/sse.md: (reduc_plus_scal_<mode>): Use
+ vec_extract_lo instead of subreg.
+ (reduc_<code>_scal_<mode>): Ditto.
+ (reduc_<code>_scal_<mode>): Ditto.
+ (reduc_<code>_scal_<mode>): Ditto.
+ (reduc_<code>_scal_<mode>): Ditto.
+
+2023-11-30 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112431
+ * config/riscv/vector.md: Add widening overlap.
+
+2023-11-30 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * config/riscv/constraints.md (TARGET_VECTOR ? V_REGS : NO_REGS): Fix constraint.
+ * config/riscv/riscv.md (no,W21,W42,W84,W41,W81,W82): Rename vconstraint into group_overlap.
+ (no,yes): Ditto.
+ (none,W21,W42,W84,W43,W86,W87): Ditto.
+ * config/riscv/vector.md: Ditto.
+
+2023-11-30 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * config/riscv/vector.md: Support highpart overlap for vext.vf2
+
+2023-11-29 Philipp Tomsich <philipp.tomsich@vrull.eu>
+
+ * config/aarch64/aarch64-cores.def (AARCH64_CORE): Add ampere-1b
+ * config/aarch64/aarch64-cost-tables.h: Add ampere1b_extra_costs
+ * config/aarch64/aarch64-tune.md: Regenerate
+ * config/aarch64/aarch64.cc: Include ampere1b tuning model
+ * doc/invoke.texi: Document -mcpu=ampere1b
+ * config/aarch64/tuning_models/ampere1b.h: New file.
+
+2023-11-29 David Faust <david.faust@oracle.com>
+
+ * config/bpf/bpf.h (ASM_COMMENT_START): Change from ';' to '#'.
+
+2023-11-29 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/112725
+ * config/rs6000/rs6000.cc (invalid_arg_for_unprototyped_fn): Return
+ NULL for __builtin_classify_type calls with vector arguments.
+
+2023-11-29 Andrew MacLeod <amacleod@redhat.com>
+
+ PR tree-optimization/111922
+ * ipa-cp.cc (ipa_vr_operation_and_type_effects): Check the
+ operands are valid before calling fold_range.
+
+2023-11-29 Andrew MacLeod <amacleod@redhat.com>
+
+ * range-op-mixed.h (operator_equal::operand_check_p): New.
+ (operator_not_equal::operand_check_p): New.
+ (operator_lt::operand_check_p): New.
+ (operator_le::operand_check_p): New.
+ (operator_gt::operand_check_p): New.
+ (operator_ge::operand_check_p): New.
+ (operator_plus::operand_check_p): New.
+ (operator_abs::operand_check_p): New.
+ (operator_minus::operand_check_p): New.
+ (operator_negate::operand_check_p): New.
+ (operator_mult::operand_check_p): New.
+ (operator_bitwise_not::operand_check_p): New.
+ (operator_bitwise_xor::operand_check_p): New.
+ (operator_bitwise_and::operand_check_p): New.
+ (operator_bitwise_or::operand_check_p): New.
+ (operator_min::operand_check_p): New.
+ (operator_max::operand_check_p): New.
+ * range-op.cc (range_op_handler::fold_range): Check operand
+ parameter types.
+ (range_op_handler::op1_range): Ditto.
+ (range_op_handler::op2_range): Ditto.
+ (range_op_handler::operand_check_p): New.
+ (range_operator::operand_check_p): New.
+ (operator_lshift::operand_check_p): New.
+ (operator_rshift::operand_check_p): New.
+ (operator_logical_and::operand_check_p): New.
+ (operator_logical_or::operand_check_p): New.
+ (operator_logical_not::operand_check_p): New.
+ * range-op.h (range_operator::operand_check_p): New.
+ (range_op_handler::operand_check_p): New.
+
+2023-11-29 Martin Jambor <mjambor@suse.cz>
+
+ PR tree-optimization/112711
+ PR tree-optimization/112721
+ * tree-sra.cc (build_access_from_call_arg): New parameter
+ CAN_BE_RETURNED, disqualify any candidate passed by reference if it is
+ true. Adjust leading comment.
+ (scan_function): Pass appropriate value to CAN_BE_RETURNED of
+ build_access_from_call_arg.
+
+2023-11-29 Thomas Schwinge <thomas@codesourcery.com>
+
+ * doc/sourcebuild.texi (Final Actions): Document
+ 'only_for_offload_target' wrapper.
+
+2023-11-29 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ PR testsuite/112729
+ * doc/sourcebuild.texi (Effective-Target Keywords, Environment
+ attributes): Document cfi.
+
+2023-11-29 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/110237
+ * internal-fn.cc (expand_partial_load_optab_fn): Clear
+ MEM_EXPR and MEM_OFFSET.
+ (expand_partial_store_optab_fn): Likewise.
+
+2023-11-29 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/112733
+ * fold-const.cc (multiple_of_p): Pass SIGNED rather than
+ UNSIGNED for wi::multiple_of_p on widest_int arguments.
+
+2023-11-29 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+ kito-cheng <kito.cheng@sifive.com>
+ kito-cheng <kito.cheng@gmail.com>
+
+ PR target/112431
+ * config/riscv/constraints.md (TARGET_VECTOR ? V_REGS : NO_REGS): New register filters.
+ * config/riscv/riscv.md (no,W21,W42,W84,W41,W81,W82): Ditto.
+ (no,yes): Ditto.
+ * config/riscv/vector.md: Support highpart register overlap for vwcvt.
+
+2023-11-29 xuli <xuli1@eswincomputing.com>
+
+ * config/riscv/riscv.cc (riscv_option_override): Eliminate warning.
+
+2023-11-29 Jakub Jelinek <jakub@redhat.com>
+
+ PR bootstrap/111601
+ * fold-mem-offsets.cc (get_uses): Ignore DEBUG_INSN uses. Otherwise,
+ punt if use is in a different basic block from INSN or appears before
+ INSN in the same basic block. Formatting fixes.
+ (get_single_def_in_bb): Formatting fixes.
+ (fold_offsets_1, pass_fold_mem_offsets::execute): Comment formatting
+ fixes.
+
+2023-11-29 Xi Ruoyao <xry111@xry111.site>
+
+ * config/loongarch/simd.md (LSX_SCALAR_FRINT): New int iterator.
+ (VLSX_FOR_FMODE): New mode attribute.
+ (<simd_for_scalar_frint_pattern><mode>2): New expander,
+ expanding to vreplvei.{w/d} + frint{rp/rz/rm/rne}.{s.d}.
+
+2023-11-29 Xi Ruoyao <xry111@xry111.site>
+
+ * config/loongarch/loongarch.md (lrint_allow_inexact): Remove.
+ (<lrint_pattern><ANYF:mode><ANYFI:mode>2): Check if <LRINT>
+ == UNSPEC_FTINT instead of <lrint_allow_inexact>.
+
+2023-11-29 Xi Ruoyao <xry111@xry111.site>
+
+ * config/loongarch/lsx.md (bitimm): Move to ...
+ (UNSPEC_LSX_VROTR): Remove.
+ (lsx_vrotr_<lsxfmt>): Remove.
+ (lsx_vrotri_<lsxfmt>): Remove.
+ * config/loongarch/lasx.md (UNSPEC_LASX_XVROTR): Remove.
+ (lsx_vrotr_<lsxfmt>): Remove.
+ (lsx_vrotri_<lsxfmt>): Remove.
+ * config/loongarch/simd.md (bitimm): ... here. Expand it to
+ cover LASX modes.
+ (vrotr<mode>3): New define_insn.
+ (vrotri<mode>3): New define_insn.
+ * config/loongarch/loongarch-builtins.cc:
+ (CODE_FOR_lsx_vrotr_b): Use standard pattern name.
+ (CODE_FOR_lsx_vrotr_h): Likewise.
+ (CODE_FOR_lsx_vrotr_w): Likewise.
+ (CODE_FOR_lsx_vrotr_d): Likewise.
+ (CODE_FOR_lasx_xvrotr_b): Likewise.
+ (CODE_FOR_lasx_xvrotr_h): Likewise.
+ (CODE_FOR_lasx_xvrotr_w): Likewise.
+ (CODE_FOR_lasx_xvrotr_d): Likewise.
+ (CODE_FOR_lsx_vrotri_b): Define to standard pattern name.
+ (CODE_FOR_lsx_vrotri_h): Likewise.
+ (CODE_FOR_lsx_vrotri_w): Likewise.
+ (CODE_FOR_lsx_vrotri_d): Likewise.
+ (CODE_FOR_lasx_xvrotri_b): Likewise.
+ (CODE_FOR_lasx_xvrotri_h): Likewise.
+ (CODE_FOR_lasx_xvrotri_w): Likewise.
+ (CODE_FOR_lasx_xvrotri_d): Likewise.
+
+2023-11-29 Xi Ruoyao <xry111@xry111.site>
+
+ * config/loongarch/simd.md (muh): New code attribute mapping
+ any_extend to smul_highpart or umul_highpart.
+ (<su>mul<mode>3_highpart): New define_insn.
+ * config/loongarch/lsx.md (UNSPEC_LSX_VMUH_S): Remove.
+ (UNSPEC_LSX_VMUH_U): Remove.
+ (lsx_vmuh_s_<lsxfmt>): Remove.
+ (lsx_vmuh_u_<lsxfmt>): Remove.
+ * config/loongarch/lasx.md (UNSPEC_LASX_XVMUH_S): Remove.
+ (UNSPEC_LASX_XVMUH_U): Remove.
+ (lasx_xvmuh_s_<lasxfmt>): Remove.
+ (lasx_xvmuh_u_<lasxfmt>): Remove.
+ * config/loongarch/loongarch-builtins.cc (CODE_FOR_lsx_vmuh_b):
+ Redefine to standard pattern name.
+ (CODE_FOR_lsx_vmuh_h): Likewise.
+ (CODE_FOR_lsx_vmuh_w): Likewise.
+ (CODE_FOR_lsx_vmuh_d): Likewise.
+ (CODE_FOR_lsx_vmuh_bu): Likewise.
+ (CODE_FOR_lsx_vmuh_hu): Likewise.
+ (CODE_FOR_lsx_vmuh_wu): Likewise.
+ (CODE_FOR_lsx_vmuh_du): Likewise.
+ (CODE_FOR_lasx_xvmuh_b): Likewise.
+ (CODE_FOR_lasx_xvmuh_h): Likewise.
+ (CODE_FOR_lasx_xvmuh_w): Likewise.
+ (CODE_FOR_lasx_xvmuh_d): Likewise.
+ (CODE_FOR_lasx_xvmuh_bu): Likewise.
+ (CODE_FOR_lasx_xvmuh_hu): Likewise.
+ (CODE_FOR_lasx_xvmuh_wu): Likewise.
+ (CODE_FOR_lasx_xvmuh_du): Likewise.
+
+2023-11-29 Xi Ruoyao <xry111@xry111.site>
+
+ PR target/112578
+ * config/loongarch/lsx.md (UNSPEC_LSX_VFTINT_S,
+ UNSPEC_LSX_VFTINTRNE, UNSPEC_LSX_VFTINTRP,
+ UNSPEC_LSX_VFTINTRM, UNSPEC_LSX_VFRINTRNE_S,
+ UNSPEC_LSX_VFRINTRNE_D, UNSPEC_LSX_VFRINTRZ_S,
+ UNSPEC_LSX_VFRINTRZ_D, UNSPEC_LSX_VFRINTRP_S,
+ UNSPEC_LSX_VFRINTRP_D, UNSPEC_LSX_VFRINTRM_S,
+ UNSPEC_LSX_VFRINTRM_D): Remove.
+ (ILSX, FLSX): Move into ...
+ (VIMODE): Move into ...
+ (FRINT_S, FRINT_D): Remove.
+ (frint_pattern_s, frint_pattern_d, frint_suffix): Remove.
+ (lsx_vfrint_<flsxfmt>, lsx_vftint_s_<ilsxfmt>_<flsxfmt>,
+ lsx_vftintrne_w_s, lsx_vftintrne_l_d, lsx_vftintrp_w_s,
+ lsx_vftintrp_l_d, lsx_vftintrm_w_s, lsx_vftintrm_l_d,
+ lsx_vfrintrne_s, lsx_vfrintrne_d, lsx_vfrintrz_s,
+ lsx_vfrintrz_d, lsx_vfrintrp_s, lsx_vfrintrp_d,
+ lsx_vfrintrm_s, lsx_vfrintrm_d,
+ <FRINT_S:frint_pattern_s>v4sf2,
+ <FRINT_D:frint_pattern_d>v2df2, round<mode>2,
+ fix_trunc<mode>2): Remove.
+ * config/loongarch/lasx.md: Likewise.
+ * config/loongarch/simd.md: New file.
+ (ILSX, ILASX, FLSX, FLASX, VIMODE): ... here.
+ (IVEC, FVEC): New mode iterators.
+ (VIMODE): ... here. Extend it to work for all LSX/LASX vector
+ modes.
+ (x, wu, simd_isa, WVEC, vimode, simdfmt, simdifmt_for_f,
+ elebits): New mode attributes.
+ (UNSPEC_SIMD_FRINTRP, UNSPEC_SIMD_FRINTRZ, UNSPEC_SIMD_FRINT,
+ UNSPEC_SIMD_FRINTRM, UNSPEC_SIMD_FRINTRNE): New unspecs.
+ (SIMD_FRINT): New int iterator.
+ (simd_frint_rounding, simd_frint_pattern): New int attributes.
+ (<simd_isa>_<x>vfrint<simd_frint_rounding>_<simdfmt>): New
+ define_insn template for frint instructions.
+ (<simd_isa>_<x>vftint<simd_frint_rounding>_<simdifmt_for_f>_<simdfmt>):
+ Likewise, but for ftint instructions.
+ (<simd_frint_pattern><mode>2): New define_expand with
+ flag_fp_int_builtin_inexact checked.
+ (l<simd_frint_pattern><mode><vimode>2): Likewise.
+ (ftrunc<mode>2): New define_expand. It does not require
+ flag_fp_int_builtin_inexact.
+ (fix_trunc<mode><vimode>2): New define_insn_and_split. It does
+ not require flag_fp_int_builtin_inexact.
+ (include): Add lsx.md and lasx.md.
+ * config/loongarch/loongarch.md (include): Include simd.md,
+ instead of including lsx.md and lasx.md directly.
+ * config/loongarch/loongarch-builtins.cc
+ (CODE_FOR_lsx_vftint_w_s, CODE_FOR_lsx_vftint_l_d,
+ CODE_FOR_lasx_xvftint_w_s, CODE_FOR_lasx_xvftint_l_d):
+ Remove.
+
+2023-11-29 Alexandre Oliva <oliva@adacore.com>
+
+ * doc/extend.texi (hardbool): New type attribute.
+ * doc/invoke.texi (-ftrivial-auto-var-init): Document
+ representation vs values.
+
+2023-11-29 Alexandre Oliva <oliva@adacore.com>
+
+ * expr.cc (emit_block_move_hints): Take ctz of len. Obey
+ -finline-stringops. Use oriented or sized loop.
+ (emit_block_move): Take ctz of len, and pass it on.
+ (emit_block_move_via_sized_loop): New.
+ (emit_block_move_via_oriented_loop): New.
+ (emit_block_move_via_loop): Take incr. Move an incr-sized
+ block per iteration.
+ (emit_block_cmp_via_cmpmem): Take ctz of len. Obey
+ -finline-stringops.
+ (emit_block_cmp_via_loop): New.
+ * expr.h (emit_block_move): Add ctz of len defaulting to zero.
+ (emit_block_move_hints): Likewise.
+ (emit_block_cmp_hints): Likewise.
+ * builtins.cc (expand_builtin_memory_copy_args): Pass ctz of
+ len to emit_block_move_hints.
+ (try_store_by_multiple_pieces): Support starting with a loop.
+ (expand_builtin_memcmp): Pass ctz of len to
+ emit_block_cmp_hints.
+ (expand_builtin): Allow inline expansion of memset, memcpy,
+ memmove and memcmp if requested.
+ * common.opt (finline-stringops): New.
+ (ilsop_fn): New enum.
+ * flag-types.h (enum ilsop_fn): New.
+ * doc/invoke.texi (-finline-stringops): Add.
+
+2023-11-29 Pan Li <pan2.li@intel.com>
+
+ PR target/112743
+ * config/riscv/riscv-string.cc (expand_block_move): Add
+ precondition check for exact_div.
+
+2023-11-28 Roger Sayle <roger@nextmovesoftware.com>
+
+ * config/arc/arc.md: Make output template whitespace consistent.
+
+2023-11-28 Jose E. Marchesi <jose.marchesi@oracle.com>
+
+ * varasm.cc (assemble_external_libcall): Refer in assert only ifdef
+ ASM_OUTPUT_EXTERNAL.
+
+2023-11-28 Andrew Pinski <quic_apinski@quicinc.com>
+
+ PR tree-optimization/112738
+ * match.pd (`(nop_convert)-(convert)a`): Reject
+ when the outer type is boolean.
+
+2023-11-28 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/112732
+ * tree.cc (build_opaque_vector_type): Reset TYPE_ALIAS_SET
+ of the newly built type.
+
+2023-11-28 Uros Bizjak <ubizjak@gmail.com>
+
+ PR target/112494
+ * config/i386/i386.md (cmpstrnqi_1): Set FLAGS_REG to its previous
+ value when operand 2 equals zero.
+ (*cmpstrnqi_1): Ditto.
+ (*cmpstrnqi_1 peephole2): Ditto.
+
+2023-11-28 Cupertino Miranda <cupertino.miranda@oracle.com>
+
+ Revert:
+ 2023-11-28 Cupertino Miranda <cupertino.miranda@oracle.com>
+
+ * config/bpf/bpf.cc (bpf_output_call): Report error in case the
+ function call is for a builtin.
+ (bpf_external_libcall): Added target hook to detect and report
+ error when other external calls that are not builtins.
+
+2023-11-28 Jose E. Marchesi <jose.marchesi@oracle.com>
+
+ PR target/109253
+ * varasm.cc (pending_libcall_symbols): New variable.
+ (process_pending_assemble_externals): Process
+ pending_libcall_symbols.
+ (assemble_external_libcall): Defer emitting external libcall
+ symbols to process_pending_assemble_externals.
+
+2023-11-28 Cupertino Miranda <cupertino.miranda@oracle.com>
+
+ * btfout.cc (btf_calc_num_vbytes): Fixed logic for enum64.
+ (btf_asm_enum_const): Corrected logic for enum64 and smaller
+ than 4 bytes values.
+
+2023-11-28 Cupertino Miranda <cupertino.miranda@oracle.com>
+
+ * config/bpf/bpf.cc (bpf_output_call): Report error in case the
+ function call is for a builtin.
+ (bpf_external_libcall): Added target hook to detect and report
+ error when other external calls that are not builtins.
+
+2023-11-28 Cupertino Miranda <cupertino.miranda@oracle.com>
+
+ * config/bpf/bpf.cc (bpf_use_by_pieces_infrastructure_p): Added
+ function to bypass default behaviour.
+ * config/bpf/bpf.h (COMPARE_MAX_PIECES): Defined to 1024 bytes.
+
+2023-11-28 Cupertino Miranda <cupertino.miranda@oracle.com>
+
+ * config/bpf/core-builtins.cc (core_mark_as_access_index):
+ Corrected check.
+
+2023-11-28 Cupertino Miranda <cupertino.miranda@oracle.com>
+
+ * config/bpf/core-builtins.cc
+ (bpf_resolve_overloaded_core_builtin): Removed call.
+ (execute_lower_bpf_core): Added all to remove_parser_plugin.
+
+2023-11-28 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112694
+ * config/riscv/riscv-v.cc (expand_vec_perm_const): Disallow poly size (1, 1) VLA SLP.
+
+2023-11-28 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/112719
+ * match.pd (parity(X)^parity(Y) -> parity(X^Y)): Handle case of
+ mismatched types.
+ * gimple-match-exports.cc (build_call_internal): Add special-case for
+ bit query ifns on large/huge BITINT_TYPE before bitint lowering.
+
+2023-11-28 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/112719
+ * match.pd (popcount (X) + popcount (Y) -> POPCOUNT (X | Y)): Deal
+ with argument types with different precisions.
+
+2023-11-28 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/109077
+ * Makefile.in (PLUGIN_HEADERS): Add analyzer headers.
+ (install-plugin): Keep the directory structure for files in
+ "analyzer".
+
+2023-11-28 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112713
+ * config/riscv/riscv-vsetvl.cc (pre_vsetvl::compute_lcm_local_properties): Fix regression.
+
+2023-11-28 David Malcolm <dmalcolm@redhat.com>
+
+ * diagnostic-show-locus.cc (layout::maybe_add_location_range):
+ Don't print annotation lines for ranges when there's no column
+ info.
+ (selftest::test_one_liner_no_column): New.
+ (selftest::test_diagnostic_show_locus_one_liner): Call it.
+
+2023-11-28 David Malcolm <dmalcolm@redhat.com>
+
+ * diagnostic.cc (diagnostic_get_location_text): Convert to...
+ (diagnostic_context::get_location_text): ...this, and convert
+ return type from char * to label_text.
+ (diagnostic_build_prefix): Update for above change.
+ (default_diagnostic_start_span_fn): Likewise.
+ (selftest::assert_location_text): Likewise.
+ * diagnostic.h (diagnostic_context::get_location_text): New decl.
+
+2023-11-27 Andrew Pinski <quic_apinski@quicinc.com>
+
+ * config/aarch64/aarch64.cc (aarch64_if_then_else_costs):
+ Handle csinv/csinc case of 1/-1.
+
+2023-11-27 Prathamesh Kulkarni <prathamesh.kulkarni@linaro.org>
+ Richard Sandiford <richard.sandiford@arm.com>
+
+ PR middle-end/111754
+ * fold-const.cc (fold_vec_perm_cst): Set result's encoding to sel's
+ encoding, and set res_nelts_per_pattern to 2 if sel contains stepped
+ sequence but input vectors do not.
+ (test_nunits_min_2): New test Case 8.
+ (test_nunits_min_4): New tests Case 8 and Case 9.
+
+2023-11-27 Szabolcs Nagy <szabolcs.nagy@arm.com>
+
+ * config/aarch64/aarch64.cc (aarch64_needs_frame_chain): Do not
+ force frame chain for eh_return.
+
+2023-11-27 Szabolcs Nagy <szabolcs.nagy@arm.com>
+
+ * config/aarch64/aarch64-protos.h (aarch64_eh_return_handler_rtx):
+ Remove.
+ * config/aarch64/aarch64.cc (aarch64_return_address_signing_enabled):
+ Sign return address even in functions with eh_return.
+ (aarch64_expand_epilogue): Conditionally return with br or ret.
+ (aarch64_eh_return_handler_rtx): Remove.
+ * config/aarch64/aarch64.h (EH_RETURN_TAKEN_RTX): Define.
+ (EH_RETURN_STACKADJ_RTX): Change to R5.
+ (EH_RETURN_HANDLER_RTX): Change to R6.
+ * df-scan.cc: Handle EH_RETURN_TAKEN_RTX.
+ * doc/tm.texi: Regenerate.
+ * doc/tm.texi.in: Document EH_RETURN_TAKEN_RTX.
+ * except.cc (expand_eh_return): Handle EH_RETURN_TAKEN_RTX.
+
+2023-11-27 Thomas Schwinge <thomas@codesourcery.com>
+
+ * config.gcc <amdgcn-*-amdhsa> (extra_gcc_objs): Don't set.
+ * config/gcn/driver-gcn.cc: Remove.
+ * config/gcn/gcn-hsa.h (ASM_SPEC, EXTRA_SPEC_FUNCTIONS): Remove
+ 'last_arg' spec function.
+ * config/gcn/t-gcn-hsa (driver-gcn.o): Remove.
+
+2023-11-27 Thomas Schwinge <thomas@codesourcery.com>
+
+ PR target/112669
+ * config/gcn/gcn.opt (march=, mtune=): Tag as 'Negative' of
+ themselves.
+
+2023-11-27 Samuel Thibault <samuel.thibault@gnu.org>
+
+ * config/i386/gnu.h: Use PIE_SPEC, add static-pie case.
+ * config/i386/gnu64.h: Use PIE_SPEC, add static-pie case.
+
+2023-11-27 Samuel Thibault <samuel.thibault@gnu.org>
+
+ * config/i386/t-gnu64: New file.
+ * config.gcc [x86_64-*-gnu*]: Add i386/t-gnu64 to
+ tmake_file.
+
+2023-11-27 Richard Sandiford <richard.sandiford@arm.com>
+
+ PR target/106326
+ * config/aarch64/aarch64-sve-builtins.h (is_ptrue): Declare.
+ * config/aarch64/aarch64-sve-builtins.cc (is_ptrue): New function.
+ (gimple_folder::redirect_pred_x): Likewise.
+ (gimple_folder::fold): Use it.
+
+2023-11-27 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64-sve-builtins.h (vector_cst_all_same): Declare.
+ * config/aarch64/aarch64-sve-builtins.cc (vector_cst_all_same): New
+ function, a generalized replacement of...
+ * config/aarch64/aarch64-sve-builtins-base.cc
+ (svlast_impl::vect_all_same): ...this.
+ (svlast_impl::fold): Update accordingly.
+
+2023-11-27 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/112653
+ * gimple-ssa.h (gimple_df): Add escaped_return solution.
+ * tree-ssa.cc (init_tree_ssa): Reset it.
+ (delete_tree_ssa): Likewise.
+ * tree-ssa-structalias.cc (escaped_return_id): New.
+ (find_func_aliases): Handle non-IPA return stmts by
+ adding to ESCAPED_RETURN.
+ (set_uids_in_ptset): Adjust HEAP escaping to also cover
+ escapes through return.
+ (init_base_vars): Initialize ESCAPED_RETURN.
+ (compute_points_to_sets): Replace ESCAPED post-processing
+ with recording the ESCAPED_RETURN solution.
+ * tree-ssa-alias.cc (ref_may_alias_global_p_1): Check
+ the ESCAPED_RETURN solution.
+ (dump_alias_info): Dump it.
+ * cfgexpand.cc (update_alias_info_with_stack_vars): Update it.
+ * ipa-icf.cc (sem_item_optimizer::fixup_points_to_sets):
+ Likewise.
+ * tree-inline.cc (expand_call_inline): Reset it.
+ * tree-parloops.cc (parallelize_loops): Likewise.
+ * tree-sra.cc (maybe_add_sra_candidate): Check it.
+
+2023-11-27 Richard Biener <rguenther@suse.de>
+ Richard Sandiford <richard.sandiford@arm.com>
+
+ PR tree-optimization/112661
+ * tree-vect-slp.cc (vect_get_and_check_slp_defs): Defer duplicate-and-
+ interleave test to...
+ (vect_build_slp_tree_2): ...here, once we have all the operands.
+ Skip the test for uniform vectors.
+ (vect_create_constant_vectors): Detect uniform vectors. Avoid
+ redundant conversions in that case. Use gimple_build_vector_from_val
+ to build the vector.
+
+2023-11-27 Richard Sandiford <richard.sandiford@arm.com>
+
+ * attribs.cc (excl_hash_traits): Delete.
+ (test_attribute_exclusions): Use pair_hash and nofree_string_hash
+ instead.
+
+2023-11-27 Andrew Stubbs <ams@codesourcery.com>
+
+ * config/gcn/gcn.cc (gcn_vectorize_vec_perm_const): Disallow TImode.
+
+2023-11-27 Stefan Schulze Frielinghaus <stefansf@linux.ibm.com>
+
+ * config/s390/s390-builtin-types.def (BT_FN_UV8HI_UV8HI_UINT):
+ Add missing builtin type.
+
+2023-11-27 Stefan Schulze Frielinghaus <stefansf@linux.ibm.com>
+
+ * config/s390/s390-builtin-types.def: Remove types.
+ * config/s390/s390-builtins.def (O_U64): Remove 64-bit literal support.
+ Don't restrict s390_vec_rli and s390_verll[bhfg] to immediates.
+ * config/s390/s390.cc (s390_const_operand_ok): Remove 64-bit
+ literal support.
+
+2023-11-27 Alex Coplan <alex.coplan@arm.com>
+ Iain Sandoe <iain@sandoe.co.uk>
+
+ PR c++/60512
+ * doc/cpp.texi: Document __has_{feature,extension}.
+
+2023-11-27 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/112706
+ * match.pd (ptr + o ==/!=/- ptr + o'): New patterns.
+
+2023-11-27 Stefan Schulze Frielinghaus <stefansf@linux.ibm.com>
+
+ * config/s390/s390-builtin-types.def: Add/remove types.
+ * config/s390/s390-builtins.def
+ (s390_vclfnhs,s390_vclfnls,s390_vcrnfs,s390_vcfn,s390_vcnf):
+ Replace type V8HI with UV8HI.
+
+2023-11-27 Stefan Schulze Frielinghaus <stefansf@linux.ibm.com>
+
+ * config/s390/s390-builtins.def
+ (s390_vcefb,s390_vcdgb,s390_vcelfb,s390_vcdlgb,s390_vcfeb,s390_vcgdb,
+ s390_vclfeb,s390_vclgdb): Remove flags for non-existing operands
+ 2 and 3.
+
+2023-11-27 Stefan Schulze Frielinghaus <stefansf@linux.ibm.com>
+
+ * config/s390/s390.md (*cmphi_ccu): For immediate operand 1 make
+ use of constraint n instead of D and chop of high bits in the
+ output template.
+
+2023-11-27 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/112300
+ * config.gcc (mips*-sde-elf*): Append to tm_defines rather than
+ overwriting them.
+
+2023-11-27 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * config/riscv/autovec.md
+ (mask_len_gather_load<RATIO1:mode><RATIO1:mode>):
+ Remove gather_scatter_valid_offset_mode_p.
+ (mask_len_gather_load<mode><mode>): Ditto.
+ (mask_len_scatter_store<RATIO1:mode><RATIO1:mode>): Ditto.
+ (mask_len_scatter_store<mode><mode>): Ditto.
+ * config/riscv/predicates.md (const_1_or_8_operand): New predicate.
+ (vector_gs_scale_operand_64): Remove.
+ * config/riscv/riscv-protos.h (gather_scatter_valid_offset_mode_p): Remove.
+ * config/riscv/riscv-v.cc (expand_gather_scatter): Refine code.
+ (gather_scatter_valid_offset_mode_p): Remove.
+ * config/riscv/vector-iterators.md: Fix iterator bugs.
+
+2023-11-27 Tsukasa OI <research_trasio@irq.a4lg.com>
+
+ * common/config/riscv/riscv-common.cc
+ (riscv_ext_version_table): Set version to ratified 2.0.
+ (riscv_subset_list::parse_std_ext): Allow RV64E.
+ * config.gcc: Parse base ISA 'rv64e' and ABI 'lp64e'.
+ * config/riscv/arch-canonicalize: Parse base ISA 'rv64e'.
+ * config/riscv/riscv-c.cc (riscv_cpu_cpp_builtins):
+ Define different macro per XLEN. Add handling for ABI_LP64E.
+ * config/riscv/riscv-d.cc (riscv_d_handle_target_float_abi):
+ Add handling for ABI_LP64E.
+ * config/riscv/riscv-opts.h (enum riscv_abi_type): Add ABI_LP64E.
+ * config/riscv/riscv.cc (riscv_option_override): Enhance error
+ handling to support RV64E and LP64E.
+ (riscv_conditional_register_usage): Change "RV32E" in a comment
+ to "RV32E/RV64E".
+ * config/riscv/riscv.h
+ (UNITS_PER_FP_ARG): Add handling for ABI_LP64E.
+ (STACK_BOUNDARY): Ditto.
+ (ABI_STACK_BOUNDARY): Ditto.
+ (MAX_ARGS_IN_REGISTERS): Ditto.
+ (ABI_SPEC): Add support for "lp64e".
+ * config/riscv/riscv.opt: Parse -mabi=lp64e as ABI_LP64E.
+ * doc/invoke.texi: Add documentation of the LP64E ABI.
+
+2023-11-27 Jose E. Marchesi <jose.marchesi@oracle.com>
+
+ * config/bpf/bpf-helpers.h: Remove.
+ * config.gcc: Adapt accordingly.
+
+2023-11-27 Guo Jie <guojie@loongson.cn>
+
+ * config/loongarch/loongarch.cc (loongarch_split_plus_constant):
+ Avoid left shift of negative value -0x8000.
+
+2023-11-27 Guo Jie <guojie@loongson.cn>
+
+ * config/loongarch/loongarch.cc
+ (enum loongarch_load_imm_method): Add new method.
+ (loongarch_build_integer): Add relevant implementations for
+ new method.
+ (loongarch_move_integer): Ditto.
+
2023-11-26 Alexander Monakov <amonakov@ispras.ru>
* sort.cc: Use 'sorting networks' in comments.
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index 2d1ad39..76ff872 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20231127
+20231206
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 753f2f3..68410a8 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -1557,6 +1557,7 @@ OBJS = \
ipa-reference.o \
ipa-ref.o \
ipa-utils.o \
+ ipa-strub.o \
ipa.o \
ira.o \
ira-build.o \
@@ -2816,7 +2817,8 @@ GTFILES = $(CPPLIB_H) $(srcdir)/input.h $(srcdir)/coretypes.h \
$(srcdir)/symtab-thunks.h $(srcdir)/symtab-thunks.cc \
$(srcdir)/symtab-clones.h \
$(srcdir)/reload.h $(srcdir)/caller-save.cc $(srcdir)/symtab.cc \
- $(srcdir)/alias.cc $(srcdir)/bitmap.cc $(srcdir)/cselib.cc $(srcdir)/cgraph.cc \
+ $(srcdir)/alias.cc $(srcdir)/attribs.cc \
+ $(srcdir)/bitmap.cc $(srcdir)/cselib.cc $(srcdir)/cgraph.cc \
$(srcdir)/ipa-prop.cc $(srcdir)/ipa-cp.cc $(srcdir)/ipa-utils.h \
$(srcdir)/ipa-param-manipulation.h $(srcdir)/ipa-sra.cc \
$(srcdir)/ipa-modref.h $(srcdir)/ipa-modref.cc \
@@ -2878,6 +2880,7 @@ GTFILES = $(CPPLIB_H) $(srcdir)/input.h $(srcdir)/coretypes.h \
$(srcdir)/sanopt.cc \
$(srcdir)/sancov.cc \
$(srcdir)/ipa-devirt.cc \
+ $(srcdir)/ipa-strub.cc \
$(srcdir)/internal-fn.h \
$(srcdir)/calls.cc \
$(srcdir)/omp-general.h \
@@ -3821,7 +3824,7 @@ PLUGIN_HEADERS = $(TREE_H) $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
hash-set.h dominance.h cfg.h cfgrtl.h cfganal.h cfgbuild.h cfgcleanup.h \
lcm.h cfgloopmanip.h file-prefix-map.h builtins.def $(INSN_ATTR_H) \
pass-instances.def params.list $(srcdir)/../include/gomp-constants.h \
- $(EXPR_H)
+ $(EXPR_H) $(srcdir)/analyzer/*.h
# generate the 'build fragment' b-header-vars
s-header-vars: Makefile
@@ -3844,8 +3847,9 @@ install-gengtype: installdirs gengtype$(exeext) gtype.state
# Install the headers needed to build a plugin.
install-plugin: installdirs lang.install-plugin s-header-vars install-gengtype
-# We keep the directory structure for files in config, common/config or
-# c-family and .def files. All other files are flattened to a single directory.
+# We keep the directory structure for files in analyzer, config, common/config
+# or c-family and .def files.
+# All other files are flattened to a single directory.
$(mkinstalldirs) $(DESTDIR)$(plugin_includedir)
headers=`echo $(sort $(PLUGIN_HEADERS)) $$(cd $(srcdir); echo *.h *.def) | tr ' ' '\012' | sort -u`; \
srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`; \
@@ -3857,7 +3861,8 @@ install-plugin: installdirs lang.install-plugin s-header-vars install-gengtype
else continue; \
fi; \
case $$path in \
- "$(srcdir)"/config/* | "$(srcdir)"/common/config/* \
+ "$(srcdir)"/analyzer/* \
+ | "$(srcdir)"/config/* | "$(srcdir)"/common/config/* \
| "$(srcdir)"/c-family/* | "$(srcdir)"/*.def ) \
base=`echo "$$path" | sed -e "s|$$srcdirstrip/||"`;; \
*) base=`basename $$path` ;; \
diff --git a/gcc/ada/ChangeLog b/gcc/ada/ChangeLog
index 6fb7510..48a5bb7 100644
--- a/gcc/ada/ChangeLog
+++ b/gcc/ada/ChangeLog
@@ -1,3 +1,284 @@
+2023-12-06 Alexandre Oliva <oliva@adacore.com>
+
+ * gcc-interface/trans.cc: Include ipa-strub.h.
+ (gigi): Make internal decls for targets of compiler-generated
+ calls strub-callable too.
+ (build_raise_check): Likewise.
+ * gcc-interface/utils.cc: Include ipa-strub.h.
+ (handle_strub_attribute): New.
+ (gnat_internal_attribute_table): Add strub.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc-interface/utils.cc (gnat_internal_attribute_table): Add extra
+ braces to work around PR 16333 in older compilers.
+
+2023-12-05 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * adaint.c: Include <signal.h>.
+ * expect.c: Include <string.h>.
+
+2023-12-02 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc-interface/gigi.h (gnat_internal_attribute_table): Change
+ type to scoped_attribute_specs.
+ * gcc-interface/utils.cc (gnat_internal_attribute_table): Likewise,
+ using...
+ (gnat_internal_attributes): ...this as the underlying array.
+ * gcc-interface/misc.cc (gnat_attribute_table): New global.
+ (LANG_HOOKS_ATTRIBUTE_TABLE): Use it.
+
+2023-12-01 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * adaint.c [__APPLE__]: Include <signal.h>, <sys/time.h>.
+ * terminals.c [!_WIN32]: Include <signal.h>.
+ [__APPLE__]: Include <util.h>.
+ Fix typos.
+
+2023-11-30 Eric Botcazou <ebotcazou@adacore.com>
+
+ * checks.ads (Apply_Predicate_Check): Add Deref boolean parameter.
+ * checks.adb (Apply_Predicate_Check): Revert latest change. Use
+ Loc local variable to hold the source location. Use a common code
+ path for the generic processing and make a dereference if Deref is
+ True.
+ * exp_ch4.adb (Expand_Allocator_Expression): Compute Aggr_In_Place
+ earlier. If it is true, do not call Apply_Predicate_Check on the
+ expression on entry but on the temporary on exit with a
+ dereference.
+ * sem_res.adb (Resolve_Actuals): Add explicit parameter
+ association in call to Apply_Predicate_Check.
+
+2023-11-30 Steve Baird <baird@adacore.com>
+
+ * exp_put_image.adb (Put_Image_Enabled): Return True in more
+ cases. In particular, when testing to see if a type occurs in a
+ predefined unit, test the type's code unit
+ (obtained by calling Get_Code_Unit). In the case of type within a
+ user-defined instance of a predefined generic, Is_Predefined_Unit
+ will return True for the type and False for the type's code unit.
+
+2023-11-30 Yannick Moy <moy@adacore.com>
+
+ * contracts.adb (Analyze_Entry_Or_Subprogram_Body_Contract):
+ Remove checking on volatility. Remove handling of SPARK_Mode, not
+ needed anymore.
+ (Analyze_Entry_Or_Subprogram_Contract): Remove checking on
+ volatility.
+ (Check_Type_Or_Object_External_Properties): Same.
+ (Analyze_Object_Contract): Same.
+ * freeze.adb (Freeze_Record_Type): Same. Also remove checking on
+ synchronized types and ghost types.
+ * sem_ch12.adb (Instantiate_Object): Remove checking on
+ volatility.
+ (Instantiate_Type): Same.
+ * sem_ch3.adb (Access_Type_Declaration): Same.
+ (Derived_Type_Declaration): Remove checking related to untagged
+ partial view.
+ (Process_Discriminants): Remove checking on volatility.
+ * sem_ch5.adb (Analyze_Loop_Parameter_Specification): Same.
+ * sem_ch6.adb (Analyze_Procedure_Call): Fix use of SPARK_Mode
+ where GNATprove_Mode was intended.
+ * sem_disp.adb (Inherited_Subprograms): Protect against Empty
+ node.
+ * sem_prag.adb (Analyze_Global_In_Decl_Part): Remove checking on
+ volatility.
+ (Analyze_Pragma): Same.
+ * sem_res.adb (Flag_Effectively_Volatile_Objects): Remove.
+ (Resolve_Actuals): Remove checking on volatility.
+ (Resolve_Entity_Name): Same.
+ * sem_util.adb (Check_Nonvolatile_Function_Profile): Remove.
+ (Check_Volatility_Compatibility): Remove.
+ * sem_util.ads: Same.
+
+2023-11-30 Sheri Bernstein <bernstein@adacore.com>
+
+ * libgnat/i-cstrin.adb (Free): Rewrite code so there is only one
+ return, to remove Improper_Returns violation.
+ (Position_Of_Nul): Add pragma to exempt Improper_Returns
+ violation.
+ (To_Chars_Ptr): Likewise.
+ (Value): Likewise
+
+2023-11-30 Viljar Indus <indus@adacore.com>
+
+ * sem_prag.adb (Validate_Compile_Time_Warning_Errors): Avoid
+ checking compile time warnings and errors if backend has not been
+ activated.
+
+2023-11-30 Yannick Moy <moy@adacore.com>
+
+ * checks.adb, exp_aggr.adb, exp_ch4.ads, exp_ch5.adb,
+ exp_util.adb, exp_util.ads, inline.adb, sem_ch13.adb,
+ sem_ch6.adb, sem_ch8.adb, sem_prag.adb, sem_util.ads: Fix comments
+ and typos.
+
+2023-11-30 Javier Miranda <miranda@adacore.com>
+
+ * freeze.adb (Declared_In_Expanded_Body): New subprogram.
+ (In_Expanded_Body): Minor code cleanup.
+ (Freeze_Expression): Code cleanup plus factorize in a new function
+ the code that identifies entities declared in the body of expander
+ generated subprograms, since such case must be checked also for
+ other node kinds when climbing the tree to locate the place to
+ insert the freezing node.
+
+2023-11-30 Steve Baird <baird@adacore.com>
+
+ * sem_ch8.adb (Find_Direct_Name): In the case of a resolving a
+ name that occurs within an instantiation, add code to detect and
+ filter out unwanted candidate resolutions. The filtering is
+ performed via a call to Remove_Interp.
+
+2023-11-30 Steve Baird <baird@adacore.com>
+
+ * libgnarl/a-rttiev.ads: add a comment
+
+2023-11-30 Steve Baird <baird@adacore.com>
+
+ * sem_ch12.adb (Validate_Discriminated_Formal_Type): Replace
+ Entity_Id equality test with a call to Subtypes_Match. Distinct
+ subtypes which are statically matching should pass this test.
+ (Check_Discriminated_Formal): Replace Entity_Id equality test with
+ a call to Subtypes_Statically_Match (preceded by a check that the
+ preconditions for the call are satisfied).
+
+2023-11-30 Eric Botcazou <ebotcazou@adacore.com>
+
+ * exp_ch4.adb (Expand_Allocator_Expression): Add Special_Return
+ boolean constant to hold the value of For_Special_Return_Object
+ for the allocator and use it throughout the procedure.
+
+2023-11-30 Eric Botcazou <ebotcazou@adacore.com>
+
+ * checks.adb (Apply_Predicate_Check): Also deal specifically with
+ an expression that is a qualified aggregate in an allocator.
+
+2023-11-30 Steve Baird <baird@adacore.com>
+
+ * sem_ch4.adb (Constant_Indexing_OK): As a temporary stopgap,
+ return False in the case of an unanalyzed prefixed-view call.
+
+2023-11-28 Simon Wright <simon@pushface.org>
+
+ PR ada/111909
+ * adaint.c
+ (__gnat_get_file_names_case_sensitive): Split out the __APPLE__
+ check and remove the checks for __arm__, __arm64__. For Apple,
+ file names are by default case-insensitive unless TARGET_OS_IOS is
+ set.
+
+2023-11-28 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc-interface/decl.cc (annotate_value): Apply the same processing
+ for parameters as for variables.
+
+2023-11-28 Marc Poulhiès <poulhies@adacore.com>
+
+ * gcc-interface/utils2.cc (build_simple_component_ref): Add
+ comment on assertion.
+
+2023-11-28 Steve Baird <baird@adacore.com>
+
+ * exp_attr.adb (Expand_N_Attribute_Reference): In the case of a
+ Reduce attribute reference, fix bugs in initializing Accum_Typ.
+ The previous version was incorrect in the case where E1 refers to
+ the first of multiple possible overload resolution candidates and
+ that candidate does not turn out to be the right one. The previous
+ version also had code to compute Accum_Typ via a different method
+ if the initial computation turned out to yield a universal numeric
+ type. Delete that initial computation and use the second method in
+ all cases.
+
+2023-11-28 Gary Dismukes <dismukes@adacore.com>
+
+ * sem_aggr.adb (Add_Discriminant_Values): Remove this procedure.
+ (Propagate_Discriminants): Remove this procedure.
+ (Resolve_Record_Aggregate): Remove code (the Capture_Discriminants
+ block statement) related to propagating discriminants and
+ generating initializations for subcomponents of a
+ discriminant-dependent box-defaulted subcomponent of a nonprivate
+ record type with discriminants, and handle all top-level
+ components that have a non-null base init proc directly, by
+ calling Add_Association with "Is_Box_Present => True". Also,
+ combine that elsif clause with the immediately preceding elsif
+ clause, since they now both contain the same statement (calls to
+ Add_Association with the same actuals).
+
+2023-11-28 Bob Duff <duff@adacore.com>
+
+ * sem_util.adb (Check_Result_And_Post_State): Disable this when
+ we're in an instance. Misc cleanup.
+
+2023-11-28 Eric Botcazou <ebotcazou@adacore.com>
+
+ * exp_ch7.ads (Expand_Cleanup_Actions): Move declaration to the
+ Finalization Management section.
+ * exp_ch7.adb (Transient Scope Management): Move description down to
+ after that of the general finalization and make a few changes.
+ (Insert_Actions_In_Scope_Around): Call Process_Transients_In_Scope
+ only if cleanups are being handled.
+ (Process_Transients_In_Scope): Remove redundant test on Clean.
+ * exp_util.ads (Within_Case_Or_If_Expression): Adjust description.
+ * exp_util.adb (Within_Case_Or_If_Expression): Only return true if
+ within the dependent expressions of the conditional expressions.
+
+2023-11-28 Eric Botcazou <ebotcazou@adacore.com>
+
+ * doc/gnat_rm/the_implementation_of_standard_i_o.rst: Fix a couple
+ occurrences of incorrect quoting.
+ * gnat_rm.texi: Regenerate.
+
+2023-11-28 Eric Botcazou <ebotcazou@adacore.com>
+
+ * exp_ch6.adb (Build_Flag_For_Function): New function made up of the
+ code building the special flag for return object present...
+ (Expand_N_Extended_Return_Statement): ...in there. Replace the code
+ with a call to Build_Flag_For_Function. Add assertion for the flag.
+ (Expand_Non_Function_Return): For a nested return, if the return
+ object needs finalization actions, update the special flag.
+
+2023-11-28 Sebastian Poeplau <poeplau@adacore.com>
+
+ * einfo-utils.ads, einfo-utils.adb (Is_Address_Compatible_Type):
+ New function.
+
+2023-11-28 Gary Dismukes <dismukes@adacore.com>
+
+ * exp_aggr.adb (Expand_Container_Aggregate): Apply a conversion to the
+ size temp object passed as the second actual parameter on the call to
+ the New_Indexed_Subp function, to convert it to the index type of the
+ container type (taken from the first formal parameter of the function).
+
+2023-11-28 Eric Botcazou <ebotcazou@adacore.com>
+
+ * sem_attr.adb (Eval_Attribute): Do not proceed in a spec expression
+ for nonstatic representation attributes of a scalar subtype when the
+ subtype is not frozen.
+ * sem_ch3.adb (Analyze_Object_Declaration): Do not freeze the type
+ of the object in a spec expression.
+
+2023-11-28 Richard Kenner <kenner@adacore.com>
+
+ * exp_unst.adb (Note_Uplevel_Bound): Treat
+ N_Unchecked_Type_Conversion like N_Type_Conversion.
+
+2023-11-28 Yannick Moy <moy@adacore.com>
+
+ * libgnat/s-imgboo.adb: Remove with_clause now in spec file.
+ * libgnat/s-imgboo.ads: Remove dependency on System.Val_Bool.
+ (Image_Boolean): Replace call to Value_Boolean by passing value V
+ to updated ghost function Is_Boolean_Image_Ghost.
+ * libgnat/s-valboo.ads (Is_Boolean_Image_Ghost): Move to other
+ unit.
+ (Value_Boolean.): Update precondition.
+ * libgnat/s-valspe.ads (Is_Boolean_Image_Ghost): Move here. Add
+ new parameter for expected boolean value.
+
+2023-11-28 Tucker Taft <taft@adacore.com>
+
+ * sem_scil.adb: Handle discriminant specification.
+
2023-11-21 Eric Botcazou <ebotcazou@adacore.com>
* gcc-interface/trans.cc (Loop_Statement_to_gnu): Always use the
diff --git a/gcc/ada/adaint.c b/gcc/ada/adaint.c
index 4ab9565..61dc368 100644
--- a/gcc/ada/adaint.c
+++ b/gcc/ada/adaint.c
@@ -85,6 +85,9 @@
#if defined (__APPLE__)
#include <unistd.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <TargetConditionals.h>
#endif
#if defined (__hpux__)
@@ -240,6 +243,7 @@ UINT __gnat_current_ccs_encoding;
#define DIR_SEPARATOR '\\'
#else
+#include <signal.h>
#include <utime.h>
#endif
@@ -613,11 +617,18 @@ __gnat_get_file_names_case_sensitive (void)
else
{
/* By default, we suppose filesystems aren't case sensitive on
- Windows and Darwin (but they are on arm-darwin). */
-#if defined (WINNT) || defined (__DJGPP__) \
- || (defined (__APPLE__) && !(defined (__arm__) || defined (__arm64__)))
+ Windows or DOS. */
+#if defined (WINNT) || defined (__DJGPP__)
file_names_case_sensitive_cache = 0;
+#elif defined (__APPLE__)
+ /* By default, macOS volumes are case-insensitive, iOS
+ volumes are case-sensitive. */
+#if TARGET_OS_IOS
+ file_names_case_sensitive_cache = 1;
#else
+ file_names_case_sensitive_cache = 0;
+#endif
+#else /* Neither Windows nor Apple. */
file_names_case_sensitive_cache = 1;
#endif
}
diff --git a/gcc/ada/checks.adb b/gcc/ada/checks.adb
index 6525982..d59d44f 100644
--- a/gcc/ada/checks.adb
+++ b/gcc/ada/checks.adb
@@ -1664,7 +1664,7 @@ package body Checks is
end if;
-- If the expressions for the discriminants are identical
- -- and it is side-effect free (for now just an entity),
+ -- and it is side-effect-free (for now just an entity),
-- this may be a shared constraint, e.g. from a subtype
-- without a constraint introduced as a generic actual.
-- Examine other discriminants if any.
@@ -2720,15 +2720,20 @@ package body Checks is
---------------------------
procedure Apply_Predicate_Check
- (N : Node_Id;
- Typ : Entity_Id;
- Fun : Entity_Id := Empty)
+ (N : Node_Id;
+ Typ : Entity_Id;
+ Deref : Boolean := False;
+ Fun : Entity_Id := Empty)
is
- Par : Node_Id;
- S : Entity_Id;
+ Loc : constant Source_Ptr := Sloc (N);
+ Check_Disabled : constant Boolean :=
+ not Predicate_Enabled (Typ)
+ or else not Predicate_Check_In_Scope (N);
+
+ Expr : Node_Id;
+ Par : Node_Id;
+ S : Entity_Id;
- Check_Disabled : constant Boolean := not Predicate_Enabled (Typ)
- or else not Predicate_Check_In_Scope (N);
begin
S := Current_Scope;
while Present (S) and then not Is_Subprogram (S) loop
@@ -2757,7 +2762,7 @@ package body Checks is
if not Check_Disabled then
Insert_Action (N,
- Make_Raise_Storage_Error (Sloc (N),
+ Make_Raise_Storage_Error (Loc,
Reason => SE_Infinite_Recursion));
return;
end if;
@@ -2824,19 +2829,9 @@ package body Checks is
Par := Parent (Par);
end if;
- -- For an entity of the type, generate a call to the predicate
- -- function, unless its type is an actual subtype, which is not
- -- visible outside of the enclosing subprogram.
-
- if Is_Entity_Name (N)
- and then not Is_Actual_Subtype (Typ)
- then
- Insert_Action (N,
- Make_Predicate_Check
- (Typ, New_Occurrence_Of (Entity (N), Sloc (N))));
- return;
+ -- Try to avoid creating a temporary if the expression is an aggregate
- elsif Nkind (N) in N_Aggregate | N_Extension_Aggregate then
+ if Nkind (N) in N_Aggregate | N_Extension_Aggregate then
-- If the expression is an aggregate in an assignment, apply the
-- check to the LHS after the assignment, rather than create a
@@ -2871,21 +2866,36 @@ package body Checks is
then
Insert_Action_After (Par,
Make_Predicate_Check (Typ,
- New_Occurrence_Of (Defining_Identifier (Par), Sloc (N))));
+ New_Occurrence_Of (Defining_Identifier (Par), Loc)));
return;
end if;
end if;
end if;
- -- If the expression is not an entity it may have side effects,
- -- and the following call will create an object declaration for
- -- it. We disable checks during its analysis, to prevent an
- -- infinite recursion.
+ -- For an entity of the type, generate a call to the predicate
+ -- function, unless its type is an actual subtype, which is not
+ -- visible outside of the enclosing subprogram.
- Insert_Action (N,
- Make_Predicate_Check
- (Typ, Duplicate_Subexpr (N)), Suppress => All_Checks);
+ if Is_Entity_Name (N) and then not Is_Actual_Subtype (Typ) then
+ Expr := New_Occurrence_Of (Entity (N), Loc);
+
+ -- If the expression is not an entity, it may have side effects
+
+ else
+ Expr := Duplicate_Subexpr (N);
+ end if;
+
+ -- Make the dereference if requested
+
+ if Deref then
+ Expr := Make_Explicit_Dereference (Loc, Prefix => Expr);
+ end if;
+
+ -- Disable checks to prevent an infinite recursion
+
+ Insert_Action
+ (N, Make_Predicate_Check (Typ, Expr), Suppress => All_Checks);
end Apply_Predicate_Check;
-----------------------
diff --git a/gcc/ada/checks.ads b/gcc/ada/checks.ads
index 64f0809..8fd3802 100644
--- a/gcc/ada/checks.ads
+++ b/gcc/ada/checks.ads
@@ -256,13 +256,14 @@ package Checks is
-- results.
procedure Apply_Predicate_Check
- (N : Node_Id;
- Typ : Entity_Id;
- Fun : Entity_Id := Empty);
+ (N : Node_Id;
+ Typ : Entity_Id;
+ Deref : Boolean := False;
+ Fun : Entity_Id := Empty);
-- N is an expression to which a predicate check may need to be applied for
- -- Typ, if Typ has a predicate function. When N is an actual in a call, Fun
- -- is the function being called, which is used to generate a better warning
- -- if the call leads to an infinite recursion.
+ -- Typ if Typ has a predicate function, after dereference if Deref is True.
+ -- When N is an actual in a call, Fun is the function being called, which
+ -- is used to generate a warning if the call leads to infinite recursion.
procedure Apply_Type_Conversion_Checks (N : Node_Id);
-- N is an N_Type_Conversion node. A type conversion actually involves
diff --git a/gcc/ada/contracts.adb b/gcc/ada/contracts.adb
index b6e756f..fa0d59a 100644
--- a/gcc/ada/contracts.adb
+++ b/gcc/ada/contracts.adb
@@ -590,10 +590,6 @@ package body Contracts is
Items : constant Node_Id := Contract (Body_Id);
Spec_Id : constant Entity_Id := Unique_Defining_Entity (Body_Decl);
- Saved_SM : constant SPARK_Mode_Type := SPARK_Mode;
- Saved_SMP : constant Node_Id := SPARK_Mode_Pragma;
- -- Save the SPARK_Mode-related data to restore on exit
-
begin
-- When a subprogram body declaration is illegal, its defining entity is
-- left unanalyzed. There is nothing left to do in this case because the
@@ -628,39 +624,11 @@ package body Contracts is
Analyze_Entry_Or_Subprogram_Contract (Corresponding_Spec (Body_Decl));
end if;
- -- Due to the timing of contract analysis, delayed pragmas may be
- -- subject to the wrong SPARK_Mode, usually that of the enclosing
- -- context. To remedy this, restore the original SPARK_Mode of the
- -- related subprogram body.
-
- Set_SPARK_Mode (Body_Id);
-
-- Ensure that the contract cases or postconditions mention 'Result or
-- define a post-state.
Check_Result_And_Post_State (Body_Id);
- -- A stand-alone nonvolatile function body cannot have an effectively
- -- volatile formal parameter or return type (SPARK RM 7.1.3(9)). This
- -- check is relevant only when SPARK_Mode is on, as it is not a standard
- -- legality rule. The check is performed here because Volatile_Function
- -- is processed after the analysis of the related subprogram body. The
- -- check only applies to source subprograms and not to generated TSS
- -- subprograms.
-
- if SPARK_Mode = On
- and then Ekind (Body_Id) in E_Function | E_Generic_Function
- and then Comes_From_Source (Spec_Id)
- and then not Is_Volatile_Function (Body_Id)
- then
- Check_Nonvolatile_Function_Profile (Body_Id);
- end if;
-
- -- Restore the SPARK_Mode of the enclosing context after all delayed
- -- pragmas have been analyzed.
-
- Restore_SPARK_Mode (Saved_SM, Saved_SMP);
-
-- Capture all global references in a generic subprogram body now that
-- the contract has been analyzed.
@@ -865,20 +833,6 @@ package body Contracts is
Check_Result_And_Post_State (Subp_Id);
end if;
- -- A nonvolatile function cannot have an effectively volatile formal
- -- parameter or return type (SPARK RM 7.1.3(9)). This check is relevant
- -- only when SPARK_Mode is on, as it is not a standard legality rule.
- -- The check is performed here because pragma Volatile_Function is
- -- processed after the analysis of the related subprogram declaration.
-
- if SPARK_Mode = On
- and then Ekind (Subp_Id) in E_Function | E_Generic_Function
- and then Comes_From_Source (Subp_Id)
- and then not Is_Volatile_Function (Subp_Id)
- then
- Check_Nonvolatile_Function_Profile (Subp_Id);
- end if;
-
-- Restore the SPARK_Mode of the enclosing context after all delayed
-- pragmas have been analyzed.
@@ -902,19 +856,16 @@ package body Contracts is
(Type_Or_Obj_Id : Entity_Id)
is
Is_Type_Id : constant Boolean := Is_Type (Type_Or_Obj_Id);
- Decl_Kind : constant String :=
- (if Is_Type_Id then "type" else "object");
-- Local variables
- AR_Val : Boolean := False;
- AW_Val : Boolean := False;
- ER_Val : Boolean := False;
- EW_Val : Boolean := False;
- NC_Val : Boolean;
- Seen : Boolean := False;
- Prag : Node_Id;
- Obj_Typ : Entity_Id;
+ AR_Val : Boolean := False;
+ AW_Val : Boolean := False;
+ ER_Val : Boolean := False;
+ EW_Val : Boolean := False;
+ NC_Val : Boolean;
+ Seen : Boolean := False;
+ Prag : Node_Id;
-- Start of processing for Check_Type_Or_Object_External_Properties
@@ -922,8 +873,6 @@ package body Contracts is
-- Analyze all external properties
if Is_Type_Id then
- Obj_Typ := Type_Or_Obj_Id;
-
-- If the parent type of a derived type is volatile
-- then the derived type inherits volatility-related flags.
@@ -940,8 +889,6 @@ package body Contracts is
end if;
end;
end if;
- else
- Obj_Typ := Etype (Type_Or_Obj_Id);
end if;
Prag := Get_Pragma (Type_Or_Obj_Id, Pragma_Async_Readers);
@@ -1027,96 +974,6 @@ package body Contracts is
if Present (Prag) then
Analyze_External_Property_In_Decl_Part (Prag, NC_Val);
end if;
-
- -- The following checks are relevant only when SPARK_Mode is on, as
- -- they are not standard Ada legality rules. Internally generated
- -- temporaries are ignored, as well as return objects.
-
- if SPARK_Mode = On
- and then Comes_From_Source (Type_Or_Obj_Id)
- and then not Is_Return_Object (Type_Or_Obj_Id)
- then
- if Is_Effectively_Volatile (Type_Or_Obj_Id) then
-
- -- The declaration of an effectively volatile object or type must
- -- appear at the library level (SPARK RM 7.1.3(3), C.6(6)).
-
- if not Is_Library_Level_Entity (Type_Or_Obj_Id) then
- Error_Msg_Code := GEC_Volatile_At_Library_Level;
- Error_Msg_N
- ("effectively volatile "
- & Decl_Kind
- & " & must be declared at library level '[[]']",
- Type_Or_Obj_Id);
-
- -- An object of a discriminated type cannot be effectively
- -- volatile except for protected objects (SPARK RM 7.1.3(5)).
-
- elsif Has_Discriminants (Obj_Typ)
- and then not Is_Protected_Type (Obj_Typ)
- then
- Error_Msg_N
- ("discriminated " & Decl_Kind & " & cannot be volatile",
- Type_Or_Obj_Id);
- end if;
-
- -- An object decl shall be compatible with respect to volatility
- -- with its type (SPARK RM 7.1.3(2)).
-
- if not Is_Type_Id then
- if Is_Effectively_Volatile (Obj_Typ) then
- Check_Volatility_Compatibility
- (Type_Or_Obj_Id, Obj_Typ,
- "volatile object", "its type",
- Srcpos_Bearer => Type_Or_Obj_Id);
- end if;
-
- -- A component of a composite type (in this case, the composite
- -- type is an array type) shall be compatible with respect to
- -- volatility with the composite type (SPARK RM 7.1.3(6)).
-
- elsif Is_Array_Type (Obj_Typ) then
- Check_Volatility_Compatibility
- (Component_Type (Obj_Typ), Obj_Typ,
- "component type", "its enclosing array type",
- Srcpos_Bearer => Obj_Typ);
-
- -- A component of a composite type (in this case, the composite
- -- type is a record type) shall be compatible with respect to
- -- volatility with the composite type (SPARK RM 7.1.3(6)).
-
- elsif Is_Record_Type (Obj_Typ) then
- declare
- Comp : Entity_Id := First_Component (Obj_Typ);
- begin
- while Present (Comp) loop
- Check_Volatility_Compatibility
- (Etype (Comp), Obj_Typ,
- "record component " & Get_Name_String (Chars (Comp)),
- "its enclosing record type",
- Srcpos_Bearer => Comp);
- Next_Component (Comp);
- end loop;
- end;
- end if;
-
- -- The type or object is not effectively volatile
-
- else
- -- A non-effectively volatile type cannot have effectively
- -- volatile components (SPARK RM 7.1.3(6)).
-
- if Is_Type_Id
- and then not Is_Effectively_Volatile (Type_Or_Obj_Id)
- and then Has_Effectively_Volatile_Component (Type_Or_Obj_Id)
- then
- Error_Msg_N
- ("non-volatile type & cannot have effectively volatile"
- & " components",
- Type_Or_Obj_Id);
- end if;
- end if;
- end if;
end Check_Type_Or_Object_External_Properties;
-----------------------------
@@ -1263,12 +1120,6 @@ package body Contracts is
if Yields_Synchronized_Object (Obj_Typ) then
Error_Msg_N ("ghost object & cannot be synchronized", Obj_Id);
- -- A Ghost object cannot be effectively volatile (SPARK RM 6.9(7) and
- -- SPARK RM 6.9(19)).
-
- elsif SPARK_Mode = On and then Is_Effectively_Volatile (Obj_Id) then
- Error_Msg_N ("ghost object & cannot be volatile", Obj_Id);
-
-- A Ghost object cannot be imported or exported (SPARK RM 6.9(7)).
-- One exception to this is the object that represents the dispatch
-- table of a Ghost tagged type, as the symbol needs to be exported.
diff --git a/gcc/ada/doc/gnat_rm/the_implementation_of_standard_i_o.rst b/gcc/ada/doc/gnat_rm/the_implementation_of_standard_i_o.rst
index e0b9e0c..f6d884d 100644
--- a/gcc/ada/doc/gnat_rm/the_implementation_of_standard_i_o.rst
+++ b/gcc/ada/doc/gnat_rm/the_implementation_of_standard_i_o.rst
@@ -70,7 +70,7 @@ library streams facility; where
*
All files are opened using ``fopen``.
*
- All input/output operations use ``fread``/`fwrite`.
+ All input/output operations use ``fread``/``fwrite``.
There is no internal buffering of any kind at the Ada library level. The only
buffering is that provided at the system level in the implementation of the
@@ -127,8 +127,7 @@ The records of a Direct_IO file are simply written to the file in index
sequence, with the first record starting at offset zero, and subsequent
records following. There is no control information of any kind. For
example, if 32-bit integers are being written, each record takes
-4-bytes, so the record at index ``K`` starts at offset
-(``K``-1)*4.
+4-bytes, so the record at index ``K`` starts at offset ``(K-1)*4``.
There is no limit on the size of Direct_IO files, they are expanded as
necessary to accommodate whatever records are written to the file.
diff --git a/gcc/ada/einfo-utils.adb b/gcc/ada/einfo-utils.adb
index 88f4d4b..46177ac 100644
--- a/gcc/ada/einfo-utils.adb
+++ b/gcc/ada/einfo-utils.adb
@@ -201,6 +201,11 @@ package body Einfo.Utils is
and then Ekind (Directly_Designated_Type (Id)) = E_Subprogram_Type;
end Is_Access_Subprogram_Type;
+ function Is_Address_Compatible_Type (Id : E) return B is
+ begin
+ return Is_Descendant_Of_Address (Id) or else Id = Standard_Address;
+ end Is_Address_Compatible_Type;
+
function Is_Aggregate_Type (Id : E) return B is
begin
return Ekind (Id) in Aggregate_Kind;
diff --git a/gcc/ada/einfo-utils.ads b/gcc/ada/einfo-utils.ads
index 742ca22..5589276 100644
--- a/gcc/ada/einfo-utils.ads
+++ b/gcc/ada/einfo-utils.ads
@@ -96,6 +96,8 @@ package Einfo.Utils is
function Is_Access_Type (Id : E) return B with Inline;
function Is_Access_Protected_Subprogram_Type (Id : E) return B with Inline;
function Is_Access_Subprogram_Type (Id : E) return B with Inline;
+ function Is_Address_Compatible_Type (Id : E) return B with Inline;
+ -- Check whether the type represents an address
function Is_Aggregate_Type (Id : E) return B with Inline;
function Is_Anonymous_Access_Type (Id : E) return B with Inline;
function Is_Array_Type (Id : E) return B with Inline;
diff --git a/gcc/ada/exp_aggr.adb b/gcc/ada/exp_aggr.adb
index 691430a..2d02bad 100644
--- a/gcc/ada/exp_aggr.adb
+++ b/gcc/ada/exp_aggr.adb
@@ -1936,7 +1936,7 @@ package body Exp_Aggr is
Aggr_Low : constant Node_Id := Duplicate_Subexpr_No_Checks (Aggr_L);
Aggr_High : constant Node_Id := Duplicate_Subexpr_No_Checks (Aggr_H);
- -- After Duplicate_Subexpr these are side-effect free
+ -- After Duplicate_Subexpr these are side-effect-free
Assoc : Node_Id;
Choice : Node_Id;
@@ -6984,8 +6984,14 @@ package body Exp_Aggr is
Parameter_Associations =>
New_List (
Make_Integer_Literal (Loc, 1),
- New_Occurrence_Of
- (Defining_Identifier (Siz_Decl), Loc))));
+ Make_Type_Conversion (Loc,
+ Subtype_Mark =>
+ New_Occurrence_Of
+ (Etype (First_Formal (Entity (New_Indexed_Subp))),
+ Loc),
+ Expression => New_Occurrence_Of
+ (Defining_Identifier (Siz_Decl),
+ Loc)))));
end if;
Append (Init_Stat, Aggr_Code);
diff --git a/gcc/ada/exp_attr.adb b/gcc/ada/exp_attr.adb
index dddc054..66fd684 100644
--- a/gcc/ada/exp_attr.adb
+++ b/gcc/ada/exp_attr.adb
@@ -6039,7 +6039,7 @@ package body Exp_Attr is
E2 : constant Node_Id := Next (E1);
Bnn : constant Entity_Id := Make_Temporary (Loc, 'B', N);
- Accum_Typ : Entity_Id;
+ Accum_Typ : Entity_Id := Empty;
New_Loop : Node_Id;
function Build_Stat (Comp : Node_Id) return Node_Id;
@@ -6058,7 +6058,6 @@ package body Exp_Attr is
begin
if Nkind (E1) = N_Attribute_Reference then
- Accum_Typ := Entity (Prefix (E1));
Stat := Make_Assignment_Statement (Loc,
Name => New_Occurrence_Of (Bnn, Loc),
Expression => Make_Attribute_Reference (Loc,
@@ -6069,14 +6068,12 @@ package body Exp_Attr is
Comp)));
elsif Ekind (Entity (E1)) = E_Procedure then
- Accum_Typ := Etype (First_Formal (Entity (E1)));
Stat := Make_Procedure_Call_Statement (Loc,
Name => New_Occurrence_Of (Entity (E1), Loc),
Parameter_Associations => New_List (
New_Occurrence_Of (Bnn, Loc),
Comp));
else
- Accum_Typ := Etype (Entity (E1));
Stat := Make_Assignment_Statement (Loc,
Name => New_Occurrence_Of (Bnn, Loc),
Expression => Make_Function_Call (Loc,
@@ -6137,12 +6134,9 @@ package body Exp_Attr is
Statements =>
New_List (Build_Stat (Relocate_Node (Expr))));
- -- If the reducer subprogram is a universal operator, then
- -- we still look at the context to find the type for now.
+ -- Look at the context to find the type.
- if Is_Universal_Numeric_Type (Accum_Typ) then
- Accum_Typ := Etype (N);
- end if;
+ Accum_Typ := Etype (N);
end;
else
@@ -6172,43 +6166,40 @@ package body Exp_Attr is
Statements => New_List (
Build_Stat (New_Occurrence_Of (Elem, Loc))));
- -- If the reducer subprogram is a universal operator, then
- -- we need to look at the prefix to find the type. This is
+ -- Look at the prefix to find the type. This is
-- modeled on Analyze_Iterator_Specification in Sem_Ch5.
- if Is_Universal_Numeric_Type (Accum_Typ) then
- declare
- Ptyp : constant Entity_Id :=
- Base_Type (Etype (Prefix (N)));
+ declare
+ Ptyp : constant Entity_Id :=
+ Base_Type (Etype (Prefix (N)));
- begin
- if Is_Array_Type (Ptyp) then
- Accum_Typ := Component_Type (Ptyp);
-
- elsif Has_Aspect (Ptyp, Aspect_Iterable) then
- declare
- Element : constant Entity_Id :=
- Get_Iterable_Type_Primitive
- (Ptyp, Name_Element);
- begin
- if Present (Element) then
- Accum_Typ := Etype (Element);
- end if;
- end;
-
- else
- declare
- Element : constant Node_Id :=
- Find_Value_Of_Aspect
- (Ptyp, Aspect_Iterator_Element);
- begin
- if Present (Element) then
- Accum_Typ := Entity (Element);
- end if;
- end;
- end if;
- end;
- end if;
+ begin
+ if Is_Array_Type (Ptyp) then
+ Accum_Typ := Component_Type (Ptyp);
+
+ elsif Has_Aspect (Ptyp, Aspect_Iterable) then
+ declare
+ Element : constant Entity_Id :=
+ Get_Iterable_Type_Primitive
+ (Ptyp, Name_Element);
+ begin
+ if Present (Element) then
+ Accum_Typ := Etype (Element);
+ end if;
+ end;
+
+ else
+ declare
+ Element : constant Node_Id :=
+ Find_Value_Of_Aspect
+ (Ptyp, Aspect_Iterator_Element);
+ begin
+ if Present (Element) then
+ Accum_Typ := Entity (Element);
+ end if;
+ end;
+ end if;
+ end;
end;
end if;
diff --git a/gcc/ada/exp_ch4.adb b/gcc/ada/exp_ch4.adb
index f04ac61..99be96d 100644
--- a/gcc/ada/exp_ch4.adb
+++ b/gcc/ada/exp_ch4.adb
@@ -555,15 +555,14 @@ package body Exp_Ch4 is
---------------------------------
procedure Expand_Allocator_Expression (N : Node_Id) is
- Loc : constant Source_Ptr := Sloc (N);
- Exp : constant Node_Id := Expression (Expression (N));
- PtrT : constant Entity_Id := Etype (N);
- DesigT : constant Entity_Id := Designated_Type (PtrT);
-
- -- Local variables
+ Loc : constant Source_Ptr := Sloc (N);
+ Exp : constant Node_Id := Expression (Expression (N));
+ Indic : constant Node_Id := Subtype_Mark (Expression (N));
+ T : constant Entity_Id := Entity (Indic);
+ PtrT : constant Entity_Id := Etype (N);
+ DesigT : constant Entity_Id := Designated_Type (PtrT);
+ Special_Return : constant Boolean := For_Special_Return_Object (N);
- Indic : constant Node_Id := Subtype_Mark (Expression (N));
- T : constant Entity_Id := Entity (Indic);
Adj_Call : Node_Id;
Aggr_In_Place : Boolean;
Node : Node_Id;
@@ -576,8 +575,6 @@ package body Exp_Ch4 is
TagR : Node_Id := Empty;
-- Target reference for tag assignment
- -- Start of processing for Expand_Allocator_Expression
-
begin
-- Handle call to C++ constructor
@@ -597,7 +594,15 @@ package body Exp_Ch4 is
Apply_Constraint_Check (Exp, T, No_Sliding => True);
- Apply_Predicate_Check (Exp, T);
+ Aggr_In_Place := Is_Delayed_Aggregate (Exp);
+
+ -- If the expression is an aggregate to be built in place, then we need
+ -- to delay applying predicate checks, because this would result in the
+  -- creation of a temporary, which is illegal for limited types.
+
+ if not Aggr_In_Place then
+ Apply_Predicate_Check (Exp, T);
+ end if;
-- Check that any anonymous access discriminants are suitable
-- for use in an allocator.
@@ -658,8 +663,6 @@ package body Exp_Ch4 is
return;
end if;
- Aggr_In_Place := Is_Delayed_Aggregate (Exp);
-
-- Case of tagged type or type requiring finalization
if Is_Tagged_Type (T) or else Needs_Finalization (T) then
@@ -902,7 +905,7 @@ package body Exp_Ch4 is
-- Likewise if the allocator is made for a special return object
- elsif For_Special_Return_Object (N) then
+ elsif Special_Return then
null;
elsif Is_Tagged_Type (T) and then not Is_Class_Wide_Type (T) then
@@ -944,7 +947,7 @@ package body Exp_Ch4 is
and then not Is_Inherently_Limited_Type (T)
and then not Aggr_In_Place
and then Nkind (Exp) /= N_Function_Call
- and then not For_Special_Return_Object (N)
+ and then not Special_Return
then
-- An unchecked conversion is needed in the classwide case because
-- the designated type can be an ancestor of the subtype mark of
@@ -971,6 +974,10 @@ package body Exp_Ch4 is
Rewrite (N, New_Occurrence_Of (Temp, Loc));
Analyze_And_Resolve (N, PtrT);
+ if Aggr_In_Place then
+ Apply_Predicate_Check (N, T, Deref => True);
+ end if;
+
-- Ada 2005 (AI-251): Displace the pointer to reference the record
-- component containing the secondary dispatch table of the interface
-- type.
@@ -1011,6 +1018,10 @@ package body Exp_Ch4 is
Rewrite (N, New_Occurrence_Of (Temp, Loc));
Analyze_And_Resolve (N, PtrT);
+ if Aggr_In_Place then
+ Apply_Predicate_Check (N, T, Deref => True);
+ end if;
+
elsif Is_Access_Type (T) and then Can_Never_Be_Null (T) then
Install_Null_Excluding_Check (Exp);
diff --git a/gcc/ada/exp_ch4.ads b/gcc/ada/exp_ch4.ads
index 39177cd..e240380 100644
--- a/gcc/ada/exp_ch4.ads
+++ b/gcc/ada/exp_ch4.ads
@@ -97,7 +97,7 @@ package Exp_Ch4 is
-- individually to yield the required Boolean result. Loc is the
-- location for the generated nodes. Typ is the type of the record, and
-- Lhs, Rhs are the record expressions to be compared, these
- -- expressions need not be analyzed but have to be side-effect free.
+ -- expressions need not be analyzed but have to be side-effect-free.
-- Nod provides the Sloc value for generated code.
procedure Expand_Set_Membership (N : Node_Id);
diff --git a/gcc/ada/exp_ch5.adb b/gcc/ada/exp_ch5.adb
index d946f6d..bc61243 100644
--- a/gcc/ada/exp_ch5.adb
+++ b/gcc/ada/exp_ch5.adb
@@ -162,7 +162,7 @@ package body Exp_Ch5 is
procedure Expand_Assign_With_Target_Names (N : Node_Id);
-- (AI12-0125): N is an assignment statement whose RHS contains occurrences
-- of @ that designate the value of the LHS of the assignment. If the LHS
- -- is side-effect free the target names can be replaced with a copy of the
+ -- is side-effect-free the target names can be replaced with a copy of the
-- LHS; otherwise the semantics of the assignment is described in terms of
-- a procedure with an in-out parameter, and expanded as such.
@@ -2304,7 +2304,7 @@ package body Exp_Ch5 is
Name => Relocate_Node (LHS),
Expression => New_RHS));
- -- The left-hand side is not a direct name, but is side-effect free.
+ -- The left-hand side is not a direct name, but is side-effect-free.
-- Capture its value in a temporary to avoid generating a procedure.
-- We don't do this optimization if the target object's type may need
-- finalization actions, because we don't want extra finalizations to
diff --git a/gcc/ada/exp_ch6.adb b/gcc/ada/exp_ch6.adb
index d480240..a2b5cdc 100644
--- a/gcc/ada/exp_ch6.adb
+++ b/gcc/ada/exp_ch6.adb
@@ -194,6 +194,10 @@ package body Exp_Ch6 is
-- the activation Chain. Note: Master_Actual can be Empty, but only if
-- there are no tasks.
+ function Build_Flag_For_Function (Func_Id : Entity_Id) return Entity_Id;
+ -- Generate code to declare a boolean flag initialized to False in the
+ -- function Func_Id and return the entity for the flag.
+
function Caller_Known_Size
(Func_Call : Node_Id;
Result_Subt : Entity_Id) return Boolean;
@@ -909,6 +913,53 @@ package body Exp_Ch6 is
end if;
end BIP_Suffix_Kind;
+ -----------------------------
+ -- Build_Flag_For_Function --
+ -----------------------------
+
+ function Build_Flag_For_Function (Func_Id : Entity_Id) return Entity_Id is
+ Flag_Decl : Node_Id;
+ Flag_Id : Entity_Id;
+ Func_Bod : Node_Id;
+ Loc : Source_Ptr;
+
+ begin
+ -- Recover the function body
+
+ Func_Bod := Unit_Declaration_Node (Func_Id);
+
+ if Nkind (Func_Bod) = N_Subprogram_Declaration then
+ Func_Bod := Parent (Parent (Corresponding_Body (Func_Bod)));
+ end if;
+
+ if Nkind (Func_Bod) = N_Function_Specification then
+ Func_Bod := Parent (Func_Bod); -- one more level for child units
+ end if;
+
+ pragma Assert (Nkind (Func_Bod) = N_Subprogram_Body);
+
+ Loc := Sloc (Func_Bod);
+
+ -- Create a flag to track the function state
+
+ Flag_Id := Make_Temporary (Loc, 'F');
+
+ -- Insert the flag at the beginning of the function declarations,
+ -- generate:
+ -- Fnn : Boolean := False;
+
+ Flag_Decl :=
+ Make_Object_Declaration (Loc,
+ Defining_Identifier => Flag_Id,
+ Object_Definition => New_Occurrence_Of (Standard_Boolean, Loc),
+ Expression => New_Occurrence_Of (Standard_False, Loc));
+
+ Prepend_To (Declarations (Func_Bod), Flag_Decl);
+ Analyze (Flag_Decl);
+
+ return Flag_Id;
+ end Build_Flag_For_Function;
+
---------------------------
-- Build_In_Place_Formal --
---------------------------
@@ -5615,49 +5666,14 @@ package body Exp_Ch6 is
-- perform the appropriate cleanup should it fail to return. The state
-- of the function itself is tracked through a flag which is coupled
-- with the scope finalizer. There is one flag per each return object
- -- in case of multiple returns.
-
- if Needs_Finalization (Etype (Ret_Obj_Id)) then
- declare
- Flag_Decl : Node_Id;
- Flag_Id : Entity_Id;
- Func_Bod : Node_Id;
-
- begin
- -- Recover the function body
-
- Func_Bod := Unit_Declaration_Node (Func_Id);
-
- if Nkind (Func_Bod) = N_Subprogram_Declaration then
- Func_Bod := Parent (Parent (Corresponding_Body (Func_Bod)));
- end if;
-
- if Nkind (Func_Bod) = N_Function_Specification then
- Func_Bod := Parent (Func_Bod); -- one more level for child units
- end if;
-
- pragma Assert (Nkind (Func_Bod) = N_Subprogram_Body);
-
- -- Create a flag to track the function state
-
- Flag_Id := Make_Temporary (Loc, 'F');
- Set_Status_Flag_Or_Transient_Decl (Ret_Obj_Id, Flag_Id);
+ -- in case of multiple extended returns. Note that the flag has already
+ -- been created if the extended return contains a nested return.
- -- Insert the flag at the beginning of the function declarations,
- -- generate:
- -- Fnn : Boolean := False;
-
- Flag_Decl :=
- Make_Object_Declaration (Loc,
- Defining_Identifier => Flag_Id,
- Object_Definition =>
- New_Occurrence_Of (Standard_Boolean, Loc),
- Expression =>
- New_Occurrence_Of (Standard_False, Loc));
-
- Prepend_To (Declarations (Func_Bod), Flag_Decl);
- Analyze (Flag_Decl);
- end;
+ if Needs_Finalization (Etype (Ret_Obj_Id))
+ and then No (Status_Flag_Or_Transient_Decl (Ret_Obj_Id))
+ then
+ Set_Status_Flag_Or_Transient_Decl
+ (Ret_Obj_Id, Build_Flag_For_Function (Func_Id));
end if;
-- Build a simple_return_statement that returns the return object when
@@ -5722,6 +5738,8 @@ package body Exp_Ch6 is
Status_Flag_Or_Transient_Decl (Ret_Obj_Id);
begin
+ pragma Assert (Present (Flag_Id));
+
-- Generate:
-- Fnn := True;
@@ -6387,14 +6405,44 @@ package body Exp_Ch6 is
-- return of the previously declared return object.
elsif Kind = E_Return_Statement then
- Rewrite (N,
- Make_Simple_Return_Statement (Loc,
- Expression =>
- New_Occurrence_Of (First_Entity (Scope_Id), Loc)));
- Set_Comes_From_Extended_Return_Statement (N);
- Set_Return_Statement_Entity (N, Scope_Id);
- Expand_Simple_Function_Return (N);
- return;
+ declare
+ Ret_Obj_Id : constant Entity_Id := First_Entity (Scope_Id);
+
+ Flag_Id : Entity_Id;
+
+ begin
+ -- Apply the same processing as Expand_N_Extended_Return_Statement
+ -- if the returned object needs finalization actions. Note that we
+ -- are invoked before Expand_N_Extended_Return_Statement but there
+ -- may be multiple nested returns within the extended one.
+
+ if Needs_Finalization (Etype (Ret_Obj_Id)) then
+ if Present (Status_Flag_Or_Transient_Decl (Ret_Obj_Id)) then
+ Flag_Id := Status_Flag_Or_Transient_Decl (Ret_Obj_Id);
+ else
+ Flag_Id :=
+ Build_Flag_For_Function (Return_Applies_To (Scope_Id));
+ Set_Status_Flag_Or_Transient_Decl (Ret_Obj_Id, Flag_Id);
+ end if;
+
+ -- Generate:
+ -- Fnn := True;
+
+ Insert_Action (N,
+ Make_Assignment_Statement (Loc,
+ Name =>
+ New_Occurrence_Of (Flag_Id, Loc),
+ Expression => New_Occurrence_Of (Standard_True, Loc)));
+ end if;
+
+ Rewrite (N,
+ Make_Simple_Return_Statement (Loc,
+ Expression => New_Occurrence_Of (Ret_Obj_Id, Loc)));
+ Set_Comes_From_Extended_Return_Statement (N);
+ Set_Return_Statement_Entity (N, Scope_Id);
+ Expand_Simple_Function_Return (N);
+ return;
+ end;
end if;
pragma Assert (Is_Entry (Scope_Id));
diff --git a/gcc/ada/exp_ch7.adb b/gcc/ada/exp_ch7.adb
index f8c12b7..f5d9b0f 100644
--- a/gcc/ada/exp_ch7.adb
+++ b/gcc/ada/exp_ch7.adb
@@ -70,59 +70,6 @@ with Uintp; use Uintp;
package body Exp_Ch7 is
- --------------------------------
- -- Transient Scope Management --
- --------------------------------
-
- -- A transient scope is needed when certain temporary objects are created
- -- by the compiler. These temporary objects are allocated on the secondary
- -- stack and/or need finalization, and the transient scope is responsible
- -- for finalizing the objects and reclaiming the memory of the secondary
- -- stack at the appropriate time. They are generally objects allocated to
- -- store the result of a function returning an unconstrained or controlled
- -- value. Expressions needing to be wrapped in a transient scope may appear
- -- in three different contexts which lead to different kinds of transient
- -- scope expansion:
-
- -- 1. In a simple statement (procedure call, assignment, ...). In this
- -- case the instruction is wrapped into a transient block. See
- -- Wrap_Transient_Statement for details.
-
- -- 2. In an expression of a control structure (test in a IF statement,
- -- expression in a CASE statement, ...). See Wrap_Transient_Expression
- -- for details.
-
- -- 3. In a expression of an object_declaration. No wrapping is possible
- -- here, so the finalization actions, if any, are done right after the
- -- declaration and the secondary stack deallocation is done in the
- -- proper enclosing scope. See Wrap_Transient_Declaration for details.
-
- --------------------------------------------------
- -- Transient Blocks and Finalization Management --
- --------------------------------------------------
-
- procedure Insert_Actions_In_Scope_Around
- (N : Node_Id;
- Clean : Boolean;
- Manage_SS : Boolean);
- -- Insert the before-actions kept in the scope stack before N, and the
- -- after-actions after N, which must be a member of a list. If flag Clean
- -- is set, insert any cleanup actions. If flag Manage_SS is set, insert
- -- calls to mark and release the secondary stack.
-
- function Make_Transient_Block
- (Loc : Source_Ptr;
- Action : Node_Id;
- Par : Node_Id) return Node_Id;
- -- Action is a single statement or object declaration. Par is the proper
- -- parent of the generated block. Create a transient block whose name is
- -- the current scope and the only handled statement is Action. If Action
- -- involves controlled objects or secondary stack usage, the corresponding
- -- cleanup actions are performed at the end of the block.
-
- procedure Store_Actions_In_Scope (AK : Scope_Action_Kind; L : List_Id);
- -- Shared processing for Store_xxx_Actions_In_Scope
-
-----------------------------
-- Finalization Management --
-----------------------------
@@ -292,6 +239,84 @@ package body Exp_Ch7 is
-- Build the deep Initialize/Adjust/Finalize for a record Typ with
-- Has_Component_Component set and store them using the TSS mechanism.
+ --------------------------------
+ -- Transient Scope Management --
+ --------------------------------
+
+ -- A transient scope is needed when certain temporary objects are created
+ -- by the compiler. These temporary objects are allocated on the secondary
+ -- stack and/or need finalization, and the transient scope is responsible
+ -- for finalizing the objects and reclaiming the memory of the secondary
+ -- stack at the appropriate time. They are generally objects allocated to
+ -- store the result of a function returning an unconstrained or controlled
+ -- value. Expressions needing to be wrapped in a transient scope may appear
+ -- in three different contexts, which lead to different kinds of transient
+ -- scope expansion:
+
+ -- 1. In a simple statement (procedure call, assignment, ...). In this
+ -- case the statement is wrapped into a transient block, which takes
+ -- care of the finalization actions as well as the secondary stack
+  --       deallocation. See Wrap_Transient_Statement for details.
+
+  --  2. In an expression of a control structure (test in an If statement,
+ -- expression in a Case statement, ...). In this case the expression
+ -- is replaced by a temporary and the enclosing statement is wrapped
+ -- into a transient block, which takes care of the finalization actions
+ -- and the secondary stack deallocation. See Wrap_Transient_Expression
+ -- for details.
+
+ -- 3. In an expression of an object declaration. No wrapping is possible
+ -- here, so the finalization actions performed on the normal path, if
+ -- any, are done right after the declaration, and those performed on
+ -- the exceptional path, as well as the secondary stack deallocation,
+ -- are deferred to the enclosing scope. See Wrap_Transient_Declaration
+ -- for details.
+
+ -- A transient scope is created by calling Establish_Transient_Scope on the
+ -- node that needs to be serviced by it (the serviced node can subsequently
+ -- be retrieved by invoking Node_To_Be_Wrapped when the current scope is a
+ -- transient scope). Once this has been done, the normal processing of the
+ -- Insert_Actions procedures is blocked and the procedures are redirected
+ -- to the Store_xxx_Actions_In_Scope procedures and Store_Actions_In_Scope
+ -- is ultimately invoked to store the pending actions.
+
+ -- A transient scope is finalized by calling one of the Wrap_Transient_xxx
+ -- procedures depending on the context as explained above. They ultimately
+ -- invoke Insert_Actions_In_Scope_Around as per the following picture:
+
+ -- Wrap_Transient_Expression Wrap_Transient_Statement
+ -- | |
+ -- V V
+ -- Make_Transient_Block
+ -- |
+ -- Wrap_Transient_Declaration |
+ -- | |
+ -- V V
+ -- Insert_Actions_In_Scope_Around
+
+ procedure Insert_Actions_In_Scope_Around
+ (N : Node_Id;
+ Clean : Boolean;
+ Manage_SS : Boolean);
+ -- Insert the before-actions kept in the scope stack before N, and the
+ -- after-actions after N, which must be a member of a list. If Clean is
+ -- true, insert any cleanup actions kept in the scope stack and generate
+ -- required finalization actions for the before-actions and after-actions.
+ -- If Manage_SS is true, insert calls to mark/release the secondary stack.
+
+ function Make_Transient_Block
+ (Loc : Source_Ptr;
+ Action : Node_Id;
+ Par : Node_Id) return Node_Id;
+ -- Action is a single statement or object declaration. Par is the proper
+ -- parent of the generated block. Create a transient block whose name is
+ -- the current scope and the only handled statement is Action. If Action
+ -- involves controlled objects or secondary stack usage, the corresponding
+ -- cleanup actions are performed at the end of the block.
+
+ procedure Store_Actions_In_Scope (AK : Scope_Action_Kind; L : List_Id);
+ -- Shared processing for Store_xxx_Actions_In_Scope
+
-------------------------------------------
-- Unnesting procedures for CCG and LLVM --
-------------------------------------------
@@ -5641,9 +5666,7 @@ package body Exp_Ch7 is
Blk_Ins := Last_Object;
end if;
- if Clean then
- Insert_List_After_And_Analyze (Blk_Ins, Act_Cleanup);
- end if;
+ Insert_List_After_And_Analyze (Blk_Ins, Act_Cleanup);
-- Examine all objects in the list First_Object .. Last_Object
@@ -5824,13 +5847,15 @@ package body Exp_Ch7 is
(Last_Obj, Build_SS_Release_Call (Loc, Mark_Id));
end if;
- -- Check for transient objects associated with Target and generate the
- -- appropriate finalization actions for them.
+ -- If we are handling cleanups, check for transient objects associated
+ -- with Target and generate the required finalization actions for them.
- Process_Transients_In_Scope
- (First_Object => First_Obj,
- Last_Object => Last_Obj,
- Related_Node => Target);
+ if Clean then
+ Process_Transients_In_Scope
+ (First_Object => First_Obj,
+ Last_Object => Last_Obj,
+ Related_Node => Target);
+ end if;
-- Reset the action lists
diff --git a/gcc/ada/exp_ch7.ads b/gcc/ada/exp_ch7.ads
index a131e55..105aa7e 100644
--- a/gcc/ada/exp_ch7.ads
+++ b/gcc/ada/exp_ch7.ads
@@ -176,6 +176,12 @@ package Exp_Ch7 is
-- triggered by an abort, E_Id denotes the defining identifier of a local
-- exception occurrence, Raised_Id is the entity of a local boolean flag.
+ procedure Expand_Cleanup_Actions (N : Node_Id);
+ -- Expand the necessary stuff into a scope to enable finalization of local
+ -- objects and deallocation of transient data when exiting the scope. N is
+ -- one of N_Block_Statement, N_Subprogram_Body, N_Task_Body, N_Entry_Body,
+ -- or N_Extended_Return_Statement.
+
function Make_Adjust_Call
(Obj_Ref : Node_Id;
Typ : Entity_Id;
@@ -275,12 +281,6 @@ package Exp_Ch7 is
-- Transient Scope Management --
--------------------------------
- procedure Expand_Cleanup_Actions (N : Node_Id);
- -- Expand the necessary stuff into a scope to enable finalization of local
- -- objects and deallocation of transient data when exiting the scope. N is
- -- one of N_Block_Statement, N_Subprogram_Body, N_Task_Body, N_Entry_Body,
- -- or N_Extended_Return_Statement.
-
procedure Establish_Transient_Scope
(N : Node_Id;
Manage_Sec_Stack : Boolean);
diff --git a/gcc/ada/exp_put_image.adb b/gcc/ada/exp_put_image.adb
index 6684d41..a30f609 100644
--- a/gcc/ada/exp_put_image.adb
+++ b/gcc/ada/exp_put_image.adb
@@ -1023,7 +1023,7 @@ package body Exp_Put_Image is
null;
elsif Is_Derived_Type (Typ) then
return Put_Image_Enabled (Etype (Base_Type (Typ)));
- elsif In_Predefined_Unit (Typ) then
+ elsif Is_Predefined_Unit (Get_Code_Unit (Typ)) then
return False;
end if;
end if;
diff --git a/gcc/ada/exp_unst.adb b/gcc/ada/exp_unst.adb
index b01cfc1..7acb065 100644
--- a/gcc/ada/exp_unst.adb
+++ b/gcc/ada/exp_unst.adb
@@ -643,7 +643,9 @@ package body Exp_Unst is
-- Conversion case
- elsif Nkind (N) = N_Type_Conversion then
+ elsif Nkind (N) in
+ N_Type_Conversion | N_Unchecked_Type_Conversion
+ then
Note_Uplevel_Bound (Expression (N), Ref);
end if;
end Note_Uplevel_Bound;
diff --git a/gcc/ada/exp_util.adb b/gcc/ada/exp_util.adb
index 3952a16..17fde44 100644
--- a/gcc/ada/exp_util.adb
+++ b/gcc/ada/exp_util.adb
@@ -237,7 +237,7 @@ package body Exp_Util is
function Side_Effect_Free_Attribute (Name : Name_Id) return Boolean;
-- Return True if the evaluation of the given attribute is considered
- -- side-effect free, independently of its prefix and expressions.
+ -- side-effect-free, independently of its prefix and expressions.
-------------------------------------
-- Activate_Atomic_Synchronization --
@@ -9356,7 +9356,7 @@ package body Exp_Util is
begin
-- Build-in-place calls usually appear in 'reference format. Note that
-- the accessibility check machinery may add an extra 'reference due to
- -- side effect removal.
+ -- side-effect removal.
while Nkind (Call) = N_Reference loop
Call := Prefix (Call);
@@ -12062,7 +12062,7 @@ package body Exp_Util is
then
return;
- -- No action needed for side-effect free expressions
+ -- No action needed for side-effect-free expressions
elsif Check_Side_Effects
and then Side_Effect_Free (Exp, Name_Req, Variable_Ref)
@@ -12087,15 +12087,15 @@ package body Exp_Util is
Scope_Suppress.Suppress := (others => True);
- -- If this is a side-effect free attribute reference whose expressions
- -- are also side-effect free and whose prefix is not a name, remove the
+ -- If this is a side-effect-free attribute reference whose expressions
+ -- are also side-effect-free and whose prefix is not a name, remove the
-- side effects of the prefix. A copy of the prefix is required in this
-- case and it is better not to make an additional one for the attribute
-- itself, because the return type of many of them is universal integer,
-- which is a very large type for a temporary.
-- The prefix of an attribute reference Reduce may be syntactically an
-- aggregate, but will be expanded into a loop, so no need to remove
- -- side-effects.
+ -- side effects.
if Nkind (Exp) = N_Attribute_Reference
and then Side_Effect_Free_Attribute (Attribute_Name (Exp))
@@ -12329,7 +12329,7 @@ package body Exp_Util is
-- Otherwise we generate a reference to the expression
else
- -- When generating C code we cannot consider side effect free object
+ -- When generating C code we cannot consider side-effect-free object
-- declarations that have discriminants and are initialized by means
-- of a function call since on this target there is no secondary
-- stack to store the return value and the expander may generate an
@@ -13681,12 +13681,12 @@ package body Exp_Util is
function Safe_Prefixed_Reference (N : Node_Id) return Boolean;
-- The argument N is a construct where the Prefix is dereferenced if it
-- is an access type and the result is a variable. The call returns True
- -- if the construct is side effect free (not considering side effects in
+ -- if the construct is side-effect-free (not considering side effects in
-- other than the prefix which are to be tested by the caller).
function Within_In_Parameter (N : Node_Id) return Boolean;
-- Determines if N is a subcomponent of a composite in-parameter. If so,
- -- N is not side-effect free when the actual is global and modifiable
+ -- N is not side-effect-free when the actual is global and modifiable
-- indirectly from within a subprogram, because it may be passed by
-- reference. The front-end must be conservative here and assume that
-- this may happen with any array or record type. On the other hand, we
@@ -13703,7 +13703,7 @@ package body Exp_Util is
function Safe_Prefixed_Reference (N : Node_Id) return Boolean is
begin
- -- If prefix is not side effect free, definitely not safe
+ -- If prefix is not side-effect-free, definitely not safe
if not Side_Effect_Free (Prefix (N), Name_Req, Variable_Ref) then
return False;
@@ -13805,7 +13805,7 @@ package body Exp_Util is
then
return False;
- -- All other cases are side effect free
+ -- All other cases are side-effect-free
else
return True;
@@ -13846,7 +13846,7 @@ package body Exp_Util is
-- However, we would prefer to consider that they are side effects,
-- since the back end CSE does not work very well on expressions which
-- can raise Constraint_Error. On the other hand if we don't consider
- -- them to be side effect free, then we get some awkward expansions
+ -- them to be side-effect-free, then we get some awkward expansions
-- in -gnato mode, resulting in code insertions at a point where we
-- do not have a clear model for performing the insertions.
@@ -13854,7 +13854,7 @@ package body Exp_Util is
if Is_Entity_Name (N) then
- -- A type reference is always side effect free
+ -- A type reference is always side-effect-free
if Is_Type (Entity (N)) then
return True;
@@ -13875,12 +13875,12 @@ package body Exp_Util is
return True;
end if;
- -- A value known at compile time is always side effect free
+ -- A value known at compile time is always side-effect-free
elsif Compile_Time_Known_Value (N) then
return True;
- -- A variable renaming is not side-effect free, because the renaming
+ -- A variable renaming is not side-effect-free, because the renaming
-- will function like a macro in the front-end in some cases, and an
-- assignment can modify the component designated by N, so we need to
-- create a temporary for it.
@@ -13914,7 +13914,7 @@ package body Exp_Util is
return Safe_Prefixed_Reference (RO);
-- In all other cases, designated object cannot be changed so
- -- we are side effect free.
+ -- we are side-effect-free.
else
return True;
@@ -13954,8 +13954,8 @@ package body Exp_Util is
case Nkind (N) is
- -- An attribute reference is side-effect free if its expressions
- -- are side-effect free and its prefix is side-effect free or is
+ -- An attribute reference is side-effect-free if its expressions
+ -- are side-effect-free and its prefix is side-effect-free or is
-- an entity reference.
when N_Attribute_Reference =>
@@ -13967,8 +13967,8 @@ package body Exp_Util is
or else
Side_Effect_Free (Prefix (N), Name_Req, Variable_Ref));
- -- A binary operator is side effect free if and both operands are
- -- side effect free. For this purpose binary operators include
+         --  A binary operator is side-effect-free if both operands are
+ -- side-effect-free. For this purpose binary operators include
-- short circuit forms.
when N_Binary_Op
@@ -13989,14 +13989,14 @@ package body Exp_Util is
else Side_Effect_Free
(Alternatives (N), Name_Req, Variable_Ref));
- -- An explicit dereference is side effect free only if it is
- -- a side effect free prefixed reference.
+ -- An explicit dereference is side-effect-free only if it is
+ -- a side-effect-free prefixed reference.
when N_Explicit_Dereference =>
return Safe_Prefixed_Reference (N);
- -- An expression with action is side effect free if its expression
- -- is side effect free and it has no actions.
+ -- An expression with action is side-effect-free if its expression
+ -- is side-effect-free and it has no actions.
when N_Expression_With_Actions =>
return
@@ -14004,14 +14004,14 @@ package body Exp_Util is
and then Side_Effect_Free
(Expression (N), Name_Req, Variable_Ref);
- -- A call to _rep_to_pos is side effect free, since we generate
+ -- A call to _rep_to_pos is side-effect-free, since we generate
-- this pure function call ourselves. Moreover it is critically
-- important to make this exception, since otherwise we can have
- -- discriminants in array components which don't look side effect
+ -- discriminants in array components which don't look side-effect
-- free in the case of an array whose index type is an enumeration
-- type with an enumeration rep clause.
- -- All other function calls are not side effect free
+ -- All other function calls are not side-effect-free
when N_Function_Call =>
return
@@ -14021,8 +14021,8 @@ package body Exp_Util is
(First (Parameter_Associations (N)),
Name_Req, Variable_Ref);
- -- An IF expression is side effect free if it's of a scalar type, and
- -- all its components are all side effect free (conditions and then
+ -- An IF expression is side-effect-free if it's of a scalar type, and
+ -- all its components are all side-effect-free (conditions and then
-- actions and else actions). We restrict to scalar types, since it
-- is annoying to deal with things like (if A then B else C)'First
-- where the type involved is a string type.
@@ -14033,9 +14033,9 @@ package body Exp_Util is
and then Side_Effect_Free
(Expressions (N), Name_Req, Variable_Ref);
- -- An indexed component is side effect free if it is a side
+ -- An indexed component is side-effect-free if it is a side
-- effect free prefixed reference and all the indexing
- -- expressions are side effect free.
+ -- expressions are side-effect-free.
when N_Indexed_Component =>
return
@@ -14043,7 +14043,7 @@ package body Exp_Util is
and then Safe_Prefixed_Reference (N);
-- A type qualification, type conversion, or unchecked expression is
- -- side effect free if the expression is side effect free.
+ -- side-effect-free if the expression is side-effect-free.
when N_Qualified_Expression
| N_Type_Conversion
@@ -14051,35 +14051,35 @@ package body Exp_Util is
=>
return Side_Effect_Free (Expression (N), Name_Req, Variable_Ref);
- -- A selected component is side effect free only if it is a side
+ -- A selected component is side-effect-free only if it is a side
-- effect free prefixed reference.
when N_Selected_Component =>
return Safe_Prefixed_Reference (N);
- -- A range is side effect free if the bounds are side effect free
+ -- A range is side-effect-free if the bounds are side-effect-free
when N_Range =>
return Side_Effect_Free (Low_Bound (N), Name_Req, Variable_Ref)
and then
Side_Effect_Free (High_Bound (N), Name_Req, Variable_Ref);
- -- A slice is side effect free if it is a side effect free
- -- prefixed reference and the bounds are side effect free.
+ -- A slice is side-effect-free if it is a side-effect-free
+ -- prefixed reference and the bounds are side-effect-free.
when N_Slice =>
return
Side_Effect_Free (Discrete_Range (N), Name_Req, Variable_Ref)
and then Safe_Prefixed_Reference (N);
- -- A unary operator is side effect free if the operand
- -- is side effect free.
+ -- A unary operator is side-effect-free if the operand
+ -- is side-effect-free.
when N_Unary_Op =>
return Side_Effect_Free (Right_Opnd (N), Name_Req, Variable_Ref);
- -- An unchecked type conversion is side effect free only if it
- -- is safe and its argument is side effect free.
+ -- An unchecked type conversion is side-effect-free only if it
+ -- is safe and its argument is side-effect-free.
when N_Unchecked_Type_Conversion =>
return
@@ -14087,7 +14087,7 @@ package body Exp_Util is
and then Side_Effect_Free
(Expression (N), Name_Req, Variable_Ref);
- -- A literal is side effect free
+ -- A literal is side-effect-free
when N_Character_Literal
| N_Integer_Literal
@@ -14096,7 +14096,7 @@ package body Exp_Util is
=>
return True;
- -- An aggregate is side effect free if all its values are compile
+ -- An aggregate is side-effect-free if all its values are compile
-- time known.
when N_Aggregate =>
@@ -14112,7 +14112,7 @@ package body Exp_Util is
end case;
end Side_Effect_Free;
- -- A list is side effect free if all elements of the list are side
+ -- A list is side-effect-free if all elements of the list are side
-- effect free.
function Side_Effect_Free
@@ -14401,6 +14401,7 @@ package body Exp_Util is
----------------------------------
function Within_Case_Or_If_Expression (N : Node_Id) return Boolean is
+ Nod : Node_Id;
Par : Node_Id;
begin
@@ -14408,9 +14409,17 @@ package body Exp_Util is
-- can be expanded into Expression_With_Actions, hence the test of the
-- original node.
- Par := Parent (N);
+ Nod := N;
+ Par := Parent (Nod);
+
while Present (Par) loop
- if Nkind (Original_Node (Par)) in N_Case_Expression | N_If_Expression
+ if Nkind (Original_Node (Par)) = N_Case_Expression
+ and then Nod /= Expression (Original_Node (Par))
+ then
+ return True;
+
+ elsif Nkind (Original_Node (Par)) = N_If_Expression
+ and then Nod /= First (Expressions (Original_Node (Par)))
then
return True;
@@ -14430,7 +14439,8 @@ package body Exp_Util is
return False;
end if;
- Par := Parent (Par);
+ Nod := Par;
+ Par := Parent (Nod);
end loop;
return False;
diff --git a/gcc/ada/exp_util.ads b/gcc/ada/exp_util.ads
index 932bf3f..267a127 100644
--- a/gcc/ada/exp_util.ads
+++ b/gcc/ada/exp_util.ads
@@ -669,7 +669,7 @@ package Exp_Util is
-- of the same expression won't generate multiple side effects, whereas
-- Force_Evaluation further guarantees that all evaluations will yield
-- the same result. If Mode is Relaxed then calls to this subprogram have
- -- no effect if Exp is side-effect free; if Mode is Strict and Exp is not
+ -- no effect if Exp is side-effect-free; if Mode is Strict and Exp is not
-- a static expression then no side-effect check is performed on Exp and
-- temporaries are unconditionally generated.
--
@@ -1075,7 +1075,7 @@ package Exp_Util is
-- side effect (used in implementing Force_Evaluation). Note: after call to
-- Remove_Side_Effects, it is safe to call New_Copy_Tree to obtain a copy
-- of the resulting expression. If Check_Side_Effects is set to True then
- -- no action is performed if Exp is known to be side-effect free.
+ -- no action is performed if Exp is known to be side-effect-free.
--
-- Related_Id denotes the entity of the context where Expr appears. Flags
-- Is_Low_Bound and Is_High_Bound specify whether the expression to check
@@ -1206,7 +1206,7 @@ package Exp_Util is
(L : List_Id;
Name_Req : Boolean := False;
Variable_Ref : Boolean := False) return Boolean;
- -- Determines if all elements of the list L are side-effect free. Name_Req
+ -- Determines if all elements of the list L are side-effect-free. Name_Req
-- and Variable_Ref are as described above.
procedure Silly_Boolean_Array_Not_Test (N : Node_Id; T : Entity_Id);
@@ -1255,9 +1255,10 @@ package Exp_Util is
-- extension to verify legality rules on inherited conditions.
function Within_Case_Or_If_Expression (N : Node_Id) return Boolean;
- -- Determine whether arbitrary node N is immediately within a case or an if
- -- expression. The criterion is whether temporaries created by the actions
- -- attached to N need to outlive an enclosing case or if expression.
+ -- Determine whether arbitrary node N is immediately within a dependent
+ -- expression of a case or an if expression. The criterion is whether
+ -- temporaries created by the actions attached to N need to outlive an
+ -- enclosing case or if expression.
private
pragma Inline (Duplicate_Subexpr);
diff --git a/gcc/ada/expect.c b/gcc/ada/expect.c
index 7333c11..add6255 100644
--- a/gcc/ada/expect.c
+++ b/gcc/ada/expect.c
@@ -41,6 +41,7 @@
#include "adaint.h"
#include <sys/types.h>
+#include <string.h>
#if defined (__vxworks) && defined (__RTP__)
# include <wait.h>
diff --git a/gcc/ada/freeze.adb b/gcc/ada/freeze.adb
index 6109913..26b5589 100644
--- a/gcc/ada/freeze.adb
+++ b/gcc/ada/freeze.adb
@@ -5689,77 +5689,6 @@ package body Freeze is
end if;
end if;
- -- The following checks are relevant only when SPARK_Mode is on as
- -- they are not standard Ada legality rules.
-
- if SPARK_Mode = On then
-
- -- A discriminated type cannot be effectively volatile
- -- (SPARK RM 7.1.3(5)).
-
- if Is_Effectively_Volatile (Rec) then
- if Has_Discriminants (Rec) then
- Error_Msg_N ("discriminated type & cannot be volatile", Rec);
- end if;
-
- -- A non-effectively volatile record type cannot contain
- -- effectively volatile components (SPARK RM 7.1.3(6)).
-
- else
- Comp := First_Component (Rec);
- while Present (Comp) loop
- if Comes_From_Source (Comp)
- and then Is_Effectively_Volatile (Etype (Comp))
- then
- Error_Msg_Name_1 := Chars (Rec);
- Error_Msg_N
- ("component & of non-volatile type % cannot be "
- & "volatile", Comp);
- end if;
-
- Next_Component (Comp);
- end loop;
- end if;
-
- -- A type which does not yield a synchronized object cannot have
- -- a component that yields a synchronized object (SPARK RM 9.5).
-
- if not Yields_Synchronized_Object (Rec) then
- Comp := First_Component (Rec);
- while Present (Comp) loop
- if Comes_From_Source (Comp)
- and then Yields_Synchronized_Object (Etype (Comp))
- then
- Error_Msg_Name_1 := Chars (Rec);
- Error_Msg_N
- ("component & of non-synchronized type % cannot be "
- & "synchronized", Comp);
- end if;
-
- Next_Component (Comp);
- end loop;
- end if;
-
- -- A Ghost type cannot have a component of protected or task type
- -- (SPARK RM 6.9(19)).
-
- if Is_Ghost_Entity (Rec) then
- Comp := First_Component (Rec);
- while Present (Comp) loop
- if Comes_From_Source (Comp)
- and then Is_Concurrent_Type (Etype (Comp))
- then
- Error_Msg_Name_1 := Chars (Rec);
- Error_Msg_N
- ("component & of ghost type % cannot be concurrent",
- Comp);
- end if;
-
- Next_Component (Comp);
- end loop;
- end if;
- end if;
-
-- Make sure that if we have an iterator aspect, then we have
-- either Constant_Indexing or Variable_Indexing.
@@ -8047,6 +7976,16 @@ package body Freeze is
procedure Freeze_Expression (N : Node_Id) is
+ function Declared_In_Expanded_Body
+ (N : Node_Id;
+ Typ : Entity_Id;
+ Nam : Entity_Id) return Boolean;
+ -- Given the N_Handled_Sequence_Of_Statements node of an expander
+ -- generated subprogram body, determines if the frozen entity is
+ -- declared inside this body. This is recognized by locating the
+ -- enclosing subprogram of the entity Name or its Type and
+ -- checking if it is this subprogram body.
+
function Find_Aggregate_Component_Desig_Type return Entity_Id;
-- If the expression is an array aggregate, the type of the component
-- expressions is also frozen. If the component type is an access type
@@ -8067,6 +8006,45 @@ package body Freeze is
-- Determines whether an entity E referenced in node N is declared in
-- the list L.
+ -------------------------------
+ -- Declared_In_Expanded_Body --
+ -------------------------------
+
+ function Declared_In_Expanded_Body
+ (N : Node_Id;
+ Typ : Entity_Id;
+ Nam : Entity_Id) return Boolean
+ is
+ pragma Assert (In_Expanded_Body (N));
+
+ Subp_Body : constant Node_Id := Parent (N);
+ Subp_Id : Entity_Id;
+ Scop : Entity_Id;
+
+ begin
+ if Acts_As_Spec (Subp_Body) then
+ Subp_Id := Unique_Defining_Entity (Specification (Subp_Body));
+ else
+ Subp_Id := Corresponding_Spec (Subp_Body);
+ end if;
+
+ if Present (Typ) then
+ Scop := Scope (Typ);
+ elsif Present (Nam) then
+ Scop := Scope (Nam);
+ else
+ Scop := Standard_Standard;
+ end if;
+
+ while Scop /= Standard_Standard
+ and then not Is_Subprogram (Scop)
+ loop
+ Scop := Scope (Scop);
+ end loop;
+
+ return Scop = Subp_Id;
+ end Declared_In_Expanded_Body;
+
-----------------------------------------
-- Find_Aggregate_Component_Desig_Type --
-----------------------------------------
@@ -8113,11 +8091,13 @@ package body Freeze is
if Nkind (P) /= N_Subprogram_Body then
return False;
- -- AI12-0157: An expression function that is a completion is a freeze
- -- point. If the body is the result of expansion, it is not.
+ -- Treat the generated body of an expression function like other
+ -- bodies generated during expansion (e.g. stream subprograms) so
+ -- that those bodies are not treated as freezing points.
elsif Was_Expression_Function (P) then
- return not Comes_From_Source (P);
+ pragma Assert (not Comes_From_Source (P));
+ return True;
-- This is the body of a generated predicate function
@@ -8185,14 +8165,6 @@ package body Freeze is
Allocator_Typ : Entity_Id := Empty;
- Freeze_Outside : Boolean := False;
- -- This flag is set true if the entity must be frozen outside the
- -- current subprogram. This happens in the case of expander generated
- -- subprograms (_Init_Proc, _Input, _Output, _Read, _Write) which do
- -- not freeze all entities like other bodies, but which nevertheless
- -- may reference entities that have to be frozen before the body and
- -- obviously cannot be frozen inside the body.
-
Freeze_Outside_Subp : Entity_Id := Empty;
-- This entity is set if we are inside a subprogram body and the frozen
-- entity is defined in the enclosing scope of this subprogram. In such
@@ -8537,79 +8509,11 @@ package body Freeze is
-- An exception occurs when the sequence of statements is
-- for an expander generated body that did not do the usual
-- freeze all operation. In this case we usually want to
- -- freeze outside this body, not inside it, and we skip
- -- past the subprogram body that we are inside.
+ -- freeze outside this body, not inside it, unless the
+ -- entity is declared inside this expander generated body.
- if In_Expanded_Body (Parent_P) then
- declare
- Subp_Body : constant Node_Id := Parent (Parent_P);
- Spec_Id : Entity_Id;
-
- begin
- -- Freeze the entity only when it is declared inside
- -- the body of the expander generated procedure. This
- -- case is recognized by the subprogram scope of the
- -- entity or its type, which is either the spec of an
- -- enclosing body, or (in the case of init_procs for
- -- which there is no separate spec) the current scope.
-
- if Nkind (Subp_Body) = N_Subprogram_Body then
- declare
- S : Entity_Id;
-
- begin
- Spec_Id := Corresponding_Spec (Subp_Body);
-
- if Present (Typ) then
- S := Scope (Typ);
- elsif Present (Nam) then
- S := Scope (Nam);
- else
- S := Standard_Standard;
- end if;
-
- while S /= Standard_Standard
- and then not Is_Subprogram (S)
- loop
- S := Scope (S);
- end loop;
-
- if S = Spec_Id then
- exit;
-
- elsif Present (Typ)
- and then Scope (Typ) = Current_Scope
- and then
- Defining_Entity (Subp_Body) = Current_Scope
- then
- exit;
- end if;
- end;
- end if;
-
- -- If the entity is not frozen by an expression
- -- function that is not a completion, continue
- -- climbing the tree.
-
- if Nkind (Subp_Body) = N_Subprogram_Body
- and then Was_Expression_Function (Subp_Body)
- then
- null;
-
- -- Freeze outside the body
-
- else
- Parent_P := Parent (Parent_P);
- Freeze_Outside := True;
- end if;
- end;
-
- -- Here if normal case where we are in handled statement
- -- sequence and want to do the insertion right there.
-
- else
- exit;
- end if;
+ exit when not In_Expanded_Body (Parent_P)
+ or else Declared_In_Expanded_Body (Parent_P, Typ, Nam);
-- If parent is a body or a spec or a block, then the current
-- node is a statement or declaration and we can insert the
@@ -8645,7 +8549,37 @@ package body Freeze is
| N_Selective_Accept
| N_Triggering_Alternative
=>
- exit when Is_List_Member (P);
+ if No (Current_Subprogram) then
+ exit when Is_List_Member (P);
+
+ -- Check exceptional case documented above for an enclosing
+ -- handled sequence of statements.
+
+ else
+ declare
+ Par : Node_Id := Parent (Parent_P);
+
+ begin
+ while Present (Par)
+ and then
+ Nkind (Par) /= N_Handled_Sequence_Of_Statements
+ and then Nkind (Parent (Par)) /= N_Subprogram_Body
+ loop
+ Par := Parent (Par);
+ end loop;
+
+ -- If we don't have a parent, then we are not in a
+ -- well-formed tree and we ignore the freeze request.
+ -- See previous comment in the enclosing loop.
+
+ if No (Par) then
+ return;
+ end if;
+
+ exit when not In_Expanded_Body (Par)
+ or else Declared_In_Expanded_Body (Par, Typ, Nam);
+ end;
+ end if;
-- The freeze nodes produced by an expression coming from the
-- Actions list of an N_Expression_With_Actions, short-circuit
@@ -8735,7 +8669,6 @@ package body Freeze is
-- placing them at the proper place, after the generic unit.
if (In_Spec_Exp and not Inside_A_Generic)
- or else Freeze_Outside
or else (Is_Type (Current_Scope)
and then (not Is_Concurrent_Type (Current_Scope)
or else not Has_Completion (Current_Scope)))
diff --git a/gcc/ada/gcc-interface/decl.cc b/gcc/ada/gcc-interface/decl.cc
index c446b14..d2456bf 100644
--- a/gcc/ada/gcc-interface/decl.cc
+++ b/gcc/ada/gcc-interface/decl.cc
@@ -8897,6 +8897,7 @@ annotate_value (tree gnu_size)
return No_Uint;
break;
+ case PARM_DECL:
case VAR_DECL:
tcode = Dynamic_Val;
ops[0] = UI_From_Int (++var_count);
diff --git a/gcc/ada/gcc-interface/gigi.h b/gcc/ada/gcc-interface/gigi.h
index eb5496f..63ccf31 100644
--- a/gcc/ada/gcc-interface/gigi.h
+++ b/gcc/ada/gcc-interface/gigi.h
@@ -350,7 +350,7 @@ struct attrib
};
/* Table of machine-independent internal attributes. */
-extern const struct attribute_spec gnat_internal_attribute_table[];
+extern const struct scoped_attribute_specs gnat_internal_attribute_table;
/* Define the entries in the standard data array. */
enum standard_datatypes
diff --git a/gcc/ada/gcc-interface/misc.cc b/gcc/ada/gcc-interface/misc.cc
index 7d6d446..01e8267 100644
--- a/gcc/ada/gcc-interface/misc.cc
+++ b/gcc/ada/gcc-interface/misc.cc
@@ -1352,6 +1352,11 @@ get_lang_specific (tree node)
return TYPE_LANG_SPECIFIC (node);
}
+const struct scoped_attribute_specs *const gnat_attribute_table[] =
+{
+ &gnat_internal_attribute_table
+};
+
/* Definitions for our language-specific hooks. */
#undef LANG_HOOKS_NAME
@@ -1417,7 +1422,7 @@ get_lang_specific (tree node)
#undef LANG_HOOKS_GET_FIXED_POINT_TYPE_INFO
#define LANG_HOOKS_GET_FIXED_POINT_TYPE_INFO gnat_get_fixed_point_type_info
#undef LANG_HOOKS_ATTRIBUTE_TABLE
-#define LANG_HOOKS_ATTRIBUTE_TABLE gnat_internal_attribute_table
+#define LANG_HOOKS_ATTRIBUTE_TABLE gnat_attribute_table
#undef LANG_HOOKS_BUILTIN_FUNCTION
#define LANG_HOOKS_BUILTIN_FUNCTION gnat_builtin_function
#undef LANG_HOOKS_INIT_TS
diff --git a/gcc/ada/gcc-interface/trans.cc b/gcc/ada/gcc-interface/trans.cc
index 9c418be..5e9e92d 100644
--- a/gcc/ada/gcc-interface/trans.cc
+++ b/gcc/ada/gcc-interface/trans.cc
@@ -69,6 +69,21 @@
#include "ada-tree.h"
#include "gigi.h"
+/* The following #include is for strub_make_callable.
+
+ This function marks a function as safe to call from strub contexts. We mark
+ Ada subprograms that may be called implicitly by the compiler, and that won't
+ leave on the stack caller data passed to them. This stops implicit calls
+ introduced in subprograms that have their stack scrubbed from being flagged
+ as unsafe, even in -fstrub=strict mode.
+
+ These subprograms are also marked with the strub(callable) attribute in Ada
+ sources, but their declarations aren't necessarily imported by GNAT, or made
+ visible to gigi, in units that end up relying on them. So when gigi
+ introduces their declarations on its own, it must also add the attribute, by
+ calling strub_make_callable. */
+#include "ipa-strub.h"
+
/* We should avoid allocating more than ALLOCA_THRESHOLD bytes via alloca,
for fear of running out of stack space. If we need more, we use xmalloc
instead. */
@@ -454,6 +469,7 @@ gigi (Node_Id gnat_root,
int64_type, NULL_TREE),
NULL_TREE, is_default, true, true, true, false,
false, NULL, Empty);
+ strub_make_callable (mulv64_decl);
if (Enable_128bit_Types)
{
@@ -466,6 +482,7 @@ gigi (Node_Id gnat_root,
NULL_TREE),
NULL_TREE, is_default, true, true, true, false,
false, NULL, Empty);
+ strub_make_callable (mulv128_decl);
}
/* Name of the _Parent field in tagged record types. */
@@ -722,6 +739,7 @@ build_raise_check (int check, enum exception_info_kind kind)
= create_subprog_decl (get_identifier (Name_Buffer), NULL_TREE, ftype,
NULL_TREE, is_default, true, true, true, false,
false, NULL, Empty);
+ strub_make_callable (result);
set_call_expr_flags (result, ECF_NORETURN | ECF_XTHROW);
return result;
diff --git a/gcc/ada/gcc-interface/utils.cc b/gcc/ada/gcc-interface/utils.cc
index e7b5c77..33904d8 100644
--- a/gcc/ada/gcc-interface/utils.cc
+++ b/gcc/ada/gcc-interface/utils.cc
@@ -39,6 +39,7 @@
#include "varasm.h"
#include "toplev.h"
#include "opts.h"
+#include "ipa-strub.h"
#include "output.h"
#include "debug.h"
#include "convert.h"
@@ -136,7 +137,7 @@ static tree fake_attribute_handler (tree *, tree, tree, int, bool *);
/* Table of machine-independent internal attributes for Ada. We support
this minimal set of attributes to accommodate the needs of builtins. */
-const struct attribute_spec gnat_internal_attribute_table[] =
+static const attribute_spec gnat_internal_attributes[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
@@ -217,9 +218,11 @@ const struct attribute_spec gnat_internal_attribute_table[] =
/* This is handled entirely in the front end. */
{ "hardbool", 0, 0, false, true, false, true,
fake_attribute_handler, NULL },
+};
- { NULL, 0, 0, false, false, false, false,
- NULL, NULL }
+const scoped_attribute_specs gnat_internal_attribute_table =
+{
+ "gnu", { gnat_internal_attributes }
};
/* Associates a GNAT tree node to a GCC tree node. It is used in
@@ -6740,9 +6743,77 @@ handle_no_stack_protector_attribute (tree *node, tree name, tree, int,
struct attribute_spec.handler. */
static tree
-handle_strub_attribute (tree *, tree, tree, int, bool *no_add_attrs)
+handle_strub_attribute (tree *node, tree name,
+ tree args,
+ int ARG_UNUSED (flags), bool *no_add_attrs)
{
- *no_add_attrs = true;
+ bool enable = true;
+
+ if (args && FUNCTION_POINTER_TYPE_P (*node))
+ *node = TREE_TYPE (*node);
+
+ if (args && FUNC_OR_METHOD_TYPE_P (*node))
+ {
+ switch (strub_validate_fn_attr_parm (TREE_VALUE (args)))
+ {
+ case 1:
+ case 2:
+ enable = true;
+ break;
+
+ case 0:
+ warning (OPT_Wattributes,
+ "%qE attribute ignored because of argument %qE",
+ name, TREE_VALUE (args));
+ *no_add_attrs = true;
+ enable = false;
+ break;
+
+ case -1:
+ case -2:
+ enable = false;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ args = TREE_CHAIN (args);
+ }
+
+ if (args)
+ {
+ warning (OPT_Wattributes,
+ "ignoring attribute %qE because of excess arguments"
+ " starting at %qE",
+ name, TREE_VALUE (args));
+ *no_add_attrs = true;
+ enable = false;
+ }
+
+ /* Warn about unmet expectations that the strub attribute works like a
+ qualifier. ??? Could/should we extend it to the element/field types
+ here? */
+ if (TREE_CODE (*node) == ARRAY_TYPE
+ || VECTOR_TYPE_P (*node)
+ || TREE_CODE (*node) == COMPLEX_TYPE)
+ warning (OPT_Wattributes,
+ "attribute %qE does not apply to elements"
+ " of non-scalar type %qT",
+ name, *node);
+ else if (RECORD_OR_UNION_TYPE_P (*node))
+ warning (OPT_Wattributes,
+ "attribute %qE does not apply to fields"
+ " of aggregate type %qT",
+ name, *node);
+
+ /* If we see a strub-enabling attribute, and we're at the default setting,
+ implicitly or explicitly, note that the attribute was seen, so that we can
+ reduce the compile-time overhead to nearly zero when the strub feature is
+ not used. */
+ if (enable && flag_strub < -2)
+ flag_strub += 2;
+
return NULL_TREE;
}
diff --git a/gcc/ada/gcc-interface/utils2.cc b/gcc/ada/gcc-interface/utils2.cc
index 95bbce2..6a95109 100644
--- a/gcc/ada/gcc-interface/utils2.cc
+++ b/gcc/ada/gcc-interface/utils2.cc
@@ -2021,7 +2021,10 @@ build_simple_component_ref (tree record, tree field, bool no_fold)
/* The failure of this assertion will very likely come from a missing
insertion of an explicit dereference. */
- gcc_assert (RECORD_OR_UNION_TYPE_P (type) && COMPLETE_TYPE_P (type));
+ gcc_assert (RECORD_OR_UNION_TYPE_P (type));
+
+ /* The type must be frozen at this point. */
+ gcc_assert (COMPLETE_TYPE_P (type));
/* Try to fold a conversion from another record or union type unless the type
contains a placeholder as it might be needed for a later substitution. */
diff --git a/gcc/ada/gnat_rm.texi b/gcc/ada/gnat_rm.texi
index 52ddb27..a6a1a67 100644
--- a/gcc/ada/gnat_rm.texi
+++ b/gcc/ada/gnat_rm.texi
@@ -19,7 +19,7 @@
@copying
@quotation
-GNAT Reference Manual , Oct 26, 2023
+GNAT Reference Manual , Nov 24, 2023
AdaCore
@@ -21621,7 +21621,7 @@ library streams facility; where
All files are opened using @code{fopen}.
@item
-All input/output operations use @code{fread}/@cite{fwrite}.
+All input/output operations use @code{fread}/@code{fwrite}.
@end itemize
There is no internal buffering of any kind at the Ada library level. The only
@@ -21675,8 +21675,7 @@ The records of a Direct_IO file are simply written to the file in index
sequence, with the first record starting at offset zero, and subsequent
records following. There is no control information of any kind. For
example, if 32-bit integers are being written, each record takes
-4-bytes, so the record at index @code{K} starts at offset
-(@code{K}-1)*4.
+4-bytes, so the record at index @code{K} starts at offset @code{(K-1)*4}.
There is no limit on the size of Direct_IO files, they are expanded as
necessary to accommodate whatever records are written to the file.
diff --git a/gcc/ada/inline.adb b/gcc/ada/inline.adb
index 1fbbe6d..cc2bc3a 100644
--- a/gcc/ada/inline.adb
+++ b/gcc/ada/inline.adb
@@ -3726,7 +3726,7 @@ package body Inline is
Insert_After (Parent (Entity (N)), Blk);
-- If the context is an assignment, and the left-hand side is free of
- -- side-effects, the replacement is also safe.
+ -- side effects, the replacement is also safe.
elsif Nkind (Parent (N)) = N_Assignment_Statement
and then
diff --git a/gcc/ada/libgnarl/a-rttiev.ads b/gcc/ada/libgnarl/a-rttiev.ads
index cb272df..3fa7d60 100644
--- a/gcc/ada/libgnarl/a-rttiev.ads
+++ b/gcc/ada/libgnarl/a-rttiev.ads
@@ -61,6 +61,12 @@ package Ada.Real_Time.Timing_Events is
function Time_Of_Event (Event : Timing_Event) return Time;
+ -- The compilation closure of this version (as opposed to the hie version)
+ -- of Ada.Real_Time.Timing_Events includes a specification of a
+ -- Concurrent Partition_Elaboration_Policy. Thus, a bind-time error
+ -- will result if this unit occurs in the same partition as a conflicting
+ -- Partition_Elaboration_Policy specification.
+
private
type Timing_Event is new Ada.Finalization.Limited_Controlled with record
diff --git a/gcc/ada/libgnat/i-cstrin.adb b/gcc/ada/libgnat/i-cstrin.adb
index afbac72..1eb2865 100644
--- a/gcc/ada/libgnat/i-cstrin.adb
+++ b/gcc/ada/libgnat/i-cstrin.adb
@@ -92,12 +92,10 @@ is
procedure Free (Item : in out chars_ptr) is
begin
- if Item = Null_Ptr then
- return;
+ if Item /= Null_Ptr then
+ Memory_Free (Item);
+ Item := Null_Ptr;
end if;
-
- Memory_Free (Item);
- Item := Null_Ptr;
end Free;
--------------------
@@ -187,6 +185,8 @@ is
function Position_Of_Nul (Into : char_array) return size_t is
begin
+ pragma Annotate (Gnatcheck, Exempt_On, "Improper_Returns",
+ "early returns for performance");
for J in Into'Range loop
if Into (J) = nul then
return J;
@@ -194,6 +194,8 @@ is
end loop;
return Into'Last + 1;
+
+ pragma Annotate (Gnatcheck, Exempt_Off, "Improper_Returns");
end Position_Of_Nul;
------------
@@ -226,6 +228,8 @@ is
Nul_Check : Boolean := False) return chars_ptr
is
begin
+ pragma Annotate (Gnatcheck, Exempt_On, "Improper_Returns",
+ "early returns for performance");
if Item = null then
return Null_Ptr;
elsif Nul_Check
@@ -235,6 +239,8 @@ is
else
return To_chars_ptr (Item (Item'First)'Address);
end if;
+
+ pragma Annotate (Gnatcheck, Exempt_Off, "Improper_Returns");
end To_Chars_Ptr;
------------
@@ -302,6 +308,8 @@ is
Length : size_t) return char_array
is
begin
+ pragma Annotate (Gnatcheck, Exempt_On, "Improper_Returns",
+ "early returns for performance");
if Item = Null_Ptr then
raise Dereference_Error;
end if;
@@ -328,6 +336,8 @@ is
return Result;
end;
+
+ pragma Annotate (Gnatcheck, Exempt_Off, "Improper_Returns");
end Value;
function Value (Item : chars_ptr) return String is
@@ -339,6 +349,8 @@ is
Result : char_array (0 .. Length);
begin
+ pragma Annotate (Gnatcheck, Exempt_On, "Improper_Returns",
+ "early returns for performance");
-- As per AI-00177, this is equivalent to:
-- To_Ada (Value (Item, Length) & nul);
@@ -357,6 +369,8 @@ is
Result (Length) := nul;
return To_Ada (Result);
+
+ pragma Annotate (Gnatcheck, Exempt_Off, "Improper_Returns");
end Value;
end Interfaces.C.Strings;
diff --git a/gcc/ada/libgnat/s-imgboo.adb b/gcc/ada/libgnat/s-imgboo.adb
index fb3301a..9a6340f 100644
--- a/gcc/ada/libgnat/s-imgboo.adb
+++ b/gcc/ada/libgnat/s-imgboo.adb
@@ -37,8 +37,6 @@ pragma Assertion_Policy (Ghost => Ignore,
Loop_Invariant => Ignore,
Assert => Ignore);
-with System.Val_Spec;
-
package body System.Img_Bool
with SPARK_Mode
is
diff --git a/gcc/ada/libgnat/s-imgboo.ads b/gcc/ada/libgnat/s-imgboo.ads
index d40c086..92cc7c3 100644
--- a/gcc/ada/libgnat/s-imgboo.ads
+++ b/gcc/ada/libgnat/s-imgboo.ads
@@ -42,7 +42,7 @@ pragma Assertion_Policy (Pre => Ignore,
Contract_Cases => Ignore,
Ghost => Ignore);
-with System.Val_Bool;
+with System.Val_Spec;
package System.Img_Bool
with SPARK_Mode, Preelaborate
@@ -56,8 +56,7 @@ is
Pre => S'First = 1
and then (if V then S'Length >= 4 else S'Length >= 5),
Post => (if V then P = 4 else P = 5)
- and then System.Val_Bool.Is_Boolean_Image_Ghost (S (1 .. P))
- and then System.Val_Bool.Value_Boolean (S (1 .. P)) = V;
+ and then System.Val_Spec.Is_Boolean_Image_Ghost (S (1 .. P), V);
-- Computes Boolean'Image (V) and stores the result in S (1 .. P)
-- setting the resulting value of P. The caller guarantees that S
-- is long enough to hold the result, and that S'First is 1.
diff --git a/gcc/ada/libgnat/s-valboo.ads b/gcc/ada/libgnat/s-valboo.ads
index d482199..6cdc3e5 100644
--- a/gcc/ada/libgnat/s-valboo.ads
+++ b/gcc/ada/libgnat/s-valboo.ads
@@ -47,40 +47,10 @@ package System.Val_Bool
is
pragma Preelaborate;
- function Is_Boolean_Image_Ghost (Str : String) return Boolean is
- (not System.Val_Spec.Only_Space_Ghost (Str, Str'First, Str'Last)
- and then
- (declare
- F : constant Positive := System.Val_Spec.First_Non_Space_Ghost
- (Str, Str'First, Str'Last);
- begin
- (F <= Str'Last - 3
- and then Str (F) in 't' | 'T'
- and then Str (F + 1) in 'r' | 'R'
- and then Str (F + 2) in 'u' | 'U'
- and then Str (F + 3) in 'e' | 'E'
- and then
- (if F + 3 < Str'Last then
- System.Val_Spec.Only_Space_Ghost (Str, F + 4, Str'Last)))
- or else
- (F <= Str'Last - 4
- and then Str (F) in 'f' | 'F'
- and then Str (F + 1) in 'a' | 'A'
- and then Str (F + 2) in 'l' | 'L'
- and then Str (F + 3) in 's' | 'S'
- and then Str (F + 4) in 'e' | 'E'
- and then
- (if F + 4 < Str'Last then
- System.Val_Spec.Only_Space_Ghost (Str, F + 5, Str'Last)))))
- with
- Ghost;
- -- Ghost function that returns True iff Str is the image of a boolean, that
- -- is "true" or "false" in any capitalization, possibly surounded by space
- -- characters.
-
function Value_Boolean (Str : String) return Boolean
with
- Pre => Is_Boolean_Image_Ghost (Str),
+ Pre => System.Val_Spec.Is_Boolean_Image_Ghost (Str, True)
+ or else System.Val_Spec.Is_Boolean_Image_Ghost (Str, False),
Post =>
Value_Boolean'Result =
(Str (System.Val_Spec.First_Non_Space_Ghost
diff --git a/gcc/ada/libgnat/s-valspe.ads b/gcc/ada/libgnat/s-valspe.ads
index dd861e5..6f0ca53 100644
--- a/gcc/ada/libgnat/s-valspe.ads
+++ b/gcc/ada/libgnat/s-valspe.ads
@@ -72,6 +72,42 @@ is
-- Ghost function that returns the index of the first non-space character
-- in S, which necessarily exists given the precondition on S.
+ function Is_Boolean_Image_Ghost
+ (Str : String;
+ Val : Boolean) return Boolean
+ is
+ (not Only_Space_Ghost (Str, Str'First, Str'Last)
+ and then
+ (declare
+ F : constant Positive := First_Non_Space_Ghost
+ (Str, Str'First, Str'Last);
+ begin
+ (Val
+ and then F <= Str'Last - 3
+ and then Str (F) in 't' | 'T'
+ and then Str (F + 1) in 'r' | 'R'
+ and then Str (F + 2) in 'u' | 'U'
+ and then Str (F + 3) in 'e' | 'E'
+ and then
+ (if F + 3 < Str'Last then
+ Only_Space_Ghost (Str, F + 4, Str'Last)))
+ or else
+ (not Val
+ and then F <= Str'Last - 4
+ and then Str (F) in 'f' | 'F'
+ and then Str (F + 1) in 'a' | 'A'
+ and then Str (F + 2) in 'l' | 'L'
+ and then Str (F + 3) in 's' | 'S'
+ and then Str (F + 4) in 'e' | 'E'
+ and then
+ (if F + 4 < Str'Last then
+ Only_Space_Ghost (Str, F + 5, Str'Last)))))
+ with
+ Ghost;
+ -- Ghost function that returns True iff Str is the image of boolean Val,
+ -- that is "true" or "false" in any capitalization, possibly surrounded by
+ -- space characters.
+
function Only_Number_Ghost (Str : String; From, To : Integer) return Boolean
is
(for all J in From .. To => Str (J) in '0' .. '9' | '_')
diff --git a/gcc/ada/sem_aggr.adb b/gcc/ada/sem_aggr.adb
index bc03a07..e1e7b8b 100644
--- a/gcc/ada/sem_aggr.adb
+++ b/gcc/ada/sem_aggr.adb
@@ -4623,14 +4623,6 @@ package body Sem_Aggr is
-- either New_Assoc_List, or the association being built for an inner
-- aggregate.
- procedure Add_Discriminant_Values
- (New_Aggr : Node_Id;
- Assoc_List : List_Id);
- -- The constraint to a component may be given by a discriminant of the
- -- enclosing type, in which case we have to retrieve its value, which is
- -- part of the enclosing aggregate. Assoc_List provides the discriminant
- -- associations of the current type or of some enclosing record.
-
function Discriminant_Present (Input_Discr : Entity_Id) return Boolean;
-- If aggregate N is a regular aggregate this routine will return True.
-- Otherwise, if N is an extension aggregate, then Input_Discr denotes
@@ -4673,13 +4665,6 @@ package body Sem_Aggr is
-- An error message is emitted if the components taking their value from
-- the others choice do not have same type.
- procedure Propagate_Discriminants
- (Aggr : Node_Id;
- Assoc_List : List_Id);
- -- Nested components may themselves be discriminated types constrained
- -- by outer discriminants, whose values must be captured before the
- -- aggregate is expanded into assignments.
-
procedure Resolve_Aggr_Expr (Expr : Node_Id; Component : Entity_Id);
-- Analyzes and resolves expression Expr against the Etype of the
-- Component. This routine also applies all appropriate checks to Expr.
@@ -4736,73 +4721,6 @@ package body Sem_Aggr is
end if;
end Add_Association;
- -----------------------------
- -- Add_Discriminant_Values --
- -----------------------------
-
- procedure Add_Discriminant_Values
- (New_Aggr : Node_Id;
- Assoc_List : List_Id)
- is
- Assoc : Node_Id;
- Discr : Entity_Id;
- Discr_Elmt : Elmt_Id;
- Discr_Val : Node_Id;
- Val : Entity_Id;
-
- begin
- Discr := First_Discriminant (Etype (New_Aggr));
- Discr_Elmt := First_Elmt (Discriminant_Constraint (Etype (New_Aggr)));
- while Present (Discr_Elmt) loop
- Discr_Val := Node (Discr_Elmt);
-
- -- If the constraint is given by a discriminant then it is a
- -- discriminant of an enclosing record, and its value has already
- -- been placed in the association list.
-
- if Is_Entity_Name (Discr_Val)
- and then Ekind (Entity (Discr_Val)) = E_Discriminant
- then
- Val := Entity (Discr_Val);
-
- Assoc := First (Assoc_List);
- while Present (Assoc) loop
- if Present (Entity (First (Choices (Assoc))))
- and then Entity (First (Choices (Assoc))) = Val
- then
- Discr_Val := Expression (Assoc);
- exit;
- end if;
-
- Next (Assoc);
- end loop;
- end if;
-
- Add_Association
- (Discr, New_Copy_Tree (Discr_Val),
- Component_Associations (New_Aggr));
-
- -- If the discriminant constraint is a current instance, mark the
- -- current aggregate so that the self-reference can be expanded by
- -- Build_Record_Aggr_Code.Replace_Type later.
-
- if Nkind (Discr_Val) = N_Attribute_Reference
- and then Is_Entity_Name (Prefix (Discr_Val))
- and then Is_Type (Entity (Prefix (Discr_Val)))
- and then
- Is_Ancestor
- (Entity (Prefix (Discr_Val)),
- Etype (N),
- Use_Full_View => True)
- then
- Set_Has_Self_Reference (N);
- end if;
-
- Next_Elmt (Discr_Elmt);
- Next_Discriminant (Discr);
- end loop;
- end Add_Discriminant_Values;
-
--------------------------
-- Discriminant_Present --
--------------------------
@@ -5126,99 +5044,6 @@ package body Sem_Aggr is
return Expr;
end Get_Value;
- -----------------------------
- -- Propagate_Discriminants --
- -----------------------------
-
- procedure Propagate_Discriminants
- (Aggr : Node_Id;
- Assoc_List : List_Id)
- is
- Loc : constant Source_Ptr := Sloc (N);
-
- procedure Process_Component (Comp : Entity_Id);
- -- Add one component with a box association to the inner aggregate,
- -- and recurse if component is itself composite.
-
- -----------------------
- -- Process_Component --
- -----------------------
-
- procedure Process_Component (Comp : Entity_Id) is
- T : constant Entity_Id := Etype (Comp);
- New_Aggr : Node_Id;
-
- begin
- if Is_Record_Type (T) and then Has_Discriminants (T) then
- New_Aggr := Make_Aggregate (Loc, No_List, New_List);
- Set_Etype (New_Aggr, T);
-
- Add_Association
- (Comp, New_Aggr, Component_Associations (Aggr));
-
- -- Collect discriminant values and recurse
-
- Add_Discriminant_Values (New_Aggr, Assoc_List);
- Propagate_Discriminants (New_Aggr, Assoc_List);
-
- Build_Constrained_Itype
- (New_Aggr, T, Component_Associations (New_Aggr));
- else
- Add_Association
- (Comp, Empty, Component_Associations (Aggr),
- Is_Box_Present => True);
- end if;
- end Process_Component;
-
- -- Local variables
-
- Aggr_Type : constant Entity_Id := Base_Type (Etype (Aggr));
- Components : constant Elist_Id := New_Elmt_List;
- Def_Node : constant Node_Id :=
- Type_Definition (Declaration_Node (Aggr_Type));
-
- Comp : Node_Id;
- Comp_Elmt : Elmt_Id;
- Errors : Boolean;
-
- -- Start of processing for Propagate_Discriminants
-
- begin
- -- The component type may be a variant type. Collect the components
- -- that are ruled by the known values of the discriminants. Their
- -- values have already been inserted into the component list of the
- -- current aggregate.
-
- if Nkind (Def_Node) = N_Record_Definition
- and then Present (Component_List (Def_Node))
- and then Present (Variant_Part (Component_List (Def_Node)))
- then
- Gather_Components (Aggr_Type,
- Component_List (Def_Node),
- Governed_By => Component_Associations (Aggr),
- Into => Components,
- Report_Errors => Errors);
-
- Comp_Elmt := First_Elmt (Components);
- while Present (Comp_Elmt) loop
- if Ekind (Node (Comp_Elmt)) /= E_Discriminant then
- Process_Component (Node (Comp_Elmt));
- end if;
-
- Next_Elmt (Comp_Elmt);
- end loop;
-
- -- No variant part, iterate over all components
-
- else
- Comp := First_Component (Etype (Aggr));
- while Present (Comp) loop
- Process_Component (Comp);
- Next_Component (Comp);
- end loop;
- end if;
- end Propagate_Discriminants;
-
-----------------------
-- Resolve_Aggr_Expr --
-----------------------
@@ -6074,107 +5899,16 @@ package body Sem_Aggr is
Assoc_List => New_Assoc_List);
Set_Has_Self_Reference (N);
- elsif Needs_Simple_Initialization (Ctyp) then
+ elsif Needs_Simple_Initialization (Ctyp)
+ or else Has_Non_Null_Base_Init_Proc (Ctyp)
+ or else not Expander_Active
+ then
Add_Association
(Component => Component,
Expr => Empty,
Assoc_List => New_Assoc_List,
Is_Box_Present => True);
- elsif Has_Non_Null_Base_Init_Proc (Ctyp)
- or else not Expander_Active
- then
- if Is_Record_Type (Ctyp)
- and then Has_Discriminants (Ctyp)
- and then not Is_Private_Type (Ctyp)
- then
- -- We build a partially initialized aggregate with the
- -- values of the discriminants and box initialization
- -- for the rest, if other components are present.
-
- -- The type of the aggregate is the known subtype of
- -- the component. The capture of discriminants must be
- -- recursive because subcomponents may be constrained
- -- (transitively) by discriminants of enclosing types.
- -- For a private type with discriminants, a call to the
- -- initialization procedure will be generated, and no
- -- subaggregate is needed.
-
- Capture_Discriminants : declare
- Loc : constant Source_Ptr := Sloc (N);
- Expr : Node_Id;
-
- begin
- Expr := Make_Aggregate (Loc, No_List, New_List);
- Set_Etype (Expr, Ctyp);
-
- -- If the enclosing type has discriminants, they have
- -- been collected in the aggregate earlier, and they
- -- may appear as constraints of subcomponents.
-
- -- Similarly if this component has discriminants, they
- -- might in turn be propagated to their components.
-
- if Has_Discriminants (Typ) then
- Add_Discriminant_Values (Expr, New_Assoc_List);
- Propagate_Discriminants (Expr, New_Assoc_List);
-
- elsif Has_Discriminants (Ctyp) then
- Add_Discriminant_Values
- (Expr, Component_Associations (Expr));
- Propagate_Discriminants
- (Expr, Component_Associations (Expr));
-
- Build_Constrained_Itype
- (Expr, Ctyp, Component_Associations (Expr));
-
- else
- declare
- Comp : Entity_Id;
-
- begin
- -- If the type has additional components, create
- -- an OTHERS box association for them.
-
- Comp := First_Component (Ctyp);
- while Present (Comp) loop
- if Ekind (Comp) = E_Component then
- if not Is_Record_Type (Etype (Comp)) then
- Append_To
- (Component_Associations (Expr),
- Make_Component_Association (Loc,
- Choices =>
- New_List (
- Make_Others_Choice (Loc)),
- Expression => Empty,
- Box_Present => True));
- end if;
-
- exit;
- end if;
-
- Next_Component (Comp);
- end loop;
- end;
- end if;
-
- Add_Association
- (Component => Component,
- Expr => Expr,
- Assoc_List => New_Assoc_List);
- end Capture_Discriminants;
-
- -- Otherwise the component type is not a record, or it has
- -- not discriminants, or it is private.
-
- else
- Add_Association
- (Component => Component,
- Expr => Empty,
- Assoc_List => New_Assoc_List,
- Is_Box_Present => True);
- end if;
-
-- Otherwise we only need to resolve the expression if the
-- component has partially initialized values (required to
-- expand the corresponding assignments and run-time checks).
diff --git a/gcc/ada/sem_attr.adb b/gcc/ada/sem_attr.adb
index 000253e..a194360 100644
--- a/gcc/ada/sem_attr.adb
+++ b/gcc/ada/sem_attr.adb
@@ -8693,6 +8693,26 @@ package body Sem_Attr is
Set_Raises_Constraint_Error (N);
end if;
+ -- RM 13.14(8/4): a nonstatic expression in a spec expression does
+ -- not cause freezing, so the representation attributes cannot be
+ -- evaluated at this point if the type is not already frozen.
+
+ if not Static
+ and then In_Spec_Expression
+ and then Id in Attribute_Alignment
+ | Attribute_Component_Size
+ | Attribute_Max_Alignment_For_Allocation
+ | Attribute_Max_Size_In_Storage_Elements
+ | Attribute_Object_Size
+ | Attribute_Size
+ | Attribute_Small
+ | Attribute_VADS_Size
+ | Attribute_Value_Size
+ and then not Is_Frozen (P_Type)
+ then
+ return;
+ end if;
+
-- Array case. We enforce the constrained requirement of (RM 4.9(7-8))
-- since we can't do anything with unconstrained arrays. In addition,
-- only the First, Last and Length attributes are possibly static.
diff --git a/gcc/ada/sem_ch12.adb b/gcc/ada/sem_ch12.adb
index 7c645c4..5db9754 100644
--- a/gcc/ada/sem_ch12.adb
+++ b/gcc/ada/sem_ch12.adb
@@ -11526,19 +11526,6 @@ package body Sem_Ch12 is
Actual);
end if;
- -- Check actual/formal compatibility with respect to the four
- -- volatility refinement aspects.
-
- declare
- Actual_Obj : constant Entity_Id :=
- Get_Enclosing_Deep_Object (Actual);
- begin
- Check_Volatility_Compatibility
- (Actual_Obj, A_Gen_Obj, "actual object",
- "its corresponding formal object of mode in out",
- Srcpos_Bearer => Actual);
- end;
-
-- The actual for a ghost generic formal IN OUT parameter should be a
-- ghost object (SPARK RM 6.9(14)).
@@ -11746,22 +11733,6 @@ package body Sem_Ch12 is
("actual must exclude null to match generic formal#", Actual);
end if;
- -- An effectively volatile object cannot be used as an actual in a
- -- generic instantiation (SPARK RM 7.1.3(7)). The following check is
- -- relevant only when SPARK_Mode is on as it is not a standard Ada
- -- legality rule, and also verifies that the actual is an object.
-
- if SPARK_Mode = On
- and then Present (Actual)
- and then Is_Object_Reference (Actual)
- and then Is_Effectively_Volatile_Object (Actual)
- and then not Is_Effectively_Volatile (A_Gen_Obj)
- then
- Error_Msg_N
- ("volatile object cannot act as actual in generic instantiation",
- Actual);
- end if;
-
return List;
end Instantiate_Object;
@@ -12944,14 +12915,6 @@ package body Sem_Ch12 is
("actual for& must have Independent_Components specified",
Actual, A_Gen_T);
end if;
-
- -- Check actual/formal compatibility with respect to the four
- -- volatility refinement aspects.
-
- Check_Volatility_Compatibility
- (Act_T, A_Gen_T,
- "actual type", "its corresponding formal type",
- Srcpos_Bearer => Actual);
end if;
end Check_Shared_Variable_Control_Aspects;
@@ -14001,9 +13964,10 @@ package body Sem_Ch12 is
and then (Ekind (Base_Type (Etype (Actual_Discr)))) =
E_Anonymous_Access_Type
and then
- Get_Instance_Of
- (Designated_Type (Base_Type (Formal_Subt))) =
- Designated_Type (Base_Type (Etype (Actual_Discr)))
+ Subtypes_Match
+ (Get_Instance_Of
+ (Designated_Type (Base_Type (Formal_Subt))),
+ Designated_Type (Base_Type (Etype (Actual_Discr))))
then
null;
@@ -17322,8 +17286,14 @@ package body Sem_Ch12 is
and then (Ekind (Base_Type (Etype (Actual_Discr)))) =
E_Anonymous_Access_Type
and then
- Designated_Type (Base_Type (Formal_Subt)) =
- Designated_Type (Base_Type (Etype (Actual_Discr)))
+ Base_Type
+ (Designated_Type (Base_Type (Formal_Subt))) =
+ Base_Type
+ (Designated_Type (Base_Type (Etype (Actual_Discr))))
+ and then
+ Subtypes_Statically_Match
+ (Designated_Type (Base_Type (Formal_Subt)),
+ Designated_Type (Base_Type (Etype (Actual_Discr))))
then
null;
diff --git a/gcc/ada/sem_ch13.adb b/gcc/ada/sem_ch13.adb
index 302fab7..8f6fa3a 100644
--- a/gcc/ada/sem_ch13.adb
+++ b/gcc/ada/sem_ch13.adb
@@ -10794,7 +10794,7 @@ package body Sem_Ch13 is
Set_Analyzed (FBody);
end if;
- -- Static predicate functions are always side-effect free, and
+ -- Static predicate functions are always side-effect-free, and
-- in most cases dynamic predicate functions are as well. Mark
-- them as such whenever possible, so redundant predicate checks
-- can be optimized. If there is a variable reference within the
@@ -16141,7 +16141,7 @@ package body Sem_Ch13 is
function Extract_Entity (Expr : Node_Id) return Entity_Id;
-- Given an element of a Stable_Properties aspect spec, return the
-- associated entity.
- -- This function updates the Negated flag as a side-effect.
+ -- This function updates the Negated flag as a side effect.
--------------------
-- Extract_Entity --
diff --git a/gcc/ada/sem_ch3.adb b/gcc/ada/sem_ch3.adb
index ca60850..96fd16d 100644
--- a/gcc/ada/sem_ch3.adb
+++ b/gcc/ada/sem_ch3.adb
@@ -1442,26 +1442,6 @@ package body Sem_Ch3 is
end if;
Set_Etype (T, T);
-
- -- For SPARK, check that the designated type is compatible with
- -- respect to volatility with the access type.
-
- if SPARK_Mode /= Off
- and then Comes_From_Source (T)
- then
- -- ??? UNIMPLEMENTED
- -- In the case where the designated type is incomplete at this
- -- point, performing this check here is harmless but the check
- -- will need to be repeated when the designated type is complete.
-
- -- The preceding call to Comes_From_Source is needed because the
- -- FE sometimes introduces implicitly declared access types. See,
- -- for example, the expansion of nested_po.ads in OA28-015.
-
- Check_Volatility_Compatibility
- (Full_Desig, T, "designated type", "access type",
- Srcpos_Bearer => T);
- end if;
end if;
-- If the type has appeared already in a with_type clause, it is frozen
@@ -4453,7 +4433,8 @@ package body Sem_Ch3 is
-- If not a deferred constant, then the object declaration freezes
-- its type, unless the object is of an anonymous type and has delayed
- -- aspects. In that case the type is frozen when the object itself is.
+ -- aspects (in that case the type is frozen when the object itself is)
+ -- or the context is a spec expression.
else
Check_Fully_Declared (T, N);
@@ -4463,7 +4444,7 @@ package body Sem_Ch3 is
and then Is_Itype (T)
then
Set_Has_Delayed_Freeze (T);
- else
+ elsif not In_Spec_Expression then
Freeze_Before (N, T);
end if;
end if;
@@ -17336,29 +17317,6 @@ package body Sem_Ch3 is
begin
Parent_Type := Find_Type_Of_Subtype_Indic (Indic);
- if SPARK_Mode = On
- and then Is_Tagged_Type (Parent_Type)
- then
- declare
- Partial_View : constant Entity_Id :=
- Incomplete_Or_Partial_View (Parent_Type);
-
- begin
- -- If the partial view was not found then the parent type is not
- -- a private type. Otherwise check if the partial view is a tagged
- -- private type.
-
- if Present (Partial_View)
- and then Is_Private_Type (Partial_View)
- and then not Is_Tagged_Type (Partial_View)
- then
- Error_Msg_NE
- ("cannot derive from & declared as untagged private "
- & "(SPARK RM 3.4(1))", N, Partial_View);
- end if;
- end;
- end if;
-
-- Ada 2005 (AI-251): In case of interface derivation check that the
-- parent is also an interface.
@@ -21013,19 +20971,6 @@ package body Sem_Ch3 is
end if;
end if;
- -- A discriminant cannot be effectively volatile (SPARK RM 7.1.3(4)).
- -- This check is relevant only when SPARK_Mode is on as it is not a
- -- standard Ada legality rule. The only way for a discriminant to be
- -- effectively volatile is to have an effectively volatile type, so
- -- we check this directly, because the Ekind of Discr might not be
- -- set yet (to help preventing cascaded errors on derived types).
-
- if SPARK_Mode = On
- and then Is_Effectively_Volatile (Discr_Type)
- then
- Error_Msg_N ("discriminant cannot be volatile", Discr);
- end if;
-
Next (Discr);
end loop;
diff --git a/gcc/ada/sem_ch4.adb b/gcc/ada/sem_ch4.adb
index 83705b9..d506944 100644
--- a/gcc/ada/sem_ch4.adb
+++ b/gcc/ada/sem_ch4.adb
@@ -8473,9 +8473,21 @@ package body Sem_Ch4 is
-- resolution does not depend on the type of the parameter that
-- includes the indexing operation.
- elsif Nkind (Parent (Par)) in N_Subprogram_Call
- and then Is_Entity_Name (Name (Parent (Par)))
- then
+ elsif Nkind (Parent (Par)) in N_Subprogram_Call then
+
+ if not Is_Entity_Name (Name (Parent (Par))) then
+
+ -- ??? We don't know what to do with an N_Selected_Component
+ -- node for a prefixed-notation call to AA.BB where AA's
+ -- type is known, but BB has not yet been resolved. In that
+ -- case, the preceding Is_Entity_Name call returns False.
+ -- Incorrectly returning False here will usually work
+ -- better than incorrectly returning True, so that's what
+ -- we do for now.
+
+ return False;
+ end if;
+
declare
Proc : Entity_Id;
diff --git a/gcc/ada/sem_ch5.adb b/gcc/ada/sem_ch5.adb
index de38ddf..43dee2b 100644
--- a/gcc/ada/sem_ch5.adb
+++ b/gcc/ada/sem_ch5.adb
@@ -3452,14 +3452,6 @@ package body Sem_Ch5 is
if Present (Iterator_Filter (N)) then
Preanalyze_And_Resolve (Iterator_Filter (N), Standard_Boolean);
end if;
-
- -- A loop parameter cannot be effectively volatile (SPARK RM 7.1.3(4)).
- -- This check is relevant only when SPARK_Mode is on as it is not a
- -- standard Ada legality check.
-
- if SPARK_Mode = On and then Is_Effectively_Volatile (Id) then
- Error_Msg_N ("loop parameter cannot be volatile", Id);
- end if;
end Analyze_Loop_Parameter_Specification;
----------------------------
diff --git a/gcc/ada/sem_ch6.adb b/gcc/ada/sem_ch6.adb
index 4f2521a..da6f6c4 100644
--- a/gcc/ada/sem_ch6.adb
+++ b/gcc/ada/sem_ch6.adb
@@ -1753,11 +1753,11 @@ package body Sem_Ch6 is
and then Ekind (Entity (Selector_Name (P)))
in E_Entry | E_Function | E_Procedure
then
- -- When front-end inlining is enabled, as with SPARK_Mode, a call
+ -- When front-end inlining is enabled, as with GNATprove mode, a call
-- in prefix notation may still be missing its controlling argument,
-- so perform the transformation now.
- if SPARK_Mode = On and then In_Inlined_Body then
+ if GNATprove_Mode and then In_Inlined_Body then
declare
Subp : constant Entity_Id := Entity (Selector_Name (P));
Typ : constant Entity_Id := Etype (Prefix (P));
@@ -4612,7 +4612,7 @@ package body Sem_Ch6 is
Analyze_SPARK_Subprogram_Specification (Specification (N));
- -- A function with side-effects shall not be an expression function
+ -- A function with side effects shall not be an expression function
-- (SPARK RM 6.1.11(6)).
if Present (Spec_Id)
@@ -5240,7 +5240,7 @@ package body Sem_Ch6 is
Analyze_Aspect_Specifications (N, Designator);
-- The legality of a function specification in SPARK depends on whether
- -- the function is a function with or without side-effects. Analyze the
+ -- the function is a function with or without side effects. Analyze the
-- pragma in advance if present, before specific SPARK legality checks.
Analyze_Pragmas_If_Present (N, Pragma_SPARK_Mode);
diff --git a/gcc/ada/sem_ch8.adb b/gcc/ada/sem_ch8.adb
index 88be8ae..c5bf086 100644
--- a/gcc/ada/sem_ch8.adb
+++ b/gcc/ada/sem_ch8.adb
@@ -2691,7 +2691,7 @@ package body Sem_Ch8 is
-- Each attempt to find a suitable primitive of a particular
-- type operates on its own copy of the original renaming.
-- As a result the original renaming is kept decoration and
- -- side-effect free.
+ -- side-effect-free.
-- Inherit the overloaded status of the renamed subprogram name
@@ -6473,6 +6473,344 @@ package body Sem_Ch8 is
then
Collect_Interps (N);
+ -- Background: for an instance of a generic, expansion sets
+ -- entity fields on names that refer to things declared
+ -- outside of the instance, but leaves the entity field
+ -- unset on names that should end up referring to things
+ -- declared within the instance. These will instead be set by
+ -- analysis - the idea is that if a name resolves a certain
+ -- way in the generic, then we should get corresponding results
+ -- if we resolve the corresponding name in an instance. For this
+ -- to work, we have to prevent unrelated declarations that
+ -- happen to be visible at the point of the instantiation from
+ -- participating in resolution and causing problems (typically
+ -- ambiguities, but incorrect resolutions are also probably
+ -- possible). So here we filter out such unwanted interpretations.
+ --
+ -- Note that there are other problems with this approach to
+ -- implementing generic instances that are not addressed here.
+ -- Inside a generic, we might have no trouble resolving a call
+ -- where the two candidates are a function that returns a
+ -- formal type and a function that returns Standard.Integer.
+ -- If we instantiate that generic and the corresponding actual
+ -- type is Standard.Integer, then we may incorrectly reject the
+ -- corresponding call in the instance as ambiguous (or worse,
+ -- we may quietly choose the wrong resolution).
+ --
+ -- Another such problem can occur with a type derived from a
+ -- formal derived type. In an instance, such a type may have
+ -- inherited subprograms that are not present in the generic.
+ -- These can then interfere with name resolution (e.g., if
+ -- some declaration is visible via a use-clause in the generic
+ -- and some name in the generic refers to it, then the
+ -- corresponding declaration in an instance may be hidden by
+ -- a directly visible inherited subprogram and the corresponding
+ -- name in the instance may then incorrectly refer to the
+ -- inherited subprogram).
+
+ if In_Instance then
+ declare
+ function Is_Actual_Subp_Of_Inst
+ (E : Entity_Id; Inst : Entity_Id) return Boolean;
+ -- Return True if E is an actual parameter
+ -- corresponding to a formal subprogram of the
+ -- instantiation Inst.
+
+ function Is_Extraneously_Visible
+ (E : Entity_Id; Inst : Entity_Id) return Boolean;
+ -- Return True if E is an interpretation that should
+ -- be filtered out. That is, if E is an "unwanted"
+ -- resolution candidate as described in the
+                  -- preceding "Background:" comment.
+
+ function Is_Generic_Actual_Subp_Name
+ (N : Node_Id) return Boolean;
+ -- Return True if N is the name of a subprogram
+ -- renaming generated for a generic actual.
+
+ ----------------------------
+ -- Is_Actual_Subp_Of_Inst --
+ ----------------------------
+
+ function Is_Actual_Subp_Of_Inst
+ (E : Entity_Id; Inst : Entity_Id) return Boolean
+ is
+ Decl : Node_Id;
+ Generic_From_E, Generic_From_Inst : Entity_Id;
+ begin
+ -- ???
+ -- Why is Is_Generic_Actual_Subprogram undefined
+ -- in the E_Operator case?
+
+ if Ekind (E) not in E_Function | E_Procedure
+ or else not Is_Generic_Actual_Subprogram (E)
+ then
+ return False;
+ end if;
+
+ Decl := Enclosing_Declaration (E);
+
+                     -- Look for the subprogram renaming declaration built
+ -- for a generic actual subprogram. Unclear why
+ -- Original_Node call is needed, but sometimes it is.
+
+ if Decl not in N_Subprogram_Renaming_Declaration_Id then
+ Decl := Original_Node (Decl);
+ end if;
+
+ if Decl in N_Subprogram_Renaming_Declaration_Id then
+ Generic_From_E :=
+ Scope (Corresponding_Formal_Spec (Decl));
+ else
+ -- ??? In the case of a generic formal subprogram
+ -- which has a pre/post condition, it is unclear how
+ -- to find the Corresponding_Formal_Spec-bearing node.
+
+ Generic_From_E := Empty;
+ end if;
+
+ declare
+ Inst_Parent : Node_Id := Parent (Inst);
+ begin
+ if Nkind (Inst_Parent) = N_Defining_Program_Unit_Name
+ then
+ Inst_Parent := Parent (Inst_Parent);
+ end if;
+
+ Generic_From_Inst := Generic_Parent (Inst_Parent);
+ end;
+
+ return Generic_From_E = Generic_From_Inst
+ and then Present (Generic_From_E);
+ end Is_Actual_Subp_Of_Inst;
+
+ -----------------------------
+ -- Is_Extraneously_Visible --
+ -----------------------------
+
+ function Is_Extraneously_Visible
+ (E : Entity_Id; Inst : Entity_Id) return Boolean is
+ begin
+ -- Return False in various non-extraneous cases.
+ -- If none of those apply, then return True.
+
+ if Within_Scope (E, Inst) then
+ -- return False if E declared within Inst
+ return False;
+
+ elsif Is_Actual_Subp_Of_Inst (E, Inst) then
+ -- Return False if E is an actual subprogram,
+ -- and therefore may be referenced within Inst.
+ return False;
+
+ elsif Nkind (Parent (E)) = N_Subtype_Declaration
+ and then Defining_Identifier (Parent (E)) /= E
+ then
+ -- Return False for a primitive subp of an
+ -- actual corresponding to a formal type.
+
+ return False;
+
+ elsif not In_Open_Scopes (Scope (E)) then
+ -- Return False if this candidate is not
+ -- declared in a currently open scope.
+
+ return False;
+
+ else
+ declare
+ -- We want to know whether the declaration of
+ -- E comes textually after the declaration of
+ -- the generic that Inst is an instance of
+ -- (and after the generic body if there is one).
+ -- To compare, we climb up the deeper of the two
+                           -- scope chains until the levels match.
+ -- There is a separate loop for each starting
+ -- point, but we will execute zero iterations
+ -- for at least one of the two loops.
+ -- For each Xxx_Scope, we have a corresponding
+ -- Xxx_Trailer; the latter is the predecessor of
+ -- the former in the scope traversal.
+
+ E_Trailer : Entity_Id := E;
+ E_Scope : Entity_Id := Scope (E);
+ pragma Assert (Present (E_Scope));
+
+ -- the generic that Inst is an instance of
+ Gen_Trailer : Entity_Id :=
+ Generic_Parent (Specification
+ (Unit_Declaration_Node (Inst)));
+ Gen_Scope : Entity_Id;
+
+ function Has_Formal_Package_Parameter
+ (Generic_Id : Entity_Id) return Boolean;
+ -- Return True iff given generic has at least one
+ -- formal package parameter.
+
+ ----------------------------------
+ -- Has_Formal_Package_Parameter --
+ ----------------------------------
+
+ function Has_Formal_Package_Parameter
+ (Generic_Id : Entity_Id) return Boolean is
+ Formal_Decl : Node_Id :=
+ First (Generic_Formal_Declarations
+ (Enclosing_Generic_Unit (Generic_Id)));
+ begin
+ while Present (Formal_Decl) loop
+ if Nkind (Original_Node (Formal_Decl)) =
+ N_Formal_Package_Declaration
+ then
+ return True;
+ end if;
+
+ Next (Formal_Decl);
+ end loop;
+ return False;
+ end Has_Formal_Package_Parameter;
+
+ begin
+ if No (Gen_Trailer) then
+                           -- It is unclear how this can happen, but it does.
+ return False;
+ else
+ if Has_Formal_Package_Parameter (Gen_Trailer)
+ then
+ -- Punt on sorting out what is visible via a
+ -- formal package.
+
+ return False;
+ end if;
+
+ if Is_Child_Unit (Gen_Trailer)
+ and then Is_Generic_Unit
+ (Entity (Name
+ (Parent (Gen_Trailer))))
+ then
+ -- Punt on dealing with how the FE fails
+ -- to build a tree for a "sprouted" generic
+ -- so that what should be a reference to
+ -- I1.G2 instead points into G1.G2 .
+
+ return False;
+ end if;
+
+ Gen_Scope := Scope (Gen_Trailer);
+
+ while Scope_Depth (E_Scope)
+ > Scope_Depth (Gen_Scope)
+ loop
+ E_Trailer := E_Scope;
+ E_Scope := Scope (E_Scope);
+ end loop;
+ while Scope_Depth (E_Scope)
+ < Scope_Depth (Gen_Scope)
+ loop
+ Gen_Trailer := Gen_Scope;
+ Gen_Scope := Scope (Gen_Scope);
+ end loop;
+ end if;
+
+ if Gen_Scope = E_Scope then
+ -- if Gen_Trailer and E_Trailer are declared
+ -- in the same declarative part and E_Trailer
+ -- occurs after the declaration (and body, if
+ -- there is one) of Gen_Trailer, then
+ -- return True because E was declared after
+ -- the generic that Inst is an instance of
+ -- (and also after that generic's body, if it
+ -- has one).
+
+ if Is_Package_Or_Generic_Package (Gen_Trailer)
+ and then Present (Package_Body (Gen_Trailer))
+ then
+ Gen_Trailer :=
+ Corresponding_Body
+ (Package_Spec (Gen_Trailer));
+ end if;
+
+ declare
+ Id : Entity_Id := Gen_Trailer;
+ begin
+ loop
+ if not Present (Id) then
+ -- E_Trailer presumably occurred
+ -- earlier on the entity list than
+ -- Gen_Trailer. So E preceded the
+ -- generic that Inst is an instance
+ -- of (or the body of that generic if
+ -- it has one) and so could have
+ -- been referenced within the generic.
+ return False;
+ end if;
+ exit when Id = E_Trailer;
+ Next_Entity (Id);
+ end loop;
+ end;
+ end if;
+ end;
+ end if;
+
+ if Present (Nearest_Enclosing_Instance (Inst)) then
+ return Is_Extraneously_Visible
+ (E => E, Inst => Nearest_Enclosing_Instance (Inst));
+
+ -- The preceding Nearest_Enclosing_Instance test
+ -- doesn't handle the case of an instance of a
+ -- "sprouted" generic. For example, if Inst=I2 in
+ -- generic package G1
+ -- generic package G1.G2;
+ -- package I1 is new G1;
+ -- package I2 is new I1.G2;
+ -- then N_E_I (Inst) = Empty. So deal with that case.
+
+ elsif Present (Nearest_Enclosing_Instance (E)) then
+ return Is_Extraneously_Visible
+ (E => Nearest_Enclosing_Instance (E),
+ Inst => Inst);
+ end if;
+
+ return True;
+ end Is_Extraneously_Visible;
+
+ ---------------------------------
+ -- Is_Generic_Actual_Subp_Name --
+ ---------------------------------
+
+ function Is_Generic_Actual_Subp_Name
+ (N : Node_Id) return Boolean
+ is
+ Decl : constant Node_Id := Enclosing_Declaration (N);
+ begin
+ return Nkind (Decl) = N_Subprogram_Renaming_Declaration
+ and then Present (Corresponding_Formal_Spec (Decl));
+ end Is_Generic_Actual_Subp_Name;
+
+ I : Interp_Index;
+ It : Interp;
+ Inst : Entity_Id := Current_Scope;
+
+ begin
+ while Present (Inst)
+ and then not Is_Generic_Instance (Inst)
+ loop
+ Inst := Scope (Inst);
+ end loop;
+
+ if Present (Inst) then
+ Get_First_Interp (N, I, It);
+ while Present (It.Nam) loop
+ if Is_Extraneously_Visible (E => It.Nam, Inst => Inst)
+ and then not Is_Generic_Actual_Subp_Name (N)
+ then
+ Remove_Interp (I);
+ end if;
+ Get_Next_Interp (I, It);
+ end loop;
+ end if;
+ end;
+ end if;
+
-- If no homonyms were visible, the entity is unambiguous
if not Is_Overloaded (N) then
diff --git a/gcc/ada/sem_disp.adb b/gcc/ada/sem_disp.adb
index ab7bc40..6975f4a 100644
--- a/gcc/ada/sem_disp.adb
+++ b/gcc/ada/sem_disp.adb
@@ -2581,6 +2581,7 @@ package body Sem_Disp is
loop
Parent_Op := Overridden_Operation (Parent_Op);
exit when No (Parent_Op)
+ or else No (Find_DT (Parent_Op))
or else (No_Interfaces
and then Is_Interface (Find_DT (Parent_Op)));
diff --git a/gcc/ada/sem_prag.adb b/gcc/ada/sem_prag.adb
index c49cb27..9d66fb7 100644
--- a/gcc/ada/sem_prag.adb
+++ b/gcc/ada/sem_prag.adb
@@ -2827,21 +2827,6 @@ package body Sem_Prag is
SPARK_Msg_N ("\use its constituents instead", Item);
return;
- -- An external state which has Async_Writers or
- -- Effective_Reads enabled cannot appear as a global item
- -- of a nonvolatile function (SPARK RM 7.1.3(8)).
-
- elsif Is_External_State (Item_Id)
- and then (Async_Writers_Enabled (Item_Id)
- or else Effective_Reads_Enabled (Item_Id))
- and then Ekind (Spec_Id) in E_Function | E_Generic_Function
- and then not Is_Volatile_Function (Spec_Id)
- then
- SPARK_Msg_NE
- ("external state & cannot act as global item of "
- & "nonvolatile function", Item, Item_Id);
- return;
-
-- If the reference to the abstract state appears in an
-- enclosing package body that will eventually refine the
-- state, record the reference for future checks.
@@ -2894,50 +2879,6 @@ package body Sem_Prag is
Item, Item_Id);
return;
end if;
-
- -- Variable related checks. These are only relevant when
- -- SPARK_Mode is on as they are not standard Ada legality
- -- rules.
-
- elsif SPARK_Mode = On
- and then Ekind (Item_Id) = E_Variable
- and then Is_Effectively_Volatile_For_Reading (Item_Id)
- then
- -- The current instance of a protected unit is not an
- -- effectively volatile object, unless the protected unit
- -- is already volatile for another reason (SPARK RM 7.1.2).
-
- if Is_Single_Protected_Object (Item_Id)
- and then Is_CCT_Instance (Etype (Item_Id), Spec_Id)
- and then not Is_Effectively_Volatile_For_Reading
- (Item_Id, Ignore_Protected => True)
- then
- null;
-
- -- An effectively volatile object for reading cannot appear
- -- as a global item of a nonvolatile function (SPARK RM
- -- 7.1.3(8)).
-
- elsif Ekind (Spec_Id) in E_Function | E_Generic_Function
- and then not Is_Volatile_Function (Spec_Id)
- then
- Error_Msg_NE
- ("volatile object & cannot act as global item of a "
- & "function", Item, Item_Id);
- return;
-
- -- An effectively volatile object with external property
- -- Effective_Reads set to True must have mode Output or
- -- In_Out (SPARK RM 7.1.3(10)).
-
- elsif Effective_Reads_Enabled (Item_Id)
- and then Global_Mode = Name_Input
- then
- Error_Msg_NE
- ("volatile object & with property Effective_Reads must "
- & "have mode In_Out or Output", Item, Item_Id);
- return;
- end if;
end if;
-- When the item renames an entire object, replace the item
@@ -8128,27 +8069,6 @@ package body Sem_Prag is
Check_Full_Access_Only (E);
end if;
- -- The following check is only relevant when SPARK_Mode is on as
- -- this is not a standard Ada legality rule. Pragma Volatile can
- -- only apply to a full type declaration or an object declaration
- -- (SPARK RM 7.1.3(2)). Original_Node is necessary to account for
- -- untagged derived types that are rewritten as subtypes of their
- -- respective root types.
-
- if SPARK_Mode = On
- and then Prag_Id = Pragma_Volatile
- and then Nkind (Original_Node (Decl)) not in
- N_Full_Type_Declaration |
- N_Formal_Type_Declaration |
- N_Object_Declaration |
- N_Single_Protected_Declaration |
- N_Single_Task_Declaration
- then
- Error_Pragma_Arg
- ("argument of pragma % must denote a full type or object "
- & "declaration", Arg1);
- end if;
-
-- Deal with the case where the pragma/attribute is applied to a type
if Is_Type (E) then
@@ -13425,7 +13345,7 @@ package body Sem_Prag is
Analyze_If_Present (Pragma_Side_Effects);
-- Pragma Always_Terminates is not allowed on functions without
- -- side-effects.
+ -- side effects.
if Ekind (Spec_Id) in E_Function | E_Generic_Function
and then not Is_Function_With_Side_Effects (Spec_Id)
@@ -16989,7 +16909,7 @@ package body Sem_Prag is
Analyze_If_Present (Pragma_Side_Effects);
-- Pragma Exceptional_Cases is not allowed on functions without
- -- side-effects.
+ -- side effects.
if Ekind (Spec_Id) in E_Function | E_Generic_Function
and then not Is_Function_With_Side_Effects (Spec_Id)
@@ -23072,7 +22992,7 @@ package body Sem_Prag is
Analyze_If_Present (Pragma_Side_Effects);
- -- A function with side-effects shall not have a Pure_Function
+ -- A function with side effects shall not have a Pure_Function
-- aspect or pragma (SPARK RM 6.1.11(5)).
if Is_Function_With_Side_Effects (E) then
@@ -23949,8 +23869,8 @@ package body Sem_Prag is
Add_Contract_Item (N, Spec_Id);
- -- A function with side-effects cannot override a function without
- -- side-effects (SPARK RM 7.1.2(16)). Overriding checks are
+ -- A function with side effects cannot override a function without
+ -- side effects (SPARK RM 7.1.2(16)). Overriding checks are
-- usually performed in New_Overloaded_Entity, however at
-- that point the pragma has not been processed yet.
@@ -23960,7 +23880,7 @@ package body Sem_Prag is
and then not Is_Function_With_Side_Effects (Over_Id)
then
Error_Msg_N
- ("incompatible declaration of side-effects for function",
+ ("incompatible declaration of side effects for function",
Spec_Id);
Error_Msg_Sloc := Sloc (Over_Id);
@@ -33972,6 +33892,16 @@ package body Sem_Prag is
-- Start of processing for Validate_Compile_Time_Warning_Errors
begin
+
+ -- These error/warning messages were deferred because they could not be
+ -- evaluated in the front-end and they needed additional information
+ -- from the back-end. There is no reason to run these checks again if
+ -- the back-end was not activated by this point.
+
+ if not Generating_Code then
+ return;
+ end if;
+
Expander_Mode_Save_And_Set (False);
In_Compile_Time_Warning_Or_Error := True;
diff --git a/gcc/ada/sem_res.adb b/gcc/ada/sem_res.adb
index 70a8417..c684075 100644
--- a/gcc/ada/sem_res.adb
+++ b/gcc/ada/sem_res.adb
@@ -3620,10 +3620,6 @@ package body Sem_Res is
-- interpretation, but the form of the actual can only be determined
-- once the primitive operation is identified.
- procedure Flag_Effectively_Volatile_Objects (Expr : Node_Id);
- -- Emit an error concerning the illegal usage of an effectively volatile
- -- object for reading in interfering context (SPARK RM 7.1.3(10)).
-
procedure Insert_Default;
-- If the actual is missing in a call, insert in the actuals list
-- an instance of the default expression. The insertion is always
@@ -3874,68 +3870,6 @@ package body Sem_Res is
end if;
end Check_Prefixed_Call;
- ---------------------------------------
- -- Flag_Effectively_Volatile_Objects --
- ---------------------------------------
-
- procedure Flag_Effectively_Volatile_Objects (Expr : Node_Id) is
- function Flag_Object (N : Node_Id) return Traverse_Result;
- -- Determine whether arbitrary node N denotes an effectively volatile
- -- object for reading and if it does, emit an error.
-
- -----------------
- -- Flag_Object --
- -----------------
-
- function Flag_Object (N : Node_Id) return Traverse_Result is
- Id : Entity_Id;
-
- begin
- case Nkind (N) is
- -- Do not consider nested function calls because they have
- -- already been processed during their own resolution.
-
- when N_Function_Call =>
- return Skip;
-
- when N_Identifier | N_Expanded_Name =>
- Id := Entity (N);
-
- -- Identifiers of components and discriminants are not names
- -- in the sense of Ada RM 4.1. They can only occur as a
- -- selector_name in selected_component or as a choice in
- -- component_association.
-
- if Present (Id)
- and then Is_Object (Id)
- and then Ekind (Id) not in E_Component | E_Discriminant
- and then Is_Effectively_Volatile_For_Reading (Id)
- and then
- not Is_OK_Volatile_Context (Context => Parent (N),
- Obj_Ref => N,
- Check_Actuals => True)
- then
- Error_Msg_Code := GEC_Volatile_Non_Interfering_Context;
- Error_Msg_N
- ("volatile object cannot appear in this context '[[]']",
- N);
- end if;
-
- return Skip;
-
- when others =>
- return OK;
- end case;
- end Flag_Object;
-
- procedure Flag_Objects is new Traverse_Proc (Flag_Object);
-
- -- Start of processing for Flag_Effectively_Volatile_Objects
-
- begin
- Flag_Objects (Expr);
- end Flag_Effectively_Volatile_Objects;
-
--------------------
-- Insert_Default --
--------------------
@@ -4801,7 +4735,7 @@ package body Sem_Res is
-- leads to an infinite recursion.
if Predicate_Tests_On_Arguments (Nam) then
- Apply_Predicate_Check (A, F_Typ, Nam);
+ Apply_Predicate_Check (A, F_Typ, Fun => Nam);
end if;
-- Apply required constraint checks
@@ -5128,22 +5062,6 @@ package body Sem_Res is
Check_Unset_Reference (A);
end if;
- -- The following checks are only relevant when SPARK_Mode is on as
- -- they are not standard Ada legality rule. Internally generated
- -- temporaries are ignored.
-
- if SPARK_Mode = On and then Comes_From_Source (A) then
-
- -- Inspect the expression and flag each effectively volatile
- -- object for reading as illegal because it appears within
- -- an interfering context. Note that this is usually done
- -- in Resolve_Entity_Name, but when the effectively volatile
- -- object for reading appears as an actual in a call, the call
- -- must be resolved first.
-
- Flag_Effectively_Volatile_Objects (A);
- end if;
-
-- A formal parameter of a specific tagged type whose related
-- subprogram is subject to pragma Extensions_Visible with value
-- "False" cannot act as an actual in a subprogram with value
@@ -8130,19 +8048,6 @@ package body Sem_Res is
if SPARK_Mode = On then
- -- An effectively volatile object for reading must appear in
- -- non-interfering context (SPARK RM 7.1.3(10)).
-
- if Is_Object (E)
- and then Is_Effectively_Volatile_For_Reading (E)
- and then
- not Is_OK_Volatile_Context (Par, N, Check_Actuals => False)
- then
- Error_Msg_Code := GEC_Volatile_Non_Interfering_Context;
- SPARK_Msg_N
- ("volatile object cannot appear in this context '[[]']", N);
- end if;
-
-- Parameters of modes OUT or IN OUT of the subprogram shall not
-- occur in the consequences of an exceptional contract unless
-- they are either passed by reference or occur in the prefix
diff --git a/gcc/ada/sem_scil.adb b/gcc/ada/sem_scil.adb
index da8fab6..d7679d8 100644
--- a/gcc/ada/sem_scil.adb
+++ b/gcc/ada/sem_scil.adb
@@ -91,6 +91,7 @@ package body Sem_SCIL is
elsif Nkind (Ctrl_Tag) in N_Object_Renaming_Declaration
| N_Object_Declaration
| N_Parameter_Specification
+ | N_Discriminant_Specification
then
Ctrl_Typ := Etype (Defining_Identifier (Ctrl_Tag));
diff --git a/gcc/ada/sem_util.adb b/gcc/ada/sem_util.adb
index 423b8d3..909f93d 100644
--- a/gcc/ada/sem_util.adb
+++ b/gcc/ada/sem_util.adb
@@ -3795,36 +3795,6 @@ package body Sem_Util is
end loop;
end Check_Inherited_Nonoverridable_Aspects;
- ----------------------------------------
- -- Check_Nonvolatile_Function_Profile --
- ----------------------------------------
-
- procedure Check_Nonvolatile_Function_Profile (Func_Id : Entity_Id) is
- Formal : Entity_Id;
-
- begin
- -- Inspect all formal parameters
-
- Formal := First_Formal (Func_Id);
- while Present (Formal) loop
- if Is_Effectively_Volatile_For_Reading (Etype (Formal)) then
- Error_Msg_NE
- ("nonvolatile function & cannot have a volatile parameter",
- Formal, Func_Id);
- end if;
-
- Next_Formal (Formal);
- end loop;
-
- -- Inspect the return type
-
- if Is_Effectively_Volatile_For_Reading (Etype (Func_Id)) then
- Error_Msg_NE
- ("nonvolatile function & cannot have a volatile return type",
- Result_Definition (Parent (Func_Id)), Func_Id);
- end if;
- end Check_Nonvolatile_Function_Profile;
-
-------------------
-- Check_Parents --
-------------------
@@ -4370,21 +4340,24 @@ package body Sem_Util is
and then Has_No_Output (Subp_Id))
and then not Is_Wrapper (Subp_Id)
then
- if Pragma_Name (Prag) = Name_Contract_Cases then
- Error_Msg_NE (Adjust_Message
- ("contract case does not check the outcome of calling "
- & "&?.t?"), Expr, Subp_Id);
-
- elsif Pragma_Name (Prag) = Name_Refined_Post then
- Error_Msg_NE (Adjust_Message
- ("refined postcondition does not check the outcome of "
- & "calling &?.t?"), Err_Node, Subp_Id);
-
- else
- Error_Msg_NE (Adjust_Message
- ("postcondition does not check the outcome of calling "
- & "&?.t?"), Err_Node, Subp_Id);
- end if;
+ case Pragma_Name (Prag) is
+ when Name_Contract_Cases =>
+ Error_Msg_NE (Adjust_Message
+ ("contract case does not check the outcome of calling "
+ & "&?.t?"), Expr, Subp_Id);
+
+ when Name_Refined_Post =>
+ Error_Msg_NE (Adjust_Message
+ ("refined postcondition does not check the outcome of "
+ & "calling &?.t?"), Err_Node, Subp_Id);
+
+ when Name_Postcondition =>
+ Error_Msg_NE (Adjust_Message
+ ("postcondition does not check the outcome of calling "
+ & "&?.t?"), Err_Node, Subp_Id);
+
+ when others => pragma Assert (False);
+ end case;
end if;
end Check_Conjunct;
@@ -4555,11 +4528,16 @@ package body Sem_Util is
-- Start of processing for Check_Result_And_Post_State
begin
+ -- Do not check in instances, because we already checked the generic
+
+ if In_Instance then
+ return;
+
-- The lack of attribute 'Result or a post-state is classified as a
-- suspicious contract. Do not perform the check if the corresponding
- -- swich is not set.
+ -- switch is not set.
- if not Warn_On_Suspicious_Contract then
+ elsif not Warn_On_Suspicious_Contract then
return;
-- Nothing to do if there is no contract
@@ -5145,96 +5123,6 @@ package body Sem_Util is
end if;
end Check_Unused_Body_States;
- ------------------------------------
- -- Check_Volatility_Compatibility --
- ------------------------------------
-
- procedure Check_Volatility_Compatibility
- (Id1, Id2 : Entity_Id;
- Description_1, Description_2 : String;
- Srcpos_Bearer : Node_Id) is
-
- begin
- if SPARK_Mode /= On then
- return;
- end if;
-
- declare
- AR1 : constant Boolean := Async_Readers_Enabled (Id1);
- AW1 : constant Boolean := Async_Writers_Enabled (Id1);
- ER1 : constant Boolean := Effective_Reads_Enabled (Id1);
- EW1 : constant Boolean := Effective_Writes_Enabled (Id1);
- AR2 : constant Boolean := Async_Readers_Enabled (Id2);
- AW2 : constant Boolean := Async_Writers_Enabled (Id2);
- ER2 : constant Boolean := Effective_Reads_Enabled (Id2);
- EW2 : constant Boolean := Effective_Writes_Enabled (Id2);
-
- AR_Check_Failed : constant Boolean := AR1 and not AR2;
- AW_Check_Failed : constant Boolean := AW1 and not AW2;
- ER_Check_Failed : constant Boolean := ER1 and not ER2;
- EW_Check_Failed : constant Boolean := EW1 and not EW2;
-
- package Failure_Description is
- procedure Note_If_Failure
- (Failed : Boolean; Aspect_Name : String);
- -- If Failed is False, do nothing.
- -- If Failed is True, add Aspect_Name to the failure description.
-
- function Failure_Text return String;
- -- returns accumulated list of failing aspects
- end Failure_Description;
-
- package body Failure_Description is
- Description_Buffer : Bounded_String;
-
- ---------------------
- -- Note_If_Failure --
- ---------------------
-
- procedure Note_If_Failure
- (Failed : Boolean; Aspect_Name : String) is
- begin
- if Failed then
- if Description_Buffer.Length /= 0 then
- Append (Description_Buffer, ", ");
- end if;
- Append (Description_Buffer, Aspect_Name);
- end if;
- end Note_If_Failure;
-
- ------------------
- -- Failure_Text --
- ------------------
-
- function Failure_Text return String is
- begin
- return +Description_Buffer;
- end Failure_Text;
- end Failure_Description;
-
- use Failure_Description;
- begin
- if AR_Check_Failed
- or AW_Check_Failed
- or ER_Check_Failed
- or EW_Check_Failed
- then
- Note_If_Failure (AR_Check_Failed, "Async_Readers");
- Note_If_Failure (AW_Check_Failed, "Async_Writers");
- Note_If_Failure (ER_Check_Failed, "Effective_Reads");
- Note_If_Failure (EW_Check_Failed, "Effective_Writes");
-
- Error_Msg_N
- (Description_1
- & " and "
- & Description_2
- & " are not compatible with respect to volatility due to "
- & Failure_Text,
- Srcpos_Bearer);
- end if;
- end;
- end Check_Volatility_Compatibility;
-
-----------------
-- Choice_List --
-----------------
@@ -19318,7 +19206,7 @@ package body Sem_Util is
-- An effectively volatile object may act as an actual when the
-- corresponding formal is of a non-scalar effectively volatile
- -- type (SPARK RM 7.1.3(10)).
+ -- type (SPARK RM 7.1.3(9)).
if not Is_Scalar_Type (Etype (Formal))
and then Is_Effectively_Volatile_For_Reading (Etype (Formal))
@@ -19327,7 +19215,7 @@ package body Sem_Util is
-- An effectively volatile object may act as an actual in a
-- call to an instance of Unchecked_Conversion. (SPARK RM
- -- 7.1.3(10)).
+ -- 7.1.3(9)).
elsif Is_Unchecked_Conversion_Instance (Subp) then
return True;
diff --git a/gcc/ada/sem_util.ads b/gcc/ada/sem_util.ads
index 96b4730..081217a 100644
--- a/gcc/ada/sem_util.ads
+++ b/gcc/ada/sem_util.ads
@@ -415,10 +415,6 @@ package Sem_Util is
-- In the error case, error message is associate with Inheritor;
-- Inheritor parameter is otherwise unused.
- procedure Check_Nonvolatile_Function_Profile (Func_Id : Entity_Id);
- -- Verify that the profile of nonvolatile function Func_Id does not contain
- -- effectively volatile parameters or return type for reading.
-
function Check_Parents (N : Node_Id; List : Elist_Id) return Boolean;
-- Return True if all the occurrences of subtree N referencing entities in
-- the given List have the right value in their Parent field.
@@ -467,19 +463,6 @@ package Sem_Util is
-- and the context is external to the protected operation, to warn against
-- a possible unlocked access to data.
- procedure Check_Volatility_Compatibility
- (Id1, Id2 : Entity_Id;
- Description_1, Description_2 : String;
- Srcpos_Bearer : Node_Id);
- -- Id1 and Id2 should each be the entity of a state abstraction, a
- -- variable, or a type (i.e., something suitable for passing to
- -- Async_Readers_Enabled and similar functions).
- -- Does nothing if SPARK_Mode /= On. Otherwise, flags a legality violation
- -- if one or more of the four volatility-related aspects is False for Id1
- -- and True for Id2. The two descriptions are included in the error message
- -- text; the source position for the generated message is determined by
- -- Srcpos_Bearer.
-
function Choice_List (N : Node_Id) return List_Id;
-- Utility to retrieve the choices of a Component_Association or the
-- Discrete_Choices of an Iterated_Component_Association. For various
@@ -1480,7 +1463,7 @@ package Sem_Util is
-- Is the given expression a container aggregate?
function Is_Function_With_Side_Effects (Subp : Entity_Id) return Boolean;
- -- Return True if Subp is a function with side-effects, ie. it has a
+ -- Return True if Subp is a function with side effects, ie. it has a
-- (direct or inherited) pragma Side_Effects with static value True.
function Is_Newly_Constructed
@@ -2199,7 +2182,7 @@ package Sem_Util is
Obj_Ref : Node_Id;
Check_Actuals : Boolean) return Boolean;
-- Determine whether node Context denotes a "non-interfering context" (as
- -- defined in SPARK RM 7.1.3(10)) where volatile reference Obj_Ref can
+ -- defined in SPARK RM 7.1.3(9)) where volatile reference Obj_Ref can
-- safely reside. When examining references that might be located within
-- actual parameters of a subprogram call that has not been resolved yet,
-- Check_Actuals should be False; such references will be assumed to be
diff --git a/gcc/ada/terminals.c b/gcc/ada/terminals.c
index c0ee4a1..14de0fe 100644
--- a/gcc/ada/terminals.c
+++ b/gcc/ada/terminals.c
@@ -31,7 +31,7 @@
#define ATTRIBUTE_UNUSED __attribute__((unused))
-/* First all usupported platforms. Add stubs for exported routines. */
+/* First all unsupported platforms. Add stubs for exported routines. */
#if defined (VMS) || defined (__vxworks) || defined (__Lynx__) \
|| defined (__ANDROID__) || defined (__PikeOS__) || defined(__DJGPP__)
@@ -1089,7 +1089,7 @@ __gnat_setup_winsize (void *desc ATTRIBUTE_UNUSED,
{
}
-#else /* defined(_WIN32, implementatin for all UNIXes */
+#else /* defined(_WIN32, implementation for all UNIXes */
/* First defined some macro to identify easily some systems */
#if defined (__FreeBSD__) \
@@ -1104,6 +1104,7 @@ __gnat_setup_winsize (void *desc ATTRIBUTE_UNUSED,
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
+#include <signal.h>
#include <sys/ioctl.h>
#include <termios.h>
#include <fcntl.h>
@@ -1121,6 +1122,9 @@ __gnat_setup_winsize (void *desc ATTRIBUTE_UNUSED,
#if defined (__hpux__)
# include <sys/stropts.h>
#endif
+#if defined (__APPLE__)
+# include <util.h>
+#endif
#define CDISABLE _POSIX_VDISABLE
diff --git a/gcc/analyzer/ChangeLog b/gcc/analyzer/ChangeLog
index bab9e54..b6e8d51 100644
--- a/gcc/analyzer/ChangeLog
+++ b/gcc/analyzer/ChangeLog
@@ -1,3 +1,43 @@
+2023-12-01 David Malcolm <dmalcolm@redhat.com>
+
+ * analyzer.h (class saved_diagnostic): New forward decl.
+ * bounds-checking.cc: Update for changes to
+ pending_diagnostic::emit.
+ * call-details.cc: Likewise.
+ * diagnostic-manager.cc: Include "diagnostic-format-sarif.h".
+ (saved_diagnostic::maybe_add_sarif_properties): New.
+ (class pending_diagnostic_metadata): New.
+ (diagnostic_manager::emit_saved_diagnostic): Create a
+ pending_diagnostic_metadata and a diagnostic_emission_context.
+ Pass the latter to the pending_diagnostic::emit vfunc.
+ * diagnostic-manager.h
+ (saved_diagnostic::maybe_add_sarif_properties): New decl.
+ * engine.cc: Update for changes to pending_diagnostic::emit.
+ * infinite-loop.cc: Likewise.
+ * infinite-recursion.cc: Likewise.
+ * kf-analyzer.cc: Likewise.
+ * kf.cc: Likewise.
+ * pending-diagnostic.cc
+ (diagnostic_emission_context::get_pending_diagnostic): New.
+ (diagnostic_emission_context::warn): New.
+ (diagnostic_emission_context::inform): New.
+ * pending-diagnostic.h (class diagnostic_emission_context): New.
+ (pending_diagnostic::emit): Update params.
+ (pending_diagnostic::maybe_add_sarif_properties): New vfunc.
+ * region.cc: Don't include "diagnostic-metadata.h".
+ * region-model.cc: Include "diagnostic-format-sarif.h". Update
+ for changes to pending_diagnostic::emit.
+ (exposure_through_uninit_copy::maybe_add_sarif_properties): New.
+ * sm-fd.cc: Update for changes to pending_diagnostic::emit.
+ * sm-file.cc: Likewise.
+ * sm-malloc.cc: Likewise.
+ * sm-pattern-test.cc: Likewise.
+ * sm-sensitive.cc: Likewise.
+ * sm-signal.cc: Likewise.
+ * sm-taint.cc: Likewise.
+ * store.cc: Don't include "diagnostic-metadata.h".
+ * varargs.cc: Update for changes to pending_diagnostic::emit.
+
2023-11-19 David Malcolm <dmalcolm@redhat.com>
* analyzer.h: Include "rich-location.h".
diff --git a/gcc/analyzer/analyzer.h b/gcc/analyzer/analyzer.h
index cf32d4b..3115f87 100644
--- a/gcc/analyzer/analyzer.h
+++ b/gcc/analyzer/analyzer.h
@@ -94,6 +94,7 @@ class bounded_ranges_manager;
struct pending_location;
class pending_diagnostic;
class pending_note;
+class saved_diagnostic;
struct event_loc_info;
class checker_event;
class state_change_event;
diff --git a/gcc/analyzer/bounds-checking.cc b/gcc/analyzer/bounds-checking.cc
index 583b5ab..cc43ecc 100644
--- a/gcc/analyzer/bounds-checking.cc
+++ b/gcc/analyzer/bounds-checking.cc
@@ -30,7 +30,6 @@ along with GCC; see the file COPYING3. If not see
#include "gimple.h"
#include "gimple-iterator.h"
#include "diagnostic-core.h"
-#include "diagnostic-metadata.h"
#include "diagnostic-diagram.h"
#include "analyzer/analyzer.h"
#include "analyzer/analyzer-logging.h"
@@ -119,10 +118,10 @@ protected:
}
void
- maybe_show_notes (location_t loc, logger *logger) const
+ maybe_show_notes (diagnostic_emission_context &ctxt) const
{
- maybe_describe_array_bounds (loc);
- maybe_show_diagram (logger);
+ maybe_describe_array_bounds (ctxt.get_location ());
+ maybe_show_diagram (ctxt.get_logger ());
}
/* Potentially add a note about valid ways to index this array, such
@@ -281,27 +280,22 @@ public:
return "concrete_buffer_overflow";
}
- bool emit (rich_location *rich_loc,
- logger *logger) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- diagnostic_metadata m;
bool warned;
switch (get_memory_space ())
{
default:
- m.add_cwe (787);
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "buffer overflow");
+ ctxt.add_cwe (787);
+ warned = ctxt.warn ("buffer overflow");
break;
case MEMSPACE_STACK:
- m.add_cwe (121);
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "stack-based buffer overflow");
+ ctxt.add_cwe (121);
+ warned = ctxt.warn ("stack-based buffer overflow");
break;
case MEMSPACE_HEAP:
- m.add_cwe (122);
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "heap-based buffer overflow");
+ ctxt.add_cwe (122);
+ warned = ctxt.warn ("heap-based buffer overflow");
break;
}
@@ -312,25 +306,25 @@ public:
unsigned HOST_WIDE_INT num_bad_bytes
= m_out_of_bounds_range.m_size_in_bytes.to_uhwi ();
if (m_diag_arg)
- inform_n (rich_loc->get_loc (),
+ inform_n (ctxt.get_location (),
num_bad_bytes,
"write of %wu byte to beyond the end of %qE",
"write of %wu bytes to beyond the end of %qE",
num_bad_bytes,
m_diag_arg);
else
- inform_n (rich_loc->get_loc (),
+ inform_n (ctxt.get_location (),
num_bad_bytes,
"write of %wu byte to beyond the end of the region",
"write of %wu bytes to beyond the end of the region",
num_bad_bytes);
}
else if (m_diag_arg)
- inform (rich_loc->get_loc (),
+ inform (ctxt.get_location (),
"write to beyond the end of %qE",
m_diag_arg);
- maybe_show_notes (rich_loc->get_loc (), logger);
+ maybe_show_notes (ctxt);
}
return warned;
@@ -388,24 +382,20 @@ public:
return "concrete_buffer_over_read";
}
- bool emit (rich_location *rich_loc, logger *logger) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- diagnostic_metadata m;
bool warned;
- m.add_cwe (126);
+ ctxt.add_cwe (126);
switch (get_memory_space ())
{
default:
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "buffer over-read");
+ warned = ctxt.warn ("buffer over-read");
break;
case MEMSPACE_STACK:
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "stack-based buffer over-read");
+ warned = ctxt.warn ("stack-based buffer over-read");
break;
case MEMSPACE_HEAP:
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "heap-based buffer over-read");
+ warned = ctxt.warn ("heap-based buffer over-read");
break;
}
@@ -416,25 +406,25 @@ public:
unsigned HOST_WIDE_INT num_bad_bytes
= m_out_of_bounds_range.m_size_in_bytes.to_uhwi ();
if (m_diag_arg)
- inform_n (rich_loc->get_loc (),
+ inform_n (ctxt.get_location (),
num_bad_bytes,
"read of %wu byte from after the end of %qE",
"read of %wu bytes from after the end of %qE",
num_bad_bytes,
m_diag_arg);
else
- inform_n (rich_loc->get_loc (),
+ inform_n (ctxt.get_location (),
num_bad_bytes,
"read of %wu byte from after the end of the region",
"read of %wu bytes from after the end of the region",
num_bad_bytes);
}
else if (m_diag_arg)
- inform (rich_loc->get_loc (),
+ inform (ctxt.get_location (),
"read from after the end of %qE",
m_diag_arg);
- maybe_show_notes (rich_loc->get_loc (), logger);
+ maybe_show_notes (ctxt);
}
return warned;
@@ -493,28 +483,24 @@ public:
return "concrete_buffer_underwrite";
}
- bool emit (rich_location *rich_loc, logger *logger) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- diagnostic_metadata m;
bool warned;
- m.add_cwe (124);
+ ctxt.add_cwe (124);
switch (get_memory_space ())
{
default:
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "buffer underwrite");
+ warned = ctxt.warn ("buffer underwrite");
break;
case MEMSPACE_STACK:
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "stack-based buffer underwrite");
+ warned = ctxt.warn ("stack-based buffer underwrite");
break;
case MEMSPACE_HEAP:
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "heap-based buffer underwrite");
+ warned = ctxt.warn ("heap-based buffer underwrite");
break;
}
if (warned)
- maybe_show_notes (rich_loc->get_loc (), logger);
+ maybe_show_notes (ctxt);
return warned;
}
@@ -568,28 +554,24 @@ public:
return "concrete_buffer_under_read";
}
- bool emit (rich_location *rich_loc, logger *logger) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- diagnostic_metadata m;
bool warned;
- m.add_cwe (127);
+ ctxt.add_cwe (127);
switch (get_memory_space ())
{
default:
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "buffer under-read");
+ warned = ctxt.warn ("buffer under-read");
break;
case MEMSPACE_STACK:
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "stack-based buffer under-read");
+ warned = ctxt.warn ("stack-based buffer under-read");
break;
case MEMSPACE_HEAP:
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "heap-based buffer under-read");
+ warned = ctxt.warn ("heap-based buffer under-read");
break;
}
if (warned)
- maybe_show_notes (rich_loc->get_loc (), logger);
+ maybe_show_notes (ctxt);
return warned;
}
@@ -679,30 +661,26 @@ public:
return "symbolic_buffer_overflow";
}
- bool emit (rich_location *rich_loc, logger *logger) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- diagnostic_metadata m;
bool warned;
switch (get_memory_space ())
{
default:
- m.add_cwe (787);
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "buffer overflow");
+ ctxt.add_cwe (787);
+ warned = ctxt.warn ("buffer overflow");
break;
case MEMSPACE_STACK:
- m.add_cwe (121);
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "stack-based buffer overflow");
+ ctxt.add_cwe (121);
+ warned = ctxt.warn ("stack-based buffer overflow");
break;
case MEMSPACE_HEAP:
- m.add_cwe (122);
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "heap-based buffer overflow");
+ ctxt.add_cwe (122);
+ warned = ctxt.warn ("heap-based buffer overflow");
break;
}
if (warned)
- maybe_show_notes (rich_loc->get_loc (), logger);
+ maybe_show_notes (ctxt);
return warned;
}
@@ -796,31 +774,27 @@ public:
return "symbolic_buffer_over_read";
}
- bool emit (rich_location *rich_loc, logger *logger) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- diagnostic_metadata m;
- m.add_cwe (126);
+ ctxt.add_cwe (126);
bool warned;
switch (get_memory_space ())
{
default:
- m.add_cwe (787);
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "buffer over-read");
+ ctxt.add_cwe (787);
+ warned = ctxt.warn ("buffer over-read");
break;
case MEMSPACE_STACK:
- m.add_cwe (121);
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "stack-based buffer over-read");
+ ctxt.add_cwe (121);
+ warned = ctxt.warn ("stack-based buffer over-read");
break;
case MEMSPACE_HEAP:
- m.add_cwe (122);
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "heap-based buffer over-read");
+ ctxt.add_cwe (122);
+ warned = ctxt.warn ("heap-based buffer over-read");
break;
}
if (warned)
- maybe_show_notes (rich_loc->get_loc (), logger);
+ maybe_show_notes (ctxt);
return warned;
}
diff --git a/gcc/analyzer/call-details.cc b/gcc/analyzer/call-details.cc
index 9480f03..c5ae2dc 100644
--- a/gcc/analyzer/call-details.cc
+++ b/gcc/analyzer/call-details.cc
@@ -445,14 +445,12 @@ public:
return OPT_Wanalyzer_overlapping_buffers;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
auto_diagnostic_group d;
- bool warned;
- warned = warning_at (rich_loc, get_controlling_option (),
- "overlapping buffers passed as arguments to %qD",
- m_fndecl);
+ bool warned = ctxt.warn ("overlapping buffers passed as arguments to %qD",
+ m_fndecl);
// TODO: draw a picture?
diff --git a/gcc/analyzer/diagnostic-manager.cc b/gcc/analyzer/diagnostic-manager.cc
index a6755f2..ecd5737 100644
--- a/gcc/analyzer/diagnostic-manager.cc
+++ b/gcc/analyzer/diagnostic-manager.cc
@@ -58,6 +58,7 @@ along with GCC; see the file COPYING3. If not see
#include "analyzer/checker-path.h"
#include "analyzer/reachability.h"
#include "make-unique.h"
+#include "diagnostic-format-sarif.h"
#if ENABLE_ANALYZER
@@ -1018,6 +1019,31 @@ saved_diagnostic::emit_any_notes () const
pn->emit ();
}
+/* For SARIF output, add additional properties to the "result" object
+ for this diagnostic.
+ This extra data is intended for use when debugging the analyzer. */
+
+void
+saved_diagnostic::maybe_add_sarif_properties (sarif_object &result_obj) const
+{
+ sarif_property_bag &props = result_obj.get_or_create_properties ();
+#define PROPERTY_PREFIX "gcc/analyzer/saved_diagnostic/"
+ if (m_sm)
+ props.set_string (PROPERTY_PREFIX "sm", m_sm->get_name ());
+ props.set_integer (PROPERTY_PREFIX "enode", m_enode->m_index);
+ props.set_integer (PROPERTY_PREFIX "snode", m_snode->m_index);
+ if (m_sval)
+ props.set (PROPERTY_PREFIX "sval", m_sval->to_json ());
+ if (m_state)
+ props.set (PROPERTY_PREFIX "state", m_state->to_json ());
+ if (m_best_epath)
+ props.set (PROPERTY_PREFIX "idx", new json::integer_number (m_idx));
+#undef PROPERTY_PREFIX
+
+ /* Potentially add pending_diagnostic-specific properties. */
+ m_d->maybe_add_sarif_properties (result_obj);
+}
+
/* State for building a checker_path from a particular exploded_path.
In particular, this precomputes reachability information: the set of
source enodes for which a path be found to the diagnostic enode. */
@@ -1498,6 +1524,29 @@ diagnostic_manager::emit_saved_diagnostics (const exploded_graph &eg)
best_candidates.emit_best (this, eg);
}
+/* Custom subclass of diagnostic_metadata which, for SARIF output,
+ populates the property bag of the diagnostic's "result" object
+ with information from the saved_diagnostic and the
+ pending_diagnostic. */
+
+class pending_diagnostic_metadata : public diagnostic_metadata
+{
+public:
+ pending_diagnostic_metadata (const saved_diagnostic &sd)
+ : m_sd (sd)
+ {
+ }
+
+ void
+ maybe_add_sarif_properties (sarif_object &result_obj) const override
+ {
+ m_sd.maybe_add_sarif_properties (result_obj);
+ }
+
+private:
+ const saved_diagnostic &m_sd;
+};
+
/* Given a saved_diagnostic SD with m_best_epath through EG,
create an checker_path of suitable events and use it to call
SD's underlying pending_diagnostic "emit" vfunc to emit a diagnostic. */
@@ -1563,7 +1612,9 @@ diagnostic_manager::emit_saved_diagnostic (const exploded_graph &eg,
auto_diagnostic_group d;
auto_cfun sentinel (sd.m_snode->m_fun);
- if (sd.m_d->emit (&rich_loc, get_logger ()))
+ pending_diagnostic_metadata m (sd);
+ diagnostic_emission_context diag_ctxt (sd, rich_loc, m, get_logger ());
+ if (sd.m_d->emit (diag_ctxt))
{
sd.emit_any_notes ();
diff --git a/gcc/analyzer/diagnostic-manager.h b/gcc/analyzer/diagnostic-manager.h
index 27ab9ed..b6d6f08 100644
--- a/gcc/analyzer/diagnostic-manager.h
+++ b/gcc/analyzer/diagnostic-manager.h
@@ -67,6 +67,8 @@ public:
void emit_any_notes () const;
+ void maybe_add_sarif_properties (sarif_object &result_obj) const;
+
//private:
const state_machine *m_sm;
const exploded_node *m_enode;
diff --git a/gcc/analyzer/engine.cc b/gcc/analyzer/engine.cc
index b4e855f..1f930a2 100644
--- a/gcc/analyzer/engine.cc
+++ b/gcc/analyzer/engine.cc
@@ -1811,13 +1811,11 @@ public:
return OPT_Wanalyzer_stale_setjmp_buffer;
}
- bool emit (rich_location *richloc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- return warning_at
- (richloc, get_controlling_option (),
- "%qs called after enclosing function of %qs has returned",
- get_user_facing_name (m_longjmp_call),
- get_user_facing_name (m_setjmp_call));
+ return ctxt.warn ("%qs called after enclosing function of %qs has returned",
+ get_user_facing_name (m_longjmp_call),
+ get_user_facing_name (m_setjmp_call));
}
const char *get_kind () const final override
@@ -3982,10 +3980,9 @@ public:
return OPT_Wanalyzer_jump_through_null;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- return warning_at (rich_loc, get_controlling_option (),
- "jump through null pointer");
+ return ctxt.warn ("jump through null pointer");
}
label_text describe_final_event (const evdesc::final_event &ev) final override
diff --git a/gcc/analyzer/infinite-loop.cc b/gcc/analyzer/infinite-loop.cc
index 771d698..c47ce1c 100644
--- a/gcc/analyzer/infinite-loop.cc
+++ b/gcc/analyzer/infinite-loop.cc
@@ -32,7 +32,6 @@ along with GCC; see the file COPYING3. If not see
#include "diagnostic-core.h"
#include "diagnostic-event-id.h"
#include "diagnostic-path.h"
-#include "diagnostic-metadata.h"
#include "function.h"
#include "pretty-print.h"
#include "sbitmap.h"
@@ -178,13 +177,11 @@ public:
return OPT_Wanalyzer_infinite_loop;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
/* "CWE-835: Loop with Unreachable Exit Condition ('Infinite Loop')". */
- diagnostic_metadata m;
- m.add_cwe (835);
- return warning_meta (rich_loc, m, get_controlling_option (),
- "infinite loop");
+ ctxt.add_cwe (835);
+ return ctxt.warn ("infinite loop");
}
bool maybe_add_custom_events_for_superedge (const exploded_edge &,
diff --git a/gcc/analyzer/infinite-recursion.cc b/gcc/analyzer/infinite-recursion.cc
index 9576ff5..0fab9b7 100644
--- a/gcc/analyzer/infinite-recursion.cc
+++ b/gcc/analyzer/infinite-recursion.cc
@@ -31,7 +31,6 @@ along with GCC; see the file COPYING3. If not see
#include "diagnostic-core.h"
#include "diagnostic-event-id.h"
#include "diagnostic-path.h"
-#include "diagnostic-metadata.h"
#include "function.h"
#include "pretty-print.h"
#include "sbitmap.h"
@@ -95,13 +94,11 @@ public:
return OPT_Wanalyzer_infinite_recursion;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
/* "CWE-674: Uncontrolled Recursion". */
- diagnostic_metadata m;
- m.add_cwe (674);
- return warning_meta (rich_loc, m, get_controlling_option (),
- "infinite recursion");
+ ctxt.add_cwe (674);
+ return ctxt.warn ("infinite recursion");
}
label_text describe_final_event (const evdesc::final_event &ev) final override
diff --git a/gcc/analyzer/kf-analyzer.cc b/gcc/analyzer/kf-analyzer.cc
index 7ae598a..01e2c46 100644
--- a/gcc/analyzer/kf-analyzer.cc
+++ b/gcc/analyzer/kf-analyzer.cc
@@ -255,9 +255,9 @@ public:
return 0;
}
- bool emit (rich_location *richloc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- inform (richloc, "path");
+ ctxt.inform ("path");
return true;
}
diff --git a/gcc/analyzer/kf.cc b/gcc/analyzer/kf.cc
index 5d8e04d..a69f084 100644
--- a/gcc/analyzer/kf.cc
+++ b/gcc/analyzer/kf.cc
@@ -719,32 +719,29 @@ public:
return OPT_Wanalyzer_putenv_of_auto_var;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
auto_diagnostic_group d;
- diagnostic_metadata m;
/* SEI CERT C Coding Standard: "POS34-C. Do not call putenv() with a
pointer to an automatic variable as the argument". */
diagnostic_metadata::precanned_rule
rule ("POS34-C", "https://wiki.sei.cmu.edu/confluence/x/6NYxBQ");
- m.add_rule (rule);
+ ctxt.add_rule (rule);
bool warned;
if (m_var_decl)
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "%qE on a pointer to automatic variable %qE",
- m_fndecl, m_var_decl);
+ warned = ctxt.warn ("%qE on a pointer to automatic variable %qE",
+ m_fndecl, m_var_decl);
else
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "%qE on a pointer to an on-stack buffer",
- m_fndecl);
+ warned = ctxt.warn ("%qE on a pointer to an on-stack buffer",
+ m_fndecl);
if (warned)
{
if (m_var_decl)
inform (DECL_SOURCE_LOCATION (m_var_decl),
"%qE declared on stack here", m_var_decl);
- inform (rich_loc->get_loc (), "perhaps use %qs rather than %qE",
+ inform (ctxt.get_location (), "perhaps use %qs rather than %qE",
"setenv", m_fndecl);
}
@@ -1733,18 +1730,15 @@ public:
return OPT_Wanalyzer_undefined_behavior_strtok;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
/* CWE-476: NULL Pointer Dereference. */
- diagnostic_metadata m;
- m.add_cwe (476);
- if (warning_meta
- (rich_loc, m, get_controlling_option (),
- "calling %qD for first time with NULL as argument 1"
- " has undefined behavior",
- get_callee_fndecl ()))
+ ctxt.add_cwe (476);
+ if (ctxt.warn ("calling %qD for first time with NULL as argument 1"
+ " has undefined behavior",
+ get_callee_fndecl ()))
{
- inform (rich_loc->get_loc (),
+ inform (ctxt.get_location (),
"some implementations of %qD may crash on such input",
get_callee_fndecl ());
return true;
diff --git a/gcc/analyzer/pending-diagnostic.cc b/gcc/analyzer/pending-diagnostic.cc
index c7d3370..48d9be9 100644
--- a/gcc/analyzer/pending-diagnostic.cc
+++ b/gcc/analyzer/pending-diagnostic.cc
@@ -109,6 +109,51 @@ evdesc::event_desc::formatted_print (const char *fmt, ...) const
return result;
}
+/* class diagnostic_emission_context. */
+
+/* Get the pending_diagnostic being emitted. */
+
+const pending_diagnostic &
+diagnostic_emission_context::get_pending_diagnostic () const
+{
+ return *m_sd.m_d.get ();
+}
+
+/* Emit a warning, using the rich_location, metadata, and the
+ pending_diagnostic's option. */
+
+bool
+diagnostic_emission_context::warn (const char *gmsgid, ...)
+{
+ const pending_diagnostic &pd = get_pending_diagnostic ();
+ auto_diagnostic_group d;
+ va_list ap;
+ va_start (ap, gmsgid);
+ const bool result = emit_diagnostic_valist (DK_WARNING,
+ &m_rich_loc, &m_metadata,
+ pd.get_controlling_option (),
+ gmsgid, &ap);
+ va_end (ap);
+ return result;
+}
+
+/* Emit a note, using the rich_location and metadata (and the
+ pending_diagnostic's option). */
+
+void
+diagnostic_emission_context::inform (const char *gmsgid, ...)
+{
+ const pending_diagnostic &pd = get_pending_diagnostic ();
+ auto_diagnostic_group d;
+ va_list ap;
+ va_start (ap, gmsgid);
+ emit_diagnostic_valist (DK_NOTE,
+ &m_rich_loc, &m_metadata,
+ pd.get_controlling_option (),
+ gmsgid, &ap);
+ va_end (ap);
+}
+
/* Return true if T1 and T2 are "the same" for the purposes of
diagnostic deduplication. */
diff --git a/gcc/analyzer/pending-diagnostic.h b/gcc/analyzer/pending-diagnostic.h
index 7582b37..e393f9a 100644
--- a/gcc/analyzer/pending-diagnostic.h
+++ b/gcc/analyzer/pending-diagnostic.h
@@ -21,6 +21,7 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_ANALYZER_PENDING_DIAGNOSTIC_H
#define GCC_ANALYZER_PENDING_DIAGNOSTIC_H
+#include "diagnostic-metadata.h"
#include "diagnostic-path.h"
#include "analyzer/sm.h"
@@ -144,6 +145,47 @@ struct final_event : public event_desc
} /* end of namespace evdesc */
+/* A bundle of information for use by implementations of the
+ pending_diagnostic::emit vfunc.
+
+ The rich_location will have already been populated with a
+ diagnostic_path. */
+
+class diagnostic_emission_context
+{
+public:
+ diagnostic_emission_context (const saved_diagnostic &sd,
+ rich_location &rich_loc,
+ diagnostic_metadata &metadata,
+ logger *logger)
+ : m_sd (sd),
+ m_rich_loc (rich_loc),
+ m_metadata (metadata),
+ m_logger (logger)
+ {
+ }
+
+ const pending_diagnostic &get_pending_diagnostic () const;
+
+ bool warn (const char *, ...) ATTRIBUTE_GCC_DIAG (2,3);
+ void inform (const char *, ...) ATTRIBUTE_GCC_DIAG (2,3);
+
+ location_t get_location () const { return m_rich_loc.get_loc (); }
+ logger *get_logger () const { return m_logger; }
+
+ void add_cwe (int cwe) { m_metadata.add_cwe (cwe); }
+ void add_rule (const diagnostic_metadata::rule &r)
+ {
+ m_metadata.add_rule (r);
+ }
+
+private:
+ const saved_diagnostic &m_sd;
+ rich_location &m_rich_loc;
+ diagnostic_metadata &m_metadata;
+ logger *m_logger;
+};
+
/* An abstract base class for capturing information about a diagnostic in
a form that is ready to emit at a later point (or be rejected).
Each kind of diagnostic will have a concrete subclass of
@@ -177,10 +219,9 @@ class pending_diagnostic
path being explored. By default, don't terminate the path. */
virtual bool terminate_path_p () const { return false; }
- /* Vfunc for emitting the diagnostic. The rich_location will have been
- populated with a diagnostic_path.
+ /* Vfunc for emitting the diagnostic.
Return true if a diagnostic is actually emitted. */
- virtual bool emit (rich_location *, logger *) = 0;
+ virtual bool emit (diagnostic_emission_context &) = 0;
/* Hand-coded RTTI: get an ID for the subclass. */
virtual const char *get_kind () const = 0;
@@ -361,6 +402,15 @@ class pending_diagnostic
/* Default implementation: accept this path. */
return true;
}
+
+ /* Vfunc for use in SARIF output to give pending_diagnostic subclasses
+ the opportunity to add diagnostic-specific properties to the SARIF
+ "result" object for the diagnostic.
+ This is intended for use when debugging a diagnostic. */
+ virtual void maybe_add_sarif_properties (sarif_object &/*result_obj*/) const
+ {
+ /* Default no-op implementation. */
+ }
};
/* A template to make it easier to make subclasses of pending_diagnostic.
diff --git a/gcc/analyzer/region-model.cc b/gcc/analyzer/region-model.cc
index 420c103..2157ad2 100644
--- a/gcc/analyzer/region-model.cc
+++ b/gcc/analyzer/region-model.cc
@@ -40,7 +40,6 @@ along with GCC; see the file COPYING3. If not see
#include "fold-const.h"
#include "tree-pretty-print.h"
#include "diagnostic-color.h"
-#include "diagnostic-metadata.h"
#include "bitmap.h"
#include "selftest.h"
#include "analyzer/analyzer.h"
@@ -79,6 +78,7 @@ along with GCC; see the file COPYING3. If not see
#include "analyzer/checker-path.h"
#include "analyzer/feasible-graph.h"
#include "analyzer/record-layout.h"
+#include "diagnostic-format-sarif.h"
#if ENABLE_ANALYZER
@@ -512,7 +512,7 @@ public:
bool terminate_path_p () const final override { return true; }
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
switch (m_pkind)
{
@@ -520,37 +520,30 @@ public:
gcc_unreachable ();
case POISON_KIND_UNINIT:
{
- diagnostic_metadata m;
- m.add_cwe (457); /* "CWE-457: Use of Uninitialized Variable". */
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use of uninitialized value %qE",
- m_expr);
+ ctxt.add_cwe (457); /* "CWE-457: Use of Uninitialized Variable". */
+ return ctxt.warn ("use of uninitialized value %qE",
+ m_expr);
}
break;
case POISON_KIND_FREED:
{
- diagnostic_metadata m;
- m.add_cwe (416); /* "CWE-416: Use After Free". */
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use after %<free%> of %qE",
- m_expr);
+ ctxt.add_cwe (416); /* "CWE-416: Use After Free". */
+ return ctxt.warn ("use after %<free%> of %qE",
+ m_expr);
}
break;
case POISON_KIND_DELETED:
{
- diagnostic_metadata m;
- m.add_cwe (416); /* "CWE-416: Use After Free". */
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use after %<delete%> of %qE",
- m_expr);
+ ctxt.add_cwe (416); /* "CWE-416: Use After Free". */
+ return ctxt.warn ("use after %<delete%> of %qE",
+ m_expr);
}
break;
case POISON_KIND_POPPED_STACK:
{
/* TODO: which CWE? */
- return warning_at
- (rich_loc, get_controlling_option (),
- "dereferencing pointer %qE to within stale stack frame",
+ return ctxt.warn
+ ("dereferencing pointer %qE to within stale stack frame",
m_expr);
}
break;
@@ -655,10 +648,9 @@ public:
return OPT_Wanalyzer_shift_count_negative;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- return warning_at (rich_loc, get_controlling_option (),
- "shift by negative count (%qE)", m_count_cst);
+ return ctxt.warn ("shift by negative count (%qE)", m_count_cst);
}
label_text describe_final_event (const evdesc::final_event &ev) final override
@@ -702,11 +694,10 @@ public:
return OPT_Wanalyzer_shift_count_overflow;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- return warning_at (rich_loc, get_controlling_option (),
- "shift by count (%qE) >= precision of type (%qi)",
- m_count_cst, m_operand_precision);
+ return ctxt.warn ("shift by count (%qE) >= precision of type (%qi)",
+ m_count_cst, m_operand_precision);
}
label_text describe_final_event (const evdesc::final_event &ev) final override
@@ -2840,23 +2831,20 @@ public:
return OPT_Wanalyzer_write_to_const;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
auto_diagnostic_group d;
bool warned;
switch (m_reg->get_kind ())
{
default:
- warned = warning_at (rich_loc, get_controlling_option (),
- "write to %<const%> object %qE", m_decl);
+ warned = ctxt.warn ("write to %<const%> object %qE", m_decl);
break;
case RK_FUNCTION:
- warned = warning_at (rich_loc, get_controlling_option (),
- "write to function %qE", m_decl);
+ warned = ctxt.warn ("write to function %qE", m_decl);
break;
case RK_LABEL:
- warned = warning_at (rich_loc, get_controlling_option (),
- "write to label %qE", m_decl);
+ warned = ctxt.warn ("write to label %qE", m_decl);
break;
}
if (warned)
@@ -2908,10 +2896,9 @@ public:
return OPT_Wanalyzer_write_to_string_literal;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- return warning_at (rich_loc, get_controlling_option (),
- "write to string literal");
+ return ctxt.warn ("write to string literal");
/* Ideally we would show the location of the STRING_CST as well,
but it is not available at this point. */
}
@@ -3112,14 +3099,12 @@ public:
return OPT_Wanalyzer_allocation_size;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- diagnostic_metadata m;
- m.add_cwe (131);
+ ctxt.add_cwe (131);
- return warning_meta (rich_loc, m, get_controlling_option (),
- "allocated buffer size is not a multiple"
- " of the pointee's size");
+ return ctxt.warn ("allocated buffer size is not a multiple"
+ " of the pointee's size");
}
label_text describe_final_event (const evdesc::final_event &ev) final
@@ -5970,15 +5955,14 @@ public:
return same_tree_p (m_arg, ((const float_as_size_arg &) other).m_arg);
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- diagnostic_metadata m;
- bool warned = warning_meta (rich_loc, m, get_controlling_option (),
- "use of floating-point arithmetic here might"
- " yield unexpected results");
+ bool warned = ctxt.warn ("use of floating-point arithmetic here might"
+ " yield unexpected results");
if (warned)
- inform (rich_loc->get_loc (), "only use operands of an integer type"
- " inside the size argument");
+ inform (ctxt.get_location (),
+ "only use operands of an integer type"
+ " inside the size argument");
return warned;
}
@@ -6214,37 +6198,33 @@ public:
return OPT_Wanalyzer_exposure_through_uninit_copy;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- diagnostic_metadata m;
/* CWE-200: Exposure of Sensitive Information to an Unauthorized Actor. */
- m.add_cwe (200);
+ ctxt.add_cwe (200);
enum memory_space mem_space = get_src_memory_space ();
bool warned;
switch (mem_space)
{
default:
- warned = warning_meta
- (rich_loc, m, get_controlling_option (),
- "potential exposure of sensitive information"
- " by copying uninitialized data across trust boundary");
+ warned = ctxt.warn ("potential exposure of sensitive information"
+ " by copying uninitialized data"
+ " across trust boundary");
break;
case MEMSPACE_STACK:
- warned = warning_meta
- (rich_loc, m, get_controlling_option (),
- "potential exposure of sensitive information"
- " by copying uninitialized data from stack across trust boundary");
+ warned = ctxt.warn ("potential exposure of sensitive information"
+ " by copying uninitialized data from stack"
+ " across trust boundary");
break;
case MEMSPACE_HEAP:
- warned = warning_meta
- (rich_loc, m, get_controlling_option (),
- "potential exposure of sensitive information"
- " by copying uninitialized data from heap across trust boundary");
+ warned = ctxt.warn ("potential exposure of sensitive information"
+ " by copying uninitialized data from heap"
+ " across trust boundary");
break;
}
if (warned)
{
- location_t loc = rich_loc->get_loc ();
+ const location_t loc = ctxt.get_location ();
inform_number_of_uninit_bits (loc);
complain_about_uninit_ranges (loc);
@@ -6276,6 +6256,17 @@ public:
interest->add_region_creation (m_src_region);
}
+ void
+ maybe_add_sarif_properties (sarif_object &result_obj) const final override
+ {
+ sarif_property_bag &props = result_obj.get_or_create_properties ();
+#define PROPERTY_PREFIX "gcc/-Wanalyzer-exposure-through-uninit-copy/"
+ props.set (PROPERTY_PREFIX "src_region", m_src_region->to_json ());
+ props.set (PROPERTY_PREFIX "dest_region", m_dest_region->to_json ());
+ props.set (PROPERTY_PREFIX "copied_sval", m_copied_sval->to_json ());
+#undef PROPERTY_PREFIX
+ }
+
private:
enum memory_space get_src_memory_space () const
{
diff --git a/gcc/analyzer/region.cc b/gcc/analyzer/region.cc
index 4feb972..9b27e8f 100644
--- a/gcc/analyzer/region.cc
+++ b/gcc/analyzer/region.cc
@@ -40,7 +40,6 @@ along with GCC; see the file COPYING3. If not see
#include "fold-const.h"
#include "tree-pretty-print.h"
#include "diagnostic-color.h"
-#include "diagnostic-metadata.h"
#include "bitmap.h"
#include "analyzer/analyzer.h"
#include "analyzer/analyzer-logging.h"
diff --git a/gcc/analyzer/sm-fd.cc b/gcc/analyzer/sm-fd.cc
index 34bbd84..7f8a1d9 100644
--- a/gcc/analyzer/sm-fd.cc
+++ b/gcc/analyzer/sm-fd.cc
@@ -29,7 +29,6 @@ along with GCC; see the file COPYING3. If not see
#include "gimple.h"
#include "options.h"
#include "diagnostic-path.h"
-#include "diagnostic-metadata.h"
#include "analyzer/analyzer.h"
#include "diagnostic-event-id.h"
#include "analyzer/analyzer-logging.h"
@@ -465,19 +464,16 @@ public:
}
bool
- emit (rich_location *rich_loc, logger *) final override
+ emit (diagnostic_emission_context &ctxt) final override
{
/*CWE-775: Missing Release of File Descriptor or Handle after Effective
Lifetime
*/
- diagnostic_metadata m;
- m.add_cwe (775);
+ ctxt.add_cwe (775);
if (m_arg)
- return warning_meta (rich_loc, m, get_controlling_option (),
- "leak of file descriptor %qE", m_arg);
+ return ctxt.warn ("leak of file descriptor %qE", m_arg);
else
- return warning_meta (rich_loc, m, get_controlling_option (),
- "leak of file descriptor");
+ return ctxt.warn ("leak of file descriptor");
}
label_text
@@ -550,20 +546,18 @@ public:
}
bool
- emit (rich_location *rich_loc, logger *) final override
+ emit (diagnostic_emission_context &ctxt) final override
{
bool warned;
switch (m_fd_dir)
{
case DIRS_READ:
- warned = warning_at (rich_loc, get_controlling_option (),
- "%qE on read-only file descriptor %qE",
- m_callee_fndecl, m_arg);
+ warned = ctxt.warn ("%qE on read-only file descriptor %qE",
+ m_callee_fndecl, m_arg);
break;
case DIRS_WRITE:
- warned = warning_at (rich_loc, get_controlling_option (),
- "%qE on write-only file descriptor %qE",
- m_callee_fndecl, m_arg);
+ warned = ctxt.warn ("%qE on write-only file descriptor %qE",
+ m_callee_fndecl, m_arg);
break;
default:
gcc_unreachable ();
@@ -612,13 +606,11 @@ public:
return OPT_Wanalyzer_fd_double_close;
}
bool
- emit (rich_location *rich_loc, logger *) final override
+ emit (diagnostic_emission_context &ctxt) final override
{
- diagnostic_metadata m;
// CWE-1341: Multiple Releases of Same Resource or Handle
- m.add_cwe (1341);
- return warning_meta (rich_loc, m, get_controlling_option (),
- "double %<close%> of file descriptor %qE", m_arg);
+ ctxt.add_cwe (1341);
+ return ctxt.warn ("double %<close%> of file descriptor %qE", m_arg);
}
label_text
@@ -677,12 +669,10 @@ public:
}
bool
- emit (rich_location *rich_loc, logger *) final override
+ emit (diagnostic_emission_context &ctxt) final override
{
- bool warned;
- warned = warning_at (rich_loc, get_controlling_option (),
- "%qE on closed file descriptor %qE", m_callee_fndecl,
- m_arg);
+ bool warned = ctxt.warn ("%qE on closed file descriptor %qE",
+ m_callee_fndecl, m_arg);
if (warned)
inform_filedescriptor_attribute (DIRS_READ_WRITE);
return warned;
@@ -748,12 +738,10 @@ public:
}
bool
- emit (rich_location *rich_loc, logger *) final override
+ emit (diagnostic_emission_context &ctxt) final override
{
- bool warned;
- warned = warning_at (rich_loc, get_controlling_option (),
- "%qE on possibly invalid file descriptor %qE",
- m_callee_fndecl, m_arg);
+ bool warned = ctxt.warn ("%qE on possibly invalid file descriptor %qE",
+ m_callee_fndecl, m_arg);
if (warned)
inform_filedescriptor_attribute (DIRS_READ_WRITE);
return warned;
@@ -859,14 +847,12 @@ public:
}
bool
- emit (rich_location *rich_loc, logger *) final override
+ emit (diagnostic_emission_context &ctxt) final override
{
/* CWE-666: Operation on Resource in Wrong Phase of Lifetime. */
- diagnostic_metadata m;
- m.add_cwe (666);
- return warning_at (rich_loc, get_controlling_option (),
- "%qE on file descriptor %qE in wrong phase",
- m_callee_fndecl, m_arg);
+ ctxt.add_cwe (666);
+ return ctxt.warn ("%qE on file descriptor %qE in wrong phase",
+ m_callee_fndecl, m_arg);
}
label_text
@@ -1019,25 +1005,22 @@ public:
}
bool
- emit (rich_location *rich_loc, logger *) final override
+ emit (diagnostic_emission_context &ctxt) final override
{
switch (m_expected_type)
{
default:
gcc_unreachable ();
case EXPECTED_TYPE_SOCKET:
- return warning_at (rich_loc, get_controlling_option (),
- "%qE on non-socket file descriptor %qE",
- m_callee_fndecl, m_arg);
+ return ctxt.warn ("%qE on non-socket file descriptor %qE",
+ m_callee_fndecl, m_arg);
case EXPECTED_TYPE_STREAM_SOCKET:
if (m_sm.is_datagram_socket_fd_p (m_actual_state))
- return warning_at (rich_loc, get_controlling_option (),
- "%qE on datagram socket file descriptor %qE",
- m_callee_fndecl, m_arg);
+ return ctxt.warn ("%qE on datagram socket file descriptor %qE",
+ m_callee_fndecl, m_arg);
else
- return warning_at (rich_loc, get_controlling_option (),
- "%qE on non-stream-socket file descriptor %qE",
- m_callee_fndecl, m_arg);
+ return ctxt.warn ("%qE on non-stream-socket file descriptor %qE",
+ m_callee_fndecl, m_arg);
}
}
diff --git a/gcc/analyzer/sm-file.cc b/gcc/analyzer/sm-file.cc
index 0252b39..f8e31f8 100644
--- a/gcc/analyzer/sm-file.cc
+++ b/gcc/analyzer/sm-file.cc
@@ -29,7 +29,6 @@ along with GCC; see the file COPYING3. If not see
#include "gimple.h"
#include "options.h"
#include "diagnostic-path.h"
-#include "diagnostic-metadata.h"
#include "analyzer/analyzer.h"
#include "diagnostic-event-id.h"
#include "analyzer/analyzer-logging.h"
@@ -176,14 +175,12 @@ public:
return OPT_Wanalyzer_double_fclose;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- diagnostic_metadata m;
/* CWE-1341: Multiple Releases of Same Resource or Handle. */
- m.add_cwe (1341);
- return warning_meta (rich_loc, m, get_controlling_option (),
- "double %<fclose%> of FILE %qE",
- m_arg);
+ ctxt.add_cwe (1341);
+ return ctxt.warn ("double %<fclose%> of FILE %qE",
+ m_arg);
}
label_text describe_state_change (const evdesc::state_change &change)
@@ -224,19 +221,15 @@ public:
return OPT_Wanalyzer_file_leak;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- diagnostic_metadata m;
/* CWE-775: "Missing Release of File Descriptor or Handle after
Effective Lifetime". */
- m.add_cwe (775);
+ ctxt.add_cwe (775);
if (m_arg)
- return warning_meta (rich_loc, m, get_controlling_option (),
- "leak of FILE %qE",
- m_arg);
+ return ctxt.warn ("leak of FILE %qE", m_arg);
else
- return warning_meta (rich_loc, m, get_controlling_option (),
- "leak of FILE");
+ return ctxt.warn ("leak of FILE");
}
label_text describe_state_change (const evdesc::state_change &change)
diff --git a/gcc/analyzer/sm-malloc.cc b/gcc/analyzer/sm-malloc.cc
index 5af6544..bb78444 100644
--- a/gcc/analyzer/sm-malloc.cc
+++ b/gcc/analyzer/sm-malloc.cc
@@ -30,7 +30,6 @@ along with GCC; see the file COPYING3. If not see
#include "options.h"
#include "bitmap.h"
#include "diagnostic-path.h"
-#include "diagnostic-metadata.h"
#include "analyzer/analyzer.h"
#include "diagnostic-event-id.h"
#include "analyzer/analyzer-logging.h"
@@ -840,23 +839,20 @@ public:
return OPT_Wanalyzer_mismatching_deallocation;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
auto_diagnostic_group d;
- diagnostic_metadata m;
- m.add_cwe (762); /* CWE-762: Mismatched Memory Management Routines. */
+ ctxt.add_cwe (762); /* CWE-762: Mismatched Memory Management Routines. */
if (const deallocator *expected_dealloc
= m_expected_deallocators->maybe_get_single ())
- return warning_meta (rich_loc, m, get_controlling_option (),
- "%qE should have been deallocated with %qs"
- " but was deallocated with %qs",
- m_arg, expected_dealloc->m_name,
- m_actual_dealloc->m_name);
+ return ctxt.warn ("%qE should have been deallocated with %qs"
+ " but was deallocated with %qs",
+ m_arg, expected_dealloc->m_name,
+ m_actual_dealloc->m_name);
else
- return warning_meta (rich_loc, m, get_controlling_option (),
- "%qs called on %qE returned from a mismatched"
- " allocation function",
- m_actual_dealloc->m_name, m_arg);
+ return ctxt.warn ("%qs called on %qE returned from a mismatched"
+ " allocation function",
+ m_actual_dealloc->m_name, m_arg);
}
label_text describe_state_change (const evdesc::state_change &change)
@@ -919,13 +915,11 @@ public:
return OPT_Wanalyzer_double_free;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
auto_diagnostic_group d;
- diagnostic_metadata m;
- m.add_cwe (415); /* CWE-415: Double Free. */
- return warning_meta (rich_loc, m, get_controlling_option (),
- "double-%qs of %qE", m_funcname, m_arg);
+ ctxt.add_cwe (415); /* CWE-415: Double Free. */
+ return ctxt.warn ("double-%qs of %qE", m_funcname, m_arg);
}
label_text describe_state_change (const evdesc::state_change &change)
@@ -1015,13 +1009,11 @@ public:
return OPT_Wanalyzer_possible_null_dereference;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
/* CWE-690: Unchecked Return Value to NULL Pointer Dereference. */
- diagnostic_metadata m;
- m.add_cwe (690);
- return warning_meta (rich_loc, m, get_controlling_option (),
- "dereference of possibly-NULL %qE", m_arg);
+ ctxt.add_cwe (690);
+ return ctxt.warn ("dereference of possibly-NULL %qE", m_arg);
}
label_text describe_final_event (const evdesc::final_event &ev) final override
@@ -1104,16 +1096,14 @@ public:
return OPT_Wanalyzer_possible_null_argument;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
/* CWE-690: Unchecked Return Value to NULL Pointer Dereference. */
auto_diagnostic_group d;
- diagnostic_metadata m;
- m.add_cwe (690);
+ ctxt.add_cwe (690);
bool warned
- = warning_meta (rich_loc, m, get_controlling_option (),
- "use of possibly-NULL %qE where non-null expected",
- m_arg);
+ = ctxt.warn ("use of possibly-NULL %qE where non-null expected",
+ m_arg);
if (warned)
inform_nonnull_attribute (m_fndecl, m_arg_idx);
return warned;
@@ -1157,13 +1147,11 @@ public:
bool terminate_path_p () const final override { return true; }
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
/* CWE-476: NULL Pointer Dereference. */
- diagnostic_metadata m;
- m.add_cwe (476);
- return warning_meta (rich_loc, m, get_controlling_option (),
- "dereference of NULL %qE", m_arg);
+ ctxt.add_cwe (476);
+ return ctxt.warn ("dereference of NULL %qE", m_arg);
}
label_text describe_return_of_state (const evdesc::return_of_state &info)
@@ -1227,21 +1215,18 @@ public:
bool terminate_path_p () const final override { return true; }
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
/* CWE-476: NULL Pointer Dereference. */
auto_diagnostic_group d;
- diagnostic_metadata m;
- m.add_cwe (476);
+ ctxt.add_cwe (476);
bool warned;
if (zerop (m_arg))
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "use of NULL where non-null expected");
+ warned = ctxt.warn ("use of NULL where non-null expected");
else
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "use of NULL %qE where non-null expected",
- m_arg);
+ warned = ctxt.warn ("use of NULL %qE where non-null expected",
+ m_arg);
if (warned)
inform_nonnull_attribute (m_fndecl, m_arg_idx);
return warned;
@@ -1284,14 +1269,12 @@ public:
return OPT_Wanalyzer_use_after_free;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
/* CWE-416: Use After Free. */
- diagnostic_metadata m;
- m.add_cwe (416);
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use after %<%s%> of %qE",
- m_deallocator->m_name, m_arg);
+ ctxt.add_cwe (416);
+ return ctxt.warn ("use after %<%s%> of %qE",
+ m_deallocator->m_name, m_arg);
}
label_text describe_state_change (const evdesc::state_change &change)
@@ -1378,17 +1361,14 @@ public:
return OPT_Wanalyzer_malloc_leak;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
/* "CWE-401: Missing Release of Memory after Effective Lifetime". */
- diagnostic_metadata m;
- m.add_cwe (401);
+ ctxt.add_cwe (401);
if (m_arg)
- return warning_meta (rich_loc, m, get_controlling_option (),
- "leak of %qE", m_arg);
+ return ctxt.warn ("leak of %qE", m_arg);
else
- return warning_meta (rich_loc, m, get_controlling_option (),
- "leak of %qs", "<unknown>");
+ return ctxt.warn ("leak of %qs", "<unknown>");
}
label_text describe_state_change (const evdesc::state_change &change)
@@ -1452,11 +1432,10 @@ public:
return OPT_Wanalyzer_free_of_non_heap;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
auto_diagnostic_group d;
- diagnostic_metadata m;
- m.add_cwe (590); /* CWE-590: Free of Memory not on the Heap. */
+ ctxt.add_cwe (590); /* CWE-590: Free of Memory not on the Heap. */
switch (get_memory_space ())
{
default:
@@ -1466,16 +1445,14 @@ public:
case MEMSPACE_CODE:
case MEMSPACE_GLOBALS:
case MEMSPACE_READONLY_DATA:
- return warning_meta (rich_loc, m, get_controlling_option (),
- "%<%s%> of %qE which points to memory"
- " not on the heap",
- m_funcname, m_arg);
+ return ctxt.warn ("%<%s%> of %qE which points to memory"
+ " not on the heap",
+ m_funcname, m_arg);
break;
case MEMSPACE_STACK:
- return warning_meta (rich_loc, m, get_controlling_option (),
- "%<%s%> of %qE which points to memory"
- " on the stack",
- m_funcname, m_arg);
+ return ctxt.warn ("%<%s%> of %qE which points to memory"
+ " on the stack",
+ m_funcname, m_arg);
break;
}
}
@@ -1531,7 +1508,7 @@ public:
return OPT_Wanalyzer_deref_before_check;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
/* Don't emit the warning if we can't show where the deref
and the check occur. */
@@ -1605,10 +1582,9 @@ public:
m_deref_enode->get_supernode ()->m_bb))
return false;
- return warning_at (rich_loc, get_controlling_option (),
- "check of %qE for NULL after already"
- " dereferencing it",
- m_arg);
+ return ctxt.warn ("check of %qE for NULL after already"
+ " dereferencing it",
+ m_arg);
}
label_text describe_state_change (const evdesc::state_change &change)
diff --git a/gcc/analyzer/sm-pattern-test.cc b/gcc/analyzer/sm-pattern-test.cc
index 4c88bca..cd594e0 100644
--- a/gcc/analyzer/sm-pattern-test.cc
+++ b/gcc/analyzer/sm-pattern-test.cc
@@ -31,7 +31,6 @@ along with GCC; see the file COPYING3. If not see
#include "gimple.h"
#include "tree-pretty-print.h"
#include "diagnostic-path.h"
-#include "diagnostic-metadata.h"
#include "analyzer/analyzer.h"
#include "diagnostic-event-id.h"
#include "analyzer/analyzer-logging.h"
@@ -92,11 +91,10 @@ public:
return 0;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- return warning_at (rich_loc, get_controlling_option (),
- "pattern match on %<%E %s %E%>",
- m_lhs, op_symbol_code (m_op), m_rhs);
+ return ctxt.warn ("pattern match on %<%E %s %E%>",
+ m_lhs, op_symbol_code (m_op), m_rhs);
}
private:
diff --git a/gcc/analyzer/sm-sensitive.cc b/gcc/analyzer/sm-sensitive.cc
index 0597e39..4776d64 100644
--- a/gcc/analyzer/sm-sensitive.cc
+++ b/gcc/analyzer/sm-sensitive.cc
@@ -30,7 +30,6 @@ along with GCC; see the file COPYING3. If not see
#include "gimple.h"
#include "options.h"
#include "diagnostic-path.h"
-#include "diagnostic-metadata.h"
#include "analyzer/analyzer.h"
#include "diagnostic-event-id.h"
#include "analyzer/analyzer-logging.h"
@@ -95,15 +94,12 @@ public:
return OPT_Wanalyzer_exposure_through_output_file;
}
- bool emit (rich_location *rich_loc,
- logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- diagnostic_metadata m;
/* CWE-532: Information Exposure Through Log Files */
- m.add_cwe (532);
- return warning_meta (rich_loc, m, get_controlling_option (),
- "sensitive value %qE written to output file",
- m_arg);
+ ctxt.add_cwe (532);
+ return ctxt.warn ("sensitive value %qE written to output file",
+ m_arg);
}
label_text describe_state_change (const evdesc::state_change &change)
diff --git a/gcc/analyzer/sm-signal.cc b/gcc/analyzer/sm-signal.cc
index 9ebcbdb..6bca395 100644
--- a/gcc/analyzer/sm-signal.cc
+++ b/gcc/analyzer/sm-signal.cc
@@ -32,7 +32,6 @@ along with GCC; see the file COPYING3. If not see
#include "options.h"
#include "bitmap.h"
#include "diagnostic-path.h"
-#include "diagnostic-metadata.h"
#include "analyzer/analyzer.h"
#include "diagnostic-event-id.h"
#include "analyzer/analyzer-logging.h"
@@ -114,15 +113,13 @@ public:
return OPT_Wanalyzer_unsafe_call_within_signal_handler;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
auto_diagnostic_group d;
- diagnostic_metadata m;
/* CWE-479: Signal Handler Use of a Non-reentrant Function. */
- m.add_cwe (479);
- if (warning_meta (rich_loc, m, get_controlling_option (),
- "call to %qD from within signal handler",
- m_unsafe_fndecl))
+ ctxt.add_cwe (479);
+ if (ctxt.warn ("call to %qD from within signal handler",
+ m_unsafe_fndecl))
{
/* If we know a possible alternative function, add a note
suggesting the replacement. */
diff --git a/gcc/analyzer/sm-taint.cc b/gcc/analyzer/sm-taint.cc
index dfd5f7f..d01e3f0 100644
--- a/gcc/analyzer/sm-taint.cc
+++ b/gcc/analyzer/sm-taint.cc
@@ -31,7 +31,6 @@ along with GCC; see the file COPYING3. If not see
#include "gimple.h"
#include "options.h"
#include "diagnostic-path.h"
-#include "diagnostic-metadata.h"
#include "analyzer/analyzer.h"
#include "analyzer/analyzer-logging.h"
#include "gimple-iterator.h"
@@ -211,33 +210,29 @@ public:
return OPT_Wanalyzer_tainted_array_index;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- diagnostic_metadata m;
/* CWE-129: "Improper Validation of Array Index". */
- m.add_cwe (129);
+ ctxt.add_cwe (129);
if (m_arg)
switch (m_has_bounds)
{
default:
gcc_unreachable ();
case BOUNDS_NONE:
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value %qE"
- " in array lookup without bounds checking",
- m_arg);
+ return ctxt.warn ("use of attacker-controlled value %qE"
+ " in array lookup without bounds checking",
+ m_arg);
break;
case BOUNDS_UPPER:
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value %qE"
- " in array lookup without checking for negative",
- m_arg);
+ return ctxt.warn ("use of attacker-controlled value %qE"
+ " in array lookup without checking for negative",
+ m_arg);
break;
case BOUNDS_LOWER:
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value %qE"
- " in array lookup without upper-bounds checking",
- m_arg);
+ return ctxt.warn ("use of attacker-controlled value %qE"
+ " in array lookup without upper-bounds checking",
+ m_arg);
break;
}
else
@@ -246,21 +241,18 @@ public:
default:
gcc_unreachable ();
case BOUNDS_NONE:
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value"
- " in array lookup without bounds checking");
+ return ctxt.warn ("use of attacker-controlled value"
+ " in array lookup without bounds checking");
break;
case BOUNDS_UPPER:
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value"
- " in array lookup without checking for"
- " negative");
+ return ctxt.warn ("use of attacker-controlled value"
+ " in array lookup without checking for"
+ " negative");
break;
case BOUNDS_LOWER:
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value"
- " in array lookup without upper-bounds"
- " checking");
+ return ctxt.warn ("use of attacker-controlled value"
+ " in array lookup without upper-bounds"
+ " checking");
break;
}
}
@@ -327,33 +319,29 @@ public:
return OPT_Wanalyzer_tainted_offset;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- diagnostic_metadata m;
/* CWE-823: "Use of Out-of-range Pointer Offset". */
- m.add_cwe (823);
+ ctxt.add_cwe (823);
if (m_arg)
switch (m_has_bounds)
{
default:
gcc_unreachable ();
case BOUNDS_NONE:
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value %qE as offset"
- " without bounds checking",
- m_arg);
+ return ctxt.warn ("use of attacker-controlled value %qE as offset"
+ " without bounds checking",
+ m_arg);
break;
case BOUNDS_UPPER:
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value %qE as offset"
- " without lower-bounds checking",
- m_arg);
+ return ctxt.warn ("use of attacker-controlled value %qE as offset"
+ " without lower-bounds checking",
+ m_arg);
break;
case BOUNDS_LOWER:
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value %qE as offset"
- " without upper-bounds checking",
- m_arg);
+ return ctxt.warn ("use of attacker-controlled value %qE as offset"
+ " without upper-bounds checking",
+ m_arg);
break;
}
else
@@ -362,19 +350,16 @@ public:
default:
gcc_unreachable ();
case BOUNDS_NONE:
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value as offset"
- " without bounds checking");
+ return ctxt.warn ("use of attacker-controlled value as offset"
+ " without bounds checking");
break;
case BOUNDS_UPPER:
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value as offset"
- " without lower-bounds checking");
+ return ctxt.warn ("use of attacker-controlled value as offset"
+ " without lower-bounds checking");
break;
case BOUNDS_LOWER:
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value as offset"
- " without upper-bounds checking");
+ return ctxt.warn ("use of attacker-controlled value as offset"
+ " without upper-bounds checking");
break;
}
}
@@ -437,33 +422,29 @@ public:
return OPT_Wanalyzer_tainted_size;
}
- bool emit (rich_location *rich_loc, logger *) override
+ bool emit (diagnostic_emission_context &ctxt) override
{
/* "CWE-129: Improper Validation of Array Index". */
- diagnostic_metadata m;
- m.add_cwe (129);
+ ctxt.add_cwe (129);
if (m_arg)
switch (m_has_bounds)
{
default:
gcc_unreachable ();
case BOUNDS_NONE:
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value %qE as size"
- " without bounds checking",
- m_arg);
+ return ctxt.warn ("use of attacker-controlled value %qE as size"
+ " without bounds checking",
+ m_arg);
break;
case BOUNDS_UPPER:
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value %qE as size"
- " without lower-bounds checking",
- m_arg);
+ return ctxt.warn ("use of attacker-controlled value %qE as size"
+ " without lower-bounds checking",
+ m_arg);
break;
case BOUNDS_LOWER:
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value %qE as size"
- " without upper-bounds checking",
- m_arg);
+ return ctxt.warn ("use of attacker-controlled value %qE as size"
+ " without upper-bounds checking",
+ m_arg);
break;
}
else
@@ -472,19 +453,16 @@ public:
default:
gcc_unreachable ();
case BOUNDS_NONE:
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value as size"
- " without bounds checking");
+ return ctxt.warn ("use of attacker-controlled value as size"
+ " without bounds checking");
break;
case BOUNDS_UPPER:
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value as size"
- " without lower-bounds checking");
+ return ctxt.warn ("use of attacker-controlled value as size"
+ " without lower-bounds checking");
break;
case BOUNDS_LOWER:
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value as size"
- " without upper-bounds checking");
+ return ctxt.warn ("use of attacker-controlled value as size"
+ " without upper-bounds checking");
break;
}
}
@@ -547,9 +525,9 @@ public:
return "tainted_access_attrib_size";
}
- bool emit (rich_location *rich_loc, logger *logger) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- bool warned = tainted_size::emit (rich_loc, logger);
+ bool warned = tainted_size::emit (ctxt);
if (warned)
{
inform (DECL_SOURCE_LOCATION (m_callee_fndecl),
@@ -583,20 +561,17 @@ public:
return OPT_Wanalyzer_tainted_divisor;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- diagnostic_metadata m;
/* CWE-369: "Divide By Zero". */
- m.add_cwe (369);
+ ctxt.add_cwe (369);
if (m_arg)
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value %qE as divisor"
- " without checking for zero",
- m_arg);
+ return ctxt.warn ("use of attacker-controlled value %qE as divisor"
+ " without checking for zero",
+ m_arg);
else
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value as divisor"
- " without checking for zero");
+ return ctxt.warn ("use of attacker-controlled value as divisor"
+ " without checking for zero");
}
label_text describe_final_event (const evdesc::final_event &ev) final override
@@ -645,11 +620,10 @@ public:
return OPT_Wanalyzer_tainted_allocation_size;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- diagnostic_metadata m;
/* "CWE-789: Memory Allocation with Excessive Size Value". */
- m.add_cwe (789);
+ ctxt.add_cwe (789);
bool warned;
if (m_arg)
@@ -658,24 +632,21 @@ public:
default:
gcc_unreachable ();
case BOUNDS_NONE:
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value %qE as"
- " allocation size without bounds checking",
- m_arg);
+ warned = ctxt.warn ("use of attacker-controlled value %qE as"
+ " allocation size without bounds checking",
+ m_arg);
break;
case BOUNDS_UPPER:
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value %qE as"
- " allocation size without"
- " lower-bounds checking",
- m_arg);
+ warned = ctxt.warn ("use of attacker-controlled value %qE as"
+ " allocation size without"
+ " lower-bounds checking",
+ m_arg);
break;
case BOUNDS_LOWER:
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value %qE as"
- " allocation size without"
- " upper-bounds checking",
- m_arg);
+ warned = ctxt.warn ("use of attacker-controlled value %qE as"
+ " allocation size without"
+ " upper-bounds checking",
+ m_arg);
break;
}
else
@@ -684,27 +655,24 @@ public:
default:
gcc_unreachable ();
case BOUNDS_NONE:
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value as"
- " allocation size without bounds"
- " checking");
+ warned = ctxt.warn ("use of attacker-controlled value as"
+ " allocation size without bounds"
+ " checking");
break;
case BOUNDS_UPPER:
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value as"
- " allocation size without"
- " lower-bounds checking");
+ warned = ctxt.warn ("use of attacker-controlled value as"
+ " allocation size without"
+ " lower-bounds checking");
break;
case BOUNDS_LOWER:
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacker-controlled value as"
- " allocation size without"
- " upper-bounds checking");
+ warned = ctxt.warn ("use of attacker-controlled value as"
+ " allocation size without"
+ " upper-bounds checking");
break;
}
if (warned)
{
- location_t loc = rich_loc->get_loc ();
+ const location_t loc = ctxt.get_location ();
switch (m_mem_space)
{
default:
@@ -800,15 +768,13 @@ public:
return OPT_Wanalyzer_tainted_assertion;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- diagnostic_metadata m;
/* "CWE-617: Reachable Assertion". */
- m.add_cwe (617);
+ ctxt.add_cwe (617);
- return warning_meta (rich_loc, m, get_controlling_option (),
- "use of attacked-controlled value in"
- " condition for assertion");
+ return ctxt.warn ("use of attacked-controlled value in"
+ " condition for assertion");
}
location_t fixup_location (location_t loc,
diff --git a/gcc/analyzer/store.cc b/gcc/analyzer/store.cc
index 6025085..be1802e 100644
--- a/gcc/analyzer/store.cc
+++ b/gcc/analyzer/store.cc
@@ -38,7 +38,6 @@ along with GCC; see the file COPYING3. If not see
#include "fold-const.h"
#include "tree-pretty-print.h"
#include "diagnostic-color.h"
-#include "diagnostic-metadata.h"
#include "bitmap.h"
#include "selftest.h"
#include "analyzer/analyzer.h"
diff --git a/gcc/analyzer/varargs.cc b/gcc/analyzer/varargs.cc
index f79b2a7..7cdfb20 100644
--- a/gcc/analyzer/varargs.cc
+++ b/gcc/analyzer/varargs.cc
@@ -41,7 +41,6 @@ along with GCC; see the file COPYING3. If not see
#include "analyzer/supergraph.h"
#include "analyzer/diagnostic-manager.h"
#include "analyzer/exploded-graph.h"
-#include "diagnostic-metadata.h"
#include "analyzer/call-details.h"
#if ENABLE_ANALYZER
@@ -403,11 +402,9 @@ public:
&& 0 == strcmp (m_usage_fnname, other.m_usage_fnname));
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- auto_diagnostic_group d;
- return warning_at (rich_loc, get_controlling_option (),
- "%qs after %qs", m_usage_fnname, "va_end");
+ return ctxt.warn ("%qs after %qs", m_usage_fnname, "va_end");
}
const char *get_kind () const final override
@@ -478,11 +475,9 @@ public:
return va_list_sm_diagnostic::subclass_equal_p (other);
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- auto_diagnostic_group d;
- return warning_at (rich_loc, get_controlling_option (),
- "missing call to %qs", "va_end");
+ return ctxt.warn ("missing call to %qs", "va_end");
}
const char *get_kind () const final override { return "va_list_leak"; }
@@ -892,18 +887,15 @@ public:
return OPT_Wanalyzer_va_arg_type_mismatch;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- auto_diagnostic_group d;
- diagnostic_metadata m;
/* "CWE-686: Function Call With Incorrect Argument Type". */
- m.add_cwe (686);
+ ctxt.add_cwe (686);
bool warned
- = warning_meta (rich_loc, m, get_controlling_option (),
- "%<va_arg%> expected %qT but received %qT"
- " for variadic argument %i of %qE",
- m_expected_type, m_actual_type,
- get_variadic_index_for_diagnostic (), m_va_list_tree);
+ = ctxt.warn ("%<va_arg%> expected %qT but received %qT"
+ " for variadic argument %i of %qE",
+ m_expected_type, m_actual_type,
+ get_variadic_index_for_diagnostic (), m_va_list_tree);
return warned;
}
@@ -942,15 +934,12 @@ public:
return OPT_Wanalyzer_va_list_exhausted;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- auto_diagnostic_group d;
- diagnostic_metadata m;
/* CWE-685: Function Call With Incorrect Number of Arguments. */
- m.add_cwe (685);
- bool warned = warning_meta (rich_loc, m, get_controlling_option (),
- "%qE has no more arguments (%i consumed)",
- m_va_list_tree, get_num_consumed ());
+ ctxt.add_cwe (685);
+ bool warned = ctxt.warn ("%qE has no more arguments (%i consumed)",
+ m_va_list_tree, get_num_consumed ());
return warned;
}
diff --git a/gcc/asan.cc b/gcc/asan.cc
index 2424cf6..8d0ffb4 100644
--- a/gcc/asan.cc
+++ b/gcc/asan.cc
@@ -2291,6 +2291,8 @@ asan_protect_global (tree decl, bool ignore_decl_rtl_set_p)
|| (DECL_SECTION_NAME (decl) != NULL
&& !symtab_node::get (decl)->implicit_section
&& !section_sanitized_p (DECL_SECTION_NAME (decl)))
+ /* Don't protect variables in non-generic address-space. */
+ || !ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (TREE_TYPE (decl)))
|| DECL_SIZE (decl) == 0
|| ASAN_RED_ZONE_SIZE * BITS_PER_UNIT > MAX_OFILE_ALIGNMENT
|| TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST
diff --git a/gcc/attribs.cc b/gcc/attribs.cc
index c7209c2..ff4b638 100644
--- a/gcc/attribs.cc
+++ b/gcc/attribs.cc
@@ -27,6 +27,7 @@ along with GCC; see the file COPYING3. If not see
#include "diagnostic-core.h"
#include "attribs.h"
#include "fold-const.h"
+#include "ipa-strub.h"
#include "stor-layout.h"
#include "langhooks.h"
#include "plugin.h"
@@ -39,7 +40,7 @@ along with GCC; see the file COPYING3. If not see
/* Table of the tables of attributes (common, language, format, machine)
searched. */
-static const struct attribute_spec *attribute_tables[4];
+static array_slice<const scoped_attribute_specs *const> attribute_tables[2];
/* Substring representation. */
@@ -102,12 +103,18 @@ static const struct attribute_spec *lookup_scoped_attribute_spec (const_tree,
static bool attributes_initialized = false;
-/* Default empty table of attributes. */
+/* Do not use directly; go through get_gnu_namespace instead. */
+static GTY(()) tree gnu_namespace_cache;
-static const struct attribute_spec empty_attribute_table[] =
+/* Return the IDENTIFIER_NODE for the gnu namespace. */
+
+static tree
+get_gnu_namespace ()
{
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ if (!gnu_namespace_cache)
+ gnu_namespace_cache = get_identifier ("gnu");
+ return gnu_namespace_cache;
+}
/* Return base name of the attribute. Ie '__attr__' is turned into 'attr'.
To avoid need for copying, we simply return length of the string. */
@@ -118,21 +125,19 @@ extract_attribute_substring (struct substring *str)
canonicalize_attr_name (str->str, str->length);
}
-/* Insert an array of attributes ATTRIBUTES into a namespace. This
- array must be NULL terminated. NS is the name of attribute
- namespace. IGNORED_P is true iff all unknown attributes in this
- namespace should be ignored for the purposes of -Wattributes. The
- function returns the namespace into which the attributes have been
- registered. */
+/* Insert SPECS into its namespace. IGNORED_P is true iff all unknown
+ attributes in this namespace should be ignored for the purposes of
+ -Wattributes. The function returns the namespace into which the
+ attributes have been registered. */
scoped_attributes *
-register_scoped_attributes (const struct attribute_spec *attributes,
- const char *ns, bool ignored_p /*=false*/)
+register_scoped_attributes (const scoped_attribute_specs &specs,
+ bool ignored_p /*=false*/)
{
scoped_attributes *result = NULL;
/* See if we already have attributes in the namespace NS. */
- result = find_attribute_namespace (ns);
+ result = find_attribute_namespace (specs.ns);
if (result == NULL)
{
@@ -143,7 +148,7 @@ register_scoped_attributes (const struct attribute_spec *attributes,
attributes_table.create (64);
memset (&sa, 0, sizeof (sa));
- sa.ns = ns;
+ sa.ns = specs.ns;
sa.attributes.create (64);
sa.ignored_p = ignored_p;
result = attributes_table.safe_push (sa);
@@ -153,10 +158,10 @@ register_scoped_attributes (const struct attribute_spec *attributes,
result->ignored_p |= ignored_p;
/* Really add the attributes to their namespace now. */
- for (unsigned i = 0; attributes[i].name != NULL; ++i)
+ for (const attribute_spec &attribute : specs.attributes)
{
- result->attributes.safe_push (attributes[i]);
- register_scoped_attribute (&attributes[i], result);
+ result->attributes.safe_push (attribute);
+ register_scoped_attribute (&attribute, result);
}
gcc_assert (result != NULL);
@@ -183,49 +188,40 @@ find_attribute_namespace (const char* ns)
static void
check_attribute_tables (void)
{
- for (size_t i = 0; i < ARRAY_SIZE (attribute_tables); i++)
- for (size_t j = 0; attribute_tables[i][j].name != NULL; j++)
- {
- /* The name must not begin and end with __. */
- const char *name = attribute_tables[i][j].name;
- int len = strlen (name);
+ hash_set<pair_hash<nofree_string_hash, nofree_string_hash>> names;
+
+ for (auto scoped_array : attribute_tables)
+ for (auto scoped_attributes : scoped_array)
+ for (const attribute_spec &attribute : scoped_attributes->attributes)
+ {
+ /* The name must not begin and end with __. */
+ const char *name = attribute.name;
+ int len = strlen (name);
- gcc_assert (!(name[0] == '_' && name[1] == '_'
- && name[len - 1] == '_' && name[len - 2] == '_'));
+ gcc_assert (!(name[0] == '_' && name[1] == '_'
+ && name[len - 1] == '_' && name[len - 2] == '_'));
- /* The minimum and maximum lengths must be consistent. */
- gcc_assert (attribute_tables[i][j].min_length >= 0);
+ /* The minimum and maximum lengths must be consistent. */
+ gcc_assert (attribute.min_length >= 0);
- gcc_assert (attribute_tables[i][j].max_length == -1
- || (attribute_tables[i][j].max_length
- >= attribute_tables[i][j].min_length));
+ gcc_assert (attribute.max_length == -1
+ || attribute.max_length >= attribute.min_length);
- /* An attribute cannot require both a DECL and a TYPE. */
- gcc_assert (!attribute_tables[i][j].decl_required
- || !attribute_tables[i][j].type_required);
+ /* An attribute cannot require both a DECL and a TYPE. */
+ gcc_assert (!attribute.decl_required
+ || !attribute.type_required);
/* If an attribute requires a function type, in particular
it requires a type. */
- gcc_assert (!attribute_tables[i][j].function_type_required
- || attribute_tables[i][j].type_required);
- }
-
- /* Check that each name occurs just once in each table. */
- for (size_t i = 0; i < ARRAY_SIZE (attribute_tables); i++)
- for (size_t j = 0; attribute_tables[i][j].name != NULL; j++)
- for (size_t k = j + 1; attribute_tables[i][k].name != NULL; k++)
- gcc_assert (strcmp (attribute_tables[i][j].name,
- attribute_tables[i][k].name));
-
- /* Check that no name occurs in more than one table. Names that
- begin with '*' are exempt, and may be overridden. */
- for (size_t i = 0; i < ARRAY_SIZE (attribute_tables); i++)
- for (size_t j = i + 1; j < ARRAY_SIZE (attribute_tables); j++)
- for (size_t k = 0; attribute_tables[i][k].name != NULL; k++)
- for (size_t l = 0; attribute_tables[j][l].name != NULL; l++)
- gcc_assert (attribute_tables[i][k].name[0] == '*'
- || strcmp (attribute_tables[i][k].name,
- attribute_tables[j][l].name));
+ gcc_assert (!attribute.function_type_required
+ || attribute.type_required);
+
+ /* Check that no name occurs more than once. Names that
+ begin with '*' are exempt, and may be overridden. */
+ const char *ns = scoped_attributes->ns;
+ if (name[0] != '*' && names.add ({ ns ? ns : "", name }))
+ gcc_unreachable ();
+ }
}
/* Used to stash pointers to allocated memory so that we can free them at
@@ -281,7 +277,7 @@ handle_ignored_attributes_option (vec<char *> *v)
canonicalize_attr_name (vendor_start, vendor_len);
/* We perform all this hijinks so that we don't have to copy OPT. */
tree vendor_id = get_identifier_with_length (vendor_start, vendor_len);
- const char *attr;
+ array_slice<const attribute_spec> attrs;
/* In the "vendor::" case, we should ignore *any* attribute coming
from this attribute namespace. */
if (attr_len > 0)
@@ -293,22 +289,23 @@ handle_ignored_attributes_option (vec<char *> *v)
}
canonicalize_attr_name (attr_start, attr_len);
tree attr_id = get_identifier_with_length (attr_start, attr_len);
- attr = IDENTIFIER_POINTER (attr_id);
+ const char *attr = IDENTIFIER_POINTER (attr_id);
/* If we've already seen this vendor::attr, ignore it. Attempting to
register it twice would lead to a crash. */
if (lookup_scoped_attribute_spec (vendor_id, attr_id))
continue;
+ /* Create a table with extra attributes which we will register.
+ We can't free it here, so squirrel away the pointers. */
+ attribute_spec *table = new attribute_spec {
+ attr, 0, -2, false, false, false, false, nullptr, nullptr
+ };
+ ignored_attributes_table.safe_push (table);
+ attrs = { table, 1 };
}
- else
- attr = nullptr;
- /* Create a table with extra attributes which we will register.
- We can't free it here, so squirrel away the pointers. */
- attribute_spec *table = new attribute_spec[2];
- ignored_attributes_table.safe_push (table);
- table[0] = { attr, 0, -2, false, false, false, false, nullptr, nullptr };
- table[1] = { nullptr, 0, 0, false, false, false, false, nullptr,
- nullptr };
- register_scoped_attributes (table, IDENTIFIER_POINTER (vendor_id), !attr);
+ const scoped_attribute_specs scoped_specs = {
+ IDENTIFIER_POINTER (vendor_id), { attrs }
+ };
+ register_scoped_attributes (scoped_specs, attrs.empty ());
}
}
@@ -328,27 +325,18 @@ free_attr_data ()
void
init_attributes (void)
{
- size_t i;
-
if (attributes_initialized)
return;
- attribute_tables[0] = lang_hooks.common_attribute_table;
- attribute_tables[1] = lang_hooks.attribute_table;
- attribute_tables[2] = lang_hooks.format_attribute_table;
- attribute_tables[3] = targetm.attribute_table;
-
- /* Translate NULL pointers to pointers to the empty table. */
- for (i = 0; i < ARRAY_SIZE (attribute_tables); i++)
- if (attribute_tables[i] == NULL)
- attribute_tables[i] = empty_attribute_table;
+ attribute_tables[0] = lang_hooks.attribute_table;
+ attribute_tables[1] = targetm.attribute_table;
if (flag_checking)
check_attribute_tables ();
- for (i = 0; i < ARRAY_SIZE (attribute_tables); ++i)
- /* Put all the GNU attributes into the "gnu" namespace. */
- register_scoped_attributes (attribute_tables[i], "gnu");
+ for (auto scoped_array : attribute_tables)
+ for (auto scoped_attributes : scoped_array)
+ register_scoped_attributes (*scoped_attributes);
vec<char *> *ignored = (vec<char *> *) flag_ignored_attributes;
handle_ignored_attributes_option (ignored);
@@ -429,7 +417,7 @@ lookup_attribute_spec (const_tree name)
name = TREE_VALUE (name);
}
else
- ns = get_identifier ("gnu");
+ ns = get_gnu_namespace ();
return lookup_scoped_attribute_spec (ns, name);
}
@@ -446,7 +434,7 @@ get_attribute_namespace (const_tree attr)
{
if (cxx11_attribute_p (attr))
return TREE_PURPOSE (TREE_PURPOSE (attr));
- return get_identifier ("gnu");
+ return get_gnu_namespace ();
}
/* Check LAST_DECL and NODE of the same symbol for attributes that are
@@ -596,6 +584,23 @@ attribute_ignored_p (const attribute_spec *const as)
return as->max_length == -2;
}
+/* See whether LIST contains at least one instance of attribute ATTR
+ (possibly with different arguments). Return the first such attribute
+ if so, otherwise return null. */
+
+static tree
+find_same_attribute (const_tree attr, tree list)
+{
+ if (list == NULL_TREE)
+ return NULL_TREE;
+ tree ns = get_attribute_namespace (attr);
+ tree name = get_attribute_name (attr);
+ return private_lookup_attribute (ns ? IDENTIFIER_POINTER (ns) : nullptr,
+ IDENTIFIER_POINTER (name),
+ ns ? IDENTIFIER_LENGTH (ns) : 0,
+ IDENTIFIER_LENGTH (name), list);
+}
+
/* Process the attributes listed in ATTRIBUTES and install them in *NODE,
which is either a DECL (including a TYPE_DECL) or a TYPE. If a DECL,
it should be modified in place; if a TYPE, a copy should be created
@@ -785,8 +790,8 @@ decl_attributes (tree *node, tree attributes, int flags,
flags &= ~(int) ATTR_FLAG_TYPE_IN_PLACE;
}
- if (spec->function_type_required && TREE_CODE (*anode) != FUNCTION_TYPE
- && TREE_CODE (*anode) != METHOD_TYPE)
+ if (spec->function_type_required
+ && !FUNC_OR_METHOD_TYPE_P (*anode))
{
if (TREE_CODE (*anode) == POINTER_TYPE
&& FUNC_OR_METHOD_TYPE_P (TREE_TYPE (*anode)))
@@ -901,7 +906,24 @@ decl_attributes (tree *node, tree attributes, int flags,
TYPE_NAME (tt) = *node;
}
- *anode = cur_and_last_decl[0];
+ if (*anode != cur_and_last_decl[0])
+ {
+ /* Even if !spec->function_type_required, allow the attribute
+ handler to request the attribute to be applied to the function
+ type, rather than to the function pointer type, by setting
+ cur_and_last_decl[0] to the function type. */
+ if (!fn_ptr_tmp
+ && POINTER_TYPE_P (*anode)
+ && TREE_TYPE (*anode) == cur_and_last_decl[0]
+ && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (*anode)))
+ {
+ fn_ptr_tmp = TREE_TYPE (*anode);
+ fn_ptr_quals = TYPE_QUALS (*anode);
+ anode = &fn_ptr_tmp;
+ }
+ *anode = cur_and_last_decl[0];
+ }
+
if (ret == error_mark_node)
{
warning (OPT_Wattributes, "%qE attribute ignored", name);
@@ -928,9 +950,9 @@ decl_attributes (tree *node, tree attributes, int flags,
else
old_attrs = TYPE_ATTRIBUTES (*anode);
- for (a = lookup_attribute (spec->name, old_attrs);
+ for (a = find_same_attribute (attr, old_attrs);
a != NULL_TREE;
- a = lookup_attribute (spec->name, TREE_CHAIN (a)))
+ a = find_same_attribute (attr, TREE_CHAIN (a)))
{
if (simple_cst_equal (TREE_VALUE (a), args) == 1)
break;
@@ -961,8 +983,8 @@ decl_attributes (tree *node, tree attributes, int flags,
if (TYPE_ATTRIBUTES (variant) == old_attrs)
TYPE_ATTRIBUTES (variant)
= TYPE_ATTRIBUTES (*anode);
- else if (!lookup_attribute
- (spec->name, TYPE_ATTRIBUTES (variant)))
+ else if (!find_same_attribute
+ (attr, TYPE_ATTRIBUTES (variant)))
TYPE_ATTRIBUTES (variant) = tree_cons
(name, args, TYPE_ATTRIBUTES (variant));
}
@@ -1471,11 +1493,11 @@ comp_type_attributes (const_tree type1, const_tree type2)
const struct attribute_spec *as;
const_tree attr;
- as = lookup_attribute_spec (get_attribute_name (a));
+ as = lookup_attribute_spec (TREE_PURPOSE (a));
if (!as || as->affects_type_identity == false)
continue;
- attr = lookup_attribute (as->name, CONST_CAST_TREE (a2));
+ attr = find_same_attribute (a, CONST_CAST_TREE (a2));
if (!attr || !attribute_value_equal (a, attr))
break;
}
@@ -1485,11 +1507,11 @@ comp_type_attributes (const_tree type1, const_tree type2)
{
const struct attribute_spec *as;
- as = lookup_attribute_spec (get_attribute_name (a));
+ as = lookup_attribute_spec (TREE_PURPOSE (a));
if (!as || as->affects_type_identity == false)
continue;
- if (!lookup_attribute (as->name, CONST_CAST_TREE (a1)))
+ if (!find_same_attribute (a, CONST_CAST_TREE (a1)))
break;
/* We don't need to compare trees again, as we did this
already in first loop. */
@@ -1504,9 +1526,20 @@ comp_type_attributes (const_tree type1, const_tree type2)
if ((lookup_attribute ("nocf_check", TYPE_ATTRIBUTES (type1)) != NULL)
^ (lookup_attribute ("nocf_check", TYPE_ATTRIBUTES (type2)) != NULL))
return 0;
+ int strub_ret = strub_comptypes (CONST_CAST_TREE (type1),
+ CONST_CAST_TREE (type2));
+ if (strub_ret == 0)
+ return strub_ret;
/* As some type combinations - like default calling-convention - might
be compatible, we have to call the target hook to get the final result. */
- return targetm.comp_type_attributes (type1, type2);
+ int target_ret = targetm.comp_type_attributes (type1, type2);
+ if (target_ret == 0)
+ return target_ret;
+ if (strub_ret == 2 || target_ret == 2)
+ return 2;
+ if (strub_ret == 1 && target_ret == 1)
+ return 1;
+ gcc_unreachable ();
}
/* PREDICATE acts as a function of type:
@@ -1527,8 +1560,7 @@ remove_attributes_matching (tree attrs, Predicate predicate)
const_tree start = attrs;
for (const_tree attr = attrs; attr; attr = TREE_CHAIN (attr))
{
- tree name = get_attribute_name (attr);
- const attribute_spec *as = lookup_attribute_spec (name);
+ const attribute_spec *as = lookup_attribute_spec (TREE_PURPOSE (attr));
const_tree end;
if (!predicate (attr, as))
end = attr;
@@ -2645,10 +2677,6 @@ attr_access::array_as_string (tree type) const
namespace selftest
{
-/* Helper types to verify the consistency attribute exclusions. */
-
-typedef std::pair<const char *, const char *> excl_pair;
-
/* Self-test to verify that each attribute exclusion is symmetric,
meaning that if attribute A is encoded as incompatible with
attribute B then the opposite relationship is also encoded.
@@ -2663,55 +2691,54 @@ test_attribute_exclusions ()
/* Iterate over the array of attribute tables first (with TI0 as
the index) and over the array of attribute_spec in each table
(with SI0 as the index). */
- const size_t ntables = ARRAY_SIZE (attribute_tables);
+ hash_set<excl_hash_traits> excl_set;
- /* Set of pairs of mutually exclusive attributes. */
- typedef hash_set<excl_hash_traits> exclusion_set;
- exclusion_set excl_set;
+ for (auto scoped_array : attribute_tables)
+ for (auto scoped_attributes : scoped_array)
+ for (const attribute_spec &attribute : scoped_attributes->attributes)
+ {
+ const attribute_spec::exclusions *excl = attribute.exclude;
- for (size_t ti0 = 0; ti0 != ntables; ++ti0)
- for (size_t s0 = 0; attribute_tables[ti0][s0].name; ++s0)
- {
- const attribute_spec::exclusions *excl
- = attribute_tables[ti0][s0].exclude;
+ /* Skip each attribute that doesn't define exclusions. */
+ if (!excl)
+ continue;
- /* Skip each attribute that doesn't define exclusions. */
- if (!excl)
- continue;
+ /* Skip standard (non-GNU) attributes, since currently the
+ exclusions are implicitly for GNU attributes only.
+ Also, C++ likely and unlikely get rewritten to gnu::hot
+ and gnu::cold, so symmetry isn't necessary there. */
+ if (!scoped_attributes->ns)
+ continue;
- const char *attr_name = attribute_tables[ti0][s0].name;
+ const char *attr_name = attribute.name;
- /* Iterate over the set of exclusions for every attribute
- (with EI0 as the index) adding the exclusions defined
- for each to the set. */
- for (size_t ei0 = 0; excl[ei0].name; ++ei0)
- {
- const char *excl_name = excl[ei0].name;
+ /* Iterate over the set of exclusions for every attribute
+ (with EI0 as the index) adding the exclusions defined
+ for each to the set. */
+ for (size_t ei0 = 0; excl[ei0].name; ++ei0)
+ {
+ const char *excl_name = excl[ei0].name;
- if (!strcmp (attr_name, excl_name))
- continue;
+ if (!strcmp (attr_name, excl_name))
+ continue;
- excl_set.add (excl_pair (attr_name, excl_name));
- }
- }
+ excl_set.add ({ attr_name, excl_name });
+ }
+ }
/* Traverse the set of mutually exclusive pairs of attributes
and verify that they are symmetric. */
- for (exclusion_set::iterator it = excl_set.begin ();
- it != excl_set.end ();
- ++it)
- {
- if (!excl_set.contains (excl_pair ((*it).second, (*it).first)))
- {
- /* An exclusion for an attribute has been found that
- doesn't have a corresponding exclusion in the opposite
- direction. */
- char desc[120];
- sprintf (desc, "'%s' attribute exclusion '%s' must be symmetric",
- (*it).first, (*it).second);
- fail (SELFTEST_LOCATION, desc);
- }
- }
+ for (auto excl_pair : excl_set)
+ if (!excl_set.contains ({ excl_pair.second, excl_pair.first }))
+ {
+ /* An exclusion for an attribute has been found that
+ doesn't have a corresponding exclusion in the opposite
+ direction. */
+ char desc[120];
+ sprintf (desc, "'%s' attribute exclusion '%s' must be symmetric",
+ excl_pair.first, excl_pair.second);
+ fail (SELFTEST_LOCATION, desc);
+ }
}
void
@@ -2723,3 +2750,5 @@ attribs_cc_tests ()
} /* namespace selftest */
#endif /* CHECKING_P */
+
+#include "gt-attribs.h"
diff --git a/gcc/attribs.h b/gcc/attribs.h
index 84a4365..fdeebff 100644
--- a/gcc/attribs.h
+++ b/gcc/attribs.h
@@ -20,6 +20,13 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_ATTRIBS_H
#define GCC_ATTRIBS_H
+/* A set of attributes that belong to the same namespace, given by NS. */
+struct scoped_attribute_specs
+{
+ const char *ns;
+ array_slice<const attribute_spec> attributes;
+};
+
extern const struct attribute_spec *lookup_attribute_spec (const_tree);
extern void free_attr_data ();
extern void init_attributes (void);
@@ -42,9 +49,8 @@ extern tree make_attribute (const char *, const char *, tree);
extern bool attribute_ignored_p (tree);
extern bool attribute_ignored_p (const attribute_spec *const);
-extern struct scoped_attributes* register_scoped_attributes (const struct attribute_spec *,
- const char *,
- bool = false);
+extern struct scoped_attributes *
+ register_scoped_attributes (const scoped_attribute_specs &, bool = false);
extern char *sorted_attr_string (tree);
extern bool common_function_versions (tree, tree);
diff --git a/gcc/btfout.cc b/gcc/btfout.cc
index e07fed3..db4f108 100644
--- a/gcc/btfout.cc
+++ b/gcc/btfout.cc
@@ -158,6 +158,19 @@ get_btf_kind (uint32_t ctf_kind)
return BTF_KIND_UNKN;
}
+/* Some BTF types, like BTF_KIND_FUNC_PROTO, are anonymous. The machinery
+ in btfout to emit BTF, may reset dtd_data->ctti_name, but does not update
+ the name in the ctf_dtdef_ref type object (deliberate choice). This
+ interface helps abstract out that state of affairs, while giving access to
+ the name of the type as intended. */
+
+static const char *
+get_btf_type_name (ctf_dtdef_ref dtd)
+{
+ const char *anon = "";
+ return (dtd->dtd_data.ctti_name) ? dtd->dtd_name : anon;
+}
+
/* Helper routines to map between 'relative' and 'absolute' IDs.
In BTF all records (including variables) are output in one long list, and all
@@ -299,7 +312,7 @@ btf_calc_num_vbytes (ctf_dtdef_ref dtd)
break;
case BTF_KIND_ENUM:
- vlen_bytes += (dtd->dtd_data.ctti_size == 0x8)
+ vlen_bytes += (dtd->dtd_data.ctti_size > 4)
? vlen * sizeof (struct btf_enum64)
: vlen * sizeof (struct btf_enum);
break;
@@ -425,6 +438,7 @@ btf_collect_datasec (ctf_container_ref ctfc)
func_dtd->dtd_data = dtd->dtd_data;
func_dtd->dtd_data.ctti_type = dtd->dtd_type;
func_dtd->linkage = dtd->linkage;
+ func_dtd->dtd_name = dtd->dtd_name;
func_dtd->dtd_type = num_types_added + num_types_created;
/* Only the BTF_KIND_FUNC type actually references the name. The
@@ -472,7 +486,15 @@ btf_collect_datasec (ctf_container_ref ctfc)
/* Mark extern variables. */
if (DECL_EXTERNAL (node->decl))
- dvd->dvd_visibility = BTF_VAR_GLOBAL_EXTERN;
+ {
+ dvd->dvd_visibility = BTF_VAR_GLOBAL_EXTERN;
+
+ /* PR112849: avoid assuming a section for extern decls without
+ an explicit section, which would result in incorrectly
+ emitting a BTF_KIND_DATASEC entry for them. */
+ if (node->get_section () == NULL)
+ continue;
+ }
const char *section_name = get_section_name (node);
if (section_name == NULL)
@@ -722,7 +744,7 @@ btf_asm_type_ref (const char *prefix, ctf_container_ref ctfc, ctf_id_t ref_id)
size_t func_id = btf_relative_func_id (ref_id);
ctf_dtdef_ref ref_type = (*funcs)[func_id];
dw2_asm_output_data (4, ref_id, "%s: (BTF_KIND_FUNC '%s')",
- prefix, ref_type->dtd_name);
+ prefix, get_btf_type_name (ref_type));
}
else
{
@@ -733,7 +755,7 @@ btf_asm_type_ref (const char *prefix, ctf_container_ref ctfc, ctf_id_t ref_id)
dw2_asm_output_data (4, ref_id, "%s: (BTF_KIND_%s '%s')",
prefix, btf_kind_name (ref_kind),
- ref_type->dtd_name);
+ get_btf_type_name (ref_type));
}
}
@@ -806,10 +828,14 @@ btf_asm_type (ctf_container_ref ctfc, ctf_dtdef_ref dtd)
btf_kind = BTF_KIND_ENUM64;
}
+ /* PR debug/112656. BTF_KIND_FUNC_PROTO is always anonymous. */
+ if (btf_kind == BTF_KIND_FUNC_PROTO)
+ dtd->dtd_data.ctti_name = 0;
+
dw2_asm_output_data (4, dtd->dtd_data.ctti_name,
"TYPE %" PRIu64 " BTF_KIND_%s '%s'",
get_btf_id (dtd->dtd_type), btf_kind_name (btf_kind),
- dtd->dtd_name);
+ get_btf_type_name (dtd));
dw2_asm_output_data (4, BTF_TYPE_INFO (btf_kind, btf_kflag, btf_vlen),
"btt_info: kind=%u, kflag=%u, vlen=%u",
btf_kind, btf_kflag, btf_vlen);
@@ -914,8 +940,8 @@ btf_asm_enum_const (unsigned int size, ctf_dmdef_t * dmd, unsigned int idx)
{
dw2_asm_output_data (4, dmd->dmd_name_offset, "ENUM_CONST '%s' idx=%u",
dmd->dmd_name, idx);
- if (size == 4)
- dw2_asm_output_data (size, dmd->dmd_value, "bte_value");
+ if (size <= 4)
+ dw2_asm_output_data (size < 4 ? 4 : size, dmd->dmd_value, "bte_value");
else
{
dw2_asm_output_data (4, dmd->dmd_value & 0xffffffff, "bte_value_lo32");
@@ -950,7 +976,7 @@ btf_asm_func_type (ctf_container_ref ctfc, ctf_dtdef_ref dtd, ctf_id_t id)
ctf_id_t ref_id = dtd->dtd_data.ctti_type;
dw2_asm_output_data (4, dtd->dtd_data.ctti_name,
"TYPE %" PRIu64 " BTF_KIND_FUNC '%s'",
- btf_absolute_func_id (id), dtd->dtd_name);
+ btf_absolute_func_id (id), get_btf_type_name (dtd));
dw2_asm_output_data (4, BTF_TYPE_INFO (BTF_KIND_FUNC, 0, dtd->linkage),
"btt_info: kind=%u, kflag=%u, linkage=%u",
BTF_KIND_FUNC, 0, dtd->linkage);
diff --git a/gcc/builtins.cc b/gcc/builtins.cc
index 6af2a0b..afa9be5 100644
--- a/gcc/builtins.cc
+++ b/gcc/builtins.cc
@@ -71,6 +71,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-fold.h"
#include "intl.h"
#include "file-prefix-map.h" /* remap_macro_filename() */
+#include "ipa-strub.h" /* strub_watermark_parm() */
#include "gomp-constants.h"
#include "omp-general.h"
#include "tree-dfa.h"
@@ -151,6 +152,7 @@ static rtx expand_builtin_strnlen (tree, rtx, machine_mode);
static rtx expand_builtin_alloca (tree);
static rtx expand_builtin_unop (machine_mode, tree, rtx, rtx, optab);
static rtx expand_builtin_frame_address (tree, tree);
+static rtx expand_builtin_stack_address ();
static tree stabilize_va_list_loc (location_t, tree, int);
static rtx expand_builtin_expect (tree, rtx);
static rtx expand_builtin_expect_with_probability (tree, rtx);
@@ -1347,6 +1349,9 @@ get_memory_rtx (tree exp, tree len)
tree orig_exp = exp, base;
rtx addr, mem;
+ gcc_checking_assert
+ (ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (exp)))));
+
/* When EXP is not resolved SAVE_EXPR, MEM_ATTRS can be still derived
from its expression, for expr->a.b only <variable>.a.b is recorded. */
if (TREE_CODE (exp) == SAVE_EXPR && !SAVE_EXPR_RESOLVED_P (exp))
@@ -3751,7 +3756,7 @@ expand_builtin_memory_copy_args (tree dest, tree src, tree len,
expected_align, expected_size,
min_size, max_size, probable_max_size,
use_mempcpy_call, &is_move_done,
- might_overlap);
+ might_overlap, tree_ctz (len));
/* Bail out when a mempcpy call would be expanded as libcall and when
we have a target that provides a fast implementation
@@ -4313,6 +4318,10 @@ try_store_by_multiple_pieces (rtx to, rtx len, unsigned int ctz_len,
int tst_bits = (max_bits != min_bits ? max_bits
: floor_log2 (max_len ^ min_len));
+ /* Save the pre-blksize values. */
+ int orig_max_bits = max_bits;
+ int orig_tst_bits = tst_bits;
+
/* Check whether it's profitable to start by storing a fixed BLKSIZE
bytes, to lower max_bits. In the unlikely case of a constant LEN
(implied by identical MAX_LEN and MIN_LEN), we want to issue a
@@ -4352,9 +4361,81 @@ try_store_by_multiple_pieces (rtx to, rtx len, unsigned int ctz_len,
if (max_bits >= 0)
xlenest += ((HOST_WIDE_INT_1U << max_bits) * 2
- (HOST_WIDE_INT_1U << ctz_len));
- if (!can_store_by_pieces (xlenest, builtin_memset_read_str,
- &valc, align, true))
- return false;
+ bool max_loop = false;
+ bool use_store_by_pieces = true;
+ /* Skip the test in case of overflow in xlenest. It shouldn't
+ happen because of the way max_bits and blksize are related, but
+ it doesn't hurt to test. */
+ if (blksize > xlenest
+ || !can_store_by_pieces (xlenest, builtin_memset_read_str,
+ &valc, align, true))
+ {
+ if (!(flag_inline_stringops & ILSOP_MEMSET))
+ return false;
+
+ for (max_bits = orig_max_bits;
+ max_bits >= sctz_len;
+ --max_bits)
+ {
+ xlenest = ((HOST_WIDE_INT_1U << max_bits) * 2
+ - (HOST_WIDE_INT_1U << ctz_len));
+ /* Check that blksize plus the bits to be stored as blocks
+ sized at powers of two can be stored by pieces. This is
+ like the test above, but with smaller max_bits. Skip
+ orig_max_bits (it would be redundant). Also skip in case
+ of overflow. */
+ if (max_bits < orig_max_bits
+ && xlenest + blksize >= xlenest
+ && can_store_by_pieces (xlenest + blksize,
+ builtin_memset_read_str,
+ &valc, align, true))
+ {
+ max_loop = true;
+ break;
+ }
+ if (blksize
+ && can_store_by_pieces (xlenest,
+ builtin_memset_read_str,
+ &valc, align, true))
+ {
+ max_len += blksize;
+ min_len += blksize;
+ tst_bits = orig_tst_bits;
+ blksize = 0;
+ max_loop = true;
+ break;
+ }
+ if (max_bits == sctz_len)
+ {
+ /* We'll get here if can_store_by_pieces refuses to
+ store even a single QImode. We'll fall back to
+ QImode stores then. */
+ if (!sctz_len)
+ {
+ blksize = 0;
+ max_loop = true;
+ use_store_by_pieces = false;
+ break;
+ }
+ --sctz_len;
+ --ctz_len;
+ }
+ }
+ if (!max_loop)
+ return false;
+ /* If the boundaries are such that min and max may run a
+ different number of trips in the initial loop, the remainder
+ needs not be between the moduli, so set tst_bits to cover all
+ bits. Otherwise, if the trip counts are the same, max_len
+ has the common prefix, and the previously-computed tst_bits
+ is usable. */
+ if (max_len >> max_bits > min_len >> max_bits)
+ tst_bits = max_bits;
+ }
+ /* ??? Do we have to check that all powers of two lengths from
+ max_bits down to ctz_len pass can_store_by_pieces? As in, could
+ it possibly be that xlenest passes while smaller power-of-two
+ sizes don't? */
by_pieces_constfn constfun;
void *constfundata;
@@ -4396,7 +4477,9 @@ try_store_by_multiple_pieces (rtx to, rtx len, unsigned int ctz_len,
the least significant bit possibly set in the length. */
for (int i = max_bits; i >= sctz_len; i--)
{
+ rtx_code_label *loop_label = NULL;
rtx_code_label *label = NULL;
+
blksize = HOST_WIDE_INT_1U << i;
/* If we're past the bits shared between min_ and max_len, expand
@@ -4410,25 +4493,57 @@ try_store_by_multiple_pieces (rtx to, rtx len, unsigned int ctz_len,
profile_probability::even ());
}
/* If we are at a bit that is in the prefix shared by min_ and
- max_len, skip this BLKSIZE if the bit is clear. */
- else if ((max_len & blksize) == 0)
+ max_len, skip the current BLKSIZE if the bit is clear, but do
+ not skip the loop, even if it doesn't require
+ prechecking. */
+ else if ((max_len & blksize) == 0
+ && !(max_loop && i == max_bits))
continue;
- /* Issue a store of BLKSIZE bytes. */
- to = store_by_pieces (to, blksize,
- constfun, constfundata,
- align, true,
- i != sctz_len ? RETURN_END : RETURN_BEGIN);
+ if (max_loop && i == max_bits)
+ {
+ loop_label = gen_label_rtx ();
+ emit_label (loop_label);
+ /* Since we may run this multiple times, don't assume we
+ know anything about the offset. */
+ clear_mem_offset (to);
+ }
+ bool update_needed = i != sctz_len || loop_label;
+ rtx next_ptr = NULL_RTX;
+ if (!use_store_by_pieces)
+ {
+ gcc_checking_assert (blksize == 1);
+ if (!val)
+ val = gen_int_mode (valc, QImode);
+ to = change_address (to, QImode, 0);
+ emit_move_insn (to, val);
+ if (update_needed)
+ next_ptr = plus_constant (ptr_mode, ptr, blksize);
+ }
+ else
+ {
+ /* Issue a store of BLKSIZE bytes. */
+ to = store_by_pieces (to, blksize,
+ constfun, constfundata,
+ align, true,
+ update_needed ? RETURN_END : RETURN_BEGIN);
+ next_ptr = XEXP (to, 0);
+ }
/* Adjust REM and PTR, unless this is the last iteration. */
- if (i != sctz_len)
+ if (update_needed)
{
- emit_move_insn (ptr, force_operand (XEXP (to, 0), NULL_RTX));
+ emit_move_insn (ptr, force_operand (next_ptr, NULL_RTX));
to = replace_equiv_address (to, ptr);
rtx rem_minus_blksize = plus_constant (ptr_mode, rem, -blksize);
emit_move_insn (rem, force_operand (rem_minus_blksize, NULL_RTX));
}
+ if (loop_label)
+ emit_cmp_and_jump_insns (rem, GEN_INT (blksize), GE, NULL,
+ ptr_mode, 1, loop_label,
+ profile_probability::likely ());
+
if (label)
{
emit_label (label);
@@ -4715,7 +4830,8 @@ expand_builtin_memcmp (tree exp, rtx target, bool result_eq)
result = emit_block_cmp_hints (arg1_rtx, arg2_rtx, len_rtx,
TREE_TYPE (len), target,
result_eq, constfn,
- CONST_CAST (char *, rep));
+ CONST_CAST (char *, rep),
+ tree_ctz (len));
if (result)
{
@@ -5259,6 +5375,252 @@ expand_builtin_frame_address (tree fndecl, tree exp)
}
}
+#if ! STACK_GROWS_DOWNWARD
+# define STACK_TOPS GT
+#else
+# define STACK_TOPS LT
+#endif
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+# define STACK_UNSIGNED POINTERS_EXTEND_UNSIGNED
+#else
+# define STACK_UNSIGNED true
+#endif
+
+/* Expand a call to builtin function __builtin_stack_address. */
+
+static rtx
+expand_builtin_stack_address ()
+{
+ return convert_to_mode (ptr_mode, copy_to_reg (stack_pointer_rtx),
+ STACK_UNSIGNED);
+}
+
+/* Expand a call to builtin function __builtin_strub_enter. */
+
+static rtx
+expand_builtin_strub_enter (tree exp)
+{
+ if (!validate_arglist (exp, POINTER_TYPE, VOID_TYPE))
+ return NULL_RTX;
+
+ if (optimize < 1 || flag_no_inline)
+ return NULL_RTX;
+
+ rtx stktop = expand_builtin_stack_address ();
+
+ tree wmptr = CALL_EXPR_ARG (exp, 0);
+ tree wmtype = TREE_TYPE (TREE_TYPE (wmptr));
+ tree wmtree = fold_build2 (MEM_REF, wmtype, wmptr,
+ build_int_cst (TREE_TYPE (wmptr), 0));
+ rtx wmark = expand_expr (wmtree, NULL_RTX, ptr_mode, EXPAND_MEMORY);
+
+ emit_move_insn (wmark, stktop);
+
+ return const0_rtx;
+}
+
+/* Expand a call to builtin function __builtin_strub_update. */
+
+static rtx
+expand_builtin_strub_update (tree exp)
+{
+ if (!validate_arglist (exp, POINTER_TYPE, VOID_TYPE))
+ return NULL_RTX;
+
+ if (optimize < 2 || flag_no_inline)
+ return NULL_RTX;
+
+ rtx stktop = expand_builtin_stack_address ();
+
+#ifdef RED_ZONE_SIZE
+ /* Here's how the strub enter, update and leave functions deal with red zones.
+
+ If it weren't for red zones, update, called from within a strub context,
+ would bump the watermark to the top of the stack. Enter and leave, running
+ in the caller, would use the caller's top of stack address both to
+ initialize the watermark passed to the callee, and to start strubbing the
+ stack afterwards.
+
+ Ideally, we'd update the watermark so as to cover the used amount of red
+ zone, and strub starting at the caller's other end of the (presumably
+ unused) red zone. Normally, only leaf functions use the red zone, but at
+ this point we can't tell whether a function is a leaf, nor can we tell how
+ much of the red zone it uses. Furthermore, some strub contexts may have
+ been inlined so that update and leave are called from the same stack frame,
+ and the strub builtins may all have been inlined, turning a strub function
+ into a leaf.
+
+ So cleaning the range from the caller's stack pointer (one end of the red
+ zone) to the (potentially inlined) callee's (other end of the) red zone
+ could scribble over the caller's own red zone.
+
+ We avoid this possibility by arranging for callers that are strub contexts
+ to use their own watermark as the strub starting point. So, if A calls B,
+ and B calls C, B will tell A to strub up to the end of B's red zone, and
+ will strub itself only the part of C's stack frame and red zone that
+ doesn't overlap with B's. With that, we don't need to know who's leaf and
+ who isn't: inlined calls will shrink their strub window to zero, each
+ remaining call will strub some portion of the stack, and eventually the
+ strub context will return to a caller that isn't a strub context itself,
+ that will therefore use its own stack pointer as the strub starting point.
+ It's not a leaf, because strub contexts can't be inlined into non-strub
+ contexts, so it doesn't use the red zone, and it will therefore correctly
+ strub up the callee's stack frame up to the end of the callee's red zone.
+ Neat! */
+ if (true /* (flags_from_decl_or_type (current_function_decl) & ECF_LEAF) */)
+ {
+ poly_int64 red_zone_size = RED_ZONE_SIZE;
+#if STACK_GROWS_DOWNWARD
+ red_zone_size = -red_zone_size;
+#endif
+ stktop = plus_constant (ptr_mode, stktop, red_zone_size);
+ stktop = force_reg (ptr_mode, stktop);
+ }
+#endif
+
+ tree wmptr = CALL_EXPR_ARG (exp, 0);
+ tree wmtype = TREE_TYPE (TREE_TYPE (wmptr));
+ tree wmtree = fold_build2 (MEM_REF, wmtype, wmptr,
+ build_int_cst (TREE_TYPE (wmptr), 0));
+ rtx wmark = expand_expr (wmtree, NULL_RTX, ptr_mode, EXPAND_MEMORY);
+
+ rtx wmarkr = force_reg (ptr_mode, wmark);
+
+ rtx_code_label *lab = gen_label_rtx ();
+ do_compare_rtx_and_jump (stktop, wmarkr, STACK_TOPS, STACK_UNSIGNED,
+ ptr_mode, NULL_RTX, lab, NULL,
+ profile_probability::very_likely ());
+ emit_move_insn (wmark, stktop);
+
+ /* If this is an inlined strub function, also bump the watermark for the
+ enclosing function. This avoids a problem with the following scenario: A
+ calls B and B calls C, and both B and C get inlined into A. B allocates
+ temporary stack space before calling C. If we don't update A's watermark,
+ we may use an outdated baseline for the post-C strub_leave, erasing B's
+ temporary stack allocation. We only need this if we're fully expanding
+ strub_leave inline. */
+ tree xwmptr = (optimize > 2
+ ? strub_watermark_parm (current_function_decl)
+ : wmptr);
+ if (wmptr != xwmptr)
+ {
+ wmptr = xwmptr;
+ wmtype = TREE_TYPE (TREE_TYPE (wmptr));
+ wmtree = fold_build2 (MEM_REF, wmtype, wmptr,
+ build_int_cst (TREE_TYPE (wmptr), 0));
+ wmark = expand_expr (wmtree, NULL_RTX, ptr_mode, EXPAND_MEMORY);
+ wmarkr = force_reg (ptr_mode, wmark);
+
+ do_compare_rtx_and_jump (stktop, wmarkr, STACK_TOPS, STACK_UNSIGNED,
+ ptr_mode, NULL_RTX, lab, NULL,
+ profile_probability::very_likely ());
+ emit_move_insn (wmark, stktop);
+ }
+
+ emit_label (lab);
+
+ return const0_rtx;
+}
+
+
+/* Expand a call to builtin function __builtin_strub_leave. */
+
+static rtx
+expand_builtin_strub_leave (tree exp)
+{
+ if (!validate_arglist (exp, POINTER_TYPE, VOID_TYPE))
+ return NULL_RTX;
+
+ if (optimize < 2 || optimize_size || flag_no_inline)
+ return NULL_RTX;
+
+ rtx stktop = NULL_RTX;
+
+ if (tree wmptr = (optimize
+ ? strub_watermark_parm (current_function_decl)
+ : NULL_TREE))
+ {
+ tree wmtype = TREE_TYPE (TREE_TYPE (wmptr));
+ tree wmtree = fold_build2 (MEM_REF, wmtype, wmptr,
+ build_int_cst (TREE_TYPE (wmptr), 0));
+ rtx wmark = expand_expr (wmtree, NULL_RTX, ptr_mode, EXPAND_MEMORY);
+ stktop = force_reg (ptr_mode, wmark);
+ }
+
+ if (!stktop)
+ stktop = expand_builtin_stack_address ();
+
+ tree wmptr = CALL_EXPR_ARG (exp, 0);
+ tree wmtype = TREE_TYPE (TREE_TYPE (wmptr));
+ tree wmtree = fold_build2 (MEM_REF, wmtype, wmptr,
+ build_int_cst (TREE_TYPE (wmptr), 0));
+ rtx wmark = expand_expr (wmtree, NULL_RTX, ptr_mode, EXPAND_MEMORY);
+
+ rtx wmarkr = force_reg (ptr_mode, wmark);
+
+#if ! STACK_GROWS_DOWNWARD
+ rtx base = stktop;
+ rtx end = wmarkr;
+#else
+ rtx base = wmarkr;
+ rtx end = stktop;
+#endif
+
+ /* We're going to modify it, so make sure it's not e.g. the stack pointer. */
+ base = copy_to_reg (base);
+
+ rtx_code_label *done = gen_label_rtx ();
+ do_compare_rtx_and_jump (base, end, LT, STACK_UNSIGNED,
+ ptr_mode, NULL_RTX, done, NULL,
+ profile_probability::very_likely ());
+
+ if (optimize < 3)
+ expand_call (exp, NULL_RTX, true);
+ else
+ {
+ /* Ok, now we've determined we want to copy the block, so convert the
+ addresses to Pmode, as needed to dereference them to access ptr_mode
+ memory locations, so that we don't have to convert anything within the
+ loop. */
+ base = memory_address (ptr_mode, base);
+ end = memory_address (ptr_mode, end);
+
+ rtx zero = force_operand (const0_rtx, NULL_RTX);
+ int ulen = GET_MODE_SIZE (ptr_mode);
+
+ /* ??? It would be nice to use setmem or similar patterns here,
+ but they do not necessarily obey the stack growth direction,
+ which has security implications. We also have to avoid calls
+ (memset, bzero or any machine-specific ones), which are
+ likely unsafe here (see TARGET_STRUB_MAY_USE_MEMSET). */
+#if ! STACK_GROWS_DOWNWARD
+ rtx incr = plus_constant (Pmode, base, ulen);
+ rtx dstm = gen_rtx_MEM (ptr_mode, base);
+
+ rtx_code_label *loop = gen_label_rtx ();
+ emit_label (loop);
+ emit_move_insn (dstm, zero);
+ emit_move_insn (base, force_operand (incr, NULL_RTX));
+#else
+ rtx decr = plus_constant (Pmode, end, -ulen);
+ rtx dstm = gen_rtx_MEM (ptr_mode, end);
+
+ rtx_code_label *loop = gen_label_rtx ();
+ emit_label (loop);
+ emit_move_insn (end, force_operand (decr, NULL_RTX));
+ emit_move_insn (dstm, zero);
+#endif
+ do_compare_rtx_and_jump (base, end, LT, STACK_UNSIGNED,
+ Pmode, NULL_RTX, NULL, loop,
+ profile_probability::very_likely ());
+ }
+
+ emit_label (done);
+
+ return const0_rtx;
+}
+
/* Expand EXP, a call to the alloca builtin. Return NULL_RTX if we
failed and the caller should emit a normal call. */
@@ -7358,7 +7720,15 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
&& fcode != BUILT_IN_EXECVE
&& fcode != BUILT_IN_CLEAR_CACHE
&& !ALLOCA_FUNCTION_CODE_P (fcode)
- && fcode != BUILT_IN_FREE)
+ && fcode != BUILT_IN_FREE
+ && (fcode != BUILT_IN_MEMSET
+ || !(flag_inline_stringops & ILSOP_MEMSET))
+ && (fcode != BUILT_IN_MEMCPY
+ || !(flag_inline_stringops & ILSOP_MEMCPY))
+ && (fcode != BUILT_IN_MEMMOVE
+ || !(flag_inline_stringops & ILSOP_MEMMOVE))
+ && (fcode != BUILT_IN_MEMCMP
+ || !(flag_inline_stringops & ILSOP_MEMCMP)))
return expand_call (exp, target, ignore);
/* The built-in function expanders test for target == const0_rtx
@@ -7586,6 +7956,27 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
case BUILT_IN_RETURN_ADDRESS:
return expand_builtin_frame_address (fndecl, exp);
+ case BUILT_IN_STACK_ADDRESS:
+ return expand_builtin_stack_address ();
+
+ case BUILT_IN___STRUB_ENTER:
+ target = expand_builtin_strub_enter (exp);
+ if (target)
+ return target;
+ break;
+
+ case BUILT_IN___STRUB_UPDATE:
+ target = expand_builtin_strub_update (exp);
+ if (target)
+ return target;
+ break;
+
+ case BUILT_IN___STRUB_LEAVE:
+ target = expand_builtin_strub_leave (exp);
+ if (target)
+ return target;
+ break;
+
/* Returns the address of the area where the structure is returned.
0 otherwise. */
case BUILT_IN_AGGREGATE_INCOMING_ADDRESS:
diff --git a/gcc/builtins.def b/gcc/builtins.def
index 33e6cad..f03df32f 100644
--- a/gcc/builtins.def
+++ b/gcc/builtins.def
@@ -999,6 +999,10 @@ DEF_EXT_LIB_BUILTIN (BUILT_IN_FFSLL, "ffsll", BT_FN_INT_LONGLONG, ATTR_CONST_
DEF_GCC_BUILTIN (BUILT_IN_FFSG, "ffsg", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF)
DEF_EXT_LIB_BUILTIN (BUILT_IN_FORK, "fork", BT_FN_PID, ATTR_NOTHROW_LIST)
DEF_GCC_BUILTIN (BUILT_IN_FRAME_ADDRESS, "frame_address", BT_FN_PTR_UINT, ATTR_NULL)
+DEF_GCC_BUILTIN (BUILT_IN_STACK_ADDRESS, "stack_address", BT_FN_PTR, ATTR_NULL)
+DEF_BUILTIN_STUB (BUILT_IN___STRUB_ENTER, "__builtin___strub_enter")
+DEF_BUILTIN_STUB (BUILT_IN___STRUB_UPDATE, "__builtin___strub_update")
+DEF_BUILTIN_STUB (BUILT_IN___STRUB_LEAVE, "__builtin___strub_leave")
/* [trans-mem]: Adjust BUILT_IN_TM_FREE if BUILT_IN_FREE is changed. */
DEF_LIB_BUILTIN (BUILT_IN_FREE, "free", BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
DEF_GCC_BUILTIN (BUILT_IN_FROB_RETURN_ADDR, "frob_return_addr", BT_FN_PTR_PTR, ATTR_NULL)
diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog
index 25ab676..3dc8cbc 100644
--- a/gcc/c-family/ChangeLog
+++ b/gcc/c-family/ChangeLog
@@ -1,3 +1,80 @@
+2023-12-06 Alexandre Oliva <oliva@adacore.com>
+
+ * c-attribs.cc: Include ipa-strub.h.
+ (handle_strub_attribute): New.
+ (c_common_attribute_table): Add strub.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * c-attribs.cc (c_common_gnu_attribute_table): Add extra
+ braces to work around PR 16333 in older compilers.
+
+2023-12-05 Richard Biener <rguenther@suse.de>
+
+ PR c/89270
+ * c-common.cc (c_common_type_for_size): Consider
+ registered_builtin_types.
+
+2023-12-05 Marek Polacek <polacek@redhat.com>
+
+ PR c++/107687
+ PR c++/110997
+ * c-cppbuiltin.cc (c_cpp_builtins): Update __cpp_consteval.
+ * c-opts.cc (c_common_post_options): Pre-C++20, unset
+ flag_immediate_escalation.
+ * c.opt (fimmediate-escalation): New option.
+
+2023-12-02 Richard Sandiford <richard.sandiford@arm.com>
+
+ * c-common.h (c_common_attribute_table): Replace with...
+ (c_common_gnu_attribute_table): ...this.
+ (c_common_format_attribute_table): Change type to
+ scoped_attribute_specs.
+ * c-attribs.cc (c_common_attribute_table): Replace with...
+ (c_common_gnu_attributes, c_common_gnu_attribute_table): ...these
+ new globals.
+ (c_common_format_attribute_table): Change type to
+ scoped_attribute_specs, using...
+ (c_common_format_attributes): ...this as the underlying array.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ * c.opt (Wdeclaration-missing-parameter-type): New.
+
+2023-11-30 Jakub Jelinek <jakub@redhat.com>
+
+ PR c++/110349
+ * c-cppbuiltin.cc (c_cpp_builtins): Predefine
+ __cpp_placeholder_variables=202306L for C++26.
+
+2023-11-29 Alexandre Oliva <oliva@adacore.com>
+
+ * c-attribs.cc (c_common_attribute_table): Add hardbool.
+ (handle_hardbool_attribute): New.
+ (type_valid_for_vector_size): Reject hardbool.
+ * c-common.cc (convert_and_check): Skip warnings for convert
+ and check for hardbool.
+ (c_hardbool_type_attr_1): New.
+ * c-common.h (c_hardbool_type_attr): New.
+
+2023-11-27 Alex Coplan <alex.coplan@arm.com>
+ Iain Sandoe <iain@sandoe.co.uk>
+
+ PR c++/60512
+ * c-common.cc (struct hf_feature_info): New.
+ (c_common_register_feature): New.
+ (init_has_feature): New.
+ (has_feature_p): New.
+ * c-common.h (c_common_has_feature): New.
+ (c_family_register_lang_features): New.
+ (c_common_register_feature): New.
+ (has_feature_p): New.
+ * c-lex.cc (init_c_lex): Plumb through has_feature callback.
+ (c_common_has_builtin): Generalize and move common part ...
+ (c_common_lex_availability_macro): ... here.
+ (c_common_has_feature): New.
+ * c-ppoutput.cc (init_pp_output): Plumb through has_feature.
+
2023-11-24 Lewis Hyatt <lhyatt@gmail.com>
PR pch/112319
diff --git a/gcc/c-family/c-attribs.cc b/gcc/c-family/c-attribs.cc
index 461732f..854e987 100644
--- a/gcc/c-family/c-attribs.cc
+++ b/gcc/c-family/c-attribs.cc
@@ -41,6 +41,7 @@ along with GCC; see the file COPYING3. If not see
#include "common/common-target.h"
#include "langhooks.h"
#include "tree-inline.h"
+#include "ipa-strub.h"
#include "toplev.h"
#include "tree-iterator.h"
#include "opts.h"
@@ -69,6 +70,7 @@ static tree handle_asan_odr_indicator_attribute (tree *, tree, tree, int,
static tree handle_stack_protect_attribute (tree *, tree, tree, int, bool *);
static tree handle_no_stack_protector_function_attribute (tree *, tree,
tree, int, bool *);
+static tree handle_strub_attribute (tree *, tree, tree, int, bool *);
static tree handle_noinline_attribute (tree *, tree, tree, int, bool *);
static tree handle_noclone_attribute (tree *, tree, tree, int, bool *);
static tree handle_nocf_check_attribute (tree *, tree, tree, int, bool *);
@@ -176,6 +178,7 @@ static tree handle_objc_root_class_attribute (tree *, tree, tree, int, bool *);
static tree handle_objc_nullability_attribute (tree *, tree, tree, int, bool *);
static tree handle_signed_bool_precision_attribute (tree *, tree, tree, int,
bool *);
+static tree handle_hardbool_attribute (tree *, tree, tree, int, bool *);
static tree handle_retain_attribute (tree *, tree, tree, int, bool *);
static tree handle_fd_arg_attribute (tree *, tree, tree, int, bool *);
static tree handle_null_terminated_string_arg_attribute (tree *, tree, tree, int, bool *);
@@ -288,12 +291,14 @@ static const struct attribute_spec::exclusions attr_stack_protect_exclusions[] =
/* Table of machine-independent attributes common to all C-like languages.
Current list of processed common attributes: nonnull. */
-const struct attribute_spec c_common_attribute_table[] =
+const struct attribute_spec c_common_gnu_attributes[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
{ "signed_bool_precision", 1, 1, false, true, false, true,
handle_signed_bool_precision_attribute, NULL },
+ { "hardbool", 0, 2, false, true, false, true,
+ handle_hardbool_attribute, NULL },
{ "packed", 0, 0, false, false, false, false,
handle_packed_attribute,
attr_aligned_exclusions },
@@ -319,6 +324,8 @@ const struct attribute_spec c_common_attribute_table[] =
{ "no_stack_protector", 0, 0, true, false, false, false,
handle_no_stack_protector_function_attribute,
attr_stack_protect_exclusions },
+ { "strub", 0, 1, false, true, false, true,
+ handle_strub_attribute, NULL },
{ "noinline", 0, 0, true, false, false, false,
handle_noinline_attribute,
attr_noinline_exclusions },
@@ -576,23 +583,31 @@ const struct attribute_spec c_common_attribute_table[] =
{ "fd_arg_write", 1, 1, false, true, true, false,
handle_fd_arg_attribute, NULL},
{ "null_terminated_string_arg", 1, 1, false, true, true, false,
- handle_null_terminated_string_arg_attribute, NULL},
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
+ handle_null_terminated_string_arg_attribute, NULL}
+};
+
+const struct scoped_attribute_specs c_common_gnu_attribute_table =
+{
+ "gnu", { c_common_gnu_attributes }
};
/* Give the specifications for the format attributes, used by C and all
descendants.
Current list of processed format attributes: format, format_arg. */
-const struct attribute_spec c_common_format_attribute_table[] =
+const struct attribute_spec c_common_format_attributes[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
{ "format", 3, 3, false, true, true, false,
handle_format_attribute, NULL },
{ "format_arg", 1, 1, false, true, true, false,
- handle_format_arg_attribute, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
+ handle_format_arg_attribute, NULL }
+};
+
+const struct scoped_attribute_specs c_common_format_attribute_table =
+{
+ "gnu", { c_common_format_attributes }
};
/* Returns TRUE iff the attribute indicated by ATTR_ID takes a plain
@@ -997,6 +1012,96 @@ handle_signed_bool_precision_attribute (tree *node, tree name, tree args,
return NULL_TREE;
}
+/* Handle a "hardbool" attribute; arguments as in struct
+ attribute_spec.handler. */
+
+static tree
+handle_hardbool_attribute (tree *node, tree name, tree args,
+ int /* flags */, bool *no_add_attrs)
+{
+ if (c_language != clk_c)
+ {
+ error ("%qE attribute only supported in C", name);
+ *no_add_attrs = TRUE;
+ return NULL_TREE;
+ }
+
+ if (!TYPE_P (*node) || TREE_CODE (*node) != INTEGER_TYPE)
+ {
+ error ("%qE attribute only supported on "
+ "integral types", name);
+ *no_add_attrs = TRUE;
+ return NULL_TREE;
+ }
+
+ tree orig = *node;
+ *node = build_duplicate_type (orig);
+
+ TREE_SET_CODE (*node, ENUMERAL_TYPE);
+ ENUM_UNDERLYING_TYPE (*node) = orig;
+
+ tree false_value;
+ if (args)
+ false_value = fold_convert (*node, TREE_VALUE (args));
+ else
+ false_value = fold_convert (*node, integer_zero_node);
+
+ if (TREE_OVERFLOW_P (false_value))
+ {
+ warning (OPT_Wattributes,
+ "overflows in conversion from %qT to %qT "
+ "changes value from %qE to %qE",
+ TREE_TYPE (TREE_VALUE (args)), *node,
+ TREE_VALUE (args), false_value);
+ TREE_OVERFLOW (false_value) = false;
+ }
+
+ tree true_value;
+ if (args && TREE_CHAIN (args))
+ true_value = fold_convert (*node, TREE_VALUE (TREE_CHAIN (args)));
+ else
+ true_value = fold_build1 (BIT_NOT_EXPR, *node, false_value);
+
+ if (TREE_OVERFLOW_P (true_value))
+ {
+ warning (OPT_Wattributes,
+ "overflows in conversion from %qT to %qT "
+ "changes value from %qE to %qE",
+ TREE_TYPE (TREE_VALUE (TREE_CHAIN (args))), *node,
+ TREE_VALUE (TREE_CHAIN (args)), true_value);
+ TREE_OVERFLOW (true_value) = false;
+ }
+
+ if (tree_int_cst_compare (false_value, true_value) == 0)
+ {
+ error ("%qE attribute requires different values for"
+ " %<false%> and %<true%> for type %qT",
+ name, *node);
+ *no_add_attrs = TRUE;
+ return NULL_TREE;
+ }
+
+ tree values = build_tree_list (get_identifier ("false"),
+ false_value);
+ TREE_CHAIN (values) = build_tree_list (get_identifier ("true"),
+ true_value);
+
+ /* Do *not* set TYPE_MIN_VALUE, TYPE_MAX_VALUE, nor TYPE_PRECISION according
+ to the false and true values. That might cause the constants to be the
+ only acceptable values, which would drop the very hardening checks this
+ attribute is supposed to add. */
+
+ TYPE_ATTRIBUTES (*node) = tree_cons (name, args,
+ TYPE_ATTRIBUTES (*node));
+ *no_add_attrs = TRUE;
+
+ gcc_checking_assert (!TYPE_CACHED_VALUES_P (*node));
+ TYPE_VALUES (*node) = values;
+ TYPE_NAME (*node) = orig;
+
+ return NULL_TREE;
+}
+
/* Handle a "packed" attribute; arguments as in
struct attribute_spec.handler. */
@@ -1388,6 +1493,84 @@ handle_noipa_attribute (tree *node, tree name, tree, int, bool *no_add_attrs)
return NULL_TREE;
}
+/* Handle a "strub" attribute; arguments as in
+ struct attribute_spec.handler. */
+
+static tree
+handle_strub_attribute (tree *node, tree name,
+ tree args,
+ int ARG_UNUSED (flags), bool *no_add_attrs)
+{
+ bool enable = true;
+
+ if (args && FUNCTION_POINTER_TYPE_P (*node))
+ *node = TREE_TYPE (*node);
+
+ if (args && FUNC_OR_METHOD_TYPE_P (*node))
+ {
+ switch (strub_validate_fn_attr_parm (TREE_VALUE (args)))
+ {
+ case 1:
+ case 2:
+ enable = true;
+ break;
+
+ case 0:
+ warning (OPT_Wattributes,
+ "%qE attribute ignored because of argument %qE",
+ name, TREE_VALUE (args));
+ *no_add_attrs = true;
+ enable = false;
+ break;
+
+ case -1:
+ case -2:
+ enable = false;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ args = TREE_CHAIN (args);
+ }
+
+ if (args)
+ {
+ warning (OPT_Wattributes,
+ "ignoring attribute %qE because of excess arguments"
+ " starting at %qE",
+ name, TREE_VALUE (args));
+ *no_add_attrs = true;
+ enable = false;
+ }
+
+ /* Warn about unmet expectations that the strub attribute works like a
+ qualifier. ??? Could/should we extend it to the element/field types
+ here? */
+ if (TREE_CODE (*node) == ARRAY_TYPE
+ || VECTOR_TYPE_P (*node)
+ || TREE_CODE (*node) == COMPLEX_TYPE)
+ warning (OPT_Wattributes,
+ "attribute %qE does not apply to elements"
+ " of non-scalar type %qT",
+ name, *node);
+ else if (RECORD_OR_UNION_TYPE_P (*node))
+ warning (OPT_Wattributes,
+ "attribute %qE does not apply to fields"
+ " of aggregate type %qT",
+ name, *node);
+
+ /* If we see a strub-enabling attribute, and we're at the default setting,
+ implicitly or explicitly, note that the attribute was seen, so that we can
+ reduce the compile-time overhead to nearly zero when the strub feature is
+ not used. */
+ if (enable && flag_strub < -2)
+ flag_strub += 2;
+
+ return NULL_TREE;
+}
+
/* Handle a "noinline" attribute; arguments as in
struct attribute_spec.handler. */
@@ -4408,7 +4591,8 @@ static tree
type_valid_for_vector_size (tree type, tree atname, tree args,
unsigned HOST_WIDE_INT *ptrnunits)
{
- bool error_p = ptrnunits != NULL;
+ bool hardbool_p = c_hardbool_type_attr (type);
+ bool error_p = ptrnunits != NULL || hardbool_p;
/* Get the mode of the type being modified. */
machine_mode orig_mode = TYPE_MODE (type);
@@ -4421,6 +4605,7 @@ type_valid_for_vector_size (tree type, tree atname, tree args,
&& !ALL_SCALAR_FIXED_POINT_MODE_P (orig_mode))
|| !tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
|| TREE_CODE (type) == BOOLEAN_TYPE
+ || hardbool_p
|| TREE_CODE (type) == BITINT_TYPE)
{
if (error_p)
diff --git a/gcc/c-family/c-common.cc b/gcc/c-family/c-common.cc
index ca7557c..d175054d 100644
--- a/gcc/c-family/c-common.cc
+++ b/gcc/c-family/c-common.cc
@@ -1826,7 +1826,8 @@ convert_and_check (location_t loc, tree type, tree expr, bool init_const)
if (c_inhibit_evaluation_warnings == 0
&& !TREE_OVERFLOW_P (expr)
- && result != error_mark_node)
+ && result != error_mark_node
+ && !c_hardbool_type_attr (type))
warnings_for_convert_and_check (loc, type, expr_for_warning, result);
return result;
@@ -2361,6 +2362,15 @@ c_common_type_for_size (unsigned int bits, int unsignedp)
return (unsignedp ? widest_unsigned_literal_type_node
: widest_integer_literal_type_node);
+ for (tree t = registered_builtin_types; t; t = TREE_CHAIN (t))
+ {
+ tree type = TREE_VALUE (t);
+ if (TREE_CODE (type) == INTEGER_TYPE
+ && bits == TYPE_PRECISION (type)
+ && !!unsignedp == !!TYPE_UNSIGNED (type))
+ return type;
+ }
+
if (bits <= TYPE_PRECISION (intQI_type_node))
return unsignedp ? unsigned_intQI_type_node : intQI_type_node;
@@ -9988,4 +9998,22 @@ has_feature_p (const char *ident, bool strict_p)
return !strict_p || *feat_p;
}
+/* This is the slow path of c-common.h's c_hardbool_type_attr. */
+
+tree
+c_hardbool_type_attr_1 (tree type, tree *false_value, tree *true_value)
+{
+ tree attr = lookup_attribute ("hardbool", TYPE_ATTRIBUTES (type));
+ if (!attr)
+ return attr;
+
+ if (false_value)
+ *false_value = TREE_VALUE (TYPE_VALUES (type));
+
+ if (true_value)
+ *true_value = TREE_VALUE (TREE_CHAIN (TYPE_VALUES (type)));
+
+ return attr;
+}
+
#include "gt-c-family-c-common.h"
diff --git a/gcc/c-family/c-common.h b/gcc/c-family/c-common.h
index dd4fe3a..cb9b6f3 100644
--- a/gcc/c-family/c-common.h
+++ b/gcc/c-family/c-common.h
@@ -821,8 +821,8 @@ enum conversion_safety {
extern struct visibility_flags visibility_options;
/* Attribute table common to the C front ends. */
-extern const struct attribute_spec c_common_attribute_table[];
-extern const struct attribute_spec c_common_format_attribute_table[];
+extern const struct scoped_attribute_specs c_common_gnu_attribute_table;
+extern const struct scoped_attribute_specs c_common_format_attribute_table;
/* Pointer to function to lazily generate the VAR_DECL for __FUNCTION__ etc.
ID is the identifier to use, NAME is the string.
@@ -911,6 +911,7 @@ extern bool get_attribute_operand (tree, unsigned HOST_WIDE_INT *);
extern void c_common_finalize_early_debug (void);
extern unsigned int c_strict_flex_array_level_of (tree);
extern bool c_option_is_from_cpp_diagnostics (int);
+extern tree c_hardbool_type_attr_1 (tree, tree *, tree *);
/* Used by convert_and_check; in front ends. */
extern tree convert_init (tree, tree);
@@ -1356,6 +1357,23 @@ c_tree_chain_next (tree t)
return NULL;
}
+/* Return the hardbool attribute associated with TYPE, if there is one, provided
+ that TYPE looks like an enumeral type that might have been set up by
+ handle_hardbool_attribute. Return NULL otherwise.
+
+ If FALSE_VALUE or TRUE_VALUE are non-NULL and TYPE is a hardened boolean
+ type, store the corresponding representation values. */
+static inline tree
+c_hardbool_type_attr (tree type,
+ tree *false_value = NULL, tree *true_value = NULL)
+{
+ if (TREE_CODE (type) != ENUMERAL_TYPE
+ || TYPE_LANG_SPECIFIC (type))
+ return NULL_TREE;
+
+ return c_hardbool_type_attr_1 (type, false_value, true_value);
+}
+
/* Mask used by tm_stmt_attr. */
#define TM_STMT_ATTR_OUTER 2
#define TM_STMT_ATTR_ATOMIC 4
diff --git a/gcc/c-family/c-cppbuiltin.cc b/gcc/c-family/c-cppbuiltin.cc
index 56c4d63..2d1249f 100644
--- a/gcc/c-family/c-cppbuiltin.cc
+++ b/gcc/c-family/c-cppbuiltin.cc
@@ -1059,7 +1059,7 @@ c_cpp_builtins (cpp_reader *pfile)
cpp_define (pfile, "__cpp_constexpr=202002L");
cpp_define (pfile, "__cpp_constexpr_in_decltype=201711L");
cpp_define (pfile, "__cpp_conditional_explicit=201806L");
- cpp_define (pfile, "__cpp_consteval=201811L");
+ cpp_define (pfile, "__cpp_consteval=202211L");
cpp_define (pfile, "__cpp_constinit=201907L");
cpp_define (pfile, "__cpp_deduction_guides=201907L");
cpp_define (pfile, "__cpp_nontype_template_args=201911L");
@@ -1088,6 +1088,7 @@ c_cpp_builtins (cpp_reader *pfile)
/* Set feature test macros for C++26. */
cpp_define (pfile, "__cpp_constexpr=202306L");
cpp_define (pfile, "__cpp_static_assert=202306L");
+ cpp_define (pfile, "__cpp_placeholder_variables=202306L");
}
if (flag_concepts)
{
diff --git a/gcc/c-family/c-opts.cc b/gcc/c-family/c-opts.cc
index d7faff1..d484ecf 100644
--- a/gcc/c-family/c-opts.cc
+++ b/gcc/c-family/c-opts.cc
@@ -1139,6 +1139,11 @@ c_common_post_options (const char **pfilename)
if (cxx_dialect >= cxx20 || flag_concepts_ts)
flag_concepts = 1;
+ /* -fimmediate-escalation has no effect when immediate functions are not
+ supported. */
+ if (flag_immediate_escalation && cxx_dialect < cxx20)
+ flag_immediate_escalation = 0;
+
if (num_in_fnames > 1)
error ("too many filenames given; type %<%s %s%> for usage",
progname, "--help");
diff --git a/gcc/c-family/c.opt b/gcc/c-family/c.opt
index a2c8cef..3706505 100644
--- a/gcc/c-family/c.opt
+++ b/gcc/c-family/c.opt
@@ -595,6 +595,10 @@ Wdeclaration-after-statement
C ObjC Var(warn_declaration_after_statement) Init(-1) Warning
Warn when a declaration is found after a statement.
+Wdeclaration-missing-parameter-type
+C ObjC Var(warn_declaration_missing_parameter) Warning Init(1)
+Warn for missing parameter types in function declarations.
+
Wdelete-incomplete
C++ ObjC++ Var(warn_delete_incomplete) Init(1) Warning
Warn when deleting a pointer to incomplete type.
@@ -1894,6 +1898,10 @@ fhuge-objects
C++ ObjC++ WarnRemoved
No longer supported.
+fimmediate-escalation
+C++ ObjC++ Var(flag_immediate_escalation) Init(1)
+Implement P2564 for consteval propagation.
+
fimplement-inlines
C++ ObjC++ Var(flag_implement_inlines) Init(1)
Export functions even if they can be inlined.
diff --git a/gcc/c/ChangeLog b/gcc/c/ChangeLog
index 46d8c59..f617577 100644
--- a/gcc/c/ChangeLog
+++ b/gcc/c/ChangeLog
@@ -1,3 +1,112 @@
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * c-decl.cc (std_attribute_table): Add extra braces to work
+ around PR 16333 in older compilers.
+
+2023-12-05 Richard Biener <rguenther@suse.de>
+
+ PR c/86869
+ * c-typeck.cc (c_build_qualified_type): Preserve address-space
+ info for ARRAY_TYPE.
+
+2023-12-02 Richard Sandiford <richard.sandiford@arm.com>
+
+ * c-tree.h (std_attribute_table): Declare.
+ * c-decl.cc (std_attribute_table): Change type to
+ scoped_attribute_specs, using...
+ (std_attributes): ...this as the underlying array.
+ (c_init_decl_processing): Remove call to register_scoped_attributes.
+ * c-objc-common.h (c_objc_attribute_table): New global.
+ (LANG_HOOKS_ATTRIBUTE_TABLE): Use it.
+ (LANG_HOOKS_COMMON_ATTRIBUTE_TABLE): Delete.
+ (LANG_HOOKS_FORMAT_ATTRIBUTE_TABLE): Delete.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ PR other/44209
+ * c-decl.cc (grokparms): Issue permerror for
+ OPT_Wdeclaration_missing_parameter_type instead of a pedwarn.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ PR c/96284
+ * c-typeck.cc (build_conditional_expr): Upgrade most pointer
+ type mismatches to a permerror.
+ (convert_for_assignment): Use permerror_opt and
+ permerror_init for OPT_Wincompatible_pointer_types warnings.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ PR c/96284
+ * c-typeck.cc (c_finish_return): Use permerrors
+ for OPT_Wreturn_mismatch diagnostics.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ * c-decl.cc (grokdeclarator): Do not skip -Wimplicit-int
+ warnings or errors in system headers.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ * c-decl.cc (warn_defaults_to): Remove.
+ (grok_declarator, start_function): Call permerror_opt
+ instead of warn_defaults_to.
+ (store_parm_decls_oldstyle): Call permerror_opt for
+ OPT_Wimplicit_int.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ PR c/91092
+ PR c/96284
+ * c-decl.cc (implicit_decl_permerror): Rename from
+ implicit_decl_warning. Call permerror_opt instead of
+ pedwarn and warning_at.
+ (implicitly_declare): Adjust callers.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ PR c/96284
+ PR c/106416
+ * c-typeck.cc (build_conditional_expr): Use permerror_opt for
+ pointer/integer type mismatches, based on -Wint-conversion.
+ (pedwarn_permerror_init, permerror_init): New function.
+ (pedwarn_init): Call pedwarn_permerror_init.
+ (convert_for_assignment): Use permerror_opt and
+ permerror_init for -Wint-conversion warnings.
+
+2023-11-29 Alexandre Oliva <oliva@adacore.com>
+
+ * c-typeck.cc (convert_lvalue_to_rvalue): Decay hardbools.
+ * c-convert.cc (convert): Convert to hardbool through
+ truthvalue.
+ * c-decl.cc (check_bitfield_type_and_width): Skip enumeral
+ truncation warnings for hardbool.
+ (finish_struct): Propagate hardbool attribute to bitfield
+ types.
+ (digest_init): Convert to hardbool.
+
+2023-11-28 Jason Merrill <jason@redhat.com>
+
+ PR c++/94264
+ PR c++/53220
+ * c-typeck.cc (array_to_pointer_conversion): Adjust -Wc++-compat
+ diagnostic.
+
+2023-11-28 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/112741
+ * gimple-parser.cc (c_parser_parse_gimple_body): Also
+ set DECL_SEEN_IN_BIND_EXPR_Pfor locals.
+
+2023-11-27 Alex Coplan <alex.coplan@arm.com>
+ Iain Sandoe <iain@sandoe.co.uk>
+
+ PR c++/60512
+ * c-lang.cc (c_family_register_lang_features): New.
+ * c-objc-common.cc (struct c_feature_info): New.
+ (c_register_features): New.
+ * c-objc-common.h (c_register_features): New.
+
2023-11-24 Tobias Burnus <tobias@codesourcery.com>
* c-parser.cc (c_parser_omp_clause_num_threads,
diff --git a/gcc/c/c-convert.cc b/gcc/c/c-convert.cc
index 71e618c..918effd 100644
--- a/gcc/c/c-convert.cc
+++ b/gcc/c/c-convert.cc
@@ -105,6 +105,20 @@ c_convert (tree type, tree expr, bool init_const)
return error_mark_node;
}
+ {
+ tree false_value, true_value;
+ if (c_hardbool_type_attr (type, &false_value, &true_value))
+ {
+ bool save = in_late_binary_op;
+ in_late_binary_op = true;
+ expr = c_objc_common_truthvalue_conversion (input_location, expr);
+ in_late_binary_op = save;
+
+ return fold_build3_loc (loc, COND_EXPR, type,
+ expr, true_value, false_value);
+ }
+ }
+
switch (code)
{
case VOID_TYPE:
diff --git a/gcc/c/c-decl.cc b/gcc/c/c-decl.cc
index 439a312..92c83e1 100644
--- a/gcc/c/c-decl.cc
+++ b/gcc/c/c-decl.cc
@@ -647,8 +647,6 @@ static tree grokdeclarator (const struct c_declarator *,
bool *, enum deprecated_states);
static tree grokparms (struct c_arg_info *, bool);
static void layout_array_type (tree);
-static void warn_defaults_to (location_t, int, const char *, ...)
- ATTRIBUTE_GCC_DIAG(3,4);
static const char *header_for_builtin_fn (tree);
/* T is a statement. Add it to the statement-tree. This is the
@@ -3493,12 +3491,12 @@ pushdecl (tree x)
}
-/* Issue a warning about implicit function declaration. ID is the function
+/* Issue a permerror about implicit function declaration. ID is the function
identifier, OLDDECL is a declaration of the function in a different scope,
or NULL_TREE. */
static void
-implicit_decl_warning (location_t loc, tree id, tree olddecl)
+implicit_decl_permerror (location_t loc, tree id, tree olddecl)
{
if (!warn_implicit_function_declaration)
return;
@@ -3515,14 +3513,14 @@ implicit_decl_warning (location_t loc, tree id, tree olddecl)
{
gcc_rich_location richloc (loc);
richloc.add_fixit_replace (suggestion);
- warned = pedwarn (&richloc, OPT_Wimplicit_function_declaration,
- "implicit declaration of function %qE;"
- " did you mean %qs?",
- id, suggestion);
+ warned = permerror_opt (&richloc, OPT_Wimplicit_function_declaration,
+ "implicit declaration of function %qE;"
+ " did you mean %qs?",
+ id, suggestion);
}
else
- warned = pedwarn (loc, OPT_Wimplicit_function_declaration,
- "implicit declaration of function %qE", id);
+ warned = permerror_opt (loc, OPT_Wimplicit_function_declaration,
+ "implicit declaration of function %qE", id);
}
else if (const char *suggestion = hint.suggestion ())
{
@@ -3812,7 +3810,7 @@ implicitly_declare (location_t loc, tree functionid)
then recycle the old declaration but with the new type. */
if (!C_DECL_IMPLICIT (decl))
{
- implicit_decl_warning (loc, functionid, decl);
+ implicit_decl_permerror (loc, functionid, decl);
C_DECL_IMPLICIT (decl) = 1;
}
if (fndecl_built_in_p (decl))
@@ -3865,7 +3863,7 @@ implicitly_declare (location_t loc, tree functionid)
DECL_EXTERNAL (decl) = 1;
TREE_PUBLIC (decl) = 1;
C_DECL_IMPLICIT (decl) = 1;
- implicit_decl_warning (loc, functionid, 0);
+ implicit_decl_permerror (loc, functionid, 0);
asmspec_tree = maybe_apply_renaming_pragma (decl, /*asmname=*/NULL);
if (asmspec_tree)
set_user_assembler_name (decl, TREE_STRING_POINTER (asmspec_tree));
@@ -4635,7 +4633,7 @@ handle_std_noreturn_attribute (tree *node, tree name, tree args,
}
/* Table of supported standard (C23) attributes. */
-const struct attribute_spec std_attribute_table[] =
+static const attribute_spec std_attributes[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
@@ -4650,8 +4648,12 @@ const struct attribute_spec std_attribute_table[] =
{ "nodiscard", 0, 1, false, false, false, false,
handle_nodiscard_attribute, NULL },
{ "noreturn", 0, 0, false, false, false, false,
- handle_std_noreturn_attribute, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
+ handle_std_noreturn_attribute, NULL }
+};
+
+const scoped_attribute_specs std_attribute_table =
+{
+ nullptr, { std_attributes }
};
/* Create the predefined scalar types of C,
@@ -4667,8 +4669,6 @@ c_init_decl_processing (void)
/* Initialize reserved words for parser. */
c_parse_init ();
- register_scoped_attributes (std_attribute_table, NULL);
-
current_function_decl = NULL_TREE;
gcc_obstack_init (&parser_obstack);
@@ -6532,6 +6532,12 @@ check_bitfield_type_and_width (location_t loc, tree *type, tree *width,
else
w = tree_to_uhwi (*width);
+ /* Truncation of hardbool false and true representation values is always safe:
+ either the values remain different, or we'll report a problem when creating
+ the narrower type. */
+ if (c_hardbool_type_attr (*type))
+ return;
+
if (TREE_CODE (*type) == ENUMERAL_TYPE)
{
struct lang_type *lt = TYPE_LANG_SPECIFIC (*type);
@@ -6570,23 +6576,6 @@ warn_variable_length_array (tree name, tree size)
}
}
-/* Print warning about defaulting to int if necessary. */
-
-static void
-warn_defaults_to (location_t location, int opt, const char *gmsgid, ...)
-{
- diagnostic_info diagnostic;
- va_list ap;
- rich_location richloc (line_table, location);
-
- va_start (ap, gmsgid);
- diagnostic_set_info (&diagnostic, gmsgid, &ap, &richloc,
- flag_isoc99 ? DK_PEDWARN : DK_WARNING);
- diagnostic.option_index = opt;
- diagnostic_report_diagnostic (global_dc, &diagnostic);
- va_end (ap);
-}
-
/* Returns the smallest location != UNKNOWN_LOCATION in LOCATIONS,
considering only those c_declspec_words found in LIST, which
must be terminated by cdw_number_of_elements. */
@@ -6864,7 +6853,7 @@ grokdeclarator (const struct c_declarator *declarator,
/* Diagnose defaulting to "int". */
- if (declspecs->default_int_p && !in_system_header_at (input_location))
+ if (declspecs->default_int_p)
{
/* Issue a warning if this is an ISO C 99 program or if
-Wreturn-type and this is a function, or if -Wimplicit;
@@ -6875,12 +6864,12 @@ grokdeclarator (const struct c_declarator *declarator,
else
{
if (name)
- warn_defaults_to (loc, OPT_Wimplicit_int,
- "type defaults to %<int%> in declaration "
- "of %qE", name);
+ permerror_opt (loc, OPT_Wimplicit_int,
+ "type defaults to %<int%> in declaration "
+ "of %qE", name);
else
- warn_defaults_to (loc, OPT_Wimplicit_int,
- "type defaults to %<int%> in type name");
+ permerror_opt (loc, OPT_Wimplicit_int,
+ "type defaults to %<int%> in type name");
}
}
@@ -8356,8 +8345,10 @@ grokparms (struct c_arg_info *arg_info, bool funcdef_flag)
{
if (!funcdef_flag)
{
- pedwarn (input_location, 0, "parameter names (without types) in "
- "function declaration");
+ permerror_opt (input_location,
+ OPT_Wdeclaration_missing_parameter_type,
+ "parameter names (without types) in "
+ "function declaration");
arg_info->parms = NULL_TREE;
}
else
@@ -9469,6 +9460,10 @@ finish_struct (location_t loc, tree t, tree fieldlist, tree attributes,
TREE_TYPE (field)
= c_build_bitfield_integer_type (width,
TYPE_UNSIGNED (type));
+ if (tree attr = c_hardbool_type_attr (type))
+ decl_attributes (&TREE_TYPE (field),
+ copy_list (attr),
+ 0, NULL_TREE);
SET_DECL_MODE (field, TYPE_MODE (TREE_TYPE (field)));
}
DECL_INITIAL (field) = NULL_TREE;
@@ -10290,10 +10285,10 @@ start_function (struct c_declspecs *declspecs, struct c_declarator *declarator,
}
if (warn_about_return_type)
- warn_defaults_to (loc, flag_isoc99 ? OPT_Wimplicit_int
- : (warn_return_type > 0 ? OPT_Wreturn_type
- : OPT_Wimplicit_int),
- "return type defaults to %<int%>");
+ permerror_opt (loc, flag_isoc99 ? OPT_Wimplicit_int
+ : (warn_return_type > 0 ? OPT_Wreturn_type
+ : OPT_Wimplicit_int),
+ "return type defaults to %<int%>");
/* Make the init_value nonzero so pushdecl knows this is not tentative.
error_mark_node is replaced below (in pop_scope) with the BLOCK. */
@@ -10635,9 +10630,9 @@ store_parm_decls_oldstyle (tree fndecl, const struct c_arg_info *arg_info)
warn_if_shadowing (decl);
if (flag_isoc99)
- pedwarn (DECL_SOURCE_LOCATION (decl),
- OPT_Wimplicit_int, "type of %qD defaults to %<int%>",
- decl);
+ permerror_opt (DECL_SOURCE_LOCATION (decl),
+ OPT_Wimplicit_int, "type of %qD defaults to %<int%>",
+ decl);
else
warning_at (DECL_SOURCE_LOCATION (decl),
OPT_Wmissing_parameter_type,
diff --git a/gcc/c/c-objc-common.h b/gcc/c/c-objc-common.h
index 63aff70..426d938 100644
--- a/gcc/c/c-objc-common.h
+++ b/gcc/c/c-objc-common.h
@@ -75,11 +75,15 @@ extern void c_register_features ();
#undef LANG_HOOKS_FINALIZE_EARLY_DEBUG
#define LANG_HOOKS_FINALIZE_EARLY_DEBUG c_common_finalize_early_debug
-/* Attribute hooks. */
-#undef LANG_HOOKS_COMMON_ATTRIBUTE_TABLE
-#define LANG_HOOKS_COMMON_ATTRIBUTE_TABLE c_common_attribute_table
-#undef LANG_HOOKS_FORMAT_ATTRIBUTE_TABLE
-#define LANG_HOOKS_FORMAT_ATTRIBUTE_TABLE c_common_format_attribute_table
+static const scoped_attribute_specs *const c_objc_attribute_table[] =
+{
+ &std_attribute_table,
+ &c_common_gnu_attribute_table,
+ &c_common_format_attribute_table
+};
+
+#undef LANG_HOOKS_ATTRIBUTE_TABLE
+#define LANG_HOOKS_ATTRIBUTE_TABLE c_objc_attribute_table
#undef LANG_HOOKS_TREE_DUMP_DUMP_TREE_FN
#define LANG_HOOKS_TREE_DUMP_DUMP_TREE_FN c_dump_tree
diff --git a/gcc/c/c-tree.h b/gcc/c/c-tree.h
index c6f38ec..d0bdc3d 100644
--- a/gcc/c/c-tree.h
+++ b/gcc/c/c-tree.h
@@ -910,6 +910,8 @@ extern vec<tree> incomplete_record_decls;
extern const char *c_get_sarif_source_language (const char *filename);
+extern const struct scoped_attribute_specs std_attribute_table;
+
#if CHECKING_P
namespace selftest {
extern void run_c_tests (void);
diff --git a/gcc/c/c-typeck.cc b/gcc/c/c-typeck.cc
index 1dbb447..8368939 100644
--- a/gcc/c/c-typeck.cc
+++ b/gcc/c/c-typeck.cc
@@ -1748,7 +1748,7 @@ array_to_pointer_conversion (location_t loc, tree exp)
if (!TREE_READONLY (decl) && !TREE_STATIC (decl))
warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat,
"converting an array compound literal to a pointer "
- "is ill-formed in C++");
+ "leads to a dangling pointer in C++");
}
adr = build_unary_op (loc, ADDR_EXPR, exp, true);
@@ -2065,6 +2065,35 @@ convert_lvalue_to_rvalue (location_t loc, struct c_expr exp,
exp.value = convert (build_qualified_type (TREE_TYPE (exp.value), TYPE_UNQUALIFIED), exp.value);
if (force_non_npc)
exp.value = build1 (NOP_EXPR, TREE_TYPE (exp.value), exp.value);
+
+ {
+ tree false_value, true_value;
+ if (convert_p && !error_operand_p (exp.value)
+ && c_hardbool_type_attr (TREE_TYPE (exp.value),
+ &false_value, &true_value))
+ {
+ tree t = save_expr (exp.value);
+
+ mark_exp_read (exp.value);
+
+ tree trapfn = builtin_decl_explicit (BUILT_IN_TRAP);
+ tree expr = build_call_expr_loc (loc, trapfn, 0);
+ expr = build_compound_expr (loc, expr, boolean_true_node);
+ expr = fold_build3_loc (loc, COND_EXPR, boolean_type_node,
+ fold_build2_loc (loc, NE_EXPR,
+ boolean_type_node,
+ t, true_value),
+ expr, boolean_true_node);
+ expr = fold_build3_loc (loc, COND_EXPR, boolean_type_node,
+ fold_build2_loc (loc, NE_EXPR,
+ boolean_type_node,
+ t, false_value),
+ expr, boolean_false_node);
+
+ exp.value = expr;
+ }
+ }
+
return exp;
}
@@ -5434,8 +5463,15 @@ build_conditional_expr (location_t colon_loc, tree ifexp, bool ifexp_bcp,
else
{
int qual = ENCODE_QUAL_ADDR_SPACE (as_common);
- if (emit_diagnostic (bltin1 && bltin2 ? DK_WARNING : DK_PEDWARN,
- colon_loc, OPT_Wincompatible_pointer_types,
+ diagnostic_t kind = DK_PERMERROR;
+ if (!flag_isoc99)
+ /* This downgrade to a warning ensures that -std=gnu89
+ -pedantic-errors does not flag these mismatches between
+ builtins as errors (as DK_PERMERROR would). ISO C99
+ and later do not have implicit function declarations,
+ so the mismatch cannot occur naturally there. */
+ kind = bltin1 && bltin2 ? DK_WARNING : DK_PEDWARN;
+ if (emit_diagnostic (kind, colon_loc, OPT_Wincompatible_pointer_types,
"pointer type mismatch "
"in conditional expression"))
{
@@ -5450,8 +5486,9 @@ build_conditional_expr (location_t colon_loc, tree ifexp, bool ifexp_bcp,
&& (code2 == INTEGER_TYPE || code2 == BITINT_TYPE))
{
if (!null_pointer_constant_p (orig_op2))
- pedwarn (colon_loc, OPT_Wint_conversion,
- "pointer/integer type mismatch in conditional expression");
+ permerror_opt (colon_loc, OPT_Wint_conversion,
+ "pointer/integer type mismatch "
+ "in conditional expression");
else
{
op2 = null_pointer_node;
@@ -5462,8 +5499,9 @@ build_conditional_expr (location_t colon_loc, tree ifexp, bool ifexp_bcp,
&& (code1 == INTEGER_TYPE || code1 == BITINT_TYPE))
{
if (!null_pointer_constant_p (orig_op1))
- pedwarn (colon_loc, OPT_Wint_conversion,
- "pointer/integer type mismatch in conditional expression");
+ permerror_opt (colon_loc, OPT_Wint_conversion,
+ "pointer/integer type mismatch "
+ "in conditional expression");
else
{
op1 = null_pointer_node;
@@ -6559,28 +6597,48 @@ error_init (location_t loc, const char *gmsgid, ...)
inform (loc, "(near initialization for %qs)", ofwhat);
}
-/* Issue a pedantic warning for a bad initializer component. OPT is
- the option OPT_* (from options.h) controlling this warning or 0 if
- it is unconditionally given. GMSGID identifies the message. The
- component name is taken from the spelling stack. */
+/* Used to implement pedwarn_init and permerror_init. */
static void ATTRIBUTE_GCC_DIAG (3,0)
-pedwarn_init (location_t loc, int opt, const char *gmsgid, ...)
+pedwarn_permerror_init (location_t loc, int opt, const char *gmsgid,
+ va_list *ap, diagnostic_t kind)
{
/* Use the location where a macro was expanded rather than where
it was defined to make sure macros defined in system headers
but used incorrectly elsewhere are diagnosed. */
location_t exploc = expansion_point_location_if_in_system_header (loc);
auto_diagnostic_group d;
- va_list ap;
- va_start (ap, gmsgid);
- bool warned = emit_diagnostic_valist (DK_PEDWARN, exploc, opt, gmsgid, &ap);
- va_end (ap);
+ bool warned = emit_diagnostic_valist (kind, exploc, opt, gmsgid, ap);
char *ofwhat = print_spelling ((char *) alloca (spelling_length () + 1));
if (*ofwhat && warned)
inform (exploc, "(near initialization for %qs)", ofwhat);
}
+/* Issue a pedantic warning for a bad initializer component. OPT is
+ the option OPT_* (from options.h) controlling this warning or 0 if
+ it is unconditionally given. GMSGID identifies the message. The
+ component name is taken from the spelling stack. */
+
+static void ATTRIBUTE_GCC_DIAG (3,0)
+pedwarn_init (location_t loc, int opt, const char *gmsgid, ...)
+{
+ va_list ap;
+ va_start (ap, gmsgid);
+ pedwarn_permerror_init (loc, opt, gmsgid, &ap, DK_PEDWARN);
+ va_end (ap);
+}
+
+/* Like pedwarn_init, but issue a permerror. */
+
+static void ATTRIBUTE_GCC_DIAG (3,0)
+permerror_init (location_t loc, int opt, const char *gmsgid, ...)
+{
+ va_list ap;
+ va_start (ap, gmsgid);
+ pedwarn_permerror_init (loc, opt, gmsgid, &ap, DK_PERMERROR);
+ va_end (ap);
+}
+
/* Issue a warning for a bad initializer component.
OPT is the OPT_W* value corresponding to the warning option that
@@ -7551,46 +7609,47 @@ convert_for_assignment (location_t location, location_t expr_loc, tree type,
auto_diagnostic_group d;
range_label_for_type_mismatch rhs_label (rhstype, type);
gcc_rich_location richloc (expr_loc, &rhs_label);
- if (pedwarn (&richloc, OPT_Wincompatible_pointer_types,
- "passing argument %d of %qE from incompatible "
- "pointer type", parmnum, rname))
+ if (permerror_opt (&richloc, OPT_Wincompatible_pointer_types,
+ "passing argument %d of %qE from "
+ "incompatible pointer type",
+ parmnum, rname))
inform_for_arg (fundecl, expr_loc, parmnum, type, rhstype);
}
break;
case ic_assign:
if (bltin)
- pedwarn (location, OPT_Wincompatible_pointer_types,
- "assignment to %qT from pointer to "
- "%qD with incompatible type %qT",
- type, bltin, rhstype);
+ permerror_opt (location, OPT_Wincompatible_pointer_types,
+ "assignment to %qT from pointer to "
+ "%qD with incompatible type %qT",
+ type, bltin, rhstype);
else
- pedwarn (location, OPT_Wincompatible_pointer_types,
- "assignment to %qT from incompatible pointer type %qT",
- type, rhstype);
+ permerror_opt (location, OPT_Wincompatible_pointer_types,
+ "assignment to %qT from incompatible pointer "
+ "type %qT", type, rhstype);
break;
case ic_init:
case ic_init_const:
if (bltin)
- pedwarn_init (location, OPT_Wincompatible_pointer_types,
- "initialization of %qT from pointer to "
- "%qD with incompatible type %qT",
- type, bltin, rhstype);
+ permerror_init (location, OPT_Wincompatible_pointer_types,
+ "initialization of %qT from pointer to "
+ "%qD with incompatible type %qT",
+ type, bltin, rhstype);
else
- pedwarn_init (location, OPT_Wincompatible_pointer_types,
- "initialization of %qT from incompatible "
- "pointer type %qT",
- type, rhstype);
+ permerror_init (location, OPT_Wincompatible_pointer_types,
+ "initialization of %qT from incompatible "
+ "pointer type %qT",
+ type, rhstype);
break;
case ic_return:
if (bltin)
- pedwarn (location, OPT_Wincompatible_pointer_types,
- "returning pointer to %qD of type %qT from "
- "a function with incompatible type %qT",
- bltin, rhstype, type);
+ permerror_opt (location, OPT_Wincompatible_pointer_types,
+ "returning pointer to %qD of type %qT from "
+ "a function with incompatible type %qT",
+ bltin, rhstype, type);
else
- pedwarn (location, OPT_Wincompatible_pointer_types,
- "returning %qT from a function with incompatible "
- "return type %qT", rhstype, type);
+ permerror_opt (location, OPT_Wincompatible_pointer_types,
+ "returning %qT from a function with "
+ "incompatible return type %qT", rhstype, type);
break;
default:
gcc_unreachable ();
@@ -7630,27 +7689,28 @@ convert_for_assignment (location_t location, location_t expr_loc, tree type,
auto_diagnostic_group d;
range_label_for_type_mismatch rhs_label (rhstype, type);
gcc_rich_location richloc (expr_loc, &rhs_label);
- if (pedwarn (&richloc, OPT_Wint_conversion,
- "passing argument %d of %qE makes pointer from "
- "integer without a cast", parmnum, rname))
+ if (permerror_opt (&richloc, OPT_Wint_conversion,
+ "passing argument %d of %qE makes pointer "
+ "from integer without a cast", parmnum, rname))
inform_for_arg (fundecl, expr_loc, parmnum, type, rhstype);
}
break;
case ic_assign:
- pedwarn (location, OPT_Wint_conversion,
- "assignment to %qT from %qT makes pointer from integer "
- "without a cast", type, rhstype);
+ permerror_opt (location, OPT_Wint_conversion,
+ "assignment to %qT from %qT makes pointer from "
+ "integer without a cast", type, rhstype);
break;
case ic_init:
case ic_init_const:
- pedwarn_init (location, OPT_Wint_conversion,
- "initialization of %qT from %qT makes pointer from "
- "integer without a cast", type, rhstype);
+ permerror_init (location, OPT_Wint_conversion,
+ "initialization of %qT from %qT makes pointer "
+ "from integer without a cast", type, rhstype);
break;
case ic_return:
- pedwarn (location, OPT_Wint_conversion, "returning %qT from a "
- "function with return type %qT makes pointer from "
- "integer without a cast", rhstype, type);
+ permerror_init (location, OPT_Wint_conversion,
+ "returning %qT from a function with return type "
+ "%qT makes pointer from integer without a cast",
+ rhstype, type);
break;
default:
gcc_unreachable ();
@@ -7668,27 +7728,27 @@ convert_for_assignment (location_t location, location_t expr_loc, tree type,
auto_diagnostic_group d;
range_label_for_type_mismatch rhs_label (rhstype, type);
gcc_rich_location richloc (expr_loc, &rhs_label);
- if (pedwarn (&richloc, OPT_Wint_conversion,
- "passing argument %d of %qE makes integer from "
- "pointer without a cast", parmnum, rname))
+ if (permerror_opt (&richloc, OPT_Wint_conversion,
+ "passing argument %d of %qE makes integer from "
+ "pointer without a cast", parmnum, rname))
inform_for_arg (fundecl, expr_loc, parmnum, type, rhstype);
}
break;
case ic_assign:
- pedwarn (location, OPT_Wint_conversion,
- "assignment to %qT from %qT makes integer from pointer "
- "without a cast", type, rhstype);
+ permerror_opt (location, OPT_Wint_conversion,
+ "assignment to %qT from %qT makes integer from "
+ "pointer without a cast", type, rhstype);
break;
case ic_init:
case ic_init_const:
- pedwarn_init (location, OPT_Wint_conversion,
- "initialization of %qT from %qT makes integer from "
- "pointer without a cast", type, rhstype);
+ permerror_init (location, OPT_Wint_conversion,
+ "initialization of %qT from %qT makes integer "
+ "from pointer without a cast", type, rhstype);
break;
case ic_return:
- pedwarn (location, OPT_Wint_conversion, "returning %qT from a "
- "function with return type %qT makes integer from "
- "pointer without a cast", rhstype, type);
+ permerror_opt (location, OPT_Wint_conversion, "returning %qT from a "
+ "function with return type %qT makes integer from "
+ "pointer without a cast", rhstype, type);
break;
default:
gcc_unreachable ();
@@ -8396,7 +8456,7 @@ digest_init (location_t init_loc, tree type, tree init, tree origtype,
}
}
- if (code == VECTOR_TYPE)
+ if (code == VECTOR_TYPE || c_hardbool_type_attr (type))
/* Although the types are compatible, we may require a
conversion. */
inside_init = convert (type, inside_init);
@@ -11182,7 +11242,7 @@ c_finish_return (location_t loc, tree retval, tree origtype)
&& valtype != NULL_TREE && TREE_CODE (valtype) != VOID_TYPE)
{
no_warning = true;
- if (emit_diagnostic (flag_isoc99 ? DK_PEDWARN : DK_WARNING,
+ if (emit_diagnostic (flag_isoc99 ? DK_PERMERROR : DK_WARNING,
loc, OPT_Wreturn_mismatch,
"%<return%> with no value,"
" in function returning non-void"))
@@ -11195,7 +11255,7 @@ c_finish_return (location_t loc, tree retval, tree origtype)
current_function_returns_null = 1;
bool warned_here;
if (TREE_CODE (TREE_TYPE (retval)) != VOID_TYPE)
- warned_here = pedwarn
+ warned_here = permerror_opt
(xloc, OPT_Wreturn_mismatch,
"%<return%> with a value, in function returning void");
else
@@ -16203,6 +16263,7 @@ c_build_qualified_type (tree type, int type_quals, tree orig_qual_type,
t = build_variant_type_copy (type);
TREE_TYPE (t) = element_type;
+ TYPE_ADDR_SPACE (t) = TYPE_ADDR_SPACE (element_type);
if (TYPE_STRUCTURAL_EQUALITY_P (element_type)
|| (domain && TYPE_STRUCTURAL_EQUALITY_P (domain)))
diff --git a/gcc/c/gimple-parser.cc b/gcc/c/gimple-parser.cc
index f43c039..72bff1c 100644
--- a/gcc/c/gimple-parser.cc
+++ b/gcc/c/gimple-parser.cc
@@ -280,7 +280,13 @@ c_parser_parse_gimple_body (c_parser *cparser, char *gimple_pass,
for (tree var = BIND_EXPR_VARS (stmt); var; var = DECL_CHAIN (var))
if (VAR_P (var)
&& !DECL_EXTERNAL (var))
- add_local_decl (cfun, var);
+ {
+ add_local_decl (cfun, var);
+ /* When the middle-end re-gimplifies any expression we might
+ run into the assertion that we've seen the decl in a BIND. */
+ if (!TREE_STATIC (var))
+ DECL_SEEN_IN_BIND_EXPR_P (var) = 1;
+ }
/* We have a CFG. Build the edges. */
for (unsigned i = 0; i < parser.edges.length (); ++i)
{
diff --git a/gcc/calls.cc b/gcc/calls.cc
index 9edb583..4a1aea1 100644
--- a/gcc/calls.cc
+++ b/gcc/calls.cc
@@ -2502,7 +2502,8 @@ can_implement_as_sibling_call_p (tree exp,
tree addr,
const args_size &args_size)
{
- if (!targetm.have_sibcall_epilogue ())
+ if (!targetm.have_sibcall_epilogue ()
+ && !targetm.emit_epilogue_for_sibcall)
{
maybe_complain_about_tail_call
(exp,
@@ -3562,15 +3563,26 @@ expand_call (tree exp, rtx target, int ignore)
sibcall_failure = true;
}
+ /* Set up the next argument register. For sibling calls on machines
+ with register windows this should be the incoming register. */
+ if (pass == 0)
+ next_arg_reg = targetm.calls.function_incoming_arg
+ (args_so_far, function_arg_info::end_marker ());
+ else
+ next_arg_reg = targetm.calls.function_arg
+ (args_so_far, function_arg_info::end_marker ());
+
+ targetm.calls.start_call_args (args_so_far);
+
bool any_regs = false;
for (i = 0; i < num_actuals; i++)
if (args[i].reg != NULL_RTX)
{
any_regs = true;
- targetm.calls.call_args (args[i].reg, funtype);
+ targetm.calls.call_args (args_so_far, args[i].reg, funtype);
}
if (!any_regs)
- targetm.calls.call_args (pc_rtx, funtype);
+ targetm.calls.call_args (args_so_far, pc_rtx, funtype);
/* Figure out the register where the value, if any, will come back. */
valreg = 0;
@@ -3633,15 +3645,6 @@ expand_call (tree exp, rtx target, int ignore)
later safely search backwards to find the CALL_INSN. */
before_call = get_last_insn ();
- /* Set up next argument register. For sibling calls on machines
- with register windows this should be the incoming register. */
- if (pass == 0)
- next_arg_reg = targetm.calls.function_incoming_arg
- (args_so_far, function_arg_info::end_marker ());
- else
- next_arg_reg = targetm.calls.function_arg
- (args_so_far, function_arg_info::end_marker ());
-
if (pass == 1 && (return_flags & ERF_RETURNS_ARG))
{
int arg_nr = return_flags & ERF_RETURN_ARG_MASK;
@@ -3940,7 +3943,7 @@ expand_call (tree exp, rtx target, int ignore)
for (i = 0; i < num_actuals; ++i)
free (args[i].aligned_regs);
- targetm.calls.end_call_args ();
+ targetm.calls.end_call_args (args_so_far);
insns = get_insns ();
end_sequence ();
@@ -4498,17 +4501,9 @@ emit_library_call_value_1 (int retval, rtx orgfun, rtx value,
}
#endif
- /* When expanding a normal call, args are stored in push order,
- which is the reverse of what we have here. */
- bool any_regs = false;
- for (int i = nargs; i-- > 0; )
- if (argvec[i].reg != NULL_RTX)
- {
- targetm.calls.call_args (argvec[i].reg, NULL_TREE);
- any_regs = true;
- }
- if (!any_regs)
- targetm.calls.call_args (pc_rtx, NULL_TREE);
+ rtx call_cookie
+ = targetm.calls.function_arg (args_so_far,
+ function_arg_info::end_marker ());
/* Push the args that need to be pushed. */
@@ -4626,6 +4621,20 @@ emit_library_call_value_1 (int retval, rtx orgfun, rtx value,
fun = prepare_call_address (NULL, fun, NULL, &call_fusage, 0, 0);
+ targetm.calls.start_call_args (args_so_far);
+
+ /* When expanding a normal call, args are stored in push order,
+ which is the reverse of what we have here. */
+ bool any_regs = false;
+ for (int i = nargs; i-- > 0; )
+ if (argvec[i].reg != NULL_RTX)
+ {
+ targetm.calls.call_args (args_so_far, argvec[i].reg, NULL_TREE);
+ any_regs = true;
+ }
+ if (!any_regs)
+ targetm.calls.call_args (args_so_far, pc_rtx, NULL_TREE);
+
/* Now load any reg parms into their regs. */
/* ARGNUM indexes the ARGVEC array in the order in which the arguments
@@ -4732,10 +4741,7 @@ emit_library_call_value_1 (int retval, rtx orgfun, rtx value,
get_identifier (XSTR (orgfun, 0)),
build_function_type (tfom, NULL_TREE),
original_args_size.constant, args_size.constant,
- struct_value_size,
- targetm.calls.function_arg (args_so_far,
- function_arg_info::end_marker ()),
- valreg,
+ struct_value_size, call_cookie, valreg,
old_inhibit_defer_pop + 1, call_fusage, flags, args_so_far);
if (flag_ipa_ra)
@@ -4755,7 +4761,7 @@ emit_library_call_value_1 (int retval, rtx orgfun, rtx value,
valreg = gen_rtx_REG (TYPE_MODE (tfom), REGNO (valreg));
}
- targetm.calls.end_call_args ();
+ targetm.calls.end_call_args (args_so_far);
/* For calls to `setjmp', etc., inform function.cc:setjmp_warnings
that it should complain if nonvolatile values are live. For
diff --git a/gcc/cfgexpand.cc b/gcc/cfgexpand.cc
index 3fee256..b860be8 100644
--- a/gcc/cfgexpand.cc
+++ b/gcc/cfgexpand.cc
@@ -2874,6 +2874,7 @@ expand_asm_loc (tree string, int vol, location_t locus)
auto_vec<rtx> input_rvec, output_rvec;
auto_vec<machine_mode> input_mode;
auto_vec<const char *> constraints;
+ auto_vec<rtx> use_rvec;
auto_vec<rtx> clobber_rvec;
HARD_REG_SET clobbered_regs;
CLEAR_HARD_REG_SET (clobbered_regs);
@@ -2883,16 +2884,20 @@ expand_asm_loc (tree string, int vol, location_t locus)
if (targetm.md_asm_adjust)
targetm.md_asm_adjust (output_rvec, input_rvec, input_mode,
- constraints, clobber_rvec, clobbered_regs,
- locus);
+ constraints, use_rvec, clobber_rvec,
+ clobbered_regs, locus);
asm_op = body;
nclobbers = clobber_rvec.length ();
- body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (1 + nclobbers));
+ auto nuses = use_rvec.length ();
+ body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (1 + nuses + nclobbers));
- XVECEXP (body, 0, 0) = asm_op;
- for (i = 0; i < nclobbers; i++)
- XVECEXP (body, 0, i + 1) = gen_rtx_CLOBBER (VOIDmode, clobber_rvec[i]);
+ i = 0;
+ XVECEXP (body, 0, i++) = asm_op;
+ for (rtx use : use_rvec)
+ XVECEXP (body, 0, i++) = gen_rtx_USE (VOIDmode, use);
+ for (rtx clobber : clobber_rvec)
+ XVECEXP (body, 0, i++) = gen_rtx_CLOBBER (VOIDmode, clobber);
}
emit_insn (body);
@@ -3444,11 +3449,12 @@ expand_asm_stmt (gasm *stmt)
maintaining source-level compatibility means automatically clobbering
the flags register. */
rtx_insn *after_md_seq = NULL;
+ auto_vec<rtx> use_rvec;
if (targetm.md_asm_adjust)
after_md_seq
= targetm.md_asm_adjust (output_rvec, input_rvec, input_mode,
- constraints, clobber_rvec, clobbered_regs,
- locus);
+ constraints, use_rvec, clobber_rvec,
+ clobbered_regs, locus);
/* Do not allow the hook to change the output and input count,
lest it mess up the operand numbering. */
@@ -3456,7 +3462,8 @@ expand_asm_stmt (gasm *stmt)
gcc_assert (input_rvec.length() == ninputs);
gcc_assert (constraints.length() == noutputs + ninputs);
- /* But it certainly can adjust the clobbers. */
+ /* But it certainly can adjust the uses and clobbers. */
+ unsigned nuses = use_rvec.length ();
unsigned nclobbers = clobber_rvec.length ();
/* Third pass checks for easy conflicts. */
@@ -3528,7 +3535,7 @@ expand_asm_stmt (gasm *stmt)
ARGVEC CONSTRAINTS OPNAMES))
If there is more than one, put them inside a PARALLEL. */
- if (noutputs == 0 && nclobbers == 0)
+ if (noutputs == 0 && nuses == 0 && nclobbers == 0)
{
/* No output operands: put in a raw ASM_OPERANDS rtx. */
if (nlabels > 0)
@@ -3536,7 +3543,7 @@ expand_asm_stmt (gasm *stmt)
else
emit_insn (body);
}
- else if (noutputs == 1 && nclobbers == 0)
+ else if (noutputs == 1 && nuses == 0 && nclobbers == 0)
{
ASM_OPERANDS_OUTPUT_CONSTRAINT (body) = constraints[0];
if (nlabels > 0)
@@ -3552,7 +3559,8 @@ expand_asm_stmt (gasm *stmt)
if (num == 0)
num = 1;
- body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num + nclobbers));
+ body = gen_rtx_PARALLEL (VOIDmode,
+ rtvec_alloc (num + nuses + nclobbers));
/* For each output operand, store a SET. */
for (i = 0; i < noutputs; ++i)
@@ -3579,6 +3587,11 @@ expand_asm_stmt (gasm *stmt)
if (i == 0)
XVECEXP (body, 0, i++) = obody;
+ /* Add the uses specified by the target hook. No checking should
+ be needed since this doesn't come directly from user code. */
+ for (rtx use : use_rvec)
+ XVECEXP (body, 0, i++) = gen_rtx_USE (VOIDmode, use);
+
/* Store (clobber REG) for each clobbered register specified. */
for (unsigned j = 0; j < nclobbers; ++j)
{
diff --git a/gcc/cfgloopmanip.h b/gcc/cfgloopmanip.h
index 2dda504..45aed0d 100644
--- a/gcc/cfgloopmanip.h
+++ b/gcc/cfgloopmanip.h
@@ -47,6 +47,7 @@ extern class loop *create_empty_loop_on_edge (edge, tree, tree, tree, tree,
extern void unloop (class loop *, bool *, bitmap);
extern void unloop_loops (vec<class loop *> &loops_to_unloop,
vec<int> &loops_to_unloop_nunroll,
+ vec<edge> &edges_to_remove,
bitmap loop_closed_ssa_invalidated,
bool *irred_invalidated);
extern void copy_loop_info (class loop *loop, class loop *target);
diff --git a/gcc/cfgrtl.cc b/gcc/cfgrtl.cc
index abcb472..2a3f853 100644
--- a/gcc/cfgrtl.cc
+++ b/gcc/cfgrtl.cc
@@ -4385,7 +4385,7 @@ duplicate_insn_chain (rtx_insn *from, rtx_insn *to,
{
gcc_assert
(MR_DEPENDENCE_CLIQUE (op) <= cfun->last_clique);
- newc = ++cfun->last_clique;
+ newc = get_new_clique (cfun);
}
/* We cannot adjust MR_DEPENDENCE_CLIQUE in-place
since MEM_EXPR is shared so make a copy and
diff --git a/gcc/cgraph.h b/gcc/cgraph.h
index cfdd9f6..2b32055 100644
--- a/gcc/cgraph.h
+++ b/gcc/cgraph.h
@@ -153,7 +153,7 @@ public:
void remove (void);
/* Undo any definition or use of the symbol. */
- void reset (void);
+ void reset (bool preserve_comdat_group = false);
/* Dump symtab node to F. */
void dump (FILE *f);
diff --git a/gcc/cgraphunit.cc b/gcc/cgraphunit.cc
index bccd2f2..9a550a5 100644
--- a/gcc/cgraphunit.cc
+++ b/gcc/cgraphunit.cc
@@ -384,7 +384,7 @@ symbol_table::process_new_functions (void)
functions or variables. */
void
-symtab_node::reset (void)
+symtab_node::reset (bool preserve_comdat_group)
{
/* Reset our data structures so we can analyze the function again. */
analyzed = false;
@@ -395,7 +395,8 @@ symtab_node::reset (void)
cpp_implicit_alias = false;
remove_all_references ();
- remove_from_same_comdat_group ();
+ if (!preserve_comdat_group)
+ remove_from_same_comdat_group ();
if (cgraph_node *cn = dyn_cast <cgraph_node *> (this))
{
diff --git a/gcc/common.opt b/gcc/common.opt
index 736a465..f070aff 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -1955,6 +1955,40 @@ finline-atomics
Common Var(flag_inline_atomics) Init(1) Optimization
Inline __atomic operations when a lock free instruction sequence is available.
+finline-stringops
+Common RejectNegative Enum(ilsop_fn) Var(flag_inline_stringops, ILSOP_ALL) Enum(ilsop_fn) Init(ILSOP_NONE) Optimization Undocumented
+
+fno-inline-stringops
+Common RejectNegative Enum(ilsop_fn) Var(flag_inline_stringops, ILSOP_NONE) Enum(ilsop_fn) Optimization Undocumented
+
+finline-stringops=
+Common Joined Var(flag_inline_stringops) EnumSet Enum(ilsop_fn) Optimization
+-finline-stringops[=memcmp|memcpy|memmove|memset]
+Expand supported mem/str operations inline, even if against optimization.
+
+Enum
+Name(ilsop_fn) Type(enum ilsop_fn) UnknownError(unavailable stringop for inlining %qs)
+
+; This is not part of any set.
+; EnumValue
+; Enum(ilsop_fn) String(none) Value(ILSOP_NONE)
+
+EnumValue
+Enum(ilsop_fn) String(memcmp) Value(ILSOP_MEMCMP) Set(1)
+
+EnumValue
+Enum(ilsop_fn) String(memcpy) Value(ILSOP_MEMCPY) Set(2)
+
+EnumValue
+Enum(ilsop_fn) String(memmove) Value(ILSOP_MEMMOVE) Set(3)
+
+EnumValue
+Enum(ilsop_fn) String(memset) Value(ILSOP_MEMSET) Set(4)
+
+; This is not part of any set either.
+; EnumValue
+; Enum(ilsop_fn) String(all) Value(ILSOP_ALL)
+
fcf-protection
Common RejectNegative Alias(fcf-protection=,full)
@@ -2883,6 +2917,35 @@ fstrict-overflow
Common
Treat signed overflow as undefined. Negated as -fwrapv -fwrapv-pointer.
+fstrub=disable
+Common RejectNegative Var(flag_strub, 0)
+Disable stack scrub entirely, disregarding strub attributes.
+
+fstrub=strict
+Common RejectNegative Var(flag_strub, -4)
+Enable stack scrub as per attributes, with strict call checking.
+
+; If any strub-enabling attribute is seen when the default or strict
+; initializer values are in effect, flag_strub is bumped up by 2. The
+; scrub mode gate function will then bump these initializer values to
+; 0 if no strub-enabling attribute is seen. This minimizes the strub
+; overhead.
+fstrub=relaxed
+Common RejectNegative Var(flag_strub, -3) Init(-3)
+Restore default strub mode: as per attributes, with relaxed checking.
+
+fstrub=all
+Common RejectNegative Var(flag_strub, 3)
+Enable stack scrubbing for all viable functions.
+
+fstrub=at-calls
+Common RejectNegative Var(flag_strub, 1)
+Enable at-calls stack scrubbing for all viable functions.
+
+fstrub=internal
+Common RejectNegative Var(flag_strub, 2)
+Enable internal stack scrubbing for all viable functions.
+
fsync-libcalls
Common Var(flag_sync_libcalls) Init(1)
Implement __atomic operations via libcalls to legacy __sync functions.
diff --git a/gcc/common/config/i386/cpuinfo.h b/gcc/common/config/i386/cpuinfo.h
index f90fb4d..a1eb285 100644
--- a/gcc/common/config/i386/cpuinfo.h
+++ b/gcc/common/config/i386/cpuinfo.h
@@ -861,8 +861,6 @@ get_available_features (struct __processor_model *cpu_model,
set_feature (FEATURE_IBT);
if (edx & bit_UINTR)
set_feature (FEATURE_UINTR);
- if (edx & bit_USER_MSR)
- set_feature (FEATURE_USER_MSR);
if (amx_usable)
{
if (edx & bit_AMX_TILE)
@@ -921,6 +919,8 @@ get_available_features (struct __processor_model *cpu_model,
set_feature (FEATURE_PREFETCHI);
if (eax & bit_RAOINT)
set_feature (FEATURE_RAOINT);
+ if (edx & bit_USER_MSR)
+ set_feature (FEATURE_USER_MSR);
if (avx_usable)
{
if (eax & bit_AVXVNNI)
diff --git a/gcc/common/config/riscv/riscv-common.cc b/gcc/common/config/riscv/riscv-common.cc
index ded85b4..4d5a2f8 100644
--- a/gcc/common/config/riscv/riscv-common.cc
+++ b/gcc/common/config/riscv/riscv-common.cc
@@ -38,11 +38,36 @@ along with GCC; see the file COPYING3. If not see
#define TARGET_DEFAULT_TARGET_FLAGS (MASK_BIG_ENDIAN)
#endif
+typedef bool (*riscv_implied_predicator_t) (const riscv_subset_list *);
+
/* Type for implied ISA info. */
struct riscv_implied_info_t
{
+ constexpr riscv_implied_info_t (const char *ext, const char *implied_ext,
+ riscv_implied_predicator_t predicator
+ = nullptr)
+ : ext (ext), implied_ext (implied_ext), predicator (predicator){};
+
+ bool match (const riscv_subset_list *subset_list, const char *ext_name) const
+ {
+ if (strcmp (ext_name, ext) != 0)
+ return false;
+
+ if (predicator && !predicator (subset_list))
+ return false;
+
+ return true;
+ }
+
+ bool match (const riscv_subset_list *subset_list,
+ const riscv_subset_t *subset) const
+ {
+ return match (subset_list, subset->name.c_str());
+ }
+
const char *ext;
const char *implied_ext;
+ riscv_implied_predicator_t predicator;
};
/* Implied ISA info, must end with NULL sentinel. */
@@ -106,7 +131,7 @@ static const riscv_implied_info_t riscv_implied_info[] =
{"zvkn", "zvkned"},
{"zvkn", "zvknhb"},
- {"zvkn", "zvbb"},
+ {"zvkn", "zvkb"},
{"zvkn", "zvkt"},
{"zvknc", "zvkn"},
{"zvknc", "zvbc"},
@@ -114,7 +139,7 @@ static const riscv_implied_info_t riscv_implied_info[] =
{"zvkng", "zvkg"},
{"zvks", "zvksed"},
{"zvks", "zvksh"},
- {"zvks", "zvbb"},
+ {"zvks", "zvkb"},
{"zvks", "zvkt"},
{"zvksc", "zvks"},
{"zvksc", "zvbc"},
@@ -143,6 +168,11 @@ static const riscv_implied_info_t riscv_implied_info[] =
{"zcmp", "zca"},
{"zcmt", "zca"},
{"zcmt", "zicsr"},
+ {"zcf", "f",
+ [] (const riscv_subset_list *subset_list) -> bool
+ {
+ return subset_list->xlen () == 32 && subset_list->lookup ("f");
+ }},
{"smaia", "ssaia"},
{"smstateen", "ssstateen"},
@@ -253,6 +283,7 @@ static const struct riscv_ext_version riscv_ext_version_table[] =
{"zvbb", ISA_SPEC_CLASS_NONE, 1, 0},
{"zvbc", ISA_SPEC_CLASS_NONE, 1, 0},
+ {"zvkb", ISA_SPEC_CLASS_NONE, 1, 0},
{"zvkg", ISA_SPEC_CLASS_NONE, 1, 0},
{"zvkned", ISA_SPEC_CLASS_NONE, 1, 0},
{"zvknha", ISA_SPEC_CLASS_NONE, 1, 0},
@@ -1093,7 +1124,7 @@ riscv_subset_list::handle_implied_ext (const char *ext)
implied_info->ext;
++implied_info)
{
- if (strcmp (ext, implied_info->ext) != 0)
+ if (!implied_info->match (this, ext))
continue;
/* Skip if implied extension already present. */
@@ -1131,7 +1162,7 @@ riscv_subset_list::check_implied_ext ()
for (implied_info = &riscv_implied_info[0]; implied_info->ext;
++implied_info)
{
- if (strcmp (itr->name.c_str(), implied_info->ext) != 0)
+ if (!implied_info->match (this, itr))
continue;
if (!lookup (implied_info->implied_ext))
@@ -1160,8 +1191,7 @@ riscv_subset_list::handle_combine_ext ()
for (implied_info = &riscv_implied_info[0]; implied_info->ext;
++implied_info)
{
- /* Skip if implied extension don't match combine extension */
- if (strcmp (combine_info->name, implied_info->ext) != 0)
+ if (!implied_info->match (this, combine_info->name))
continue;
if (lookup (implied_info->implied_ext))
@@ -1185,6 +1215,32 @@ riscv_subset_list::handle_combine_ext ()
}
}
+void
+riscv_subset_list::check_conflict_ext ()
+{
+ if (lookup ("zcf") && m_xlen == 64)
+ error_at (m_loc, "%<-march=%s%>: zcf extension supports in rv32 only",
+ m_arch);
+
+ if (lookup ("zfinx") && lookup ("f"))
+ error_at (m_loc,
+ "%<-march=%s%>: z*inx conflicts with floating-point "
+ "extensions",
+ m_arch);
+
+ /* 'H' hypervisor extension requires base ISA with 32 registers. */
+ if (lookup ("e") && lookup ("h"))
+ error_at (m_loc, "%<-march=%s%>: h extension requires i extension", m_arch);
+
+ if (lookup ("zcd"))
+ {
+ if (lookup ("zcmt"))
+ error_at (m_loc, "%<-march=%s%>: zcd conflicts with zcmt", m_arch);
+ if (lookup ("zcmp"))
+ error_at (m_loc, "%<-march=%s%>: zcd conflicts with zcmp", m_arch);
+ }
+}
+
/* Parsing function for multi-letter extensions.
Return Value:
@@ -1484,29 +1540,11 @@ riscv_subset_list::parse (const char *arch, location_t loc)
subset_list->handle_implied_ext (itr->name.c_str ());
}
- /* Zce only implies zcf when RV32 and 'f' extension exist. */
- if (subset_list->lookup ("zce") != NULL
- && subset_list->m_xlen == 32
- && subset_list->lookup ("f") != NULL
- && subset_list->lookup ("zcf") == NULL)
- subset_list->add ("zcf", false);
-
/* Make sure all implied extensions are included. */
gcc_assert (subset_list->check_implied_ext ());
subset_list->handle_combine_ext ();
-
- if (subset_list->lookup ("zcf") && subset_list->m_xlen == 64)
- error_at (loc, "%<-march=%s%>: zcf extension supports in rv32 only"
- , arch);
-
- if (subset_list->lookup ("zfinx") && subset_list->lookup ("f"))
- error_at (loc, "%<-march=%s%>: z*inx conflicts with floating-point "
- "extensions", arch);
-
- /* 'H' hypervisor extension requires base ISA with 32 registers. */
- if (subset_list->lookup ("e") && subset_list->lookup ("h"))
- error_at (loc, "%<-march=%s%>: h extension requires i extension", arch);
+ subset_list->check_conflict_ext ();
return subset_list;
@@ -1624,6 +1662,7 @@ static const riscv_ext_flag_table_t riscv_ext_flag_table[] =
{"zvbb", &gcc_options::x_riscv_zvb_subext, MASK_ZVBB},
{"zvbc", &gcc_options::x_riscv_zvb_subext, MASK_ZVBC},
+ {"zvkb", &gcc_options::x_riscv_zvb_subext, MASK_ZVKB},
{"zvkg", &gcc_options::x_riscv_zvk_subext, MASK_ZVKG},
{"zvkned", &gcc_options::x_riscv_zvk_subext, MASK_ZVKNED},
{"zvknha", &gcc_options::x_riscv_zvk_subext, MASK_ZVKNHA},
diff --git a/gcc/config.gcc b/gcc/config.gcc
index 3000379..6450448 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -345,11 +345,11 @@ m32c*-*-*)
;;
aarch64*-*-*)
cpu_type=aarch64
- extra_headers="arm_fp16.h arm_neon.h arm_bf16.h arm_acle.h arm_sve.h"
+ extra_headers="arm_fp16.h arm_neon.h arm_bf16.h arm_acle.h arm_sve.h arm_sme.h"
c_target_objs="aarch64-c.o"
cxx_target_objs="aarch64-c.o"
d_target_objs="aarch64-d.o"
- extra_objs="aarch64-builtins.o aarch-common.o aarch64-sve-builtins.o aarch64-sve-builtins-shapes.o aarch64-sve-builtins-base.o aarch64-sve-builtins-sve2.o cortex-a57-fma-steering.o aarch64-speculation.o falkor-tag-collision-avoidance.o aarch-bti-insert.o aarch64-cc-fusion.o"
+ extra_objs="aarch64-builtins.o aarch-common.o aarch64-sve-builtins.o aarch64-sve-builtins-shapes.o aarch64-sve-builtins-base.o aarch64-sve-builtins-sve2.o aarch64-sve-builtins-sme.o cortex-a57-fma-steering.o aarch64-speculation.o falkor-tag-collision-avoidance.o aarch-bti-insert.o aarch64-cc-fusion.o"
target_gtfiles="\$(srcdir)/config/aarch64/aarch64-builtins.cc \$(srcdir)/config/aarch64/aarch64-sve-builtins.h \$(srcdir)/config/aarch64/aarch64-sve-builtins.cc"
target_has_targetm_common=yes
;;
@@ -1770,7 +1770,6 @@ amdgcn-*-amdhsa)
native_system_header_dir=/include
extra_modes=gcn/gcn-modes.def
extra_objs="${extra_objs} gcn-tree.o"
- extra_gcc_objs="driver-gcn.o"
case "$host" in
x86_64*-*-linux-gnu )
if test "$ac_cv_search_dlopen" != no; then
@@ -5973,6 +5972,9 @@ case ${target} in
visium-*-*)
target_cpu_default2="TARGET_CPU_$with_cpu"
;;
+ x86_64-*-gnu*)
+ tmake_file="$tmake_file i386/t-gnu64"
+ ;;
esac
t=
diff --git a/gcc/config/aarch64/aarch64-builtins.cc b/gcc/config/aarch64/aarch64-builtins.cc
index 04f59fd..f780b10 100644
--- a/gcc/config/aarch64/aarch64-builtins.cc
+++ b/gcc/config/aarch64/aarch64-builtins.cc
@@ -47,6 +47,7 @@
#include "stringpool.h"
#include "attribs.h"
#include "gimple-fold.h"
+#include "builtins.h"
#define v8qi_UP E_V8QImode
#define v8di_UP E_V8DImode
@@ -808,6 +809,17 @@ enum aarch64_builtins
AARCH64_RBIT,
AARCH64_RBITL,
AARCH64_RBITLL,
+ /* System register builtins. */
+ AARCH64_RSR,
+ AARCH64_RSRP,
+ AARCH64_RSR64,
+ AARCH64_RSRF,
+ AARCH64_RSRF64,
+ AARCH64_WSR,
+ AARCH64_WSRP,
+ AARCH64_WSR64,
+ AARCH64_WSRF,
+ AARCH64_WSRF64,
AARCH64_BUILTIN_MAX
};
@@ -1798,6 +1810,65 @@ aarch64_init_rng_builtins (void)
AARCH64_BUILTIN_RNG_RNDRRS);
}
+/* Add builtins for reading system register. */
+static void
+aarch64_init_rwsr_builtins (void)
+{
+ tree fntype = NULL;
+ tree const_char_ptr_type
+ = build_pointer_type (build_type_variant (char_type_node, true, false));
+
+#define AARCH64_INIT_RWSR_BUILTINS_DECL(F, N, T) \
+ aarch64_builtin_decls[AARCH64_##F] \
+ = aarch64_general_add_builtin ("__builtin_aarch64_"#N, T, AARCH64_##F);
+
+ fntype
+ = build_function_type_list (uint32_type_node, const_char_ptr_type, NULL);
+ AARCH64_INIT_RWSR_BUILTINS_DECL (RSR, rsr, fntype);
+
+ fntype
+ = build_function_type_list (ptr_type_node, const_char_ptr_type, NULL);
+ AARCH64_INIT_RWSR_BUILTINS_DECL (RSRP, rsrp, fntype);
+
+ fntype
+ = build_function_type_list (uint64_type_node, const_char_ptr_type, NULL);
+ AARCH64_INIT_RWSR_BUILTINS_DECL (RSR64, rsr64, fntype);
+
+ fntype
+ = build_function_type_list (float_type_node, const_char_ptr_type, NULL);
+ AARCH64_INIT_RWSR_BUILTINS_DECL (RSRF, rsrf, fntype);
+
+ fntype
+ = build_function_type_list (double_type_node, const_char_ptr_type, NULL);
+ AARCH64_INIT_RWSR_BUILTINS_DECL (RSRF64, rsrf64, fntype);
+
+ fntype
+ = build_function_type_list (void_type_node, const_char_ptr_type,
+ uint32_type_node, NULL);
+
+ AARCH64_INIT_RWSR_BUILTINS_DECL (WSR, wsr, fntype);
+
+ fntype
+ = build_function_type_list (void_type_node, const_char_ptr_type,
+ const_ptr_type_node, NULL);
+ AARCH64_INIT_RWSR_BUILTINS_DECL (WSRP, wsrp, fntype);
+
+ fntype
+ = build_function_type_list (void_type_node, const_char_ptr_type,
+ uint64_type_node, NULL);
+ AARCH64_INIT_RWSR_BUILTINS_DECL (WSR64, wsr64, fntype);
+
+ fntype
+ = build_function_type_list (void_type_node, const_char_ptr_type,
+ float_type_node, NULL);
+ AARCH64_INIT_RWSR_BUILTINS_DECL (WSRF, wsrf, fntype);
+
+ fntype
+ = build_function_type_list (void_type_node, const_char_ptr_type,
+ double_type_node, NULL);
+ AARCH64_INIT_RWSR_BUILTINS_DECL (WSRF64, wsrf64, fntype);
+}
+
/* Initialize the memory tagging extension (MTE) builtins. */
struct
{
@@ -2019,6 +2090,8 @@ aarch64_general_init_builtins (void)
aarch64_init_rng_builtins ();
aarch64_init_data_intrinsics ();
+ aarch64_init_rwsr_builtins ();
+
tree ftype_jcvt
= build_function_type_list (intSI_type_node, double_type_node, NULL);
aarch64_builtin_decls[AARCH64_JSCVT]
@@ -2054,6 +2127,37 @@ aarch64_general_builtin_decl (unsigned code, bool)
return aarch64_builtin_decls[code];
}
+bool
+aarch64_general_check_builtin_call (location_t location, vec<location_t>,
+ unsigned int code, tree fndecl,
+ unsigned int nargs ATTRIBUTE_UNUSED, tree *args)
+{
+ switch (code)
+ {
+ case AARCH64_RSR:
+ case AARCH64_RSRP:
+ case AARCH64_RSR64:
+ case AARCH64_RSRF:
+ case AARCH64_RSRF64:
+ case AARCH64_WSR:
+ case AARCH64_WSRP:
+ case AARCH64_WSR64:
+ case AARCH64_WSRF:
+ case AARCH64_WSRF64:
+ tree addr = STRIP_NOPS (args[0]);
+ if (TREE_CODE (TREE_TYPE (addr)) != POINTER_TYPE
+ || TREE_CODE (addr) != ADDR_EXPR
+ || TREE_CODE (TREE_OPERAND (addr, 0)) != STRING_CST)
+ {
+ error_at (location, "first argument to %qD must be a string literal",
+ fndecl);
+ return false;
+ }
+ }
+ /* Default behavior. */
+ return true;
+}
+
typedef enum
{
SIMD_ARG_COPY_TO_REG,
@@ -2599,6 +2703,109 @@ aarch64_expand_rng_builtin (tree exp, rtx target, int fcode, int ignore)
return target;
}
+/* Expand the read/write system register builtin EXPs. */
+rtx
+aarch64_expand_rwsr_builtin (tree exp, rtx target, int fcode)
+{
+ tree arg0, arg1;
+ rtx const_str, input_val, subreg;
+ enum machine_mode mode;
+ class expand_operand ops[2];
+
+ arg0 = CALL_EXPR_ARG (exp, 0);
+
+ bool write_op = (fcode == AARCH64_WSR
+ || fcode == AARCH64_WSRP
+ || fcode == AARCH64_WSR64
+ || fcode == AARCH64_WSRF
+ || fcode == AARCH64_WSRF64);
+
+ /* Argument 0 (system register name) must be a string literal. */
+ gcc_assert (TREE_CODE (arg0) == ADDR_EXPR
+ && TREE_CODE (TREE_TYPE (arg0)) == POINTER_TYPE
+ && TREE_CODE (TREE_OPERAND (arg0, 0)) == STRING_CST);
+
+ const char *name_input = TREE_STRING_POINTER (TREE_OPERAND (arg0, 0));
+
+ tree len_tree = c_strlen (arg0, 1);
+ if (len_tree == NULL_TREE)
+ {
+ error_at (EXPR_LOCATION (exp), "invalid system register name provided");
+ return const0_rtx;
+ }
+
+ size_t len = TREE_INT_CST_LOW (len_tree);
+ char *sysreg_name = xstrdup (name_input);
+
+ for (unsigned pos = 0; pos <= len; pos++)
+ sysreg_name[pos] = TOLOWER (sysreg_name[pos]);
+
+ const char *name_output = aarch64_retrieve_sysreg (sysreg_name, write_op);
+ if (name_output == NULL)
+ {
+ error_at (EXPR_LOCATION (exp), "invalid system register name %qs",
+ sysreg_name);
+ return const0_rtx;
+ }
+
+ /* Assign the string corresponding to the system register name to an RTX. */
+ const_str = rtx_alloc (CONST_STRING);
+ PUT_CODE (const_str, CONST_STRING);
+ XSTR (const_str, 0) = ggc_strdup (name_output);
+
+ /* Set up expander operands and call instruction expansion. */
+ if (write_op)
+ {
+ arg1 = CALL_EXPR_ARG (exp, 1);
+ mode = TYPE_MODE (TREE_TYPE (arg1));
+ input_val = copy_to_mode_reg (mode, expand_normal (arg1));
+
+ switch (fcode)
+ {
+ case AARCH64_WSR:
+ case AARCH64_WSRP:
+ case AARCH64_WSR64:
+ case AARCH64_WSRF64:
+ subreg = lowpart_subreg (DImode, input_val, mode);
+ break;
+ case AARCH64_WSRF:
+ subreg = gen_lowpart_SUBREG (SImode, input_val);
+ subreg = gen_lowpart_SUBREG (DImode, subreg);
+ break;
+ }
+
+ create_fixed_operand (&ops[0], const_str);
+ create_input_operand (&ops[1], subreg, DImode);
+ expand_insn (CODE_FOR_aarch64_write_sysregdi, 2, ops);
+
+ return target;
+ }
+
+ /* Read operations are implied by !write_op. */
+ gcc_assert (call_expr_nargs (exp) == 1);
+
+ /* Emit the initial read_sysregdi rtx. */
+ create_output_operand (&ops[0], target, DImode);
+ create_fixed_operand (&ops[1], const_str);
+ expand_insn (CODE_FOR_aarch64_read_sysregdi, 2, ops);
+ target = ops[0].value;
+
+ /* Do any necessary post-processing on the result. */
+ switch (fcode)
+ {
+ case AARCH64_RSR:
+ case AARCH64_RSRP:
+ case AARCH64_RSR64:
+ case AARCH64_RSRF64:
+ return lowpart_subreg (TYPE_MODE (TREE_TYPE (exp)), target, DImode);
+ case AARCH64_RSRF:
+ subreg = gen_lowpart_SUBREG (SImode, target);
+ return gen_lowpart_SUBREG (SFmode, subreg);
+ default:
+ gcc_unreachable ();
+ }
+}
+
/* Expand an expression EXP that calls a MEMTAG built-in FCODE
with result going to TARGET. */
static rtx
@@ -2832,6 +3039,17 @@ aarch64_general_expand_builtin (unsigned int fcode, tree exp, rtx target,
case AARCH64_BUILTIN_RNG_RNDR:
case AARCH64_BUILTIN_RNG_RNDRRS:
return aarch64_expand_rng_builtin (exp, target, fcode, ignore);
+ case AARCH64_RSR:
+ case AARCH64_RSRP:
+ case AARCH64_RSR64:
+ case AARCH64_RSRF:
+ case AARCH64_RSRF64:
+ case AARCH64_WSR:
+ case AARCH64_WSRP:
+ case AARCH64_WSR64:
+ case AARCH64_WSRF:
+ case AARCH64_WSRF64:
+ return aarch64_expand_rwsr_builtin (exp, target, fcode);
}
if (fcode >= AARCH64_SIMD_BUILTIN_BASE && fcode <= AARCH64_SIMD_BUILTIN_MAX)
diff --git a/gcc/config/aarch64/aarch64-c.cc b/gcc/config/aarch64/aarch64-c.cc
index ab8844f..18422bb 100644
--- a/gcc/config/aarch64/aarch64-c.cc
+++ b/gcc/config/aarch64/aarch64-c.cc
@@ -72,6 +72,54 @@ aarch64_define_unconditional_macros (cpp_reader *pfile)
builtin_define_with_int_value ("__ARM_SIZEOF_WCHAR_T", WCHAR_TYPE_SIZE / 8);
builtin_define ("__GCC_ASM_FLAG_OUTPUTS__");
+
+ builtin_define ("__ARM_STATE_ZA");
+ builtin_define ("__ARM_STATE_ZT0");
+
+ /* Define keyword attributes like __arm_streaming as macros that expand
+ to the associated [[...]] attribute. Use __extension__ in the attribute
+ for C, since the [[...]] syntax was only added in C23. */
+#define DEFINE_ARM_KEYWORD_MACRO(NAME) \
+ builtin_define_with_value ("__arm_" NAME, \
+ lang_GNU_CXX () \
+ ? "[[arm::" NAME "]]" \
+ : "[[__extension__ arm::" NAME "]]", 0);
+
+ DEFINE_ARM_KEYWORD_MACRO ("streaming");
+ DEFINE_ARM_KEYWORD_MACRO ("streaming_compatible");
+ DEFINE_ARM_KEYWORD_MACRO ("locally_streaming");
+
+#undef DEFINE_ARM_KEYWORD_MACRO
+
+ /* Same for the keyword attributes that take arguments. The snag here
+ is that some old modes warn about or reject variadic arguments. */
+ auto *cpp_opts = cpp_get_options (parse_in);
+ if (!cpp_opts->traditional)
+ {
+ auto old_warn_variadic_macros = cpp_opts->warn_variadic_macros;
+ auto old_cpp_warn_c90_c99_compat = cpp_opts->cpp_warn_c90_c99_compat;
+
+ cpp_opts->warn_variadic_macros = false;
+ cpp_opts->cpp_warn_c90_c99_compat = 0;
+
+#define DEFINE_ARM_KEYWORD_MACRO_ARGS(NAME) \
+ builtin_define_with_value ("__arm_" NAME "(...)", \
+ lang_GNU_CXX () \
+ ? "[[arm::" NAME "(__VA_ARGS__)]]" \
+ : "[[__extension__ arm::" NAME \
+ "(__VA_ARGS__)]]", 0);
+
+ DEFINE_ARM_KEYWORD_MACRO_ARGS ("new");
+ DEFINE_ARM_KEYWORD_MACRO_ARGS ("preserves");
+ DEFINE_ARM_KEYWORD_MACRO_ARGS ("in");
+ DEFINE_ARM_KEYWORD_MACRO_ARGS ("out");
+ DEFINE_ARM_KEYWORD_MACRO_ARGS ("inout");
+
+#undef DEFINE_ARM_KEYWORD_MACRO_ARGS
+
+ cpp_opts->warn_variadic_macros = old_warn_variadic_macros;
+ cpp_opts->cpp_warn_c90_c99_compat = old_cpp_warn_c90_c99_compat;
+ }
}
/* Undefine/redefine macros that depend on the current backend state and may
@@ -207,6 +255,11 @@ aarch64_update_cpp_builtins (cpp_reader *pfile)
"__ARM_FEATURE_LS64", pfile);
aarch64_def_or_undef (AARCH64_ISA_RCPC, "__ARM_FEATURE_RCPC", pfile);
+ aarch64_def_or_undef (TARGET_SME, "__ARM_FEATURE_SME", pfile);
+ aarch64_def_or_undef (TARGET_SME_I16I64, "__ARM_FEATURE_SME_I16I64", pfile);
+ aarch64_def_or_undef (TARGET_SME_F64F64, "__ARM_FEATURE_SME_F64F64", pfile);
+ aarch64_def_or_undef (TARGET_SME2, "__ARM_FEATURE_SME2", pfile);
+
/* Not for ACLE, but required to keep "float.h" correct if we switch
target between implementations that do or do not support ARMv8.2-A
16-bit floating-point extensions. */
@@ -291,6 +344,8 @@ aarch64_pragma_aarch64 (cpp_reader *)
const char *name = TREE_STRING_POINTER (x);
if (strcmp (name, "arm_sve.h") == 0)
aarch64_sve::handle_arm_sve_h ();
+ else if (strcmp (name, "arm_sme.h") == 0)
+ aarch64_sve::handle_arm_sme_h ();
else if (strcmp (name, "arm_neon.h") == 0)
handle_arm_neon_h ();
else if (strcmp (name, "arm_acle.h") == 0)
@@ -339,8 +394,8 @@ aarch64_check_builtin_call (location_t loc, vec<location_t> arg_loc,
switch (code & AARCH64_BUILTIN_CLASS)
{
case AARCH64_BUILTIN_GENERAL:
- return true;
-
+ return aarch64_general_check_builtin_call (loc, arg_loc, subcode,
+ orig_fndecl, nargs, args);
case AARCH64_BUILTIN_SVE:
return aarch64_sve::check_builtin_call (loc, arg_loc, subcode,
orig_fndecl, nargs, args);
diff --git a/gcc/config/aarch64/aarch64-cores.def b/gcc/config/aarch64/aarch64-cores.def
index 16752b7..ad896a8 100644
--- a/gcc/config/aarch64/aarch64-cores.def
+++ b/gcc/config/aarch64/aarch64-cores.def
@@ -74,6 +74,7 @@ AARCH64_CORE("thunderxt83", thunderxt83, thunderx, V8A, (CRC, CRYPTO), thu
/* Ampere Computing ('\xC0') cores. */
AARCH64_CORE("ampere1", ampere1, cortexa57, V8_6A, (F16, RNG, AES, SHA3), ampere1, 0xC0, 0xac3, -1)
AARCH64_CORE("ampere1a", ampere1a, cortexa57, V8_6A, (F16, RNG, AES, SHA3, SM4, MEMTAG), ampere1a, 0xC0, 0xac4, -1)
+AARCH64_CORE("ampere1b", ampere1b, cortexa57, V8_7A, (F16, RNG, AES, SHA3, SM4, MEMTAG, CSSC), ampere1b, 0xC0, 0xac5, -1)
/* Do not swap around "emag" and "xgene1",
this order is required to handle variant correctly. */
AARCH64_CORE("emag", emag, xgene1, V8A, (CRC, CRYPTO), emag, 0x50, 0x000, 3)
diff --git a/gcc/config/aarch64/aarch64-cost-tables.h b/gcc/config/aarch64/aarch64-cost-tables.h
index 0cb638f..4c8da7f 100644
--- a/gcc/config/aarch64/aarch64-cost-tables.h
+++ b/gcc/config/aarch64/aarch64-cost-tables.h
@@ -882,4 +882,111 @@ const struct cpu_cost_table ampere1a_extra_costs =
}
};
+const struct cpu_cost_table ampere1b_extra_costs =
+{
+ /* ALU */
+ {
+ 0, /* arith. */
+ 0, /* logical. */
+ 0, /* shift. */
+ COSTS_N_INSNS (1), /* shift_reg. */
+ 0, /* arith_shift. */
+ COSTS_N_INSNS (1), /* arith_shift_reg. */
+ 0, /* log_shift. */
+ COSTS_N_INSNS (1), /* log_shift_reg. */
+ 0, /* extend. */
+ COSTS_N_INSNS (1), /* extend_arith. */
+ 0, /* bfi. */
+ 0, /* bfx. */
+ 0, /* clz. */
+ 0, /* rev. */
+ 0, /* non_exec. */
+ true /* non_exec_costs_exec. */
+ },
+ {
+ /* MULT SImode */
+ {
+ COSTS_N_INSNS (2), /* simple. */
+ COSTS_N_INSNS (2), /* flag_setting. */
+ COSTS_N_INSNS (2), /* extend. */
+ COSTS_N_INSNS (3), /* add. */
+ COSTS_N_INSNS (3), /* extend_add. */
+ COSTS_N_INSNS (12) /* idiv. */
+ },
+ /* MULT DImode */
+ {
+ COSTS_N_INSNS (2), /* simple. */
+ 0, /* flag_setting (N/A). */
+ COSTS_N_INSNS (2), /* extend. */
+ COSTS_N_INSNS (3), /* add. */
+ COSTS_N_INSNS (3), /* extend_add. */
+ COSTS_N_INSNS (18) /* idiv. */
+ }
+ },
+ /* LD/ST */
+ {
+ COSTS_N_INSNS (2), /* load. */
+ COSTS_N_INSNS (2), /* load_sign_extend. */
+ 0, /* ldrd (n/a). */
+ 0, /* ldm_1st. */
+ 0, /* ldm_regs_per_insn_1st. */
+ 0, /* ldm_regs_per_insn_subsequent. */
+ COSTS_N_INSNS (3), /* loadf. */
+ COSTS_N_INSNS (3), /* loadd. */
+ COSTS_N_INSNS (3), /* load_unaligned. */
+ 0, /* store. */
+ 0, /* strd. */
+ 0, /* stm_1st. */
+ 0, /* stm_regs_per_insn_1st. */
+ 0, /* stm_regs_per_insn_subsequent. */
+ COSTS_N_INSNS (1), /* storef. */
+ COSTS_N_INSNS (1), /* stored. */
+ COSTS_N_INSNS (1), /* store_unaligned. */
+ COSTS_N_INSNS (3), /* loadv. */
+ COSTS_N_INSNS (3) /* storev. */
+ },
+ {
+ /* FP SFmode */
+ {
+ COSTS_N_INSNS (18), /* div. */
+ COSTS_N_INSNS (3), /* mult. */
+ COSTS_N_INSNS (3), /* mult_addsub. */
+ COSTS_N_INSNS (3), /* fma. */
+ COSTS_N_INSNS (2), /* addsub. */
+ COSTS_N_INSNS (1), /* fpconst. */
+ COSTS_N_INSNS (2), /* neg. */
+ COSTS_N_INSNS (2), /* compare. */
+ COSTS_N_INSNS (2), /* widen. */
+ COSTS_N_INSNS (2), /* narrow. */
+ COSTS_N_INSNS (6), /* toint. */
+ COSTS_N_INSNS (4), /* fromint. */
+ COSTS_N_INSNS (2) /* roundint. */
+ },
+ /* FP DFmode */
+ {
+ COSTS_N_INSNS (18), /* div. */
+ COSTS_N_INSNS (3), /* mult. */
+ COSTS_N_INSNS (3), /* mult_addsub. */
+ COSTS_N_INSNS (3), /* fma. */
+ COSTS_N_INSNS (2), /* addsub. */
+ COSTS_N_INSNS (1), /* fpconst. */
+ COSTS_N_INSNS (2), /* neg. */
+ COSTS_N_INSNS (2), /* compare. */
+ COSTS_N_INSNS (2), /* widen. */
+ COSTS_N_INSNS (2), /* narrow. */
+ COSTS_N_INSNS (6), /* toint. */
+ COSTS_N_INSNS (4), /* fromint. */
+ COSTS_N_INSNS (2) /* roundint. */
+ }
+ },
+ /* Vector */
+ {
+ COSTS_N_INSNS (1), /* alu. */
+ COSTS_N_INSNS (2), /* mult. */
+ COSTS_N_INSNS (1), /* movi. */
+ COSTS_N_INSNS (1), /* dup. */
+ COSTS_N_INSNS (1) /* extract. */
+ }
+};
+
#endif
diff --git a/gcc/config/aarch64/aarch64-isa-modes.def b/gcc/config/aarch64/aarch64-isa-modes.def
new file mode 100644
index 0000000..c0ada35
--- /dev/null
+++ b/gcc/config/aarch64/aarch64-isa-modes.def
@@ -0,0 +1,40 @@
+/* Copyright (C) 2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* This file defines a set of "ISA modes"; in other words, it defines
+ various bits of runtime state that control the set of available
+ instructions or that affect the semantics of instructions in some way.
+
+ Before using #include to read this file, define a macro:
+
+ DEF_AARCH64_ISA_MODE(NAME)
+
+ where NAME is the name of the mode. */
+
+/* Indicates that PSTATE.SM is known to be 1 or 0 respectively. These
+ modes are mutually exclusive. If neither mode is active then the state
+ of PSTATE.SM is not known at compile time. */
+DEF_AARCH64_ISA_MODE(SM_ON)
+DEF_AARCH64_ISA_MODE(SM_OFF)
+
+/* Indicates that PSTATE.ZA is known to be 1. The converse is that
+ PSTATE.ZA might be 0 or 1, depending on whether there is an uncommitted
+ lazy save. */
+DEF_AARCH64_ISA_MODE(ZA_ON)
+
+#undef DEF_AARCH64_ISA_MODE
diff --git a/gcc/config/aarch64/aarch64-modes.def b/gcc/config/aarch64/aarch64-modes.def
index 6b4f4e1..ffca551 100644
--- a/gcc/config/aarch64/aarch64-modes.def
+++ b/gcc/config/aarch64/aarch64-modes.def
@@ -48,16 +48,19 @@ ADJUST_FLOAT_FORMAT (HF, &ieee_half_format);
/* Vector modes. */
+VECTOR_BOOL_MODE (VNx32BI, 32, BI, 4);
VECTOR_BOOL_MODE (VNx16BI, 16, BI, 2);
VECTOR_BOOL_MODE (VNx8BI, 8, BI, 2);
VECTOR_BOOL_MODE (VNx4BI, 4, BI, 2);
VECTOR_BOOL_MODE (VNx2BI, 2, BI, 2);
+ADJUST_NUNITS (VNx32BI, aarch64_sve_vg * 16);
ADJUST_NUNITS (VNx16BI, aarch64_sve_vg * 8);
ADJUST_NUNITS (VNx8BI, aarch64_sve_vg * 4);
ADJUST_NUNITS (VNx4BI, aarch64_sve_vg * 2);
ADJUST_NUNITS (VNx2BI, aarch64_sve_vg);
+ADJUST_ALIGNMENT (VNx32BI, 2);
ADJUST_ALIGNMENT (VNx16BI, 2);
ADJUST_ALIGNMENT (VNx8BI, 2);
ADJUST_ALIGNMENT (VNx4BI, 2);
@@ -156,7 +159,7 @@ ADV_SIMD_Q_REG_STRUCT_MODES (4, V4x16, V4x8, V4x4, V4x2)
for 8-bit, 16-bit, 32-bit and 64-bit elements respectively. It isn't
strictly necessary to set the alignment here, since the default would
be clamped to BIGGEST_ALIGNMENT anyhow, but it seems clearer. */
-#define SVE_MODES(NVECS, VB, VH, VS, VD) \
+#define SVE_MODES(NVECS, VB, VH, VS, VD, VT) \
VECTOR_MODES_WITH_PREFIX (VNx, INT, 16 * NVECS, NVECS == 1 ? 1 : 4); \
VECTOR_MODES_WITH_PREFIX (VNx, FLOAT, 16 * NVECS, NVECS == 1 ? 1 : 4); \
\
@@ -164,6 +167,7 @@ ADV_SIMD_Q_REG_STRUCT_MODES (4, V4x16, V4x8, V4x4, V4x2)
ADJUST_NUNITS (VH##HI, aarch64_sve_vg * NVECS * 4); \
ADJUST_NUNITS (VS##SI, aarch64_sve_vg * NVECS * 2); \
ADJUST_NUNITS (VD##DI, aarch64_sve_vg * NVECS); \
+ ADJUST_NUNITS (VT##TI, exact_div (aarch64_sve_vg * NVECS, 2)); \
ADJUST_NUNITS (VH##BF, aarch64_sve_vg * NVECS * 4); \
ADJUST_NUNITS (VH##HF, aarch64_sve_vg * NVECS * 4); \
ADJUST_NUNITS (VS##SF, aarch64_sve_vg * NVECS * 2); \
@@ -173,17 +177,23 @@ ADV_SIMD_Q_REG_STRUCT_MODES (4, V4x16, V4x8, V4x4, V4x2)
ADJUST_ALIGNMENT (VH##HI, 16); \
ADJUST_ALIGNMENT (VS##SI, 16); \
ADJUST_ALIGNMENT (VD##DI, 16); \
+ ADJUST_ALIGNMENT (VT##TI, 16); \
ADJUST_ALIGNMENT (VH##BF, 16); \
ADJUST_ALIGNMENT (VH##HF, 16); \
ADJUST_ALIGNMENT (VS##SF, 16); \
ADJUST_ALIGNMENT (VD##DF, 16);
-/* Give SVE vectors the names normally used for 256-bit vectors.
- The actual number depends on command-line flags. */
-SVE_MODES (1, VNx16, VNx8, VNx4, VNx2)
-SVE_MODES (2, VNx32, VNx16, VNx8, VNx4)
-SVE_MODES (3, VNx48, VNx24, VNx12, VNx6)
-SVE_MODES (4, VNx64, VNx32, VNx16, VNx8)
+/* Give SVE vectors names of the form VNxX, where X describes what is
+ stored in each 128-bit unit. The actual size of the mode depends
+ on command-line flags.
+
+ VNx1TI isn't really a native SVE mode, but it can be useful in some
+ limited situations. */
+VECTOR_MODE_WITH_PREFIX (VNx, INT, TI, 1, 1);
+SVE_MODES (1, VNx16, VNx8, VNx4, VNx2, VNx1)
+SVE_MODES (2, VNx32, VNx16, VNx8, VNx4, VNx2)
+SVE_MODES (3, VNx48, VNx24, VNx12, VNx6, VNx3)
+SVE_MODES (4, VNx64, VNx32, VNx16, VNx8, VNx4)
/* Partial SVE vectors:
diff --git a/gcc/config/aarch64/aarch64-option-extensions.def b/gcc/config/aarch64/aarch64-option-extensions.def
index 825f3bf..c156d2e 100644
--- a/gcc/config/aarch64/aarch64-option-extensions.def
+++ b/gcc/config/aarch64/aarch64-option-extensions.def
@@ -151,4 +151,12 @@ AARCH64_OPT_EXTENSION("mops", MOPS, (), (), (), "")
AARCH64_OPT_EXTENSION("cssc", CSSC, (), (), (), "cssc")
+AARCH64_OPT_EXTENSION("sme", SME, (BF16, SVE2), (), (), "sme")
+
+AARCH64_OPT_EXTENSION("sme-i16i64", SME_I16I64, (SME), (), (), "")
+
+AARCH64_OPT_EXTENSION("sme-f64f64", SME_F64F64, (SME), (), (), "")
+
+AARCH64_OPT_EXTENSION("sme2", SME2, (SME), (), (), "sme2")
+
#undef AARCH64_OPT_EXTENSION
diff --git a/gcc/config/aarch64/aarch64-passes.def b/gcc/config/aarch64/aarch64-passes.def
index 6ace797..662a13f 100644
--- a/gcc/config/aarch64/aarch64-passes.def
+++ b/gcc/config/aarch64/aarch64-passes.def
@@ -20,6 +20,7 @@
INSERT_PASS_AFTER (pass_regrename, 1, pass_fma_steering);
INSERT_PASS_BEFORE (pass_reorder_blocks, 1, pass_track_speculation);
+INSERT_PASS_BEFORE (pass_late_thread_prologue_and_epilogue, 1, pass_switch_pstate_sm);
INSERT_PASS_AFTER (pass_machine_reorg, 1, pass_tag_collision_avoidance);
INSERT_PASS_BEFORE (pass_shorten_branches, 1, pass_insert_bti);
INSERT_PASS_AFTER (pass_if_after_combine, 1, pass_cc_fusion);
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index 36d6c68..60ff61f 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -767,6 +767,7 @@ bool aarch64_constant_address_p (rtx);
bool aarch64_emit_approx_div (rtx, rtx, rtx);
bool aarch64_emit_approx_sqrt (rtx, rtx, bool);
tree aarch64_vector_load_decl (tree);
+rtx aarch64_gen_callee_cookie (aarch64_feature_flags, arm_pcs);
void aarch64_expand_call (rtx, rtx, rtx, bool);
bool aarch64_expand_cpymem_mops (rtx *, bool);
bool aarch64_expand_cpymem (rtx *);
@@ -798,10 +799,17 @@ bool aarch64_sve_mode_p (machine_mode);
HOST_WIDE_INT aarch64_fold_sve_cnt_pat (aarch64_svpattern, unsigned int);
bool aarch64_sve_cnt_immediate_p (rtx);
bool aarch64_sve_scalar_inc_dec_immediate_p (rtx);
+bool aarch64_sve_rdvl_immediate_p (rtx);
bool aarch64_sve_addvl_addpl_immediate_p (rtx);
bool aarch64_sve_vector_inc_dec_immediate_p (rtx);
int aarch64_add_offset_temporaries (rtx);
void aarch64_split_add_offset (scalar_int_mode, rtx, rtx, rtx, rtx, rtx);
+bool aarch64_rdsvl_immediate_p (const_rtx);
+rtx aarch64_sme_vq_immediate (machine_mode mode, HOST_WIDE_INT,
+ aarch64_feature_flags);
+char *aarch64_output_rdsvl (const_rtx);
+bool aarch64_addsvl_addspl_immediate_p (const_rtx);
+char *aarch64_output_addsvl_addspl (rtx);
bool aarch64_mov_operand_p (rtx, machine_mode);
rtx aarch64_reverse_mask (machine_mode, unsigned int);
bool aarch64_offset_7bit_signed_scaled_p (machine_mode, poly_int64);
@@ -810,6 +818,7 @@ char *aarch64_output_sve_prefetch (const char *, rtx, const char *);
char *aarch64_output_sve_cnt_immediate (const char *, const char *, rtx);
char *aarch64_output_sve_cnt_pat_immediate (const char *, const char *, rtx *);
char *aarch64_output_sve_scalar_inc_dec (rtx);
+char *aarch64_output_sve_rdvl (rtx);
char *aarch64_output_sve_addvl_addpl (rtx);
char *aarch64_output_sve_vector_inc_dec (const char *, rtx);
char *aarch64_output_scalar_simd_mov_immediate (rtx, scalar_int_mode);
@@ -830,6 +839,8 @@ bool aarch64_simd_shift_imm_p (rtx, machine_mode, bool);
bool aarch64_sve_ptrue_svpattern_p (rtx, struct simd_immediate_info *);
bool aarch64_simd_valid_immediate (rtx, struct simd_immediate_info *,
enum simd_immediate_check w = AARCH64_CHECK_MOV);
+bool aarch64_valid_sysreg_name_p (const char *);
+const char *aarch64_retrieve_sysreg (const char *, bool);
rtx aarch64_check_zero_based_sve_index_immediate (rtx);
bool aarch64_maybe_generate_simd_constant (rtx, rtx, machine_mode);
bool aarch64_simd_special_constant_p (rtx, machine_mode);
@@ -849,8 +860,9 @@ bool aarch64_is_mov_xn_imm (unsigned HOST_WIDE_INT);
bool aarch64_use_return_insn_p (void);
const char *aarch64_output_casesi (rtx *);
const char *aarch64_output_load_tp (rtx);
+const char *aarch64_output_sme_zero_za (rtx);
-unsigned int aarch64_tlsdesc_abi_id ();
+arm_pcs aarch64_tlsdesc_abi_id ();
enum aarch64_symbol_type aarch64_classify_symbol (rtx, HOST_WIDE_INT);
enum aarch64_symbol_type aarch64_classify_tls_symbol (rtx);
enum reg_class aarch64_regno_regclass (unsigned);
@@ -861,9 +873,7 @@ machine_mode aarch64_hard_regno_caller_save_mode (unsigned, unsigned,
machine_mode);
int aarch64_uxt_size (int, HOST_WIDE_INT);
int aarch64_vec_fpconst_pow_of_2 (rtx);
-rtx aarch64_eh_return_handler_rtx (void);
rtx aarch64_mask_from_zextract_ops (rtx, rtx);
-const char *aarch64_output_move_struct (rtx *operands);
rtx aarch64_return_addr_rtx (void);
rtx aarch64_return_addr (int, rtx);
rtx aarch64_simd_gen_const_vector_dup (machine_mode, HOST_WIDE_INT);
@@ -877,6 +887,7 @@ bool aarch64_sve_ldnf1_operand_p (rtx);
bool aarch64_sve_ldr_operand_p (rtx);
bool aarch64_sve_prefetch_operand_p (rtx, machine_mode);
bool aarch64_sve_struct_memory_operand_p (rtx);
+bool aarch64_sme_ldr_vnum_offset_p (rtx, rtx);
rtx aarch64_simd_vect_par_cnst_half (machine_mode, int, bool);
rtx aarch64_gen_stepped_int_parallel (unsigned int, int, int);
bool aarch64_stepped_int_parallel_p (rtx, int);
@@ -889,7 +900,7 @@ const char * aarch64_gen_far_branch (rtx *, int, const char *, const char *);
const char * aarch64_output_probe_stack_range (rtx, rtx);
const char * aarch64_output_probe_sve_stack_clash (rtx, rtx, rtx, rtx);
void aarch64_err_no_fpadvsimd (machine_mode);
-void aarch64_expand_epilogue (bool);
+void aarch64_expand_epilogue (rtx_call_insn *);
rtx aarch64_ptrue_all (unsigned int);
opt_machine_mode aarch64_ptrue_all_mode (rtx);
rtx aarch64_convert_sve_data_to_pred (rtx, machine_mode, rtx);
@@ -910,7 +921,7 @@ void aarch64_sve_expand_vector_init (rtx, rtx);
void aarch64_init_cumulative_args (CUMULATIVE_ARGS *, const_tree, rtx,
const_tree, unsigned, bool = false);
void aarch64_init_expanders (void);
-void aarch64_emit_call_insn (rtx);
+rtx_call_insn *aarch64_emit_call_insn (rtx);
void aarch64_register_pragmas (void);
void aarch64_relayout_simd_types (void);
void aarch64_reset_previous_fndecl (void);
@@ -938,6 +949,7 @@ rtx aarch64_simd_expand_builtin (int, tree, rtx);
void aarch64_simd_lane_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT, const_tree);
rtx aarch64_endian_lane_rtx (machine_mode, unsigned int);
+void aarch64_split_double_move (rtx, rtx, machine_mode);
void aarch64_split_128bit_move (rtx, rtx);
bool aarch64_split_128bit_move_p (rtx, rtx);
@@ -990,9 +1002,14 @@ tree aarch64_general_builtin_rsqrt (unsigned int);
void handle_arm_acle_h (void);
void handle_arm_neon_h (void);
+bool aarch64_general_check_builtin_call (location_t, vec<location_t>,
+ unsigned int, tree, unsigned int,
+ tree *);
+
namespace aarch64_sve {
void init_builtins ();
void handle_arm_sve_h ();
+ void handle_arm_sme_h ();
tree builtin_decl (unsigned, bool);
bool builtin_type_p (const_tree);
bool builtin_type_p (const_tree, unsigned int *, unsigned int *);
@@ -1051,6 +1068,7 @@ rtl_opt_pass *make_pass_track_speculation (gcc::context *);
rtl_opt_pass *make_pass_tag_collision_avoidance (gcc::context *);
rtl_opt_pass *make_pass_insert_bti (gcc::context *ctxt);
rtl_opt_pass *make_pass_cc_fusion (gcc::context *ctxt);
+rtl_opt_pass *make_pass_switch_pstate_sm (gcc::context *ctxt);
poly_uint64 aarch64_regmode_natural_size (machine_mode);
@@ -1080,4 +1098,7 @@ extern void aarch64_output_patchable_area (unsigned int, bool);
extern void aarch64_adjust_reg_alloc_order ();
+bool aarch64_optimize_mode_switching (aarch64_mode_entity);
+void aarch64_restore_za (rtx);
+
#endif /* GCC_AARCH64_PROTOS_H */
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index ad79a81..50b6855 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -149,20 +149,20 @@
&& (register_operand (operands[0], <MODE>mode)
|| aarch64_simd_reg_or_zero (operands[1], <MODE>mode))"
{@ [cons: =0, 1; attrs: type, arch, length]
- [w , m ; neon_load1_1reg<q> , * , *] ldr\t%d0, %1
- [r , m ; load_8 , * , *] ldr\t%x0, %1
- [m , Dz; store_8 , * , *] str\txzr, %0
- [m , w ; neon_store1_1reg<q>, * , *] str\t%d1, %0
- [m , r ; store_8 , * , *] str\t%x1, %0
- [w , w ; neon_logic<q> , simd, *] mov\t%0.<Vbtype>, %1.<Vbtype>
- [w , w ; neon_logic<q> , * , *] fmov\t%d0, %d1
- [?r, w ; neon_to_gp<q> , simd, *] umov\t%0, %1.d[0]
- [?r, w ; neon_to_gp<q> , * , *] fmov\t%x0, %d1
- [?w, r ; f_mcr , * , *] fmov\t%d0, %1
- [?r, r ; mov_reg , * , *] mov\t%0, %1
- [w , Dn; neon_move<q> , simd, *] << aarch64_output_simd_mov_immediate (operands[1], 64);
- [w , Dz; f_mcr , * , *] fmov\t%d0, xzr
- [w , Dx; neon_move , simd, 8] #
+ [w , m ; neon_load1_1reg<q> , * , *] ldr\t%d0, %1
+ [r , m ; load_8 , * , *] ldr\t%x0, %1
+ [m , Dz; store_8 , * , *] str\txzr, %0
+ [m , w ; neon_store1_1reg<q>, * , *] str\t%d1, %0
+ [m , r ; store_8 , * , *] str\t%x1, %0
+ [w , w ; neon_logic<q> , simd , *] mov\t%0.<Vbtype>, %1.<Vbtype>
+ [w , w ; neon_logic<q> , * , *] fmov\t%d0, %d1
+ [?r, w ; neon_to_gp<q> , base_simd, *] umov\t%0, %1.d[0]
+ [?r, w ; neon_to_gp<q> , * , *] fmov\t%x0, %d1
+ [?w, r ; f_mcr , * , *] fmov\t%d0, %1
+ [?r, r ; mov_reg , * , *] mov\t%0, %1
+ [w , Dn; neon_move<q> , simd , *] << aarch64_output_simd_mov_immediate (operands[1], 64);
+ [w , Dz; f_mcr , * , *] fmov\t%d0, xzr
+ [w , Dx; neon_move , simd , 8] #
}
"CONST_INT_P (operands[1])
&& aarch64_simd_special_constant_p (operands[1], <MODE>mode)
@@ -185,6 +185,7 @@
[Umn, Dz; store_16 , * , 4] stp\txzr, xzr, %0
[m , w ; neon_store1_1reg<q>, * , 4] str\t%q1, %0
[w , w ; neon_logic<q> , simd, 4] mov\t%0.<Vbtype>, %1.<Vbtype>
+ [w , w ; * , sve , 4] mov\t%Z0.d, %Z1.d
[?r , w ; multiple , * , 8] #
[?w , r ; multiple , * , 8] #
[?r , r ; multiple , * , 8] #
@@ -225,7 +226,7 @@
[(set (match_operand:<VEL> 0 "memory_operand" "=m")
(vec_select:<VEL> (match_operand:VALL_F16 1 "register_operand" "w")
(parallel [(match_operand 2 "const_int_operand" "n")])))]
- "TARGET_SIMD
+ "TARGET_FLOAT
&& ENDIAN_LANE_N (<nunits>, INTVAL (operands[2])) == 0"
"str\\t%<Vetype>1, %0"
[(set_attr "type" "neon_store1_1reg<q>")]
@@ -374,18 +375,18 @@
(vec_select:<VHALF>
(match_operand:VQMOV_NO2E 1 "register_operand")
(match_operand:VQMOV_NO2E 2 "vect_par_cnst_lo_half")))]
- "TARGET_SIMD"
- {@ [ cons: =0 , 1 ; attrs: type ]
- [ w , w ; mov_reg ] #
- [ ?r , w ; neon_to_gp<q> ] umov\t%0, %1.d[0]
+ "TARGET_FLOAT"
+ {@ [ cons: =0 , 1 ; attrs: type , arch ]
+ [ w , w ; mov_reg , simd ] #
+ [ ?r , w ; neon_to_gp<q> , base_simd ] umov\t%0, %1.d[0]
+ [ ?r , w ; f_mrc , * ] fmov\t%0, %d1
}
"&& reload_completed && aarch64_simd_register (operands[0], <VHALF>mode)"
[(set (match_dup 0) (match_dup 1))]
{
operands[1] = aarch64_replace_reg_mode (operands[1], <VHALF>mode);
}
- [
- (set_attr "length" "4")]
+ [(set_attr "length" "4")]
)
(define_insn "aarch64_simd_mov_from_<mode>high"
@@ -396,12 +397,11 @@
"TARGET_FLOAT"
{@ [ cons: =0 , 1 ; attrs: type , arch ]
[ w , w ; neon_dup<q> , simd ] dup\t%d0, %1.d[1]
+ [ w , w ; * , sve ] ext\t%Z0.b, %Z0.b, %Z0.b, #8
[ ?r , w ; neon_to_gp<q> , simd ] umov\t%0, %1.d[1]
[ ?r , w ; f_mrc , * ] fmov\t%0, %1.d[1]
}
- [
-
- (set_attr "length" "4")]
+ [(set_attr "length" "4")]
)
(define_insn "orn<mode>3<vczle><vczbe>"
diff --git a/gcc/config/aarch64/aarch64-sme.md b/gcc/config/aarch64/aarch64-sme.md
new file mode 100644
index 0000000..6cba6ab
--- /dev/null
+++ b/gcc/config/aarch64/aarch64-sme.md
@@ -0,0 +1,1984 @@
+;; Machine description for AArch64 SME.
+;; Copyright (C) 2023 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; The file is organised into the following sections (search for the full
+;; line):
+;;
+;; == State management
+;; ---- Test current state
+;; ---- PSTATE.SM management
+;; ---- PSTATE.ZA management
+;;
+;; == Loads, stores and moves
+;; ---- Single-vector loads
+;; ---- Table loads
+;; ---- Single-vector stores
+;; ---- Table stores
+;; ---- Single-vector moves
+;; ---- Multi-vector moves
+;; ---- Zeroing
+;;
+;; == Binary arithmetic
+;; ---- Binary arithmetic on ZA tile
+;; ---- Binary arithmetic on ZA slice
+;; ---- Binary arithmetic, writing to ZA slice
+;;
+;; == Ternary arithmetic
+;; ---- [INT] Dot product
+;; ---- [INT] Ternary widening arithmetic on ZA slice
+;; ---- [INT] Sum of outer products
+;; ---- [FP] Dot product
+;; ---- [FP] Ternary arithmetic on ZA slice
+;; ---- [FP] Ternary widening arithmetic on ZA slice
+;; ---- [FP] Sum of outer products
+;;
+;; == Table lookup
+;; ---- Table lookup
+
+;; =========================================================================
+;; == State management
+;; =========================================================================
+;;
+;; Many of the instructions in this section are only valid when SME is
+;; present. However, they don't have a TARGET_SME condition since
+;; (a) they are only emitted under direct control of aarch64 code and
+;; (b) they are sometimes used conditionally, particularly in streaming-
+;; compatible code.
+;;
+;; =========================================================================
+
+;; -------------------------------------------------------------------------
+;; ---- Test current state
+;; -------------------------------------------------------------------------
+
+(define_c_enum "unspec" [
+ UNSPEC_OLD_VG_SAVED
+ UNSPEC_UPDATE_VG
+ UNSPEC_GET_SME_STATE
+ UNSPEC_READ_SVCR
+])
+
+;; A marker instruction to say that the old value of the DWARF VG register
+;; has been saved to the stack, for CFI purposes. Operand 0 is the old
+;; value of the register and operand 1 is the save slot.
+(define_insn "aarch64_old_vg_saved"
+ [(set (reg:DI VG_REGNUM)
+ (unspec:DI [(match_operand 0)
+ (match_operand 1)] UNSPEC_OLD_VG_SAVED))]
+ ""
+ ""
+ [(set_attr "type" "no_insn")]
+)
+
+;; A marker to indicate places where a call temporarily changes VG.
+(define_insn "aarch64_update_vg"
+ [(set (reg:DI VG_REGNUM)
+ (unspec:DI [(reg:DI VG_REGNUM)] UNSPEC_UPDATE_VG))]
+ ""
+ ""
+ [(set_attr "type" "no_insn")]
+)
+
+(define_insn "aarch64_get_sme_state"
+ [(set (reg:TI R0_REGNUM)
+ (unspec_volatile:TI [(const_int 0)] UNSPEC_GET_SME_STATE))
+ (clobber (reg:DI R16_REGNUM))
+ (clobber (reg:DI R17_REGNUM))
+ (clobber (reg:DI R18_REGNUM))
+ (clobber (reg:DI R30_REGNUM))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "bl\t__arm_sme_state"
+)
+
+(define_insn "aarch64_read_svcr"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec_volatile:DI [(const_int 0)] UNSPEC_READ_SVCR))]
+ ""
+ "mrs\t%0, svcr"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- PSTATE.SM management
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - SMSTART SM
+;; - SMSTOP SM
+;; -------------------------------------------------------------------------
+
+(define_c_enum "unspec" [
+ UNSPEC_SMSTART_SM
+ UNSPEC_SMSTOP_SM
+])
+
+;; Turn on streaming mode. This clobbers all SVE state.
+;;
+;; Depend on VG_REGNUM to ensure that the VG save slot has already been
+;; initialized.
+(define_insn "aarch64_smstart_sm"
+ [(unspec_volatile [(const_int 0)] UNSPEC_SMSTART_SM)
+ (use (reg:DI VG_REGNUM))
+ (clobber (reg:V4x16QI V0_REGNUM))
+ (clobber (reg:V4x16QI V4_REGNUM))
+ (clobber (reg:V4x16QI V8_REGNUM))
+ (clobber (reg:V4x16QI V12_REGNUM))
+ (clobber (reg:V4x16QI V16_REGNUM))
+ (clobber (reg:V4x16QI V20_REGNUM))
+ (clobber (reg:V4x16QI V24_REGNUM))
+ (clobber (reg:V4x16QI V28_REGNUM))
+ (clobber (reg:VNx16BI P0_REGNUM))
+ (clobber (reg:VNx16BI P1_REGNUM))
+ (clobber (reg:VNx16BI P2_REGNUM))
+ (clobber (reg:VNx16BI P3_REGNUM))
+ (clobber (reg:VNx16BI P4_REGNUM))
+ (clobber (reg:VNx16BI P5_REGNUM))
+ (clobber (reg:VNx16BI P6_REGNUM))
+ (clobber (reg:VNx16BI P7_REGNUM))
+ (clobber (reg:VNx16BI P8_REGNUM))
+ (clobber (reg:VNx16BI P9_REGNUM))
+ (clobber (reg:VNx16BI P10_REGNUM))
+ (clobber (reg:VNx16BI P11_REGNUM))
+ (clobber (reg:VNx16BI P12_REGNUM))
+ (clobber (reg:VNx16BI P13_REGNUM))
+ (clobber (reg:VNx16BI P14_REGNUM))
+ (clobber (reg:VNx16BI P15_REGNUM))]
+ ""
+ "smstart\tsm"
+)
+
+;; Turn off streaming mode. This clobbers all SVE state.
+;;
+;; Depend on VG_REGNUM to ensure that the VG save slot has already been
+;; initialized.
+(define_insn "aarch64_smstop_sm"
+ [(unspec_volatile [(const_int 0)] UNSPEC_SMSTOP_SM)
+ (use (reg:DI VG_REGNUM))
+ (clobber (reg:V4x16QI V0_REGNUM))
+ (clobber (reg:V4x16QI V4_REGNUM))
+ (clobber (reg:V4x16QI V8_REGNUM))
+ (clobber (reg:V4x16QI V12_REGNUM))
+ (clobber (reg:V4x16QI V16_REGNUM))
+ (clobber (reg:V4x16QI V20_REGNUM))
+ (clobber (reg:V4x16QI V24_REGNUM))
+ (clobber (reg:V4x16QI V28_REGNUM))
+ (clobber (reg:VNx16BI P0_REGNUM))
+ (clobber (reg:VNx16BI P1_REGNUM))
+ (clobber (reg:VNx16BI P2_REGNUM))
+ (clobber (reg:VNx16BI P3_REGNUM))
+ (clobber (reg:VNx16BI P4_REGNUM))
+ (clobber (reg:VNx16BI P5_REGNUM))
+ (clobber (reg:VNx16BI P6_REGNUM))
+ (clobber (reg:VNx16BI P7_REGNUM))
+ (clobber (reg:VNx16BI P8_REGNUM))
+ (clobber (reg:VNx16BI P9_REGNUM))
+ (clobber (reg:VNx16BI P10_REGNUM))
+ (clobber (reg:VNx16BI P11_REGNUM))
+ (clobber (reg:VNx16BI P12_REGNUM))
+ (clobber (reg:VNx16BI P13_REGNUM))
+ (clobber (reg:VNx16BI P14_REGNUM))
+ (clobber (reg:VNx16BI P15_REGNUM))]
+ ""
+ "smstop\tsm"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- PSTATE.ZA management
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - SMSTART ZA
+;; - SMSTOP ZA
+;; plus calls to support routines.
+;; -------------------------------------------------------------------------
+
+(define_c_enum "unspec" [
+ UNSPEC_SMSTOP_ZA
+ UNSPEC_INITIAL_ZERO_ZA
+ UNSPEC_TPIDR2_SAVE
+ UNSPEC_TPIDR2_RESTORE
+ UNSPEC_READ_TPIDR2
+ UNSPEC_WRITE_TPIDR2
+ UNSPEC_SETUP_LOCAL_TPIDR2
+ UNSPEC_RESTORE_ZA
+ UNSPEC_START_PRIVATE_ZA_CALL
+ UNSPEC_END_PRIVATE_ZA_CALL
+ UNSPEC_COMMIT_LAZY_SAVE
+])
+
+(define_c_enum "unspecv" [
+ UNSPECV_ASM_UPDATE_ZA
+ UNSPECV_ASM_UPDATE_ZT0
+])
+
+;; Use the ABI-defined routine to commit an uncommitted lazy save.
+;; This relies on the current PSTATE.ZA, so depends on SME_STATE_REGNUM.
+;; The fake TPIDR2_SETUP_REGNUM register initially holds the incoming
+;; value of the architected TPIDR2_EL0.
+(define_insn "aarch64_tpidr2_save"
+ [(set (reg:DI ZA_FREE_REGNUM)
+ (unspec:DI [(reg:DI SME_STATE_REGNUM)
+ (reg:DI TPIDR2_SETUP_REGNUM)] UNSPEC_TPIDR2_SAVE))
+ (clobber (reg:DI R14_REGNUM))
+ (clobber (reg:DI R15_REGNUM))
+ (clobber (reg:DI R16_REGNUM))
+ (clobber (reg:DI R17_REGNUM))
+ (clobber (reg:DI R18_REGNUM))
+ (clobber (reg:DI R30_REGNUM))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "bl\t__arm_tpidr2_save"
+)
+
+;; Set PSTATE.ZA to 1. If ZA was previously dormant or active,
+;; it remains in the same state afterwards, with the same contents.
+;; Otherwise, it goes from off to on with zeroed contents.
+;;
+;; Later writes of TPIDR2_EL0 to a nonzero value must not be moved
+;; up past this instruction, since that could create an invalid
+;; combination of having an active lazy save while ZA is off.
+;; Create an anti-dependence by reading the current contents
+;; of TPIDR2_SETUP_REGNUM.
+;;
+;; Making this depend on ZA_FREE_REGNUM ensures that contents belonging
+;; to the caller have already been saved. That isn't necessary for this
+;; instruction itself, since PSTATE.ZA is already 1 if it contains data.
+;; But doing this here means that other uses of ZA can just depend on
+;; SME_STATE_REGNUM, rather than both SME_STATE_REGNUM and ZA_FREE_REGNUM.
+(define_insn "aarch64_smstart_za"
+ [(set (reg:DI SME_STATE_REGNUM)
+ (const_int 1))
+ (use (reg:DI TPIDR2_SETUP_REGNUM))
+ (use (reg:DI ZA_FREE_REGNUM))]
+ ""
+ "smstart\tza"
+)
+
+;; Disable ZA and discard its current contents.
+;;
+;; The ABI says that the ZA save buffer must be null whenever PSTATE.ZA
+;; is zero, so earlier writes to TPIDR2_EL0 must not be moved down past
+;; this instruction. Depend on TPIDR2_SETUP_REGNUM to ensure this.
+;;
+;; We can only turn ZA off once we know that it is free (i.e. doesn't
+;; contain data belonging to the caller). Depend on ZA_FREE_REGNUM
+;; to ensure this.
+;;
+;; We only turn ZA off when the current function's ZA state is dead,
+;; or perhaps if we're sure that the contents are saved. Either way,
+;; we know whether ZA is saved or not.
+(define_insn "aarch64_smstop_za"
+ [(set (reg:DI SME_STATE_REGNUM)
+ (const_int 0))
+ (set (reg:DI ZA_SAVED_REGNUM)
+ (unspec:DI [(reg:DI TPIDR2_SETUP_REGNUM)
+ (reg:DI ZA_FREE_REGNUM)] UNSPEC_SMSTOP_ZA))]
+ ""
+ "smstop\tza"
+)
+
+;; Zero ZA after committing a lazy save. The sequencing is enforced
+;; by reading ZA_FREE_REGNUM.
+(define_insn "aarch64_initial_zero_za"
+ [(set (reg:DI ZA_REGNUM)
+ (unspec:DI [(reg:DI SME_STATE_REGNUM)
+ (reg:DI ZA_FREE_REGNUM)] UNSPEC_INITIAL_ZERO_ZA))]
+ ""
+ "zero\t{ za }"
+)
+
+;; Initialize the abstract TPIDR2_BLOCK_REGNUM from the contents of
+;; the current function's TPIDR2 block. Other instructions can then
+;; depend on TPIDR2_BLOCK_REGNUM rather than on the memory block.
+(define_insn "aarch64_setup_local_tpidr2"
+ [(set (reg:DI TPIDR2_BLOCK_REGNUM)
+ (unspec:DI [(match_operand:V16QI 0 "memory_operand" "m")]
+ UNSPEC_SETUP_LOCAL_TPIDR2))]
+ ""
+ ""
+ [(set_attr "type" "no_insn")]
+)
+
+;; Clear TPIDR2_EL0, cancelling any uncommitted lazy save.
+(define_insn "aarch64_clear_tpidr2"
+ [(set (reg:DI TPIDR2_SETUP_REGNUM)
+ (const_int 0))]
+ ""
+ "msr\ttpidr2_el0, xzr"
+)
+
+;; Point TPIDR2_EL0 to the current function's TPIDR2 block, whose address
+;; is given by operand 0. TPIDR2_BLOCK_REGNUM represents the contents of the
+;; pointed-to block.
+(define_insn "aarch64_write_tpidr2"
+ [(set (reg:DI TPIDR2_SETUP_REGNUM)
+ (unspec:DI [(match_operand 0 "pmode_register_operand" "r")
+ (reg:DI TPIDR2_BLOCK_REGNUM)] UNSPEC_WRITE_TPIDR2))]
+ ""
+ "msr\ttpidr2_el0, %0"
+)
+
+;; Check whether ZA has been saved. The system depends on the value that
+;; we wrote to TPIDR2_EL0 previously, so it depends on TPIDR2_SETUP_REGNUM.
+(define_insn "aarch64_read_tpidr2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(reg:DI TPIDR2_SETUP_REGNUM)
+ (reg:DI ZA_SAVED_REGNUM)] UNSPEC_READ_TPIDR2))]
+ ""
+ "mrs\t%0, tpidr2_el0"
+)
+
+;; Use the ABI-defined routine to restore lazy-saved ZA contents
+;; from the TPIDR2 block pointed to by X0. ZA must already be active.
+(define_insn "aarch64_tpidr2_restore"
+ [(set (reg:DI ZA_SAVED_REGNUM)
+ (unspec:DI [(reg:DI R0_REGNUM)] UNSPEC_TPIDR2_RESTORE))
+ (set (reg:DI SME_STATE_REGNUM)
+ (unspec:DI [(reg:DI SME_STATE_REGNUM)] UNSPEC_TPIDR2_RESTORE))
+ (clobber (reg:DI R14_REGNUM))
+ (clobber (reg:DI R15_REGNUM))
+ (clobber (reg:DI R16_REGNUM))
+ (clobber (reg:DI R17_REGNUM))
+ (clobber (reg:DI R18_REGNUM))
+ (clobber (reg:DI R30_REGNUM))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "bl\t__arm_tpidr2_restore"
+)
+
+;; Check whether a lazy save set up by aarch64_save_za was committed
+;; and restore the saved contents if so.
+;;
+;; Operand 0 is the address of the current function's TPIDR2 block.
+(define_insn_and_split "aarch64_restore_za"
+ [(set (reg:DI ZA_SAVED_REGNUM)
+ (unspec:DI [(match_operand 0 "pmode_register_operand" "r")
+ (reg:DI SME_STATE_REGNUM)
+ (reg:DI TPIDR2_SETUP_REGNUM)
+ (reg:DI ZA_SAVED_REGNUM)] UNSPEC_RESTORE_ZA))
+ (clobber (reg:DI R0_REGNUM))
+ (clobber (reg:DI R14_REGNUM))
+ (clobber (reg:DI R15_REGNUM))
+ (clobber (reg:DI R16_REGNUM))
+ (clobber (reg:DI R17_REGNUM))
+ (clobber (reg:DI R18_REGNUM))
+ (clobber (reg:DI R30_REGNUM))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "#"
+ "&& epilogue_completed"
+ [(const_int 0)]
+ {
+ auto label = gen_label_rtx ();
+ auto tpidr2 = gen_rtx_REG (DImode, R16_REGNUM);
+ emit_insn (gen_aarch64_read_tpidr2 (tpidr2));
+ auto jump = emit_likely_jump_insn (gen_aarch64_cbnedi1 (tpidr2, label));
+ JUMP_LABEL (jump) = label;
+
+ aarch64_restore_za (operands[0]);
+ emit_label (label);
+ DONE;
+ }
+)
+
+;; This instruction is emitted after asms that alter ZA, in order to model
+;; the effect on dataflow. The asm itself can't have ZA as an input or
+;; an output, since there is no associated data type. Instead it retains
+;; the original "za" clobber, which on its own would indicate that ZA
+;; is dead.
+;;
+;; The operand is a unique identifier.
+(define_insn "aarch64_asm_update_za"
+ [(set (reg:VNx16QI ZA_REGNUM)
+ (unspec_volatile:VNx16QI
+ [(reg:VNx16QI ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand 0 "const_int_operand")]
+ UNSPECV_ASM_UPDATE_ZA))]
+ ""
+ ""
+ [(set_attr "type" "no_insn")]
+)
+
+;; A similar pattern for ZT0.
+(define_insn "aarch64_asm_update_zt0"
+ [(set (reg:V8DI ZT0_REGNUM)
+ (unspec_volatile:V8DI
+ [(reg:V8DI ZT0_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand 0 "const_int_operand")]
+ UNSPECV_ASM_UPDATE_ZT0))]
+ ""
+ ""
+ [(set_attr "type" "no_insn")]
+)
+
+;; This pseudo-instruction is emitted as part of a call to a private-ZA
+;; function from a function with ZA state. It marks a natural place to set
+;; up a lazy save, if that turns out to be necessary. The save itself
+;; is managed by the mode-switching pass.
+(define_insn "aarch64_start_private_za_call"
+ [(set (reg:DI LOWERING_REGNUM)
+ (unspec:DI [(reg:DI LOWERING_REGNUM)] UNSPEC_START_PRIVATE_ZA_CALL))]
+ ""
+ ""
+ [(set_attr "type" "no_insn")]
+)
+
+;; This pseudo-instruction is emitted as part of a call to a private-ZA
+;; function from a function with ZA state. It marks a natural place to restore
+;; the current function's ZA contents from the lazy save buffer, if that
+;; turns out to be necessary. The save itself is managed by the
+;; mode-switching pass.
+(define_insn "aarch64_end_private_za_call"
+ [(set (reg:DI LOWERING_REGNUM)
+ (unspec:DI [(reg:DI LOWERING_REGNUM)] UNSPEC_END_PRIVATE_ZA_CALL))]
+ ""
+ ""
+ [(set_attr "type" "no_insn")]
+)
+
+;; This pseudo-instruction is emitted before a private-ZA function uses
+;; PSTATE.ZA state for the first time. The instruction checks whether
+;; ZA currently contains data belonging to a caller and commits the
+;; lazy save if so.
+;;
+;; Operand 0 is the incoming value of TPIDR2_EL0. Operand 1 is nonzero
+;; if ZA is live, and should therefore be zeroed after committing a save.
+;;
+;; The instruction is generated by the mode-switching pass. It is a
+;; define_insn_and_split rather than a define_expand because of the
+;; internal control flow.
+(define_insn_and_split "aarch64_commit_lazy_save"
+ [(set (reg:DI ZA_FREE_REGNUM)
+ (unspec:DI [(match_operand 0 "pmode_register_operand" "r")
+ (match_operand 1 "const_int_operand")
+ (reg:DI SME_STATE_REGNUM)
+ (reg:DI TPIDR2_SETUP_REGNUM)
+ (reg:VNx16QI ZA_REGNUM)] UNSPEC_COMMIT_LAZY_SAVE))
+ (set (reg:DI ZA_REGNUM)
+ (unspec:DI [(reg:DI SME_STATE_REGNUM)
+ (reg:DI ZA_FREE_REGNUM)] UNSPEC_INITIAL_ZERO_ZA))
+ (clobber (reg:DI R14_REGNUM))
+ (clobber (reg:DI R15_REGNUM))
+ (clobber (reg:DI R16_REGNUM))
+ (clobber (reg:DI R17_REGNUM))
+ (clobber (reg:DI R18_REGNUM))
+ (clobber (reg:DI R30_REGNUM))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "#"
+ "true"
+ [(const_int 0)]
+ {
+ auto label = gen_label_rtx ();
+ auto jump = emit_jump_insn (gen_aarch64_cbeqdi1 (operands[0], label));
+ JUMP_LABEL (jump) = label;
+ emit_insn (gen_aarch64_tpidr2_save ());
+ emit_insn (gen_aarch64_clear_tpidr2 ());
+ if (INTVAL (operands[1]) != 0)
+ emit_insn (gen_aarch64_initial_zero_za ());
+ emit_label (label);
+ DONE;
+ }
+)
+
+;; =========================================================================
+;; == Loads, stores and moves
+;; =========================================================================
+
+;; -------------------------------------------------------------------------
+;; ---- Single-vector loads
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - LD1
+;; - LDR
+;; -------------------------------------------------------------------------
+
+(define_c_enum "unspec" [
+ UNSPEC_SME_LDR
+])
+
+(define_insn "@aarch64_sme_<optab><mode>"
+ [(set (reg:SME_ZA_I ZA_REGNUM)
+ (unspec:SME_ZA_I
+ [(reg:SME_ZA_I ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:DI 0 "const_int_operand")
+ (match_operand:SI 1 "register_operand" "Ucj")
+ (match_operand:<VPRED> 2 "register_operand" "Upl")
+ (match_operand:SME_ZA_I 3 "aarch64_sve_ldff1_operand" "Utf")]
+ SME_LD1))]
+ "TARGET_STREAMING_SME"
+ "ld1<Vesize>\t{ za%0<hv>.<Vetype>[%w1, 0] }, %2/z, %3"
+)
+
+(define_insn "@aarch64_sme_<optab><mode>_plus"
+ [(set (reg:SME_ZA_I ZA_REGNUM)
+ (unspec:SME_ZA_I
+ [(reg:SME_ZA_I ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:DI 0 "const_int_operand")
+ (plus:SI (match_operand:SI 1 "register_operand" "Ucj")
+ (match_operand:SI 2 "const_int_operand"))
+ (match_operand:<VPRED> 3 "register_operand" "Upl")
+ (match_operand:SME_ZA_I 4 "aarch64_sve_ldff1_operand" "Utf")]
+ SME_LD1))]
+ "TARGET_STREAMING_SME
+ && UINTVAL (operands[2]) < 128 / <elem_bits>"
+ "ld1<Vesize>\t{ za%0<hv>.<Vetype>[%w1, %2] }, %3/z, %4"
+)
+
+(define_insn "aarch64_sme_ldr0"
+ [(set (reg:VNx16QI ZA_REGNUM)
+ (unspec:VNx16QI
+ [(reg:VNx16QI ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Ucj")
+ (mem:VNx16QI (match_operand 1 "pmode_register_operand" "rk"))]
+ UNSPEC_SME_LDR))]
+ "TARGET_SME"
+ "ldr\tza[%w0, 0], [%1, #0, mul vl]"
+)
+
+(define_insn "@aarch64_sme_ldrn<mode>"
+ [(set (reg:VNx16QI ZA_REGNUM)
+ (unspec:VNx16QI
+ [(reg:VNx16QI ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Ucj")
+ (match_operand:SI 1 "const_int_operand"))
+ (mem:VNx16QI
+ (plus:P (match_operand:P 2 "register_operand" "rk")
+ (match_operand:P 3 "aarch64_mov_operand")))]
+ UNSPEC_SME_LDR))]
+ "TARGET_SME
+ && aarch64_sme_ldr_vnum_offset_p (operands[1], operands[3])"
+ "ldr\tza[%w0, %1], [%2, #%1, mul vl]"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- Table loads
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - LDR
+;; -------------------------------------------------------------------------
+
+(define_c_enum "unspec" [
+ UNSPEC_RESTORE_ZT0
+])
+
+(define_insn "aarch64_sme_ldr_zt0"
+ [(set (reg:V8DI ZT0_REGNUM)
+ (match_operand:V8DI 0 "aarch64_sync_memory_operand" "Q"))
+ (use (reg:DI SME_STATE_REGNUM))]
+ "TARGET_SME2"
+ "ldr\tzt0, %0"
+)
+
+;; This version is used after calls to private-ZA functions. Since ZT0_REGNUM
+;; represents the current function's state, it isn't clobbered by private-ZA
+;; functions, so we need to make it depend on the ZA reinitialization code.
+(define_insn "aarch64_restore_zt0"
+ [(set (reg:V8DI ZT0_REGNUM)
+ (unspec:V8DI
+ [(reg:DI SME_STATE_REGNUM)
+ (match_operand:V8DI 0 "aarch64_sync_memory_operand" "Q")]
+ UNSPEC_RESTORE_ZT0))]
+ "TARGET_SME2"
+ "ldr\tzt0, %0"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- Single-vector stores
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - ST1
+;; - STR
+;; -------------------------------------------------------------------------
+
+(define_c_enum "unspec" [
+ UNSPEC_SME_STR
+])
+
+(define_insn "@aarch64_sme_<optab><mode>"
+ [(set (match_operand:SME_ZA_I 0 "aarch64_sve_ldff1_operand" "+Utf")
+ (unspec:SME_ZA_I
+ [(reg:SME_ZA_I ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_dup 0)
+ (match_operand:DI 1 "const_int_operand")
+ (match_operand:SI 2 "register_operand" "Ucj")
+ (match_operand:<VPRED> 3 "register_operand" "Upl")]
+ SME_ST1))]
+ "TARGET_STREAMING_SME"
+ "st1<Vesize>\t{ za%1<hv>.<Vetype>[%w2, 0] }, %3, %0"
+)
+
+(define_insn "@aarch64_sme_<optab><mode>_plus"
+ [(set (match_operand:SME_ZA_I 0 "aarch64_sve_ldff1_operand" "+Utf")
+ (unspec:SME_ZA_I
+ [(reg:SME_ZA_I ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_dup 0)
+ (match_operand:DI 1 "const_int_operand")
+ (plus:SI (match_operand:SI 2 "register_operand" "Ucj")
+ (match_operand:SI 3 "const_int_operand"))
+ (match_operand:<VPRED> 4 "register_operand" "Upl")]
+ SME_ST1))]
+ "TARGET_STREAMING_SME
+ && UINTVAL (operands[3]) < 128 / <elem_bits>"
+ "st1<Vesize>\t{ za%1<hv>.<Vetype>[%w2, %3] }, %4, %0"
+)
+
+(define_insn "aarch64_sme_str0"
+ [(set (mem:VNx16QI (match_operand 1 "pmode_register_operand" "rk"))
+ (unspec:VNx16QI
+ [(reg:VNx16QI ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (mem:VNx16QI (match_dup 1))
+ (match_operand:SI 0 "register_operand" "Ucj")]
+ UNSPEC_SME_STR))]
+ "TARGET_SME"
+ "str\tza[%w0, 0], [%1, #0, mul vl]"
+)
+
+(define_insn "@aarch64_sme_strn<mode>"
+ [(set (mem:VNx16QI
+ (plus:P (match_operand:P 2 "register_operand" "rk")
+ (match_operand:P 3 "aarch64_mov_operand")))
+ (unspec:VNx16QI
+ [(reg:VNx16QI ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (mem:VNx16QI (plus:P (match_dup 2) (match_dup 3)))
+ (plus:SI (match_operand:SI 0 "register_operand" "Ucj")
+ (match_operand:SI 1 "const_int_operand"))]
+ UNSPEC_SME_STR))]
+ "TARGET_SME
+ && aarch64_sme_ldr_vnum_offset_p (operands[1], operands[3])"
+ "str\tza[%w0, %1], [%2, #%1, mul vl]"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- Table stores
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - STR
+;; -------------------------------------------------------------------------
+
+(define_insn "aarch64_sme_str_zt0"
+ [(set (match_operand:V8DI 0 "aarch64_sync_memory_operand" "=Q")
+ (reg:V8DI ZT0_REGNUM))
+ (use (reg:DI SME_STATE_REGNUM))]
+ "TARGET_SME2"
+ "str\tzt0, %0"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- Single-vector moves
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - MOVA
+;; -------------------------------------------------------------------------
+
+(define_insn "@aarch64_sme_<optab><v_int_container><mode>"
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w")
+ (unspec:SVE_FULL
+ [(reg:<V_INT_CONTAINER> ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SVE_FULL 1 "register_operand" "0")
+ (match_operand:<VPRED> 2 "register_operand" "Upl")
+ (match_operand:DI 3 "const_int_operand")
+ (match_operand:SI 4 "register_operand" "Ucj")]
+ SME_READ))]
+ "TARGET_STREAMING_SME"
+ "mova\t%0.<Vetype>, %2/m, za%3<hv>.<Vetype>[%w4, 0]"
+)
+
+(define_insn "*aarch64_sme_<optab><v_int_container><mode>_plus"
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w")
+ (unspec:SVE_FULL
+ [(reg:<V_INT_CONTAINER> ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SVE_FULL 1 "register_operand" "0")
+ (match_operand:<VPRED> 2 "register_operand" "Upl")
+ (match_operand:DI 3 "const_int_operand")
+ (plus:SI (match_operand:SI 4 "register_operand" "Ucj")
+ (match_operand:SI 5 "const_int_operand"))]
+ SME_READ))]
+ "TARGET_STREAMING_SME
+ && UINTVAL (operands[5]) < 128 / <elem_bits>"
+ "mova\t%0.<Vetype>, %2/m, za%3<hv>.<Vetype>[%w4, %5]"
+)
+
+(define_insn "@aarch64_sme_<optab><VNx1TI_ONLY:mode><SVE_FULL:mode>"
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w")
+ (unspec:SVE_FULL
+ [(reg:VNx1TI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SVE_FULL 1 "register_operand" "0")
+ (match_operand:VNx2BI 2 "register_operand" "Upl")
+ (match_operand:DI 3 "const_int_operand")
+ (match_operand:SI 4 "register_operand" "Ucj")]
+ SME_READ))]
+ "TARGET_STREAMING_SME"
+ "mova\t%0.q, %2/m, za%3<hv>.q[%w4, 0]"
+)
+
+(define_insn "@aarch64_sme_<optab><v_int_container><mode>"
+ [(set (reg:<V_INT_CONTAINER> ZA_REGNUM)
+ (unspec:<V_INT_CONTAINER>
+ [(reg:SVE_FULL ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:DI 0 "const_int_operand")
+ (match_operand:SI 1 "register_operand" "Ucj")
+ (match_operand:<VPRED> 2 "register_operand" "Upl")
+ (match_operand:SVE_FULL 3 "register_operand" "w")]
+ SME_WRITE))]
+ "TARGET_STREAMING_SME"
+ "mova\tza%0<hv>.<Vetype>[%w1, 0], %2/m, %3.<Vetype>"
+)
+
+(define_insn "*aarch64_sme_<optab><v_int_container><mode>_plus"
+ [(set (reg:<V_INT_CONTAINER> ZA_REGNUM)
+ (unspec:<V_INT_CONTAINER>
+ [(reg:SVE_FULL ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:DI 0 "const_int_operand")
+ (plus:SI (match_operand:SI 1 "register_operand" "Ucj")
+ (match_operand:SI 2 "const_int_operand"))
+ (match_operand:<VPRED> 3 "register_operand" "Upl")
+ (match_operand:SVE_FULL 4 "register_operand" "w")]
+ SME_WRITE))]
+ "TARGET_STREAMING_SME
+ && UINTVAL (operands[2]) < 128 / <elem_bits>"
+ "mova\tza%0<hv>.<Vetype>[%w1, %2], %3/m, %4.<Vetype>"
+)
+
+(define_insn "@aarch64_sme_<optab><VNx1TI_ONLY:mode><SVE_FULL:mode>"
+ [(set (reg:VNx1TI_ONLY ZA_REGNUM)
+ (unspec:VNx1TI_ONLY
+ [(reg:VNx1TI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:DI 0 "const_int_operand")
+ (match_operand:SI 1 "register_operand" "Ucj")
+ (match_operand:VNx2BI 2 "register_operand" "Upl")
+ (match_operand:SVE_FULL 3 "register_operand" "w")]
+ SME_WRITE))]
+ "TARGET_STREAMING_SME"
+ "mova\tza%0<hv>.q[%w1, 0], %2/m, %3.q"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- Multi-vector moves
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - MOVA
+;; -------------------------------------------------------------------------
+
+(define_insn "@aarch64_sme_<optab><mode><mode>"
+ [(set (match_operand:SVE_FULLx24 0 "aligned_register_operand" "=Uw<vector_count>")
+ (unspec:SVE_FULLx24
+ [(reg:SVE_FULLx24 ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:DI 1 "const_int_operand")
+ (match_operand:SI 2 "register_operand" "Ucj")]
+ SME_READ))]
+ "TARGET_STREAMING_SME2"
+ {
+ operands[3] = GEN_INT (<vector_count> - 1);
+ return "mova\t%0, za%1<hv>.<Vetype>[%w2, 0:%3]";
+ }
+)
+
+(define_insn "*aarch64_sme_<optab><mode><mode>_plus"
+ [(set (match_operand:SVE_FULLx24 0 "aligned_register_operand" "=Uw<vector_count>")
+ (unspec:SVE_FULLx24
+ [(reg:SVE_FULLx24 ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:DI 1 "const_int_operand")
+ (plus:SI
+ (match_operand:SI 2 "register_operand" "Ucj")
+ (match_operand:SI 3 "const_int_operand"))]
+ SME_READ))]
+ "TARGET_STREAMING_SME2
+ && UINTVAL (operands[3]) % <vector_count> == 0
+ && UINTVAL (operands[3]) < 128 / <elem_bits>"
+ {
+ operands[4] = GEN_INT (INTVAL (operands[3]) + <vector_count> - 1);
+ return "mova\t%0, za%1<hv>.<Vetype>[%w2, %3:%4]";
+ }
+)
+
+(define_insn "@aarch64_sme_read<mode>"
+ [(set (match_operand:SVE_DIx24 0 "aligned_register_operand" "=Uw<vector_count>")
+ (unspec:SVE_DIx24
+ [(reg:SVE_DIx24 ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 1 "register_operand" "Uci")]
+ UNSPEC_SME_READ))]
+ "TARGET_STREAMING_SME2"
+ "mova\t%0, za.d[%w1, 0, vgx<vector_count>]"
+)
+
+(define_insn "*aarch64_sme_read<mode>_plus"
+ [(set (match_operand:SVE_DIx24 0 "aligned_register_operand" "=Uw<vector_count>")
+ (unspec:SVE_DIx24
+ [(reg:SVE_DIx24 ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 1 "register_operand" "Uci")
+ (match_operand:SI 2 "const_0_to_7_operand"))]
+ UNSPEC_SME_READ))]
+ "TARGET_STREAMING_SME2"
+ "mova\t%0, za.d[%w1, %2, vgx<vector_count>]"
+)
+
+(define_insn "@aarch64_sme_<optab><mode><mode>"
+ [(set (reg:SVE_FULLx24 ZA_REGNUM)
+ (unspec:SVE_FULLx24
+ [(reg:SVE_FULLx24 ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:DI 0 "const_int_operand")
+ (match_operand:SI 1 "register_operand" "Ucj")
+ (match_operand:SVE_FULLx24 2 "aligned_register_operand" "Uw<vector_count>")]
+ SME_WRITE))]
+ "TARGET_STREAMING_SME2"
+ {
+ operands[3] = GEN_INT (<vector_count> - 1);
+ return "mova\tza%0<hv>.<Vetype>[%w1, 0:%3], %2";
+ }
+)
+
+(define_insn "*aarch64_sme_<optab><mode><mode>_plus"
+ [(set (reg:SVE_FULLx24 ZA_REGNUM)
+ (unspec:SVE_FULLx24
+ [(reg:SVE_FULLx24 ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:DI 0 "const_int_operand")
+ (plus:SI
+ (match_operand:SI 1 "register_operand" "Ucj")
+ (match_operand:SI 2 "const_int_operand"))
+ (match_operand:SVE_FULLx24 3 "aligned_register_operand" "Uw<vector_count>")]
+ SME_WRITE))]
+ "TARGET_STREAMING_SME2
+ && UINTVAL (operands[2]) % <vector_count> == 0
+ && UINTVAL (operands[2]) < 128 / <elem_bits>"
+ {
+ operands[4] = GEN_INT (INTVAL (operands[2]) + <vector_count> - 1);
+ return "mova\tza%0<hv>.<Vetype>[%w1, %2:%4], %3";
+ }
+)
+
+(define_insn "@aarch64_sme_write<mode>"
+ [(set (reg:SVE_DIx24 ZA_REGNUM)
+ (unspec:SVE_DIx24
+ [(reg:SVE_DIx24 ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SVE_DIx24 1 "aligned_register_operand" "Uw<vector_count>")]
+ UNSPEC_SME_READ))]
+ "TARGET_STREAMING_SME2"
+ "mova\tza.d[%w0, 0, vgx<vector_count>], %1"
+)
+
+(define_insn "*aarch64_sme_write<mode>_plus"
+ [(set (reg:SVE_DIx24 ZA_REGNUM)
+ (unspec:SVE_DIx24
+ [(reg:SVE_DIx24 ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_0_to_7_operand"))
+ (match_operand:SVE_DIx24 2 "aligned_register_operand" "Uw<vector_count>")]
+ UNSPEC_SME_READ))]
+ "TARGET_STREAMING_SME2"
+ "mova\tza.d[%w0, %1, vgx<vector_count>], %2"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- Zeroing
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - ZERO
+;; -------------------------------------------------------------------------
+
+(define_c_enum "unspec" [UNSPEC_SME_ZERO])
+
+(define_insn "aarch64_sme_zero_za"
+ [(set (reg:VNx16QI ZA_REGNUM)
+ (unspec:VNx16QI [(reg:VNx16QI ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:DI 0 "const_int_operand")]
+ UNSPEC_SME_ZERO))]
+ "TARGET_SME"
+ {
+ return aarch64_output_sme_zero_za (operands[0]);
+ }
+)
+
+(define_insn "aarch64_sme_zero_zt0"
+ [(set (reg:V8DI ZT0_REGNUM)
+ (const_int 0))
+ (use (reg:DI SME_STATE_REGNUM))]
+ "TARGET_SME2"
+ "zero\t{ zt0 }"
+)
+
+;; =========================================================================
+;; == Binary arithmetic
+;; =========================================================================
+
+;; -------------------------------------------------------------------------
+;; ---- Binary arithmetic on ZA tile
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - ADDHA
+;; - ADDVA
+;; -------------------------------------------------------------------------
+
+(define_insn "@aarch64_sme_<optab><mode>"
+ [(set (reg:SME_ZA_SDI ZA_REGNUM)
+ (unspec:SME_ZA_SDI
+ [(reg:SME_ZA_SDI ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:DI 0 "const_int_operand")
+ (match_operand:<VPRED> 1 "register_operand" "Upl")
+ (match_operand:<VPRED> 2 "register_operand" "Upl")
+ (match_operand:SME_ZA_SDI 3 "register_operand" "w")]
+ SME_BINARY_SDI))]
+ "TARGET_STREAMING_SME"
+ "<optab>\tza%0.<Vetype>, %1/m, %2/m, %3.<Vetype>"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- Binary arithmetic on ZA slice
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - ADD
+;; -------------------------------------------------------------------------
+
+;; ADD to ZA slices: accumulate a group of <vector_count> vectors
+;; (operand 1, an aligned register group, "Uw<vector_count>") into
+;; consecutive ZA slices starting at the index held in operand 0
+;; (a slice-index register, constraint "Uci").
+(define_insn "@aarch64_sme_<optab><mode>"
+ [(set (reg:SME_ZA_SDIx24 ZA_REGNUM)
+ (unspec:SME_ZA_SDIx24
+ [(reg:SME_ZA_SDIx24 ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SME_ZA_SDIx24 1 "aligned_register_operand" "Uw<vector_count>")]
+ SME_BINARY_SLICE_SDI))]
+ "TARGET_STREAMING_SME2"
+ "<optab>\tza.<Vetype>[%w0, 0, vgx<vector_count>], %1"
+)
+
+;; As above with a constant offset 0..7 folded into the slice index;
+;; the offset is printed directly after the base register.
+(define_insn "*aarch64_sme_<optab><mode>_plus"
+ [(set (reg:SME_ZA_SDIx24 ZA_REGNUM)
+ (unspec:SME_ZA_SDIx24
+ [(reg:SME_ZA_SDIx24 ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_0_to_7_operand"))
+ (match_operand:SME_ZA_SDIx24 2 "aligned_register_operand" "Uw<vector_count>")]
+ SME_BINARY_SLICE_SDI))]
+ "TARGET_STREAMING_SME2"
+ "<optab>\tza.<Vetype>[%w0, %1, vgx<vector_count>], %2"
+)
+
+;; Floating-point counterpart of the integer pattern above.
+(define_insn "@aarch64_sme_<optab><mode>"
+ [(set (reg:SME_ZA_SDFx24 ZA_REGNUM)
+ (unspec:SME_ZA_SDFx24
+ [(reg:SME_ZA_SDFx24 ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SME_ZA_SDFx24 1 "aligned_register_operand" "Uw<vector_count>")]
+ SME_BINARY_SLICE_SDF))]
+ "TARGET_STREAMING_SME2"
+ "<optab>\tza.<Vetype>[%w0, 0, vgx<vector_count>], %1"
+)
+
+;; Floating-point counterpart of the integer "_plus" pattern above.
+(define_insn "*aarch64_sme_<optab><mode>_plus"
+ [(set (reg:SME_ZA_SDFx24 ZA_REGNUM)
+ (unspec:SME_ZA_SDFx24
+ [(reg:SME_ZA_SDFx24 ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_0_to_7_operand"))
+ (match_operand:SME_ZA_SDFx24 2 "aligned_register_operand" "Uw<vector_count>")]
+ SME_BINARY_SLICE_SDF))]
+ "TARGET_STREAMING_SME2"
+ "<optab>\tza.<Vetype>[%w0, %1, vgx<vector_count>], %2"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- Binary arithmetic, writing to ZA slice
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - ADD
+;; - SUB
+;; -------------------------------------------------------------------------
+
+;; ADD/SUB writing to ZA slices, with two aligned multi-vector source
+;; groups.
+(define_insn "@aarch64_sme_<optab><mode>"
+ [(set (reg:SME_ZA_SDIx24 ZA_REGNUM)
+ (unspec:SME_ZA_SDIx24
+ [(reg:SME_ZA_SDIx24 ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SME_ZA_SDIx24 1 "aligned_register_operand" "Uw<vector_count>")
+ (match_operand:SME_ZA_SDIx24 2 "aligned_register_operand" "Uw<vector_count>")]
+ SME_BINARY_WRITE_SLICE_SDI))]
+ "TARGET_STREAMING_SME2"
+ "<sme_int_op>\tza.<Vetype>[%w0, 0, vgx<vector_count>], %1, %2"
+)
+
+;; As above with a constant slice offset 0..7.
+(define_insn "*aarch64_sme_<optab><mode>_plus"
+ [(set (reg:SME_ZA_SDIx24 ZA_REGNUM)
+ (unspec:SME_ZA_SDIx24
+ [(reg:SME_ZA_SDIx24 ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_0_to_7_operand"))
+ (match_operand:SME_ZA_SDIx24 2 "aligned_register_operand" "Uw<vector_count>")
+ (match_operand:SME_ZA_SDIx24 3 "aligned_register_operand" "Uw<vector_count>")]
+ SME_BINARY_WRITE_SLICE_SDI))]
+ "TARGET_STREAMING_SME2"
+ "<sme_int_op>\tza.<Vetype>[%w0, %1, vgx<vector_count>], %2, %3"
+)
+
+;; Variant whose second source is one vector broadcast (vec_duplicate)
+;; across the whole group.
+(define_insn "@aarch64_sme_single_<optab><mode>"
+ [(set (reg:SME_ZA_SDIx24 ZA_REGNUM)
+ (unspec:SME_ZA_SDIx24
+ [(reg:SME_ZA_SDIx24 ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SME_ZA_SDIx24 1 "register_operand" "w")
+ (vec_duplicate:SME_ZA_SDIx24
+ (match_operand:<VSINGLE> 2 "register_operand" "x"))]
+ SME_BINARY_WRITE_SLICE_SDI))]
+ "TARGET_STREAMING_SME2"
+ "<sme_int_op>\tza.<Vetype>[%w0, 0, vgx<vector_count>], %1, %2.<Vetype>"
+)
+
+;; Broadcast variant with a constant slice offset 0..7.
+(define_insn "*aarch64_sme_single_<optab><mode>_plus"
+ [(set (reg:SME_ZA_SDIx24 ZA_REGNUM)
+ (unspec:SME_ZA_SDIx24
+ [(reg:SME_ZA_SDIx24 ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_0_to_7_operand"))
+ (match_operand:SME_ZA_SDIx24 2 "register_operand" "w")
+ (vec_duplicate:SME_ZA_SDIx24
+ (match_operand:<VSINGLE> 3 "register_operand" "x"))]
+ SME_BINARY_WRITE_SLICE_SDI))]
+ "TARGET_STREAMING_SME2"
+ "<sme_int_op>\tza.<Vetype>[%w0, %1, vgx<vector_count>], %2, %3.<Vetype>"
+)
+
+;; =========================================================================
+;; == Ternary arithmetic
+;; =========================================================================
+
+;; -------------------------------------------------------------------------
+;; ---- [INT] Dot product
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - SDOT
+;; - SUDOT
+;; - UDOT
+;; - USDOT
+;; -------------------------------------------------------------------------
+
+;; Integer dot products into 32- or 64-bit ZA slices from groups of 8-
+;; or 16-bit sources.  The insn condition limits the pairing to the
+;; architecturally valid combinations: 64-bit ZA only from 16-bit
+;; sources, and 16-bit sources only where <has_16bit_form> holds.
+(define_insn "@aarch64_sme_<optab><SME_ZA_SDI:mode><SME_ZA_BHIx24:mode>"
+ [(set (reg:SME_ZA_SDI ZA_REGNUM)
+ (unspec:SME_ZA_SDI
+ [(reg:SME_ZA_SDI ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SME_ZA_BHIx24 1 "aligned_register_operand" "Uw<vector_count>")
+ (match_operand:SME_ZA_BHIx24 2 "aligned_register_operand" "Uw<vector_count>")]
+ SME_INT_DOTPROD))]
+ "TARGET_STREAMING_SME2
+ && (<SME_ZA_SDI:elem_bits> == 32 || <SME_ZA_BHIx24:elem_bits> == 16)
+ && (<SME_ZA_BHIx24:elem_bits> == 8 || <has_16bit_form>)"
+ "<optab>\tza.<SME_ZA_SDI:Vetype>[%w0, 0, vgx<vector_count>], %1, %2"
+)
+
+;; As above with a constant slice offset 0..7.
+(define_insn "*aarch64_sme_<optab><SME_ZA_SDI:mode><SME_ZA_BHIx24:mode>_plus"
+ [(set (reg:SME_ZA_SDI ZA_REGNUM)
+ (unspec:SME_ZA_SDI
+ [(reg:SME_ZA_SDI ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_0_to_7_operand"))
+ (match_operand:SME_ZA_BHIx24 2 "aligned_register_operand" "Uw<vector_count>")
+ (match_operand:SME_ZA_BHIx24 3 "aligned_register_operand" "Uw<vector_count>")]
+ SME_INT_DOTPROD))]
+ "TARGET_STREAMING_SME2
+ && (<SME_ZA_SDI:elem_bits> == 32 || <SME_ZA_BHIx24:elem_bits> == 16)
+ && (<SME_ZA_BHIx24:elem_bits> == 8 || <has_16bit_form>)"
+ "<optab>\tza.<SME_ZA_SDI:Vetype>[%w0, %1, vgx<vector_count>], %2, %3"
+)
+
+;; Variant whose second source is a single broadcast vector.
+(define_insn "@aarch64_sme_single_<optab><SME_ZA_SDI:mode><SME_ZA_BHIx24:mode>"
+ [(set (reg:SME_ZA_SDI ZA_REGNUM)
+ (unspec:SME_ZA_SDI
+ [(reg:SME_ZA_SDI ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SME_ZA_BHIx24 1 "register_operand" "w")
+ (vec_duplicate:SME_ZA_BHIx24
+ (match_operand:<VSINGLE> 2 "register_operand" "x"))]
+ SME_INT_DOTPROD))]
+ "TARGET_STREAMING_SME2
+ && (<SME_ZA_SDI:elem_bits> == 32 || <SME_ZA_BHIx24:elem_bits> == 16)
+ && (<SME_ZA_BHIx24:elem_bits> == 8 || <has_16bit_form>)"
+ "<optab>\tza.<SME_ZA_SDI:Vetype>[%w0, 0, vgx<vector_count>], %1, %2.<SME_ZA_BHIx24:Vetype>"
+)
+
+;; Broadcast variant with a constant slice offset 0..7.
+(define_insn "*aarch64_sme_single_<optab><SME_ZA_SDI:mode><SME_ZA_BHIx24:mode>_plus"
+ [(set (reg:SME_ZA_SDI ZA_REGNUM)
+ (unspec:SME_ZA_SDI
+ [(reg:SME_ZA_SDI ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_0_to_7_operand"))
+ (match_operand:SME_ZA_BHIx24 2 "register_operand" "w")
+ (vec_duplicate:SME_ZA_BHIx24
+ (match_operand:<VSINGLE> 3 "register_operand" "x"))]
+ SME_INT_DOTPROD))]
+ "TARGET_STREAMING_SME2
+ && (<SME_ZA_SDI:elem_bits> == 32 || <SME_ZA_BHIx24:elem_bits> == 16)
+ && (<SME_ZA_BHIx24:elem_bits> == 8 || <has_16bit_form>)"
+ "<optab>\tza.<SME_ZA_SDI:Vetype>[%w0, %1, vgx<vector_count>], %2, %3.<SME_ZA_BHIx24:Vetype>"
+)
+
+;; SUDOT is USDOT with the operands swapped.
+;; Hence the vec_duplicate source (operand 2) precedes operand 1 inside
+;; the unspec below, while the printed operand order is unchanged.
+(define_insn "@aarch64_sme_single_sudot<VNx4SI_ONLY:mode><SME_ZA_BIx24:mode>"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (vec_duplicate:SME_ZA_BIx24
+ (match_operand:<VSINGLE> 2 "register_operand" "x"))
+ (match_operand:SME_ZA_BIx24 1 "register_operand" "w")]
+ UNSPEC_SME_USDOT))]
+ "TARGET_STREAMING_SME2"
+ "sudot\tza.s[%w0, 0, vgx<vector_count>], %1, %2.b"
+)
+
+;; SUDOT (broadcast) with a constant slice offset 0..7.
+(define_insn "*aarch64_sme_single_sudot<VNx4SI_ONLY:mode><SME_ZA_BIx24:mode>_plus"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_0_to_7_operand"))
+ (vec_duplicate:SME_ZA_BIx24
+ (match_operand:<VSINGLE> 3 "register_operand" "x"))
+ (match_operand:SME_ZA_BIx24 2 "register_operand" "w")]
+ UNSPEC_SME_USDOT))]
+ "TARGET_STREAMING_SME2"
+ "sudot\tza.s[%w0, %1, vgx<vector_count>], %2, %3.b"
+)
+
+;; Lane form: the second source is lane %3 of a single vector, selected
+;; via UNSPEC_SVE_LANE_SELECT.
+(define_insn "@aarch64_sme_lane_<optab><SME_ZA_SDI:mode><SME_ZA_BHIx24:mode>"
+ [(set (reg:SME_ZA_SDI ZA_REGNUM)
+ (unspec:SME_ZA_SDI
+ [(reg:SME_ZA_SDI ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SME_ZA_BHIx24 1 "aligned_register_operand" "Uw<vector_count>")
+ (unspec:SME_ZA_BHIx24
+ [(match_operand:<VSINGLE> 2 "register_operand" "x")
+ (match_operand:SI 3 "const_int_operand")]
+ UNSPEC_SVE_LANE_SELECT)]
+ SME_INT_DOTPROD_LANE))]
+ "TARGET_STREAMING_SME2
+ && (<SME_ZA_SDI:elem_bits> == 32 || <SME_ZA_BHIx24:elem_bits> == 16)
+ && (<SME_ZA_BHIx24:elem_bits> == 8 || <has_16bit_form>)"
+ "<optab>\tza.<SME_ZA_SDI:Vetype>[%w0, 0, vgx<vector_count>], %1, %2.<SME_ZA_BHIx24:Vetype>[%3]"
+)
+
+;; Lane form with a constant slice offset 0..7.
+(define_insn "*aarch64_sme_lane_<optab><SME_ZA_SDI:mode><SME_ZA_BHIx24:mode>_plus"
+ [(set (reg:SME_ZA_SDI ZA_REGNUM)
+ (unspec:SME_ZA_SDI
+ [(reg:SME_ZA_SDI ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_0_to_7_operand"))
+ (match_operand:SME_ZA_BHIx24 2 "aligned_register_operand" "Uw<vector_count>")
+ (unspec:SME_ZA_BHIx24
+ [(match_operand:<VSINGLE> 3 "register_operand" "x")
+ (match_operand:SI 4 "const_int_operand")]
+ UNSPEC_SVE_LANE_SELECT)]
+ SME_INT_DOTPROD_LANE))]
+ "TARGET_STREAMING_SME2
+ && (<SME_ZA_SDI:elem_bits> == 32 || <SME_ZA_BHIx24:elem_bits> == 16)
+ && (<SME_ZA_BHIx24:elem_bits> == 8 || <has_16bit_form>)"
+ "<optab>\tza.<SME_ZA_SDI:Vetype>[%w0, %1, vgx<vector_count>], %2, %3.<SME_ZA_BHIx24:Vetype>[%4]"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- [INT] Ternary widening arithmetic on ZA slice
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - SMLA
+;; - SMLS
+;; - UMLA
+;; - UMLS
+;; -------------------------------------------------------------------------
+
+;; Widening multiply-accumulate of a pair of 8-/16-bit vectors into a
+;; range of 32-bit ZA slices; <za32_last_offset> is the last slice of
+;; the printed range "0:<za32_last_offset>".
+(define_insn "@aarch64_sme_<optab><VNx4SI_ONLY:mode><SVE_FULL_BHI:mode>"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SVE_FULL_BHI 1 "register_operand" "w")
+ (match_operand:SVE_FULL_BHI 2 "register_operand" "x")]
+ SME_INT_TERNARY_SLICE))]
+ "TARGET_STREAMING_SME2"
+ "<optab><za32_long>\tza.s[%w0, 0:<za32_last_offset>], %1.<SVE_FULL_BHI:Vetype>, %2.<SVE_FULL_BHI:Vetype>"
+)
+
+;; As above with a constant start offset; operands[4] is computed in
+;; the output statement as the matching end of the slice range.
+(define_insn "*aarch64_sme_<optab><VNx4SI_ONLY:mode><SVE_FULL_BHI:mode>_plus"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_<za32_offset_range>_operand"))
+ (match_operand:SVE_FULL_BHI 2 "register_operand" "w")
+ (match_operand:SVE_FULL_BHI 3 "register_operand" "x")]
+ SME_INT_TERNARY_SLICE))]
+ "TARGET_STREAMING_SME2"
+ {
+ operands[4] = GEN_INT (INTVAL (operands[1]) + <za32_last_offset>);
+ return "<optab><za32_long>\tza.s[%w0, %1:%4], %2.<SVE_FULL_BHI:Vetype>, %3.<SVE_FULL_BHI:Vetype>";
+ }
+)
+
+;; Multi-vector form with two aligned register groups.
+(define_insn "@aarch64_sme_<optab><VNx4SI_ONLY:mode><SME_ZA_BHIx24:mode>"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SME_ZA_BHIx24 1 "aligned_register_operand" "Uw<vector_count>")
+ (match_operand:SME_ZA_BHIx24 2 "aligned_register_operand" "Uw<vector_count>")]
+ SME_INT_TERNARY_SLICE))]
+ "TARGET_STREAMING_SME2"
+ "<optab><za32_long>\tza.s[%w0, 0:<za32_last_offset>, vgx<vector_count>], %1, %2"
+)
+
+;; Multi-vector form with a constant start offset.
+(define_insn "*aarch64_sme_<optab><VNx4SI_ONLY:mode><SME_ZA_BHIx24:mode>_plus"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_<za32_offset_range>_operand"))
+ (match_operand:SME_ZA_BHIx24 2 "aligned_register_operand" "Uw<vector_count>")
+ (match_operand:SME_ZA_BHIx24 3 "aligned_register_operand" "Uw<vector_count>")]
+ SME_INT_TERNARY_SLICE))]
+ "TARGET_STREAMING_SME2"
+ {
+ operands[4] = GEN_INT (INTVAL (operands[1]) + <za32_last_offset>);
+ return "<optab><za32_long>\tza.s[%w0, %1:%4, vgx<vector_count>], %2, %3";
+ }
+)
+
+;; Multi-vector form whose second source is a single broadcast vector.
+(define_insn "@aarch64_sme_single_<optab><VNx4SI_ONLY:mode><SME_ZA_BHIx24:mode>"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SME_ZA_BHIx24 1 "register_operand" "w")
+ (vec_duplicate:SME_ZA_BHIx24
+ (match_operand:<SME_ZA_BHIx24:VSINGLE> 2 "register_operand" "x"))]
+ SME_INT_TERNARY_SLICE))]
+ "TARGET_STREAMING_SME2"
+ "<optab><za32_long>\tza.s[%w0, 0:<za32_last_offset>, vgx<vector_count>], %1, %2.<SME_ZA_BHIx24:Vetype>"
+)
+
+;; Broadcast form with a constant start offset.
+(define_insn "*aarch64_sme_single_<optab><VNx4SI_ONLY:mode><SME_ZA_BHIx24:mode>_plus"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_<za32_offset_range>_operand"))
+ (match_operand:SME_ZA_BHIx24 2 "register_operand" "w")
+ (vec_duplicate:SME_ZA_BHIx24
+ (match_operand:<SME_ZA_BHIx24:VSINGLE> 3 "register_operand" "x"))]
+ SME_INT_TERNARY_SLICE))]
+ "TARGET_STREAMING_SME2"
+ {
+ operands[4] = GEN_INT (INTVAL (operands[1]) + <za32_last_offset>);
+ return "<optab><za32_long>\tza.s[%w0, %1:%4, vgx<vector_count>], %2, %3.<SME_ZA_BHIx24:Vetype>";
+ }
+)
+
+;; Lane form; SME_ZA_BHIx124 also allows a single-vector first source,
+;; hence the mode-dependent <aligned_operand>/<aligned_fpr> constraints
+;; and the <vg_modifier>/<z_suffix> pieces of the output template.
+(define_insn "@aarch64_sme_lane_<optab><VNx4SI_ONLY:mode><SME_ZA_BHIx124:mode>"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SME_ZA_BHIx124 1 "<aligned_operand>" "<aligned_fpr>")
+ (unspec:SME_ZA_BHIx124
+ [(match_operand:<VSINGLE> 2 "register_operand" "x")
+ (match_operand:SI 3 "const_int_operand")]
+ UNSPEC_SVE_LANE_SELECT)]
+ SME_INT_TERNARY_SLICE))]
+ "TARGET_STREAMING_SME2"
+ "<optab><za32_long>\tza.s[%w0, 0:<za32_last_offset><vg_modifier>], %1<z_suffix>, %2.<SME_ZA_BHIx124:Vetype>[%3]"
+)
+
+;; Lane form with a constant start offset.  NOTE(review): the name has
+;; no "_plus" suffix, unlike the sibling patterns above; harmless for a
+;; "*"-prefixed pattern (no gen function is created) but inconsistent.
+(define_insn "*aarch64_sme_lane_<optab><VNx4SI_ONLY:mode><SME_ZA_BHIx124:mode>"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_<za32_offset_range>_operand"))
+ (match_operand:SME_ZA_BHIx124 2 "<aligned_operand>" "<aligned_fpr>")
+ (unspec:SME_ZA_BHIx124
+ [(match_operand:<VSINGLE> 3 "register_operand" "x")
+ (match_operand:SI 4 "const_int_operand")]
+ UNSPEC_SVE_LANE_SELECT)]
+ SME_INT_TERNARY_SLICE))]
+ "TARGET_STREAMING_SME2"
+ {
+ operands[5] = GEN_INT (INTVAL (operands[1]) + <za32_last_offset>);
+ return "<optab><za32_long>\tza.s[%w0, %1:%5<vg_modifier>], %2<z_suffix>, %3.<SME_ZA_BHIx124:Vetype>[%4]";
+ }
+)
+
+;; 64-bit widening multiply-accumulate (requires FEAT_SME_I16I64):
+;; 16-bit sources into four consecutive 64-bit ZA slices ("0:3").
+(define_insn "@aarch64_sme_<optab><VNx2DI_ONLY:mode><VNx8HI_ONLY:mode>"
+ [(set (reg:VNx2DI_ONLY ZA_REGNUM)
+ (unspec:VNx2DI_ONLY
+ [(reg:VNx2DI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:VNx8HI_ONLY 1 "register_operand" "w")
+ (match_operand:VNx8HI_ONLY 2 "register_operand" "x")]
+ SME_INT_TERNARY_SLICE))]
+ "TARGET_SME2 && TARGET_SME_I16I64 && TARGET_STREAMING_SME"
+ "<optab>ll\tza.d[%w0, 0:3], %1.h, %2.h"
+)
+
+;; As above with a constant start offset; operands[4] = %1 + 3 closes
+;; the printed slice range.
+(define_insn "*aarch64_sme_<optab><VNx2DI_ONLY:mode><VNx8HI_ONLY:mode>_plus"
+ [(set (reg:VNx2DI_ONLY ZA_REGNUM)
+ (unspec:VNx2DI_ONLY
+ [(reg:VNx2DI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_<za64_offset_range>_operand"))
+ (match_operand:VNx8HI_ONLY 2 "register_operand" "w")
+ (match_operand:VNx8HI_ONLY 3 "register_operand" "x")]
+ SME_INT_TERNARY_SLICE))]
+ "TARGET_SME2 && TARGET_SME_I16I64 && TARGET_STREAMING_SME"
+ {
+ operands[4] = GEN_INT (INTVAL (operands[1]) + 3);
+ return "<optab>ll\tza.d[%w0, %1:%4], %2.h, %3.h";
+ }
+)
+
+;; Multi-vector form with two aligned register groups.
+(define_insn "@aarch64_sme_<optab><VNx2DI_ONLY:mode><SME_ZA_HIx24:mode>"
+ [(set (reg:VNx2DI_ONLY ZA_REGNUM)
+ (unspec:VNx2DI_ONLY
+ [(reg:VNx2DI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SME_ZA_HIx24 1 "aligned_register_operand" "Uw<vector_count>")
+ (match_operand:SME_ZA_HIx24 2 "aligned_register_operand" "Uw<vector_count>")]
+ SME_INT_TERNARY_SLICE))]
+ "TARGET_SME2 && TARGET_SME_I16I64 && TARGET_STREAMING_SME"
+ "<optab>ll\tza.d[%w0, 0:3, vgx<vector_count>], %1, %2"
+)
+
+;; Multi-vector form with a constant start offset.
+(define_insn "*aarch64_sme_<optab><VNx2DI_ONLY:mode><SME_ZA_HIx24:mode>_plus"
+ [(set (reg:VNx2DI_ONLY ZA_REGNUM)
+ (unspec:VNx2DI_ONLY
+ [(reg:VNx2DI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_<za64_offset_range>_operand"))
+ (match_operand:SME_ZA_HIx24 2 "aligned_register_operand" "Uw<vector_count>")
+ (match_operand:SME_ZA_HIx24 3 "aligned_register_operand" "Uw<vector_count>")]
+ SME_INT_TERNARY_SLICE))]
+ "TARGET_SME2 && TARGET_SME_I16I64 && TARGET_STREAMING_SME"
+ {
+ operands[4] = GEN_INT (INTVAL (operands[1]) + 3);
+ return "<optab>ll\tza.d[%w0, %1:%4, vgx<vector_count>], %2, %3";
+ }
+)
+
+;; Multi-vector form whose second source is a single broadcast vector.
+(define_insn "@aarch64_sme_single_<optab><VNx2DI_ONLY:mode><SME_ZA_HIx24:mode>"
+ [(set (reg:VNx2DI_ONLY ZA_REGNUM)
+ (unspec:VNx2DI_ONLY
+ [(reg:VNx2DI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SME_ZA_HIx24 1 "register_operand" "w")
+ (vec_duplicate:SME_ZA_HIx24
+ (match_operand:<SME_ZA_HIx24:VSINGLE> 2 "register_operand" "x"))]
+ SME_INT_TERNARY_SLICE))]
+ "TARGET_SME2 && TARGET_SME_I16I64 && TARGET_STREAMING_SME"
+ "<optab>ll\tza.d[%w0, 0:3, vgx<vector_count>], %1, %2.h"
+)
+
+;; Broadcast form with a constant start offset.
+(define_insn "*aarch64_sme_single_<optab><VNx2DI_ONLY:mode><SME_ZA_HIx24:mode>_plus"
+ [(set (reg:VNx2DI_ONLY ZA_REGNUM)
+ (unspec:VNx2DI_ONLY
+ [(reg:VNx2DI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_<za64_offset_range>_operand"))
+ (match_operand:SME_ZA_HIx24 2 "register_operand" "w")
+ (vec_duplicate:SME_ZA_HIx24
+ (match_operand:<SME_ZA_HIx24:VSINGLE> 3 "register_operand" "x"))]
+ SME_INT_TERNARY_SLICE))]
+ "TARGET_SME2 && TARGET_SME_I16I64 && TARGET_STREAMING_SME"
+ {
+ operands[4] = GEN_INT (INTVAL (operands[1]) + 3);
+ return "<optab>ll\tza.d[%w0, %1:%4, vgx<vector_count>], %2, %3.h";
+ }
+)
+
+;; Lane form (single, pair or quad first source, per SME_ZA_HIx124).
+(define_insn "@aarch64_sme_lane_<optab><VNx2DI_ONLY:mode><SME_ZA_HIx124:mode>"
+ [(set (reg:VNx2DI_ONLY ZA_REGNUM)
+ (unspec:VNx2DI_ONLY
+ [(reg:VNx2DI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SME_ZA_HIx124 1 "<aligned_operand>" "<aligned_fpr>")
+ (unspec:SME_ZA_HIx124
+ [(match_operand:<VSINGLE> 2 "register_operand" "x")
+ (match_operand:SI 3 "const_int_operand")]
+ UNSPEC_SVE_LANE_SELECT)]
+ SME_INT_TERNARY_SLICE))]
+ "TARGET_SME2 && TARGET_SME_I16I64 && TARGET_STREAMING_SME"
+ "<optab>ll\tza.d[%w0, 0:3<vg_modifier>], %1<z_suffix>, %2.h[%3]"
+)
+
+;; Lane form with a constant start offset.  NOTE(review): missing
+;; "_plus" name suffix, as in the corresponding 32-bit pattern above;
+;; harmless for a "*"-prefixed pattern but inconsistent.
+(define_insn "*aarch64_sme_lane_<optab><VNx2DI_ONLY:mode><SME_ZA_HIx124:mode>"
+ [(set (reg:VNx2DI_ONLY ZA_REGNUM)
+ (unspec:VNx2DI_ONLY
+ [(reg:VNx2DI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_<za64_offset_range>_operand"))
+ (match_operand:SME_ZA_HIx124 2 "<aligned_operand>" "<aligned_fpr>")
+ (unspec:SME_ZA_HIx124
+ [(match_operand:<VSINGLE> 3 "register_operand" "x")
+ (match_operand:SI 4 "const_int_operand")]
+ UNSPEC_SVE_LANE_SELECT)]
+ SME_INT_TERNARY_SLICE))]
+ "TARGET_SME2 && TARGET_SME_I16I64 && TARGET_STREAMING_SME"
+ {
+ operands[5] = GEN_INT (INTVAL (operands[1]) + 3);
+ return "<optab>ll\tza.d[%w0, %1:%5<vg_modifier>], %2<z_suffix>, %3.h[%4]";
+ }
+)
+
+;; -------------------------------------------------------------------------
+;; ---- [INT] Sum of outer products
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - BMOPA
+;; - BMOPS
+;; - SMOPA
+;; - SMOPS
+;; - SUMOPA
+;; - SUMOPS
+;; - UMOPA
+;; - UMOPS
+;; - USMOPA
+;; - USMOPS
+;; -------------------------------------------------------------------------
+
+;; 32-bit integer sums of outer products of 8-bit sources.  Operand 0 is
+;; the ZA tile number (a constant); %1 and %2 are the merging predicates.
+(define_insn "@aarch64_sme_<optab><VNx4SI_ONLY:mode><VNx16QI_ONLY:mode>"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:DI 0 "const_int_operand")
+ (match_operand:<VNx4SI_ONLY:VPRED> 1 "register_operand" "Upl")
+ (match_operand:<VNx4SI_ONLY:VPRED> 2 "register_operand" "Upl")
+ (match_operand:VNx16QI_ONLY 3 "register_operand" "w")
+ (match_operand:VNx16QI_ONLY 4 "register_operand" "w")]
+ SME_INT_MOP))]
+ "TARGET_STREAMING_SME"
+ "<optab>\tza%0.s, %1/m, %2/m, %3.b, %4.b"
+)
+
+;; 64-bit variant from 16-bit sources (requires FEAT_SME_I16I64).
+(define_insn "@aarch64_sme_<optab><VNx2DI_ONLY:mode><VNx8HI_ONLY:mode>"
+ [(set (reg:VNx2DI_ONLY ZA_REGNUM)
+ (unspec:VNx2DI_ONLY
+ [(reg:VNx2DI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:DI 0 "const_int_operand")
+ (match_operand:<VNx2DI_ONLY:VPRED> 1 "register_operand" "Upl")
+ (match_operand:<VNx2DI_ONLY:VPRED> 2 "register_operand" "Upl")
+ (match_operand:VNx8HI_ONLY 3 "register_operand" "w")
+ (match_operand:VNx8HI_ONLY 4 "register_operand" "w")]
+ SME_INT_MOP))]
+ "TARGET_STREAMING_SME && TARGET_SME_I16I64"
+ "<optab>\tza%0.d, %1/m, %2/m, %3.h, %4.h"
+)
+
+;; SME2 32-bit variant from 16-bit sources (SME2_INT_MOP).
+(define_insn "@aarch64_sme_<optab><VNx4SI_ONLY:mode><VNx8HI_ONLY:mode>"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:DI 0 "const_int_operand")
+ (match_operand:<VNx4SI_ONLY:VPRED> 1 "register_operand" "Upl")
+ (match_operand:<VNx4SI_ONLY:VPRED> 2 "register_operand" "Upl")
+ (match_operand:VNx8HI_ONLY 3 "register_operand" "w")
+ (match_operand:VNx8HI_ONLY 4 "register_operand" "w")]
+ SME2_INT_MOP))]
+ "TARGET_STREAMING_SME2"
+ "<optab>\tza%0.s, %1/m, %2/m, %3.h, %4.h"
+)
+
+;; BMOPA/BMOPS (SME2_BMOP) on 32-bit elements.
+(define_insn "@aarch64_sme_<optab><VNx4SI_ONLY:mode><VNx4SI_ONLY:mode>"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:DI 0 "const_int_operand")
+ (match_operand:<VNx4SI_ONLY:VPRED> 1 "register_operand" "Upl")
+ (match_operand:<VNx4SI_ONLY:VPRED> 2 "register_operand" "Upl")
+ (match_operand:VNx4SI_ONLY 3 "register_operand" "w")
+ (match_operand:VNx4SI_ONLY 4 "register_operand" "w")]
+ SME2_BMOP))]
+ "TARGET_STREAMING_SME2"
+ "<optab>\tza%0.s, %1/m, %2/m, %3.s, %4.s"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- [FP] Dot product
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - BFDOT
+;; - FDOT
+;; -------------------------------------------------------------------------
+
+;; BFDOT/FDOT: half-precision dot products accumulated into 32-bit ZA
+;; slices, from two aligned multi-vector source groups.
+(define_insn "@aarch64_sme_<optab><VNx4SI_ONLY:mode><SME_ZA_HFx24:mode>"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SME_ZA_HFx24 1 "aligned_register_operand" "Uw<vector_count>")
+ (match_operand:SME_ZA_HFx24 2 "aligned_register_operand" "Uw<vector_count>")]
+ SME_FP_DOTPROD))]
+ "TARGET_STREAMING_SME2"
+ "<b><optab>\tza.s[%w0, 0, vgx<vector_count>], %1, %2"
+)
+
+;; As above with a constant slice offset 0..7.
+(define_insn "*aarch64_sme_<optab><VNx4SI_ONLY:mode><SME_ZA_HFx24:mode>_plus"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_0_to_7_operand"))
+ (match_operand:SME_ZA_HFx24 2 "aligned_register_operand" "Uw<vector_count>")
+ (match_operand:SME_ZA_HFx24 3 "aligned_register_operand" "Uw<vector_count>")]
+ SME_FP_DOTPROD))]
+ "TARGET_STREAMING_SME2"
+ "<b><optab>\tza.s[%w0, %1, vgx<vector_count>], %2, %3"
+)
+
+;; Variant whose second source is a single broadcast vector.
+(define_insn "@aarch64_sme_single_<optab><VNx4SI_ONLY:mode><SME_ZA_HFx24:mode>"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SME_ZA_HFx24 1 "register_operand" "w")
+ (vec_duplicate:SME_ZA_HFx24
+ (match_operand:<VSINGLE> 2 "register_operand" "x"))]
+ SME_FP_DOTPROD))]
+ "TARGET_STREAMING_SME2"
+ "<b><optab>\tza.s[%w0, 0, vgx<vector_count>], %1, %2.h"
+)
+
+;; Broadcast variant with a constant slice offset 0..7.
+(define_insn "*aarch64_sme_single_<optab><VNx4SI_ONLY:mode><SME_ZA_HFx24:mode>_plus"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_0_to_7_operand"))
+ (match_operand:SME_ZA_HFx24 2 "register_operand" "w")
+ (vec_duplicate:SME_ZA_HFx24
+ (match_operand:<VSINGLE> 3 "register_operand" "x"))]
+ SME_FP_DOTPROD))]
+ "TARGET_STREAMING_SME2"
+ "<b><optab>\tza.s[%w0, %1, vgx<vector_count>], %2, %3.h"
+)
+
+;; Lane form: the second source is lane %3 of a single vector, selected
+;; via UNSPEC_SVE_LANE_SELECT.
+(define_insn "@aarch64_sme_lane_<optab><VNx4SI_ONLY:mode><SME_ZA_HFx24:mode>"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SME_ZA_HFx24 1 "aligned_register_operand" "Uw<vector_count>")
+ (unspec:SME_ZA_HFx24
+ [(match_operand:<VSINGLE> 2 "register_operand" "x")
+ (match_operand:SI 3 "const_int_operand")]
+ UNSPEC_SVE_LANE_SELECT)]
+ SME_FP_DOTPROD_LANE))]
+ "TARGET_STREAMING_SME2"
+ "<b><optab>\tza.s[%w0, 0, vgx<vector_count>], %1, %2.h[%3]"
+)
+
+;; Lane form with a constant slice offset 0..7.
+(define_insn "*aarch64_sme_lane_<optab><VNx4SI_ONLY:mode><SME_ZA_HFx24:mode>_plus"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_0_to_7_operand"))
+ (match_operand:SME_ZA_HFx24 2 "aligned_register_operand" "Uw<vector_count>")
+ (unspec:SME_ZA_HFx24
+ [(match_operand:<VSINGLE> 3 "register_operand" "x")
+ (match_operand:SI 4 "const_int_operand")]
+ UNSPEC_SVE_LANE_SELECT)]
+ SME_FP_DOTPROD_LANE))]
+ "TARGET_STREAMING_SME2"
+ "<b><optab>\tza.s[%w0, %1, vgx<vector_count>], %2, %3.h[%4]"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- [FP] Ternary arithmetic on ZA slice
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - FMLA
+;; - FMLS
+;; -------------------------------------------------------------------------
+
+;; FMLA/FMLS on ZA slices.  The SME_ZA_SDF_I x SME_ZA_SDFx24 pairings
+;; are restricted by the insn condition to matching element sizes.
+(define_insn "@aarch64_sme_<optab><SME_ZA_SDF_I:mode><SME_ZA_SDFx24:mode>"
+ [(set (reg:SME_ZA_SDF_I ZA_REGNUM)
+ (unspec:SME_ZA_SDF_I
+ [(reg:SME_ZA_SDF_I ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SME_ZA_SDFx24 1 "aligned_register_operand" "Uw<vector_count>")
+ (match_operand:SME_ZA_SDFx24 2 "aligned_register_operand" "Uw<vector_count>")]
+ SME_FP_TERNARY_SLICE))]
+ "TARGET_SME2
+ && TARGET_STREAMING_SME
+ && <SME_ZA_SDF_I:elem_bits> == <SME_ZA_SDFx24:elem_bits>"
+ "<optab>\tza.<SME_ZA_SDF_I:Vetype>[%w0, 0, vgx<vector_count>], %1, %2"
+)
+
+;; As above with a constant slice offset 0..7.
+(define_insn "*aarch64_sme_<optab><SME_ZA_SDF_I:mode><SME_ZA_SDFx24:mode>_plus"
+ [(set (reg:SME_ZA_SDF_I ZA_REGNUM)
+ (unspec:SME_ZA_SDF_I
+ [(reg:SME_ZA_SDF_I ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_0_to_7_operand"))
+ (match_operand:SME_ZA_SDFx24 2 "aligned_register_operand" "Uw<vector_count>")
+ (match_operand:SME_ZA_SDFx24 3 "aligned_register_operand" "Uw<vector_count>")]
+ SME_FP_TERNARY_SLICE))]
+ "TARGET_SME2
+ && TARGET_STREAMING_SME
+ && <SME_ZA_SDF_I:elem_bits> == <SME_ZA_SDFx24:elem_bits>"
+ "<optab>\tza.<SME_ZA_SDF_I:Vetype>[%w0, %1, vgx<vector_count>], %2, %3"
+)
+
+;; Variant whose second source is a single broadcast vector.
+(define_insn "@aarch64_sme_single_<optab><SME_ZA_SDF_I:mode><SME_ZA_SDFx24:mode>"
+ [(set (reg:SME_ZA_SDF_I ZA_REGNUM)
+ (unspec:SME_ZA_SDF_I
+ [(reg:SME_ZA_SDF_I ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SME_ZA_SDFx24 1 "register_operand" "w")
+ (vec_duplicate:SME_ZA_SDFx24
+ (match_operand:<VSINGLE> 2 "register_operand" "x"))]
+ SME_FP_TERNARY_SLICE))]
+ "TARGET_SME2
+ && TARGET_STREAMING_SME
+ && <SME_ZA_SDF_I:elem_bits> == <SME_ZA_SDFx24:elem_bits>"
+ "<optab>\tza.<SME_ZA_SDF_I:Vetype>[%w0, 0, vgx<vector_count>], %1, %2.<SME_ZA_SDFx24:Vetype>"
+)
+
+;; Broadcast variant with a constant slice offset 0..7.
+(define_insn "*aarch64_sme_single_<optab><SME_ZA_SDF_I:mode><SME_ZA_SDFx24:mode>_plus"
+ [(set (reg:SME_ZA_SDF_I ZA_REGNUM)
+ (unspec:SME_ZA_SDF_I
+ [(reg:SME_ZA_SDF_I ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_0_to_7_operand"))
+ (match_operand:SME_ZA_SDFx24 2 "register_operand" "w")
+ (vec_duplicate:SME_ZA_SDFx24
+ (match_operand:<VSINGLE> 3 "register_operand" "x"))]
+ SME_FP_TERNARY_SLICE))]
+ "TARGET_SME2
+ && TARGET_STREAMING_SME
+ && <SME_ZA_SDF_I:elem_bits> == <SME_ZA_SDFx24:elem_bits>"
+ "<optab>\tza.<SME_ZA_SDF_I:Vetype>[%w0, %1, vgx<vector_count>], %2, %3.<SME_ZA_SDFx24:Vetype>"
+)
+
+;; Lane form: the second source is lane %3 of a single vector.
+(define_insn "@aarch64_sme_lane_<optab><SME_ZA_SDF_I:mode><SME_ZA_SDFx24:mode>"
+ [(set (reg:SME_ZA_SDF_I ZA_REGNUM)
+ (unspec:SME_ZA_SDF_I
+ [(reg:SME_ZA_SDF_I ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SME_ZA_SDFx24 1 "aligned_register_operand" "Uw<vector_count>")
+ (unspec:SME_ZA_SDFx24
+ [(match_operand:<VSINGLE> 2 "register_operand" "x")
+ (match_operand:SI 3 "const_int_operand")]
+ UNSPEC_SVE_LANE_SELECT)]
+ SME_FP_TERNARY_SLICE))]
+ "TARGET_SME2
+ && TARGET_STREAMING_SME
+ && <SME_ZA_SDF_I:elem_bits> == <SME_ZA_SDFx24:elem_bits>"
+ "<optab>\tza.<SME_ZA_SDF_I:Vetype>[%w0, 0, vgx<vector_count>], %1, %2.<SME_ZA_SDFx24:Vetype>[%3]"
+)
+
+;; Lane form with a constant slice offset 0..7.  NOTE(review): the name
+;; has no "_plus" suffix, unlike the sibling patterns above; harmless
+;; for a "*"-prefixed pattern (no gen function is created) but
+;; inconsistent.
+(define_insn "*aarch64_sme_lane_<optab><SME_ZA_SDF_I:mode><SME_ZA_SDFx24:mode>"
+ [(set (reg:SME_ZA_SDF_I ZA_REGNUM)
+ (unspec:SME_ZA_SDF_I
+ [(reg:SME_ZA_SDF_I ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_0_to_7_operand"))
+ (match_operand:SME_ZA_SDFx24 2 "aligned_register_operand" "Uw<vector_count>")
+ (unspec:SME_ZA_SDFx24
+ [(match_operand:<VSINGLE> 3 "register_operand" "x")
+ (match_operand:SI 4 "const_int_operand")]
+ UNSPEC_SVE_LANE_SELECT)]
+ SME_FP_TERNARY_SLICE))]
+ "TARGET_SME2
+ && TARGET_STREAMING_SME
+ && <SME_ZA_SDF_I:elem_bits> == <SME_ZA_SDFx24:elem_bits>"
+ "<optab>\tza.<SME_ZA_SDF_I:Vetype>[%w0, %1, vgx<vector_count>], %2, %3.<SME_ZA_SDFx24:Vetype>[%4]"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- [FP] Ternary widening arithmetic on ZA slice
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - BFMLAL
+;; - BFMLSL
+;; - FMLAL
+;; - FMLSL
+;; -------------------------------------------------------------------------
+
+;; BF/FMLAL/FMLSL: widening multiply-accumulate of half-precision
+;; vectors into a pair of 32-bit ZA slices ("0:1").
+(define_insn "@aarch64_sme_<optab><VNx4SI_ONLY:mode><SVE_FULL_HF:mode>"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SVE_FULL_HF 1 "register_operand" "w")
+ (match_operand:SVE_FULL_HF 2 "register_operand" "x")]
+ SME_FP_TERNARY_SLICE))]
+ "TARGET_STREAMING_SME2"
+ "<b><optab>l\tza.s[%w0, 0:1], %1.h, %2.h"
+)
+
+;; As above with a constant start offset; operands[4] = %1 + 1 closes
+;; the printed slice range.
+(define_insn "*aarch64_sme_<optab><VNx4SI_ONLY:mode><SVE_FULL_HF:mode>_plus"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_<za32_offset_range>_operand"))
+ (match_operand:SVE_FULL_HF 2 "register_operand" "w")
+ (match_operand:SVE_FULL_HF 3 "register_operand" "x")]
+ SME_FP_TERNARY_SLICE))]
+ "TARGET_STREAMING_SME2"
+ {
+ operands[4] = GEN_INT (INTVAL (operands[1]) + 1);
+ return "<b><optab>l\tza.s[%w0, %1:%4], %2.h, %3.h";
+ }
+)
+
+;; Multi-vector form with two aligned register groups.
+(define_insn "@aarch64_sme_<optab><VNx4SI_ONLY:mode><SME_ZA_HFx24:mode>"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SME_ZA_HFx24 1 "aligned_register_operand" "Uw<vector_count>")
+ (match_operand:SME_ZA_HFx24 2 "aligned_register_operand" "Uw<vector_count>")]
+ SME_FP_TERNARY_SLICE))]
+ "TARGET_STREAMING_SME2"
+ "<b><optab>l\tza.s[%w0, 0:1, vgx<vector_count>], %1, %2"
+)
+
+;; Multi-vector form with a constant start offset.
+(define_insn "*aarch64_sme_<optab><VNx4SI_ONLY:mode><SME_ZA_HFx24:mode>_plus"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_<za32_offset_range>_operand"))
+ (match_operand:SME_ZA_HFx24 2 "aligned_register_operand" "Uw<vector_count>")
+ (match_operand:SME_ZA_HFx24 3 "aligned_register_operand" "Uw<vector_count>")]
+ SME_FP_TERNARY_SLICE))]
+ "TARGET_STREAMING_SME2"
+ {
+ operands[4] = GEN_INT (INTVAL (operands[1]) + 1);
+ return "<b><optab>l\tza.s[%w0, %1:%4, vgx<vector_count>], %2, %3";
+ }
+)
+
+(define_insn "@aarch64_sme_single_<optab><VNx4SI_ONLY:mode><SME_ZA_HFx24:mode>"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SME_ZA_HFx24 1 "register_operand" "w")
+ (vec_duplicate:SME_ZA_HFx24
+ (match_operand:<SME_ZA_HFx24:VSINGLE> 2 "register_operand" "x"))]
+ SME_FP_TERNARY_SLICE))]
+ "TARGET_STREAMING_SME2"
+ "<b><optab>l\tza.s[%w0, 0:1, vgx<vector_count>], %1, %2.h"
+)
+
+(define_insn "*aarch64_sme_single_<optab><VNx4SI_ONLY:mode><SME_ZA_HFx24:mode>_plus"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_<za32_offset_range>_operand"))
+ (match_operand:SME_ZA_HFx24 2 "register_operand" "w")
+ (vec_duplicate:SME_ZA_HFx24
+ (match_operand:<SME_ZA_HFx24:VSINGLE> 3 "register_operand" "x"))]
+ SME_FP_TERNARY_SLICE))]
+ "TARGET_STREAMING_SME2"
+ {
+ operands[4] = GEN_INT (INTVAL (operands[1]) + 1);
+ return "<b><optab>l\tza.s[%w0, %1:%4, vgx<vector_count>], %2, %3.h";
+ }
+)
+
+(define_insn "@aarch64_sme_lane_<optab><VNx4SI_ONLY:mode><SME_ZA_HFx124:mode>"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SME_ZA_HFx124 1 "<aligned_operand>" "<aligned_fpr>")
+ (unspec:SME_ZA_HFx124
+ [(match_operand:<VSINGLE> 2 "register_operand" "x")
+ (match_operand:SI 3 "const_int_operand")]
+ UNSPEC_SVE_LANE_SELECT)]
+ SME_FP_TERNARY_SLICE))]
+ "TARGET_STREAMING_SME2"
+ "<b><optab>l\tza.s[%w0, 0:1<vg_modifier>], %1<z_suffix>, %2.h[%3]"
+)
+
+(define_insn "*aarch64_sme_lane_<optab><VNx4SI_ONLY:mode><SME_ZA_HFx124:mode>"
+ [(set (reg:VNx4SI_ONLY ZA_REGNUM)
+ (unspec:VNx4SI_ONLY
+ [(reg:VNx4SI_ONLY ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (plus:SI (match_operand:SI 0 "register_operand" "Uci")
+ (match_operand:SI 1 "const_<za32_offset_range>_operand"))
+ (match_operand:SME_ZA_HFx124 2 "<aligned_operand>" "<aligned_fpr>")
+ (unspec:SME_ZA_HFx124
+ [(match_operand:<VSINGLE> 3 "register_operand" "x")
+ (match_operand:SI 4 "const_int_operand")]
+ UNSPEC_SVE_LANE_SELECT)]
+ SME_FP_TERNARY_SLICE))]
+ "TARGET_STREAMING_SME2"
+ {
+ operands[5] = GEN_INT (INTVAL (operands[1]) + 1);
+ return "<b><optab>l\tza.s[%w0, %1:%5<vg_modifier>], %2<z_suffix>, %3.h[%4]";
+ }
+)
+
+;; -------------------------------------------------------------------------
+;; ---- [FP] Sum of outer products
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - BFMOPA
+;; - BFMOPS
+;; - FMOPA
+;; - FMOPS
+;; -------------------------------------------------------------------------
+
+(define_insn "@aarch64_sme_<optab><SME_ZA_SDF_I:mode><SME_MOP_HSDF:mode>"
+ [(set (reg:SME_ZA_SDF_I ZA_REGNUM)
+ (unspec:SME_ZA_SDF_I
+ [(reg:SME_ZA_SDF_I ZA_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:DI 0 "const_int_operand")
+ (match_operand:<SME_ZA_SDF_I:VPRED> 1 "register_operand" "Upl")
+ (match_operand:<SME_ZA_SDF_I:VPRED> 2 "register_operand" "Upl")
+ (match_operand:SME_MOP_HSDF 3 "register_operand" "w")
+ (match_operand:SME_MOP_HSDF 4 "register_operand" "w")]
+ SME_FP_MOP))]
+ "TARGET_STREAMING_SME
+ && (<SME_ZA_SDF_I:elem_bits> == 32) == (<SME_MOP_HSDF:elem_bits> <= 32)"
+ "<b><optab>\tza%0.<SME_ZA_SDF_I:Vetype>, %1/m, %2/m, %3.<SME_MOP_HSDF:Vetype>, %4.<SME_MOP_HSDF:Vetype>"
+)
+
+;; =========================================================================
+;; == Table lookup
+;; =========================================================================
+
+;; -------------------------------------------------------------------------
+;; ---- Table lookup
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - LUTI2
+;; - LUTI4
+;; -------------------------------------------------------------------------
+
+(define_c_enum "unspec" [
+ UNSPEC_SME_LUTI
+])
+
+(define_insn "@aarch64_sme_lut<LUTI_BITS><mode>"
+ [(set (match_operand:SVE_FULL_BHS 0 "register_operand" "=w")
+ (unspec:SVE_FULL_BHS
+ [(reg:V8DI ZT0_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:VNx16QI 1 "register_operand" "w")
+ (match_operand:DI 2 "const_int_operand")
+ (const_int LUTI_BITS)]
+ UNSPEC_SME_LUTI))]
+ "TARGET_STREAMING_SME2"
+ "luti<LUTI_BITS>\t%0.<Vetype>, zt0, %1[%2]"
+)
+
+(define_insn "@aarch64_sme_lut<LUTI_BITS><mode>"
+ [(set (match_operand:SVE_BHSx24 0 "aligned_register_operand" "=Uw<vector_count>")
+ (unspec:SVE_BHSx24
+ [(reg:V8DI ZT0_REGNUM)
+ (reg:DI SME_STATE_REGNUM)
+ (match_operand:VNx16QI 1 "register_operand" "w")
+ (match_operand:DI 2 "const_int_operand")
+ (const_int LUTI_BITS)]
+ UNSPEC_SME_LUTI))]
+ "TARGET_STREAMING_SME2
+ && !(<LUTI_BITS> == 4 && <vector_count> == 4 && <elem_bits> == 8)"
+ "luti<LUTI_BITS>\t%0, zt0, %1[%2]"
+)
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.cc b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
index 9010ecc..6492da0 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-base.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
@@ -240,7 +240,7 @@ public:
{
machine_mode mode = GET_MODE_INNER (e.vector_mode (0));
e.args[2] = simplify_unary_operation (NOT, mode, e.args[2], mode);
- return e.map_to_rtx_codes (AND, AND, -1);
+ return e.map_to_rtx_codes (AND, AND, -1, -1);
}
if (e.type_suffix_ids[0] == TYPE_SUFFIX_b)
@@ -573,6 +573,12 @@ public:
rtx
expand (function_expander &e) const override
{
+ if (e.type_suffix (0).tclass == TYPE_count)
+ {
+ unsigned int bits = e.type_suffix (0).element_bits;
+ return e.use_exact_insn (code_for_aarch64_sve_cntp_c (bits));
+ }
+
machine_mode mode = e.vector_mode (0);
e.add_ptrue_hint (0, mode);
return e.use_exact_insn (code_for_aarch64_pred_cntp (mode));
@@ -640,9 +646,24 @@ public:
rtx
expand (function_expander &e) const override
{
+ insn_code icode;
+ if (e.pred == PRED_none)
+ {
+ machine_mode mode0 = e.result_mode ();
+ machine_mode mode1 = GET_MODE (e.args[0]);
+ convert_optab optab;
+ if (e.type_suffix (0).integer_p)
+ optab = e.type_suffix (0).unsigned_p ? ufix_optab : sfix_optab;
+ else if (e.type_suffix (1).integer_p)
+ optab = e.type_suffix (1).unsigned_p ? ufloat_optab : sfloat_optab;
+ else
+ optab = trunc_optab;
+ icode = convert_optab_handler (optab, mode0, mode1);
+ gcc_assert (icode != CODE_FOR_nothing);
+ return e.use_exact_insn (icode);
+ }
machine_mode mode0 = e.vector_mode (0);
machine_mode mode1 = e.vector_mode (1);
- insn_code icode;
/* All this complication comes from the need to select four things
simultaneously:
@@ -706,9 +727,17 @@ public:
/* In the optab, the multiplication operands come before the accumulator
operand. The optab is keyed off the multiplication mode. */
e.rotate_inputs_left (0, 3);
- insn_code icode
- = e.direct_optab_handler_for_sign (sdot_prod_optab, udot_prod_optab,
- 0, GET_MODE (e.args[0]));
+ insn_code icode;
+ if (e.type_suffix_ids[1] == NUM_TYPE_SUFFIXES)
+ icode = e.direct_optab_handler_for_sign (sdot_prod_optab,
+ udot_prod_optab,
+ 0, GET_MODE (e.args[0]));
+ else
+ icode = (e.type_suffix (0).float_p
+ ? CODE_FOR_aarch64_sve_fdotvnx4sfvnx8hf
+ : e.type_suffix (0).unsigned_p
+ ? CODE_FOR_aarch64_sve_udotvnx4sivnx8hi
+ : CODE_FOR_aarch64_sve_sdotvnx4sivnx8hi);
return e.use_unpred_insn (icode);
}
};
@@ -721,12 +750,18 @@ public:
rtx
expand (function_expander &e) const override
{
+ machine_mode mode0 = GET_MODE (e.args[0]);
+ machine_mode mode1 = GET_MODE (e.args[1]);
/* Use the same ordering as the dot_prod_optab, with the
accumulator last. */
e.rotate_inputs_left (0, 4);
int unspec = unspec_for (e);
- machine_mode mode = e.vector_mode (0);
- return e.use_exact_insn (code_for_aarch64_dot_prod_lane (unspec, mode));
+ insn_code icode;
+ if (unspec == UNSPEC_FDOT)
+ icode = CODE_FOR_aarch64_fdot_prod_lanevnx4sfvnx8hf;
+ else
+ icode = code_for_aarch64_dot_prod_lane (unspec, mode0, mode1);
+ return e.use_exact_insn (icode);
}
};
@@ -1013,7 +1048,7 @@ public:
with an extra argument on the end. Take the inactive elements
from this extra argument. */
e.rotate_inputs_left (0, 4);
- return e.map_to_rtx_codes (AND, AND, -1, 3);
+ return e.map_to_rtx_codes (AND, AND, -1, -1, 3);
}
machine_mode wide_mode = e.vector_mode (0);
@@ -1105,19 +1140,6 @@ public:
bool is_lasta () const { return m_unspec == UNSPEC_LASTA; }
bool is_lastb () const { return m_unspec == UNSPEC_LASTB; }
- bool vect_all_same (tree v, int step) const
- {
- int i;
- int nelts = vector_cst_encoded_nelts (v);
- tree first_el = VECTOR_CST_ENCODED_ELT (v, 0);
-
- for (i = 0; i < nelts; i += step)
- if (!operand_equal_p (VECTOR_CST_ENCODED_ELT (v, i), first_el, 0))
- return false;
-
- return true;
- }
-
/* Fold a svlast{a/b} call with constant predicate to a BIT_FIELD_REF.
BIT_FIELD_REF lowers to Advanced SIMD element extract, so we have to
ensure the index of the element being accessed is in the range of a
@@ -1142,7 +1164,7 @@ public:
without a linear search of the predicate vector:
1. LASTA if predicate is all true, return element 0.
2. LASTA if predicate all false, return element 0. */
- if (is_lasta () && vect_all_same (pred, step_1))
+ if (is_lasta () && vector_cst_all_same (pred, step_1))
{
b = build3 (BIT_FIELD_REF, TREE_TYPE (f.lhs), val,
bitsize_int (step * BITS_PER_UNIT), bitsize_int (0));
@@ -1152,7 +1174,7 @@ public:
/* Handle the all-false case for LASTB where SVE VL == 128b -
return the highest numbered element. */
if (is_lastb () && known_eq (BYTES_PER_SVE_VECTOR, 16)
- && vect_all_same (pred, step_1)
+ && vector_cst_all_same (pred, step_1)
&& integer_zerop (VECTOR_CST_ENCODED_ELT (pred, 0)))
{
b = build3 (BIT_FIELD_REF, TREE_TYPE (f.lhs), val,
@@ -1257,6 +1279,9 @@ public:
gimple *
fold (gimple_folder &f) const override
{
+ if (f.vectors_per_tuple () != 1)
+ return nullptr;
+
tree vectype = f.vector_type (0);
/* Get the predicate and base pointer. */
@@ -1275,8 +1300,12 @@ public:
rtx
expand (function_expander &e) const override
{
- insn_code icode = convert_optab_handler (maskload_optab,
- e.vector_mode (0), e.gp_mode (0));
+ insn_code icode;
+ if (e.vectors_per_tuple () == 1)
+ icode = convert_optab_handler (maskload_optab,
+ e.vector_mode (0), e.gp_mode (0));
+ else
+ icode = code_for_aarch64_ld1 (e.tuple_mode (0));
return e.use_contiguous_load_insn (icode);
}
};
@@ -1506,7 +1535,7 @@ public:
rtx
expand (function_expander &e) const override
{
- machine_mode tuple_mode = TYPE_MODE (TREE_TYPE (e.call_expr));
+ machine_mode tuple_mode = e.result_mode ();
insn_code icode = convert_optab_handler (vec_mask_load_lanes_optab,
tuple_mode, e.vector_mode (0));
return e.use_contiguous_load_insn (icode);
@@ -1576,7 +1605,7 @@ public:
rtx
expand (function_expander &e) const override
{
- insn_code icode = code_for_aarch64_ldnt1 (e.vector_mode (0));
+ insn_code icode = code_for_aarch64_ldnt1 (e.tuple_mode (0));
return e.use_contiguous_load_insn (icode);
}
};
@@ -1836,7 +1865,10 @@ public:
gimple *
fold (gimple_folder &f) const override
{
- return f.fold_to_pfalse ();
+ if (f.type_suffix (0).tclass == TYPE_bool)
+ return f.fold_to_pfalse ();
+
+ return nullptr;
}
rtx
@@ -1981,13 +2013,20 @@ public:
gimple *
fold (gimple_folder &f) const override
{
- return f.fold_to_ptrue ();
+ if (f.type_suffix (0).tclass == TYPE_bool)
+ return f.fold_to_ptrue ();
+
+ return nullptr;
}
rtx
expand (function_expander &e) const override
{
- return aarch64_ptrue_all (e.type_suffix (0).element_bytes);
+ if (e.type_suffix (0).tclass == TYPE_bool)
+ return aarch64_ptrue_all (e.type_suffix (0).element_bytes);
+
+ auto bits = e.type_suffix (0).element_bits;
+ return e.use_exact_insn (code_for_aarch64_sve_ptrue_c (bits));
}
};
@@ -2161,10 +2200,14 @@ public:
gimple *
fold (gimple_folder &f) const override
{
+ if (f.vectors_per_tuple () > 1)
+ return NULL;
+
/* Punt to rtl if the effect of the reinterpret on registers does not
conform to GCC's endianness model. */
- if (!targetm.can_change_mode_class (f.vector_mode (0),
- f.vector_mode (1), FP_REGS))
+ if (GET_MODE_CLASS (f.vector_mode (0)) != MODE_VECTOR_BOOL
+ && !targetm.can_change_mode_class (f.vector_mode (0),
+ f.vector_mode (1), FP_REGS))
return NULL;
/* Otherwise svreinterpret corresponds directly to a VIEW_CONVERT_EXPR
@@ -2177,7 +2220,10 @@ public:
rtx
expand (function_expander &e) const override
{
- machine_mode mode = e.vector_mode (0);
+ machine_mode mode = e.tuple_mode (0);
+ /* Handle svbool_t <-> svcount_t. */
+ if (mode == e.tuple_mode (1))
+ return e.args[0];
return e.use_exact_insn (code_for_aarch64_sve_reinterpret (mode));
}
};
@@ -2208,12 +2254,37 @@ public:
}
};
+class svrint_impl : public function_base
+{
+public:
+ CONSTEXPR svrint_impl (optab_tag optab, int cond_unspec)
+ : m_optab (optab), m_cond_unspec (cond_unspec)
+ {}
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ if (e.pred == PRED_none)
+ {
+ auto icode = direct_optab_handler (m_optab, e.tuple_mode (0));
+ return e.use_exact_insn (icode);
+ }
+ return e.map_to_unspecs (-1, -1, m_cond_unspec);
+ }
+
+ optab_tag m_optab;
+ int m_cond_unspec;
+};
+
class svsel_impl : public quiet<function_base>
{
public:
gimple *
fold (gimple_folder &f) const override
{
+ if (f.vectors_per_tuple () > 1)
+ return nullptr;
+
/* svsel corresponds exactly to VEC_COND_EXPR. */
gimple_seq stmts = NULL;
tree pred = f.convert_pred (stmts, f.vector_type (0), 0);
@@ -2228,9 +2299,11 @@ public:
{
/* svsel (cond, truev, falsev) is vcond_mask (truev, falsev, cond). */
e.rotate_inputs_left (0, 3);
- insn_code icode = convert_optab_handler (vcond_mask_optab,
- e.vector_mode (0),
- e.gp_mode (0));
+ insn_code icode = (e.vectors_per_tuple () > 1
+ ? code_for_aarch64_sve_sel (e.tuple_mode (0))
+ : convert_optab_handler (vcond_mask_optab,
+ e.vector_mode (0),
+ e.gp_mode (0)));
return e.use_exact_insn (icode);
}
};
@@ -2317,6 +2390,9 @@ public:
gimple *
fold (gimple_folder &f) const override
{
+ if (f.vectors_per_tuple () != 1)
+ return nullptr;
+
tree vectype = f.vector_type (0);
/* Get the predicate and base pointer. */
@@ -2334,8 +2410,12 @@ public:
rtx
expand (function_expander &e) const override
{
- insn_code icode = convert_optab_handler (maskstore_optab,
- e.vector_mode (0), e.gp_mode (0));
+ insn_code icode;
+ if (e.vectors_per_tuple () == 1)
+ icode = convert_optab_handler (maskstore_optab,
+ e.vector_mode (0), e.gp_mode (0));
+ else
+ icode = code_for_aarch64_st1 (e.tuple_mode (0));
return e.use_contiguous_store_insn (icode);
}
};
@@ -2453,7 +2533,7 @@ public:
rtx
expand (function_expander &e) const override
{
- insn_code icode = code_for_aarch64_stnt1 (e.vector_mode (0));
+ insn_code icode = code_for_aarch64_stnt1 (e.tuple_mode (0));
return e.use_contiguous_store_insn (icode);
}
};
@@ -2470,7 +2550,7 @@ public:
/* Canonicalize subtractions of constants to additions. */
machine_mode mode = e.vector_mode (0);
if (e.try_negating_argument (2, mode))
- return e.map_to_rtx_codes (PLUS, PLUS, UNSPEC_COND_FADD);
+ return e.map_to_rtx_codes (PLUS, PLUS, UNSPEC_COND_FADD, -1);
return rtx_code_function::expand (e);
}
@@ -2681,6 +2761,9 @@ public:
gimple *
fold (gimple_folder &f) const override
{
+ if (f.vectors_per_tuple () > 1)
+ return nullptr;
+
if (f.type_suffix (1).unsigned_p)
return fold_type<poly_uint64> (f);
else
@@ -2818,7 +2901,8 @@ FUNCTION (svcvtnt, CODE_FOR_MODE0 (aarch64_sve_cvtnt),)
FUNCTION (svdiv, rtx_code_function, (DIV, UDIV, UNSPEC_COND_FDIV))
FUNCTION (svdivr, rtx_code_function_rotated, (DIV, UDIV, UNSPEC_COND_FDIV))
FUNCTION (svdot, svdot_impl,)
-FUNCTION (svdot_lane, svdotprod_lane_impl, (UNSPEC_SDOT, UNSPEC_UDOT, -1))
+FUNCTION (svdot_lane, svdotprod_lane_impl, (UNSPEC_SDOT, UNSPEC_UDOT,
+ UNSPEC_FDOT))
FUNCTION (svdup, svdup_impl,)
FUNCTION (svdup_lane, svdup_lane_impl,)
FUNCTION (svdupq, svdupq_impl,)
@@ -2884,12 +2968,16 @@ FUNCTION (svlsl_wide, shift_wide, (ASHIFT, UNSPEC_ASHIFT_WIDE))
FUNCTION (svlsr, rtx_code_function, (LSHIFTRT, LSHIFTRT))
FUNCTION (svlsr_wide, shift_wide, (LSHIFTRT, UNSPEC_LSHIFTRT_WIDE))
FUNCTION (svmad, svmad_impl,)
-FUNCTION (svmax, rtx_code_function, (SMAX, UMAX, UNSPEC_COND_FMAX))
-FUNCTION (svmaxnm, unspec_based_function, (-1, -1, UNSPEC_COND_FMAXNM))
+FUNCTION (svmax, rtx_code_function, (SMAX, UMAX, UNSPEC_COND_FMAX,
+ UNSPEC_FMAX))
+FUNCTION (svmaxnm, cond_or_uncond_unspec_function, (UNSPEC_COND_FMAXNM,
+ UNSPEC_FMAXNM))
FUNCTION (svmaxnmv, reduction, (UNSPEC_FMAXNMV))
FUNCTION (svmaxv, reduction, (UNSPEC_SMAXV, UNSPEC_UMAXV, UNSPEC_FMAXV))
-FUNCTION (svmin, rtx_code_function, (SMIN, UMIN, UNSPEC_COND_FMIN))
-FUNCTION (svminnm, unspec_based_function, (-1, -1, UNSPEC_COND_FMINNM))
+FUNCTION (svmin, rtx_code_function, (SMIN, UMIN, UNSPEC_COND_FMIN,
+ UNSPEC_FMIN))
+FUNCTION (svminnm, cond_or_uncond_unspec_function, (UNSPEC_COND_FMINNM,
+ UNSPEC_FMINNM))
FUNCTION (svminnmv, reduction, (UNSPEC_FMINNMV))
FUNCTION (svminv, reduction, (UNSPEC_SMINV, UNSPEC_UMINV, UNSPEC_FMINV))
FUNCTION (svmla, svmla_impl,)
@@ -2961,13 +3049,13 @@ FUNCTION (svrev, svrev_impl,)
FUNCTION (svrevb, unspec_based_function, (UNSPEC_REVB, UNSPEC_REVB, -1))
FUNCTION (svrevh, unspec_based_function, (UNSPEC_REVH, UNSPEC_REVH, -1))
FUNCTION (svrevw, unspec_based_function, (UNSPEC_REVW, UNSPEC_REVW, -1))
-FUNCTION (svrinta, unspec_based_function, (-1, -1, UNSPEC_COND_FRINTA))
-FUNCTION (svrinti, unspec_based_function, (-1, -1, UNSPEC_COND_FRINTI))
-FUNCTION (svrintm, unspec_based_function, (-1, -1, UNSPEC_COND_FRINTM))
-FUNCTION (svrintn, unspec_based_function, (-1, -1, UNSPEC_COND_FRINTN))
-FUNCTION (svrintp, unspec_based_function, (-1, -1, UNSPEC_COND_FRINTP))
-FUNCTION (svrintx, unspec_based_function, (-1, -1, UNSPEC_COND_FRINTX))
-FUNCTION (svrintz, unspec_based_function, (-1, -1, UNSPEC_COND_FRINTZ))
+FUNCTION (svrinta, svrint_impl, (round_optab, UNSPEC_COND_FRINTA))
+FUNCTION (svrinti, svrint_impl, (nearbyint_optab, UNSPEC_COND_FRINTI))
+FUNCTION (svrintm, svrint_impl, (floor_optab, UNSPEC_COND_FRINTM))
+FUNCTION (svrintn, svrint_impl, (roundeven_optab, UNSPEC_COND_FRINTN))
+FUNCTION (svrintp, svrint_impl, (ceil_optab, UNSPEC_COND_FRINTP))
+FUNCTION (svrintx, svrint_impl, (rint_optab, UNSPEC_COND_FRINTX))
+FUNCTION (svrintz, svrint_impl, (btrunc_optab, UNSPEC_COND_FRINTZ))
FUNCTION (svrsqrte, unspec_based_function, (-1, UNSPEC_RSQRTE, UNSPEC_RSQRTE))
FUNCTION (svrsqrts, unspec_based_function, (-1, -1, UNSPEC_RSQRTS))
FUNCTION (svscale, unspec_based_function, (-1, -1, UNSPEC_COND_FSCALE))
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.def b/gcc/config/aarch64/aarch64-sve-builtins-base.def
index 95ae1d7..ddeeaea 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-base.def
+++ b/gcc/config/aarch64/aarch64-sve-builtins-base.def
@@ -17,7 +17,7 @@
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
-#define REQUIRED_EXTENSIONS 0
+#define REQUIRED_EXTENSIONS AARCH64_FL_SVE
DEF_SVE_FUNCTION (svabd, binary_opt_n, all_arith, mxz)
DEF_SVE_FUNCTION (svabs, unary, all_float_and_signed, mxz)
DEF_SVE_FUNCTION (svacge, compare_opt_n, all_float, implicit)
@@ -25,12 +25,7 @@ DEF_SVE_FUNCTION (svacgt, compare_opt_n, all_float, implicit)
DEF_SVE_FUNCTION (svacle, compare_opt_n, all_float, implicit)
DEF_SVE_FUNCTION (svaclt, compare_opt_n, all_float, implicit)
DEF_SVE_FUNCTION (svadd, binary_opt_n, all_arith, mxz)
-DEF_SVE_FUNCTION (svadda, fold_left, all_float, implicit)
DEF_SVE_FUNCTION (svaddv, reduction_wide, all_arith, implicit)
-DEF_SVE_FUNCTION (svadrb, adr_offset, none, none)
-DEF_SVE_FUNCTION (svadrd, adr_index, none, none)
-DEF_SVE_FUNCTION (svadrh, adr_index, none, none)
-DEF_SVE_FUNCTION (svadrw, adr_index, none, none)
DEF_SVE_FUNCTION (svand, binary_opt_n, all_integer, mxz)
DEF_SVE_FUNCTION (svand, binary_opt_n, b, z)
DEF_SVE_FUNCTION (svandv, reduction, all_integer, implicit)
@@ -75,15 +70,15 @@ DEF_SVE_FUNCTION (svcnth_pat, count_pat, none, none)
DEF_SVE_FUNCTION (svcntp, count_pred, all_pred, implicit)
DEF_SVE_FUNCTION (svcntw, count_inherent, none, none)
DEF_SVE_FUNCTION (svcntw_pat, count_pat, none, none)
-DEF_SVE_FUNCTION (svcompact, unary, sd_data, implicit)
DEF_SVE_FUNCTION (svcreate2, create, all_data, none)
+DEF_SVE_FUNCTION (svcreate2, create, b, none)
DEF_SVE_FUNCTION (svcreate3, create, all_data, none)
DEF_SVE_FUNCTION (svcreate4, create, all_data, none)
-DEF_SVE_FUNCTION (svcvt, unary_convert, cvt, mxz)
+DEF_SVE_FUNCTION (svcvt, unary_convertxn, cvt, mxz)
DEF_SVE_FUNCTION (svdiv, binary_opt_n, all_float_and_sd_integer, mxz)
DEF_SVE_FUNCTION (svdivr, binary_opt_n, all_float_and_sd_integer, mxz)
-DEF_SVE_FUNCTION (svdot, ternary_qq_opt_n, sd_integer, none)
-DEF_SVE_FUNCTION (svdot_lane, ternary_qq_lane, sd_integer, none)
+DEF_SVE_FUNCTION (svdot, ternary_qq_opt_n_or_011, sd_integer, none)
+DEF_SVE_FUNCTION (svdot_lane, ternary_qq_or_011_lane, sd_integer, none)
DEF_SVE_FUNCTION (svdup, unary_n, all_data, mxz_or_none)
DEF_SVE_FUNCTION (svdup, unary_n, all_pred, none)
DEF_SVE_FUNCTION (svdup_lane, binary_uint_n, all_data, none)
@@ -93,12 +88,12 @@ DEF_SVE_FUNCTION (svdupq_lane, binary_uint64_n, all_data, none)
DEF_SVE_FUNCTION (sveor, binary_opt_n, all_integer, mxz)
DEF_SVE_FUNCTION (sveor, binary_opt_n, b, z)
DEF_SVE_FUNCTION (sveorv, reduction, all_integer, implicit)
-DEF_SVE_FUNCTION (svexpa, unary_uint, all_float, none)
DEF_SVE_FUNCTION (svext, ext, all_data, none)
DEF_SVE_FUNCTION (svextb, unary, hsd_integer, mxz)
DEF_SVE_FUNCTION (svexth, unary, sd_integer, mxz)
DEF_SVE_FUNCTION (svextw, unary, d_integer, mxz)
DEF_SVE_FUNCTION (svget2, get, all_data, none)
+DEF_SVE_FUNCTION (svget2, get, b, none)
DEF_SVE_FUNCTION (svget3, get, all_data, none)
DEF_SVE_FUNCTION (svget4, get, all_data, none)
DEF_SVE_FUNCTION (svindex, binary_scalar, all_integer, none)
@@ -106,51 +101,13 @@ DEF_SVE_FUNCTION (svinsr, binary_n, all_data, none)
DEF_SVE_FUNCTION (svlasta, reduction, all_data, implicit)
DEF_SVE_FUNCTION (svlastb, reduction, all_data, implicit)
DEF_SVE_FUNCTION (svld1, load, all_data, implicit)
-DEF_SVE_FUNCTION (svld1_gather, load_gather_sv, sd_data, implicit)
-DEF_SVE_FUNCTION (svld1_gather, load_gather_vs, sd_data, implicit)
DEF_SVE_FUNCTION (svld1rq, load_replicate, all_data, implicit)
DEF_SVE_FUNCTION (svld1sb, load_ext, hsd_integer, implicit)
-DEF_SVE_FUNCTION (svld1sb_gather, load_ext_gather_offset, sd_integer, implicit)
DEF_SVE_FUNCTION (svld1sh, load_ext, sd_integer, implicit)
-DEF_SVE_FUNCTION (svld1sh_gather, load_ext_gather_offset, sd_integer, implicit)
-DEF_SVE_FUNCTION (svld1sh_gather, load_ext_gather_index, sd_integer, implicit)
DEF_SVE_FUNCTION (svld1sw, load_ext, d_integer, implicit)
-DEF_SVE_FUNCTION (svld1sw_gather, load_ext_gather_offset, d_integer, implicit)
-DEF_SVE_FUNCTION (svld1sw_gather, load_ext_gather_index, d_integer, implicit)
DEF_SVE_FUNCTION (svld1ub, load_ext, hsd_integer, implicit)
-DEF_SVE_FUNCTION (svld1ub_gather, load_ext_gather_offset, sd_integer, implicit)
DEF_SVE_FUNCTION (svld1uh, load_ext, sd_integer, implicit)
-DEF_SVE_FUNCTION (svld1uh_gather, load_ext_gather_offset, sd_integer, implicit)
-DEF_SVE_FUNCTION (svld1uh_gather, load_ext_gather_index, sd_integer, implicit)
DEF_SVE_FUNCTION (svld1uw, load_ext, d_integer, implicit)
-DEF_SVE_FUNCTION (svld1uw_gather, load_ext_gather_offset, d_integer, implicit)
-DEF_SVE_FUNCTION (svld1uw_gather, load_ext_gather_index, d_integer, implicit)
-DEF_SVE_FUNCTION (svldff1, load, all_data, implicit)
-DEF_SVE_FUNCTION (svldff1_gather, load_gather_sv, sd_data, implicit)
-DEF_SVE_FUNCTION (svldff1_gather, load_gather_vs, sd_data, implicit)
-DEF_SVE_FUNCTION (svldff1sb, load_ext, hsd_integer, implicit)
-DEF_SVE_FUNCTION (svldff1sb_gather, load_ext_gather_offset, sd_integer, implicit)
-DEF_SVE_FUNCTION (svldff1sh, load_ext, sd_integer, implicit)
-DEF_SVE_FUNCTION (svldff1sh_gather, load_ext_gather_offset, sd_integer, implicit)
-DEF_SVE_FUNCTION (svldff1sh_gather, load_ext_gather_index, sd_integer, implicit)
-DEF_SVE_FUNCTION (svldff1sw, load_ext, d_integer, implicit)
-DEF_SVE_FUNCTION (svldff1sw_gather, load_ext_gather_offset, d_integer, implicit)
-DEF_SVE_FUNCTION (svldff1sw_gather, load_ext_gather_index, d_integer, implicit)
-DEF_SVE_FUNCTION (svldff1ub, load_ext, hsd_integer, implicit)
-DEF_SVE_FUNCTION (svldff1ub_gather, load_ext_gather_offset, sd_integer, implicit)
-DEF_SVE_FUNCTION (svldff1uh, load_ext, sd_integer, implicit)
-DEF_SVE_FUNCTION (svldff1uh_gather, load_ext_gather_offset, sd_integer, implicit)
-DEF_SVE_FUNCTION (svldff1uh_gather, load_ext_gather_index, sd_integer, implicit)
-DEF_SVE_FUNCTION (svldff1uw, load_ext, d_integer, implicit)
-DEF_SVE_FUNCTION (svldff1uw_gather, load_ext_gather_offset, d_integer, implicit)
-DEF_SVE_FUNCTION (svldff1uw_gather, load_ext_gather_index, d_integer, implicit)
-DEF_SVE_FUNCTION (svldnf1, load, all_data, implicit)
-DEF_SVE_FUNCTION (svldnf1sb, load_ext, hsd_integer, implicit)
-DEF_SVE_FUNCTION (svldnf1sh, load_ext, sd_integer, implicit)
-DEF_SVE_FUNCTION (svldnf1sw, load_ext, d_integer, implicit)
-DEF_SVE_FUNCTION (svldnf1ub, load_ext, hsd_integer, implicit)
-DEF_SVE_FUNCTION (svldnf1uh, load_ext, sd_integer, implicit)
-DEF_SVE_FUNCTION (svldnf1uw, load_ext, d_integer, implicit)
DEF_SVE_FUNCTION (svldnt1, load, all_data, implicit)
DEF_SVE_FUNCTION (svld2, load, all_data, implicit)
DEF_SVE_FUNCTION (svld3, load, all_data, implicit)
@@ -161,19 +118,18 @@ DEF_SVE_FUNCTION (svlsl_wide, binary_uint64_opt_n, bhs_integer, mxz)
DEF_SVE_FUNCTION (svlsr, binary_uint_opt_n, all_unsigned, mxz)
DEF_SVE_FUNCTION (svlsr_wide, binary_uint64_opt_n, bhs_unsigned, mxz)
DEF_SVE_FUNCTION (svmad, ternary_opt_n, all_arith, mxz)
-DEF_SVE_FUNCTION (svmax, binary_opt_n, all_arith, mxz)
-DEF_SVE_FUNCTION (svmaxnm, binary_opt_n, all_float, mxz)
+DEF_SVE_FUNCTION (svmax, binary_opt_single_n, all_arith, mxz)
+DEF_SVE_FUNCTION (svmaxnm, binary_opt_single_n, all_float, mxz)
DEF_SVE_FUNCTION (svmaxnmv, reduction, all_float, implicit)
DEF_SVE_FUNCTION (svmaxv, reduction, all_arith, implicit)
-DEF_SVE_FUNCTION (svmin, binary_opt_n, all_arith, mxz)
-DEF_SVE_FUNCTION (svminnm, binary_opt_n, all_float, mxz)
+DEF_SVE_FUNCTION (svmin, binary_opt_single_n, all_arith, mxz)
+DEF_SVE_FUNCTION (svminnm, binary_opt_single_n, all_float, mxz)
DEF_SVE_FUNCTION (svminnmv, reduction, all_float, implicit)
DEF_SVE_FUNCTION (svminv, reduction, all_arith, implicit)
DEF_SVE_FUNCTION (svmla, ternary_opt_n, all_arith, mxz)
DEF_SVE_FUNCTION (svmla_lane, ternary_lane, all_float, none)
DEF_SVE_FUNCTION (svmls, ternary_opt_n, all_arith, mxz)
DEF_SVE_FUNCTION (svmls_lane, ternary_lane, all_float, none)
-DEF_SVE_FUNCTION (svmmla, mmla, none, none)
DEF_SVE_FUNCTION (svmov, unary, b, z)
DEF_SVE_FUNCTION (svmsb, ternary_opt_n, all_arith, mxz)
DEF_SVE_FUNCTION (svmul, binary_opt_n, all_arith, mxz)
@@ -194,16 +150,13 @@ DEF_SVE_FUNCTION (svorr, binary_opt_n, all_integer, mxz)
DEF_SVE_FUNCTION (svorr, binary_opt_n, b, z)
DEF_SVE_FUNCTION (svorv, reduction, all_integer, implicit)
DEF_SVE_FUNCTION (svpfalse, inherent_b, b, none)
+DEF_SVE_FUNCTION (svpfalse, inherent, c, none)
DEF_SVE_FUNCTION (svpfirst, unary, b, implicit)
DEF_SVE_FUNCTION (svpnext, unary_pred, all_pred, implicit)
DEF_SVE_FUNCTION (svprfb, prefetch, none, implicit)
-DEF_SVE_FUNCTION (svprfb_gather, prefetch_gather_offset, none, implicit)
DEF_SVE_FUNCTION (svprfd, prefetch, none, implicit)
-DEF_SVE_FUNCTION (svprfd_gather, prefetch_gather_index, none, implicit)
DEF_SVE_FUNCTION (svprfh, prefetch, none, implicit)
-DEF_SVE_FUNCTION (svprfh_gather, prefetch_gather_index, none, implicit)
DEF_SVE_FUNCTION (svprfw, prefetch, none, implicit)
-DEF_SVE_FUNCTION (svprfw_gather, prefetch_gather_index, none, implicit)
DEF_SVE_FUNCTION (svptest_any, ptest, none, implicit)
DEF_SVE_FUNCTION (svptest_first, ptest, none, implicit)
DEF_SVE_FUNCTION (svptest_last, ptest, none, implicit)
@@ -244,61 +197,52 @@ DEF_SVE_FUNCTION (svqincw_pat, inc_dec_pat, s_integer, none)
DEF_SVE_FUNCTION (svqincw_pat, inc_dec_pat, sd_integer, none)
DEF_SVE_FUNCTION (svqsub, binary_opt_n, all_integer, none)
DEF_SVE_FUNCTION (svrbit, unary, all_integer, mxz)
-DEF_SVE_FUNCTION (svrdffr, rdffr, none, z_or_none)
DEF_SVE_FUNCTION (svrecpe, unary, all_float, none)
DEF_SVE_FUNCTION (svrecps, binary, all_float, none)
DEF_SVE_FUNCTION (svrecpx, unary, all_float, mxz)
-DEF_SVE_FUNCTION (svreinterpret, unary_convert, reinterpret, none)
+DEF_SVE_FUNCTION_GS (svreinterpret, reinterpret, reinterpret, x1234, none)
+DEF_SVE_FUNCTION (svreinterpret, reinterpret, reinterpret_b, none)
DEF_SVE_FUNCTION (svrev, unary, all_data, none)
DEF_SVE_FUNCTION (svrev, unary_pred, all_pred, none)
DEF_SVE_FUNCTION (svrevb, unary, hsd_integer, mxz)
DEF_SVE_FUNCTION (svrevh, unary, sd_integer, mxz)
DEF_SVE_FUNCTION (svrevw, unary, d_integer, mxz)
-DEF_SVE_FUNCTION (svrinta, unary, all_float, mxz)
+DEF_SVE_FUNCTION (svrinta, unaryxn, all_float, mxz)
DEF_SVE_FUNCTION (svrinti, unary, all_float, mxz)
-DEF_SVE_FUNCTION (svrintm, unary, all_float, mxz)
-DEF_SVE_FUNCTION (svrintn, unary, all_float, mxz)
-DEF_SVE_FUNCTION (svrintp, unary, all_float, mxz)
+DEF_SVE_FUNCTION (svrintm, unaryxn, all_float, mxz)
+DEF_SVE_FUNCTION (svrintn, unaryxn, all_float, mxz)
+DEF_SVE_FUNCTION (svrintp, unaryxn, all_float, mxz)
DEF_SVE_FUNCTION (svrintx, unary, all_float, mxz)
DEF_SVE_FUNCTION (svrintz, unary, all_float, mxz)
DEF_SVE_FUNCTION (svrsqrte, unary, all_float, none)
DEF_SVE_FUNCTION (svrsqrts, binary, all_float, none)
DEF_SVE_FUNCTION (svscale, binary_int_opt_n, all_float, mxz)
-DEF_SVE_FUNCTION (svsel, binary, all_data, implicit)
-DEF_SVE_FUNCTION (svsel, binary, b, implicit)
+DEF_SVE_FUNCTION (svsel, binaryxn, all_data, implicit)
+DEF_SVE_FUNCTION (svsel, binaryxn, b, implicit)
DEF_SVE_FUNCTION (svset2, set, all_data, none)
+DEF_SVE_FUNCTION (svset2, set, b, none)
DEF_SVE_FUNCTION (svset3, set, all_data, none)
DEF_SVE_FUNCTION (svset4, set, all_data, none)
-DEF_SVE_FUNCTION (svsetffr, setffr, none, none)
DEF_SVE_FUNCTION (svsplice, binary, all_data, implicit)
DEF_SVE_FUNCTION (svsqrt, unary, all_float, mxz)
-DEF_SVE_FUNCTION (svst1, store, all_data, implicit)
-DEF_SVE_FUNCTION (svst1_scatter, store_scatter_index, sd_data, implicit)
-DEF_SVE_FUNCTION (svst1_scatter, store_scatter_offset, sd_data, implicit)
+DEF_SVE_FUNCTION (svst1, storexn, all_data, implicit)
DEF_SVE_FUNCTION (svst1b, store, hsd_integer, implicit)
-DEF_SVE_FUNCTION (svst1b_scatter, store_scatter_offset, sd_integer, implicit)
DEF_SVE_FUNCTION (svst1h, store, sd_integer, implicit)
-DEF_SVE_FUNCTION (svst1h_scatter, store_scatter_index, sd_integer, implicit)
-DEF_SVE_FUNCTION (svst1h_scatter, store_scatter_offset, sd_integer, implicit)
DEF_SVE_FUNCTION (svst1w, store, d_integer, implicit)
-DEF_SVE_FUNCTION (svst1w_scatter, store_scatter_index, d_integer, implicit)
-DEF_SVE_FUNCTION (svst1w_scatter, store_scatter_offset, d_integer, implicit)
DEF_SVE_FUNCTION (svst2, store, all_data, implicit)
DEF_SVE_FUNCTION (svst3, store, all_data, implicit)
DEF_SVE_FUNCTION (svst4, store, all_data, implicit)
-DEF_SVE_FUNCTION (svstnt1, store, all_data, implicit)
+DEF_SVE_FUNCTION (svstnt1, storexn, all_data, implicit)
DEF_SVE_FUNCTION (svsub, binary_opt_n, all_arith, mxz)
DEF_SVE_FUNCTION (svsubr, binary_opt_n, all_arith, mxz)
DEF_SVE_FUNCTION (svtbl, binary_uint, all_data, none)
-DEF_SVE_FUNCTION (svtmad, tmad, all_float, none)
DEF_SVE_FUNCTION (svtrn1, binary, all_data, none)
DEF_SVE_FUNCTION (svtrn1, binary_pred, all_pred, none)
DEF_SVE_FUNCTION (svtrn2, binary, all_data, none)
DEF_SVE_FUNCTION (svtrn2, binary_pred, all_pred, none)
-DEF_SVE_FUNCTION (svtsmul, binary_uint, all_float, none)
-DEF_SVE_FUNCTION (svtssel, binary_uint, all_float, none)
DEF_SVE_FUNCTION (svundef, inherent, all_data, none)
DEF_SVE_FUNCTION (svundef2, inherent, all_data, none)
+DEF_SVE_FUNCTION (svundef2, inherent, b, none)
DEF_SVE_FUNCTION (svundef3, inherent, all_data, none)
DEF_SVE_FUNCTION (svundef4, inherent, all_data, none)
DEF_SVE_FUNCTION (svunpkhi, unary_widen, hsd_integer, none)
@@ -311,41 +255,116 @@ DEF_SVE_FUNCTION (svuzp2, binary, all_data, none)
DEF_SVE_FUNCTION (svuzp2, binary_pred, all_pred, none)
DEF_SVE_FUNCTION (svwhilele, compare_scalar, while, none)
DEF_SVE_FUNCTION (svwhilelt, compare_scalar, while, none)
-DEF_SVE_FUNCTION (svwrffr, setffr, none, implicit)
DEF_SVE_FUNCTION (svzip1, binary, all_data, none)
DEF_SVE_FUNCTION (svzip1, binary_pred, all_pred, none)
DEF_SVE_FUNCTION (svzip2, binary, all_data, none)
DEF_SVE_FUNCTION (svzip2, binary_pred, all_pred, none)
#undef REQUIRED_EXTENSIONS
-#define REQUIRED_EXTENSIONS AARCH64_FL_BF16
+#define REQUIRED_EXTENSIONS AARCH64_FL_SVE | AARCH64_FL_SM_OFF
+DEF_SVE_FUNCTION (svadda, fold_left, all_float, implicit)
+DEF_SVE_FUNCTION (svadrb, adr_offset, none, none)
+DEF_SVE_FUNCTION (svadrd, adr_index, none, none)
+DEF_SVE_FUNCTION (svadrh, adr_index, none, none)
+DEF_SVE_FUNCTION (svadrw, adr_index, none, none)
+DEF_SVE_FUNCTION (svcompact, unary, sd_data, implicit)
+DEF_SVE_FUNCTION (svexpa, unary_uint, all_float, none)
+DEF_SVE_FUNCTION (svld1_gather, load_gather_sv, sd_data, implicit)
+DEF_SVE_FUNCTION (svld1_gather, load_gather_vs, sd_data, implicit)
+DEF_SVE_FUNCTION (svld1sb_gather, load_ext_gather_offset, sd_integer, implicit)
+DEF_SVE_FUNCTION (svld1sh_gather, load_ext_gather_offset, sd_integer, implicit)
+DEF_SVE_FUNCTION (svld1sh_gather, load_ext_gather_index, sd_integer, implicit)
+DEF_SVE_FUNCTION (svld1sw_gather, load_ext_gather_offset, d_integer, implicit)
+DEF_SVE_FUNCTION (svld1sw_gather, load_ext_gather_index, d_integer, implicit)
+DEF_SVE_FUNCTION (svld1ub_gather, load_ext_gather_offset, sd_integer, implicit)
+DEF_SVE_FUNCTION (svld1uh_gather, load_ext_gather_offset, sd_integer, implicit)
+DEF_SVE_FUNCTION (svld1uh_gather, load_ext_gather_index, sd_integer, implicit)
+DEF_SVE_FUNCTION (svld1uw_gather, load_ext_gather_offset, d_integer, implicit)
+DEF_SVE_FUNCTION (svld1uw_gather, load_ext_gather_index, d_integer, implicit)
+DEF_SVE_FUNCTION (svldff1, load, all_data, implicit)
+DEF_SVE_FUNCTION (svldff1_gather, load_gather_sv, sd_data, implicit)
+DEF_SVE_FUNCTION (svldff1_gather, load_gather_vs, sd_data, implicit)
+DEF_SVE_FUNCTION (svldff1sb, load_ext, hsd_integer, implicit)
+DEF_SVE_FUNCTION (svldff1sb_gather, load_ext_gather_offset, sd_integer, implicit)
+DEF_SVE_FUNCTION (svldff1sh, load_ext, sd_integer, implicit)
+DEF_SVE_FUNCTION (svldff1sh_gather, load_ext_gather_offset, sd_integer, implicit)
+DEF_SVE_FUNCTION (svldff1sh_gather, load_ext_gather_index, sd_integer, implicit)
+DEF_SVE_FUNCTION (svldff1sw, load_ext, d_integer, implicit)
+DEF_SVE_FUNCTION (svldff1sw_gather, load_ext_gather_offset, d_integer, implicit)
+DEF_SVE_FUNCTION (svldff1sw_gather, load_ext_gather_index, d_integer, implicit)
+DEF_SVE_FUNCTION (svldff1ub, load_ext, hsd_integer, implicit)
+DEF_SVE_FUNCTION (svldff1ub_gather, load_ext_gather_offset, sd_integer, implicit)
+DEF_SVE_FUNCTION (svldff1uh, load_ext, sd_integer, implicit)
+DEF_SVE_FUNCTION (svldff1uh_gather, load_ext_gather_offset, sd_integer, implicit)
+DEF_SVE_FUNCTION (svldff1uh_gather, load_ext_gather_index, sd_integer, implicit)
+DEF_SVE_FUNCTION (svldff1uw, load_ext, d_integer, implicit)
+DEF_SVE_FUNCTION (svldff1uw_gather, load_ext_gather_offset, d_integer, implicit)
+DEF_SVE_FUNCTION (svldff1uw_gather, load_ext_gather_index, d_integer, implicit)
+DEF_SVE_FUNCTION (svldnf1, load, all_data, implicit)
+DEF_SVE_FUNCTION (svldnf1sb, load_ext, hsd_integer, implicit)
+DEF_SVE_FUNCTION (svldnf1sh, load_ext, sd_integer, implicit)
+DEF_SVE_FUNCTION (svldnf1sw, load_ext, d_integer, implicit)
+DEF_SVE_FUNCTION (svldnf1ub, load_ext, hsd_integer, implicit)
+DEF_SVE_FUNCTION (svldnf1uh, load_ext, sd_integer, implicit)
+DEF_SVE_FUNCTION (svldnf1uw, load_ext, d_integer, implicit)
+DEF_SVE_FUNCTION (svmmla, mmla, none, none)
+DEF_SVE_FUNCTION (svprfb_gather, prefetch_gather_offset, none, implicit)
+DEF_SVE_FUNCTION (svprfd_gather, prefetch_gather_index, none, implicit)
+DEF_SVE_FUNCTION (svprfh_gather, prefetch_gather_index, none, implicit)
+DEF_SVE_FUNCTION (svprfw_gather, prefetch_gather_index, none, implicit)
+DEF_SVE_FUNCTION (svrdffr, rdffr, none, z_or_none)
+DEF_SVE_FUNCTION (svsetffr, setffr, none, none)
+DEF_SVE_FUNCTION (svst1_scatter, store_scatter_index, sd_data, implicit)
+DEF_SVE_FUNCTION (svst1_scatter, store_scatter_offset, sd_data, implicit)
+DEF_SVE_FUNCTION (svst1b_scatter, store_scatter_offset, sd_integer, implicit)
+DEF_SVE_FUNCTION (svst1h_scatter, store_scatter_index, sd_integer, implicit)
+DEF_SVE_FUNCTION (svst1h_scatter, store_scatter_offset, sd_integer, implicit)
+DEF_SVE_FUNCTION (svst1w_scatter, store_scatter_index, d_integer, implicit)
+DEF_SVE_FUNCTION (svst1w_scatter, store_scatter_offset, d_integer, implicit)
+DEF_SVE_FUNCTION (svtmad, tmad, all_float, none)
+DEF_SVE_FUNCTION (svtsmul, binary_uint, all_float, none)
+DEF_SVE_FUNCTION (svtssel, binary_uint, all_float, none)
+DEF_SVE_FUNCTION (svwrffr, setffr, none, implicit)
+#undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS AARCH64_FL_SVE | AARCH64_FL_BF16
DEF_SVE_FUNCTION (svbfdot, ternary_bfloat_opt_n, s_float, none)
DEF_SVE_FUNCTION (svbfdot_lane, ternary_bfloat_lanex2, s_float, none)
DEF_SVE_FUNCTION (svbfmlalb, ternary_bfloat_opt_n, s_float, none)
DEF_SVE_FUNCTION (svbfmlalb_lane, ternary_bfloat_lane, s_float, none)
DEF_SVE_FUNCTION (svbfmlalt, ternary_bfloat_opt_n, s_float, none)
DEF_SVE_FUNCTION (svbfmlalt_lane, ternary_bfloat_lane, s_float, none)
-DEF_SVE_FUNCTION (svbfmmla, ternary_bfloat, s_float, none)
-DEF_SVE_FUNCTION (svcvt, unary_convert, cvt_bfloat, mxz)
+DEF_SVE_FUNCTION (svcvt, unary_convertxn, cvt_bfloat, mxz)
DEF_SVE_FUNCTION (svcvtnt, unary_convert_narrowt, cvt_bfloat, mx)
#undef REQUIRED_EXTENSIONS
-#define REQUIRED_EXTENSIONS AARCH64_FL_I8MM
-DEF_SVE_FUNCTION (svmmla, mmla, s_integer, none)
-DEF_SVE_FUNCTION (svusmmla, ternary_uintq_intq, s_signed, none)
+#define REQUIRED_EXTENSIONS (AARCH64_FL_SVE \
+ | AARCH64_FL_BF16 \
+ | AARCH64_FL_SM_OFF)
+DEF_SVE_FUNCTION (svbfmmla, ternary_bfloat, s_float, none)
+#undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS AARCH64_FL_SVE | AARCH64_FL_I8MM
DEF_SVE_FUNCTION (svsudot, ternary_intq_uintq_opt_n, s_signed, none)
DEF_SVE_FUNCTION (svsudot_lane, ternary_intq_uintq_lane, s_signed, none)
DEF_SVE_FUNCTION (svusdot, ternary_uintq_intq_opt_n, s_signed, none)
DEF_SVE_FUNCTION (svusdot_lane, ternary_uintq_intq_lane, s_signed, none)
#undef REQUIRED_EXTENSIONS
-#define REQUIRED_EXTENSIONS AARCH64_FL_F32MM
+#define REQUIRED_EXTENSIONS (AARCH64_FL_SVE \
+ | AARCH64_FL_I8MM \
+ | AARCH64_FL_SM_OFF)
+DEF_SVE_FUNCTION (svmmla, mmla, s_integer, none)
+DEF_SVE_FUNCTION (svusmmla, ternary_uintq_intq, s_signed, none)
+#undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS (AARCH64_FL_SVE \
+ | AARCH64_FL_F32MM \
+ | AARCH64_FL_SM_OFF)
DEF_SVE_FUNCTION (svmmla, mmla, s_float, none)
#undef REQUIRED_EXTENSIONS
-#define REQUIRED_EXTENSIONS AARCH64_FL_F64MM
-DEF_SVE_FUNCTION (svld1ro, load_replicate, all_data, implicit)
-DEF_SVE_FUNCTION (svmmla, mmla, d_float, none)
+#define REQUIRED_EXTENSIONS AARCH64_FL_SVE | AARCH64_FL_F64MM
DEF_SVE_FUNCTION (svtrn1q, binary, all_data, none)
DEF_SVE_FUNCTION (svtrn2q, binary, all_data, none)
DEF_SVE_FUNCTION (svuzp1q, binary, all_data, none)
@@ -353,3 +372,10 @@ DEF_SVE_FUNCTION (svuzp2q, binary, all_data, none)
DEF_SVE_FUNCTION (svzip1q, binary, all_data, none)
DEF_SVE_FUNCTION (svzip2q, binary, all_data, none)
#undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS (AARCH64_FL_SVE \
+ | AARCH64_FL_F64MM \
+ | AARCH64_FL_SM_OFF)
+DEF_SVE_FUNCTION (svld1ro, load_replicate, all_data, implicit)
+DEF_SVE_FUNCTION (svmmla, mmla, d_float, none)
+#undef REQUIRED_EXTENSIONS
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-functions.h b/gcc/config/aarch64/aarch64-sve-builtins-functions.h
index 2729877..b40640b 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-functions.h
+++ b/gcc/config/aarch64/aarch64-sve-builtins-functions.h
@@ -39,6 +39,33 @@ public:
}
};
+/* Wrap T, which is derived from function_base, and indicate that it
+ additionally has the call properties in PROPERTIES. */
+template<typename T, unsigned int PROPERTIES>
+class add_call_properties : public T
+{
+public:
+ using T::T;
+
+ unsigned int
+ call_properties (const function_instance &fi) const override
+ {
+ return T::call_properties (fi) | PROPERTIES;
+ }
+};
+
+template<typename T>
+using read_write_za = add_call_properties<T, CP_READ_ZA | CP_WRITE_ZA>;
+
+template<typename T>
+using write_za = add_call_properties<T, CP_WRITE_ZA>;
+
+template<typename T>
+using read_zt0 = add_call_properties<T, CP_READ_ZT0>;
+
+template<typename T>
+using write_zt0 = add_call_properties<T, CP_WRITE_ZT0>;
+
/* A function_base that sometimes or always operates on tuples of
vectors. */
class multi_vector_function : public function_base
@@ -48,8 +75,13 @@ public:
: m_vectors_per_tuple (vectors_per_tuple) {}
unsigned int
- vectors_per_tuple () const override
+ vectors_per_tuple (const function_instance &fi) const override
{
+ if (fi.group_suffix_id != GROUP_none)
+ {
+ gcc_checking_assert (m_vectors_per_tuple == 1);
+ return fi.group_suffix ().vectors_per_tuple;
+ }
return m_vectors_per_tuple;
}
@@ -76,8 +108,9 @@ public:
memory_vector_mode (const function_instance &fi) const override
{
machine_mode mode = fi.vector_mode (0);
- if (m_vectors_per_tuple != 1)
- mode = targetm.array_mode (mode, m_vectors_per_tuple).require ();
+ auto vectors_per_tuple = fi.vectors_per_tuple ();
+ if (vectors_per_tuple != 1)
+ mode = targetm.array_mode (mode, vectors_per_tuple).require ();
return mode;
}
};
@@ -170,9 +203,11 @@ class rtx_code_function_base : public function_base
public:
CONSTEXPR rtx_code_function_base (rtx_code code_for_sint,
rtx_code code_for_uint,
- int unspec_for_fp = -1)
+ int unspec_for_cond_fp = -1,
+ int unspec_for_uncond_fp = -1)
: m_code_for_sint (code_for_sint), m_code_for_uint (code_for_uint),
- m_unspec_for_fp (unspec_for_fp) {}
+ m_unspec_for_cond_fp (unspec_for_cond_fp),
+ m_unspec_for_uncond_fp (unspec_for_uncond_fp) {}
/* The rtx code to use for signed and unsigned integers respectively.
Can be UNKNOWN for functions that don't have integer forms. */
@@ -181,7 +216,11 @@ public:
/* The UNSPEC_COND_* to use for floating-point operations. Can be -1
for functions that only operate on integers. */
- int m_unspec_for_fp;
+ int m_unspec_for_cond_fp;
+
+ /* The UNSPEC_* to use for unpredicated floating-point operations.
+ Can be -1 if there is no such operation. */
+ int m_unspec_for_uncond_fp;
};
/* A function_base for functions that have an associated rtx code.
@@ -195,7 +234,7 @@ public:
expand (function_expander &e) const override
{
return e.map_to_rtx_codes (m_code_for_sint, m_code_for_uint,
- m_unspec_for_fp);
+ m_unspec_for_cond_fp, m_unspec_for_uncond_fp);
}
};
@@ -216,7 +255,8 @@ public:
unsigned int nargs = e.args.length ();
e.rotate_inputs_left (e.pred != PRED_none ? 1 : 0, nargs);
return e.map_to_rtx_codes (m_code_for_sint, m_code_for_uint,
- m_unspec_for_fp, nargs - 1);
+ m_unspec_for_cond_fp, m_unspec_for_uncond_fp,
+ nargs - 1);
}
};
@@ -229,18 +269,21 @@ class unspec_based_function_base : public function_base
public:
CONSTEXPR unspec_based_function_base (int unspec_for_sint,
int unspec_for_uint,
- int unspec_for_fp)
+ int unspec_for_fp,
+ unsigned int suffix_index = 0)
: m_unspec_for_sint (unspec_for_sint),
m_unspec_for_uint (unspec_for_uint),
- m_unspec_for_fp (unspec_for_fp)
+ m_unspec_for_fp (unspec_for_fp),
+ m_suffix_index (suffix_index)
{}
/* Return the unspec code to use for INSTANCE, based on type suffix 0. */
int
unspec_for (const function_instance &instance) const
{
- return (!instance.type_suffix (0).integer_p ? m_unspec_for_fp
- : instance.type_suffix (0).unsigned_p ? m_unspec_for_uint
+ auto &suffix = instance.type_suffix (m_suffix_index);
+ return (!suffix.integer_p ? m_unspec_for_fp
+ : suffix.unsigned_p ? m_unspec_for_uint
: m_unspec_for_sint);
}
@@ -249,6 +292,9 @@ public:
int m_unspec_for_sint;
int m_unspec_for_uint;
int m_unspec_for_fp;
+
+ /* Which type suffix is used to choose between the unspecs. */
+ unsigned int m_suffix_index;
};
/* A function_base for functions that have an associated unspec code.
@@ -301,10 +347,14 @@ public:
rtx
expand (function_expander &e) const override
{
- return e.use_exact_insn (CODE (unspec_for (e), e.vector_mode (0)));
+ return e.use_exact_insn (CODE (unspec_for (e),
+ e.tuple_mode (m_suffix_index)));
}
};
+typedef unspec_based_function_exact_insn<code_for_aarch64_sve>
+ unspec_based_uncond_function;
+
/* A function that performs an unspec and then adds it to another value. */
typedef unspec_based_function_exact_insn<code_for_aarch64_sve_add>
unspec_based_add_function;
@@ -341,6 +391,90 @@ typedef unspec_based_function_exact_insn<code_for_aarch64_sve_sub>
typedef unspec_based_function_exact_insn<code_for_aarch64_sve_sub_lane>
unspec_based_sub_lane_function;
+/* A function that has conditional and unconditional forms, with both
+ forms being associated with a single unspec each. */
+class cond_or_uncond_unspec_function : public function_base
+{
+public:
+ CONSTEXPR cond_or_uncond_unspec_function (int cond_unspec, int uncond_unspec)
+ : m_cond_unspec (cond_unspec), m_uncond_unspec (uncond_unspec) {}
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ if (e.pred == PRED_none)
+ {
+ auto mode = e.tuple_mode (0);
+ auto icode = (e.mode_suffix_id == MODE_single
+ ? code_for_aarch64_sve_single (m_uncond_unspec, mode)
+ : code_for_aarch64_sve (m_uncond_unspec, mode));
+ return e.use_exact_insn (icode);
+ }
+ return e.map_to_unspecs (m_cond_unspec, m_cond_unspec, m_cond_unspec);
+ }
+
+ /* The unspecs for the conditional and unconditional instructions,
+ respectively. */
+ int m_cond_unspec;
+ int m_uncond_unspec;
+};
+
+/* General SME unspec-based functions, parameterized on the vector mode. */
+class sme_1mode_function : public read_write_za<unspec_based_function_base>
+{
+public:
+ using parent = read_write_za<unspec_based_function_base>;
+
+ CONSTEXPR sme_1mode_function (int unspec_for_sint, int unspec_for_uint,
+ int unspec_for_fp)
+ : parent (unspec_for_sint, unspec_for_uint, unspec_for_fp, 1)
+ {}
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ insn_code icode;
+ if (e.mode_suffix_id == MODE_single)
+ icode = code_for_aarch64_sme_single (unspec_for (e), e.tuple_mode (1));
+ else
+ icode = code_for_aarch64_sme (unspec_for (e), e.tuple_mode (1));
+ return e.use_exact_insn (icode);
+ }
+};
+
+/* General SME unspec-based functions, parameterized on both the ZA mode
+ and the vector mode. */
+template<insn_code (*CODE) (int, machine_mode, machine_mode),
+ insn_code (*CODE_SINGLE) (int, machine_mode, machine_mode)>
+class sme_2mode_function_t : public read_write_za<unspec_based_function_base>
+{
+public:
+ using parent = read_write_za<unspec_based_function_base>;
+
+ CONSTEXPR sme_2mode_function_t (int unspec_for_sint, int unspec_for_uint,
+ int unspec_for_fp)
+ : parent (unspec_for_sint, unspec_for_uint, unspec_for_fp, 1)
+ {}
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ insn_code icode;
+ if (e.mode_suffix_id == MODE_single)
+ icode = CODE_SINGLE (unspec_for (e), e.vector_mode (0),
+ e.tuple_mode (1));
+ else
+ icode = CODE (unspec_for (e), e.vector_mode (0), e.tuple_mode (1));
+ return e.use_exact_insn (icode);
+ }
+};
+
+using sme_2mode_function
+ = sme_2mode_function_t<code_for_aarch64_sme, code_for_aarch64_sme_single>;
+
+using sme_2mode_lane_function
+ = sme_2mode_function_t<code_for_aarch64_sme_lane, nullptr>;
+
/* A function that acts like unspec_based_function_exact_insn<INT_CODE>
when operating on integers, but that expands to an (fma ...)-style
aarch64_sve* operation when applied to floats. */
@@ -355,16 +489,16 @@ public:
{
int unspec = unspec_for (e);
insn_code icode;
- if (e.type_suffix (0).float_p)
+ if (e.type_suffix (m_suffix_index).float_p)
{
/* Put the operands in the normal (fma ...) order, with the accumulator
last. This fits naturally since that's also the unprinted operand
in the asm output. */
e.rotate_inputs_left (0, e.pred != PRED_none ? 4 : 3);
- icode = code_for_aarch64_sve (unspec, e.vector_mode (0));
+ icode = code_for_aarch64_sve (unspec, e.vector_mode (m_suffix_index));
}
else
- icode = INT_CODE (unspec, e.vector_mode (0));
+ icode = INT_CODE (unspec, e.vector_mode (m_suffix_index));
return e.use_exact_insn (icode);
}
};
@@ -385,16 +519,16 @@ public:
{
int unspec = unspec_for (e);
insn_code icode;
- if (e.type_suffix (0).float_p)
+ if (e.type_suffix (m_suffix_index).float_p)
{
/* Put the operands in the normal (fma ...) order, with the accumulator
last. This fits naturally since that's also the unprinted operand
in the asm output. */
e.rotate_inputs_left (0, e.pred != PRED_none ? 5 : 4);
- icode = code_for_aarch64_lane (unspec, e.vector_mode (0));
+ icode = code_for_aarch64_lane (unspec, e.vector_mode (m_suffix_index));
}
else
- icode = INT_CODE (unspec, e.vector_mode (0));
+ icode = INT_CODE (unspec, e.vector_mode (m_suffix_index));
return e.use_exact_insn (icode);
}
};
@@ -489,6 +623,77 @@ public:
int m_unspec;
};
+/* A function that implements a x2 or x4 permute instruction. Both forms
+ of intrinsic have a single x2 or x4 tuple argument, but the underlying
+ x2 instruction takes two separate input operands. */
+class multireg_permute : public function_base
+{
+public:
+ CONSTEXPR multireg_permute (int unspec) : m_unspec (unspec) {}
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ insn_code icode = code_for_aarch64_sve (m_unspec, e.tuple_mode (0));
+ if (e.group_suffix ().vectors_per_tuple == 2)
+ {
+ machine_mode elt_mode = e.vector_mode (0);
+ rtx arg = e.args[0];
+ e.args[0] = simplify_gen_subreg (elt_mode, arg, GET_MODE (arg), 0);
+ e.args.safe_push (simplify_gen_subreg (elt_mode, arg, GET_MODE (arg),
+ GET_MODE_SIZE (elt_mode)));
+ }
+ return e.use_exact_insn (icode);
+ }
+
+ /* The unspec associated with the permutation. */
+ int m_unspec;
+};
+
+/* A function that has two type integer type suffixes, which might agree
+ or disagree on signedness. There are separate instructions for each
+ signed/unsigned combination. */
+class integer_conversion : public function_base
+{
+public:
+ CONSTEXPR integer_conversion (int unspec_for_sint, int unspec_for_sintu,
+ int unspec_for_uint, int unspec_for_uints)
+ : m_unspec_for_sint (unspec_for_sint),
+ m_unspec_for_sintu (unspec_for_sintu),
+ m_unspec_for_uint (unspec_for_uint),
+ m_unspec_for_uints (unspec_for_uints)
+ {}
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ machine_mode mode0 = e.vector_mode (0);
+ machine_mode mode1 = GET_MODE (e.args[0]);
+ int unspec;
+ if (e.type_suffix (0).unsigned_p == e.type_suffix (1).unsigned_p)
+ unspec = (e.type_suffix (0).unsigned_p
+ ? m_unspec_for_uint
+ : m_unspec_for_sint);
+ else
+ unspec = (e.type_suffix (0).unsigned_p
+ ? m_unspec_for_sintu
+ : m_unspec_for_uints);
+ return e.use_exact_insn (code_for_aarch64_sve (unspec, mode0, mode1));
+ }
+
+ /* The unspec for signed -> signed. */
+ int m_unspec_for_sint;
+
+ /* The unspec for signed -> unsigned. */
+ int m_unspec_for_sintu;
+
+ /* The unspec for unsigned -> signed. */
+ int m_unspec_for_uint;
+
+ /* The unspec for unsigned -> unsigned. */
+ int m_unspec_for_uints;
+};
+
/* A function_base for functions that reduce a vector to a scalar. */
class reduction : public function_base
{
@@ -547,7 +752,7 @@ public:
if (aarch64_simd_shift_imm_p (shift, elem_mode, m_code == ASHIFT))
{
e.args.last () = shift;
- return e.map_to_rtx_codes (m_code, m_code, -1);
+ return e.map_to_rtx_codes (m_code, m_code, -1, -1);
}
if (e.pred == PRED_x)
@@ -603,6 +808,19 @@ public:
int unspec = (e.type_suffix (1).unsigned_p
? m_unspec_for_uint
: m_unspec_for_sint);
+ if (e.vectors_per_tuple () > 1)
+ {
+ auto bits = e.type_suffix (0).element_bits;
+ auto icode = code_for_aarch64_sve_while_b_x2 (unspec, bits);
+ return e.use_exact_insn (icode);
+ }
+ if (e.type_suffix (0).tclass == TYPE_count)
+ {
+ auto bits = e.type_suffix (0).element_bits;
+ auto icode = code_for_aarch64_sve_while_c (unspec, bits);
+ return e.use_exact_insn (icode);
+ }
+
machine_mode pred_mode = e.vector_mode (0);
scalar_mode reg_mode = GET_MODE_INNER (e.vector_mode (1));
return e.use_exact_insn (code_for_while (unspec, reg_mode, pred_mode));
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc b/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc
index af816c4..9380cc7 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc
@@ -52,6 +52,17 @@ build_const_pointer (tree t)
return build_pointer_type (build_qualified_type (t, TYPE_QUAL_CONST));
}
+/* GROUP's first type suffix is a ZA-related one. Return true if the
+ group exists only for the purpose of defining C overloads. This is
+ useful if some forms of an instruction require one feature and other
+ forms require another feature, and neither feature implies the other. */
+static bool
+za_group_is_pure_overload (const function_group_info &group)
+{
+ gcc_checking_assert (type_suffixes[group.types[0][0]].za_p);
+ return group.types[0][1] == NUM_TYPE_SUFFIXES;
+}
+
/* If INSTANCE has a governing predicate, add it to the list of argument
types in ARGUMENT_TYPES. RETURN_TYPE is the type returned by the
function. */
@@ -59,15 +70,18 @@ static void
apply_predication (const function_instance &instance, tree return_type,
vec<tree> &argument_types)
{
- if (instance.pred != PRED_none)
+ /* There are currently no SME ZA instructions that have both merging and
+ unpredicated forms, so for simplicity, the predicates are always included
+ in the original format string. */
+ if (instance.pred != PRED_none && instance.pred != PRED_za_m)
{
- argument_types.quick_insert (0, get_svbool_t ());
+ argument_types.quick_insert (0, instance.gp_type ());
/* For unary merge operations, the first argument is a vector with
the same type as the result. For unary_convert_narrowt it also
provides the "bottom" half of active elements, and is present
for all types of predication. */
- if ((argument_types.length () == 2 && instance.pred == PRED_m)
- || instance.shape == shapes::unary_convert_narrowt)
+ auto nargs = argument_types.length () - 1;
+ if (instance.shape->has_merge_argument_p (instance, nargs))
argument_types.quick_insert (0, return_type);
}
}
@@ -79,6 +93,7 @@ apply_predication (const function_instance &instance, tree return_type,
f<bits> - a floating-point type with the given number of bits
f[01] - a floating-point type with the same width as type suffix 0 or 1
B - bfloat16_t
+ c - a predicate-as-counter
h<elt> - a half-sized version of <elt>
p - a predicate (represented as TYPE_SUFFIX_b)
q<elt> - a quarter-sized version of <elt>
@@ -115,6 +130,9 @@ parse_element_type (const function_instance &instance, const char *&format)
return suffix;
}
+ if (ch == 'c')
+ return TYPE_SUFFIX_c;
+
if (ch == 'p')
return TYPE_SUFFIX_b;
@@ -153,6 +171,8 @@ parse_element_type (const function_instance &instance, const char *&format)
ap - array pointer for prefetches
as - array pointer for stores
b - base vector type (from a _<m0>base suffix)
+ c0 - the result of a conversion, based on type and group suffixes
+ c1 - the source of a conversion, based on type and group suffixes
d - displacement vector type (from a _<m1>index or _<m1>offset suffix)
e<name> - an enum with the given name
s<elt> - a scalar type with the given element suffix
@@ -186,6 +206,23 @@ parse_type (const function_instance &instance, const char *&format)
if (ch == 'b')
return instance.base_vector_type ();
+ if (ch == 'c')
+ {
+ int ch = *format++;
+ gcc_assert (ch == '0' || ch == '1');
+ unsigned int id = (ch == '0' ? 0 : 1);
+ auto vector_type = instance.type_suffix (id).vector_type;
+ unsigned int num_vectors = instance.group_suffix ().vectors_per_tuple;
+ if (num_vectors != 1)
+ {
+ unsigned int bits = instance.type_suffix (id).element_bits;
+ unsigned int other_bits = instance.type_suffix (1 - id).element_bits;
+ if (other_bits > bits)
+ num_vectors /= other_bits / bits;
+ }
+ return acle_vector_types[num_vectors - 1][vector_type];
+ }
+
if (ch == 'd')
return instance.displacement_vector_type ();
@@ -275,18 +312,20 @@ parse_signature (const function_instance &instance, const char *format,
}
/* Add one function instance for GROUP, using mode suffix MODE_SUFFIX_ID,
- the type suffixes at index TI and the predication suffix at index PI.
- The other arguments are as for build_all. */
+ the type suffixes at index TI, the group suffixes at index GI, and the
+ predication suffix at index PI. The other arguments are as for
+ build_all. */
static void
build_one (function_builder &b, const char *signature,
const function_group_info &group, mode_suffix_index mode_suffix_id,
- unsigned int ti, unsigned int pi, bool force_direct_overloads)
+ unsigned int ti, unsigned int gi, unsigned int pi,
+ bool force_direct_overloads)
{
/* Byte forms of svdupq take 16 arguments. */
auto_vec<tree, 16> argument_types;
function_instance instance (group.base_name, *group.base, *group.shape,
mode_suffix_id, group.types[ti],
- group.preds[pi]);
+ group.groups[gi], group.preds[pi]);
tree return_type = parse_signature (instance, signature, argument_types);
apply_predication (instance, return_type, argument_types);
b.add_unique_function (instance, return_type, argument_types,
@@ -312,24 +351,26 @@ build_32_64 (function_builder &b, const char *signature,
mode_suffix_index mode64, bool force_direct_overloads = false)
{
for (unsigned int pi = 0; group.preds[pi] != NUM_PREDS; ++pi)
- if (group.types[0][0] == NUM_TYPE_SUFFIXES)
- {
- gcc_assert (mode32 != MODE_none && mode64 != MODE_none);
- build_one (b, signature, group, mode32, 0, pi,
- force_direct_overloads);
- build_one (b, signature, group, mode64, 0, pi,
- force_direct_overloads);
- }
- else
- for (unsigned int ti = 0; group.types[ti][0] != NUM_TYPE_SUFFIXES; ++ti)
+ for (unsigned int gi = 0; group.groups[gi] != NUM_GROUP_SUFFIXES; ++gi)
+ if (group.types[0][0] == NUM_TYPE_SUFFIXES)
{
- unsigned int bits = type_suffixes[group.types[ti][0]].element_bits;
- gcc_assert (bits == 32 || bits == 64);
- mode_suffix_index mode = bits == 32 ? mode32 : mode64;
- if (mode != MODE_none)
- build_one (b, signature, group, mode, ti, pi,
- force_direct_overloads);
+ gcc_assert (mode32 != MODE_none && mode64 != MODE_none);
+ build_one (b, signature, group, mode32, 0, gi, pi,
+ force_direct_overloads);
+ build_one (b, signature, group, mode64, 0, gi, pi,
+ force_direct_overloads);
}
+ else
+ for (unsigned int ti = 0; group.types[ti][0] != NUM_TYPE_SUFFIXES;
+ ++ti)
+ {
+ unsigned int bits = type_suffixes[group.types[ti][0]].element_bits;
+ gcc_assert (bits == 32 || bits == 64);
+ mode_suffix_index mode = bits == 32 ? mode32 : mode64;
+ if (mode != MODE_none)
+ build_one (b, signature, group, mode, ti, gi, pi,
+ force_direct_overloads);
+ }
}
/* For every type and predicate combination in GROUP, add one function
@@ -423,10 +464,11 @@ build_all (function_builder &b, const char *signature,
bool force_direct_overloads = false)
{
for (unsigned int pi = 0; group.preds[pi] != NUM_PREDS; ++pi)
- for (unsigned int ti = 0;
- ti == 0 || group.types[ti][0] != NUM_TYPE_SUFFIXES; ++ti)
- build_one (b, signature, group, mode_suffix_id, ti, pi,
- force_direct_overloads);
+ for (unsigned int gi = 0; group.groups[gi] != NUM_GROUP_SUFFIXES; ++gi)
+ for (unsigned int ti = 0;
+ ti == 0 || group.types[ti][0] != NUM_TYPE_SUFFIXES; ++ti)
+ build_one (b, signature, group, mode_suffix_id, ti, gi, pi,
+ force_direct_overloads);
}
/* TYPE is the largest type suffix associated with the arguments of R,
@@ -584,6 +626,90 @@ struct binary_imm_long_base : public overloaded_base<0>
}
};
+/* Base class for binary_za_m and similar shapes. */
+template<type_class_index TCLASS = function_resolver::SAME_TYPE_CLASS,
+ unsigned int BITS = function_resolver::SAME_SIZE>
+struct binary_za_m_base : public overloaded_base<1>
+{
+ tree
+ resolve (function_resolver &r) const override
+ {
+ type_suffix_index type;
+ if (!r.check_num_arguments (5)
+ || !r.require_integer_immediate (0)
+ || !r.require_vector_type (1, VECTOR_TYPE_svbool_t)
+ || !r.require_vector_type (2, VECTOR_TYPE_svbool_t)
+ || (type = r.infer_vector_type (3)) == NUM_TYPE_SUFFIXES
+ || !r.require_derived_vector_type (4, 3, type, TCLASS, BITS))
+ return error_mark_node;
+
+ return r.resolve_to (r.mode_suffix_id, r.type_suffix_ids[0], type);
+ }
+
+ bool
+ check (function_checker &c) const override
+ {
+ return c.require_immediate_range (0, 0, c.num_za_tiles () - 1);
+ }
+};
+
+/* Base class for shapes like binary_za_slice_lane. TCLASS is the type
+ class of the final vector argument. */
+template<type_class_index TCLASS = function_resolver::SAME_TYPE_CLASS>
+struct binary_za_slice_lane_base : public overloaded_base<1>
+{
+ constexpr binary_za_slice_lane_base (unsigned int lane_type_suffix)
+ : m_lane_type_suffix (lane_type_suffix) {}
+
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "_,su32,t1,v1,su64", group, MODE_none);
+ }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ sve_type type;
+ if (!r.check_num_arguments (4)
+ || !r.require_scalar_type (0, "uint32_t")
+ || !(type = r.infer_tuple_type (1))
+ || !r.require_derived_vector_type (2, 1, type, TCLASS)
+ || !r.require_integer_immediate (3))
+ return error_mark_node;
+
+ return r.resolve_to (r.mode_suffix_id, type);
+ }
+
+ bool
+ check (function_checker &c) const override
+ {
+ unsigned int bytes = c.type_suffix (m_lane_type_suffix).element_bytes;
+ return c.require_immediate_range (3, 0, 16 / bytes - 1);
+ }
+
+ unsigned int m_lane_type_suffix;
+};
+
+/* Base class for shapes like binary_za_slice_opt_single. TCLASS is the
+ type class of the final argument. */
+template<type_class_index TCLASS = function_resolver::SAME_TYPE_CLASS>
+struct binary_za_slice_opt_single_base : public overloaded_base<1>
+{
+ tree
+ resolve (function_resolver &r) const override
+ {
+ sve_type type;
+ if (!r.check_num_arguments (3)
+ || !r.require_scalar_type (0, "uint32_t")
+ || !(type = r.infer_tuple_type (1)))
+ return error_mark_node;
+
+ return r.finish_opt_single_resolution (2, 1, type, TCLASS);
+ }
+};
+
/* Base class for inc_dec and inc_dec_pat. */
struct inc_dec_base : public overloaded_base<0>
{
@@ -649,7 +775,8 @@ struct load_contiguous_base : public overloaded_base<0>
|| (vnum_p && !r.require_scalar_type (i + 1, "int64_t")))
return error_mark_node;
- return r.resolve_to (r.mode_suffix_id, type);
+ return r.resolve_to (r.mode_suffix_id, type, NUM_TYPE_SUFFIXES,
+ r.group_suffix_id);
}
};
@@ -704,6 +831,29 @@ struct load_ext_gather_base : public overloaded_base<1>
}
};
+/* sv<t0>x<g>_t svfoo_t0_g(uint64_t, svuint8_t, uint64_t)
+
+ where the first argument is the ZT register number (currently always 0)
+ and the final argument is a constant index. The instruction divides
+ the vector argument in BITS-bit quantities. */
+template<unsigned int BITS>
+struct luti_lane_zt_base : public nonoverloaded_base
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ build_all (b, "t0,su64,vu8,su64", group, MODE_none);
+ }
+
+ bool
+ check (function_checker &c) const override
+ {
+ auto nvectors = c.vectors_per_tuple ();
+ return (c.require_immediate_range (0, 0, 0)
+ && c.require_immediate_range (2, 0, 32 / BITS / nvectors - 1));
+ }
+};
+
/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:quarter>_t,
sv<t0:quarter>_t) (for integer t0)
sv<t0>_t svmmla[_t0](sv<t0>_t, sv<t0>_t, sv<t0>_t) (for floating-point t0)
@@ -941,7 +1091,7 @@ struct ternary_bfloat_lane_base
bool
check (function_checker &c) const override
{
- return c.require_immediate_lane_index (3, N);
+ return c.require_immediate_lane_index (3, 2, N);
}
};
@@ -956,7 +1106,7 @@ struct ternary_qq_lane_base
bool
check (function_checker &c) const override
{
- return c.require_immediate_lane_index (3, 4);
+ return c.require_immediate_lane_index (3, 0);
}
};
@@ -1101,6 +1251,41 @@ struct binary_int_opt_n_def : public overloaded_base<0>
};
SHAPE (binary_int_opt_n)
+/* Like binary_int_opt_n for single vectors. For tuples:
+
+ sv<t0>x<g>_t svfoo[_t0_g](sv<t0>x<g>_t, sv<t0:int>x<g>_t)
+ sv<t0>x<g>_t svfoo[_single_t0_g](sv<t0>x<g>_t, sv<t0:int>_t). */
+struct binary_int_opt_single_n_def : public overloaded_base<0>
+{
+ bool explicit_group_suffix_p () const override { return false; }
+
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "t0,t0,ts0", group, MODE_none);
+ if (group.groups[0] == GROUP_none)
+ build_all (b, "v0,v0,ss0", group, MODE_n);
+ else
+ build_all (b, "t0,t0,vs0", group, MODE_single);
+ }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ unsigned int i, nargs;
+ sve_type type;
+ if (!r.check_gp_argument (2, i, nargs)
+ || !(type = r.infer_sve_type (i)))
+ return error_mark_node;
+
+ return (type.num_vectors == 1 && r.scalar_argument_p (i + 1)
+ ? r.finish_opt_n_resolution (i + 1, i, type.type, TYPE_signed)
+ : r.finish_opt_single_resolution (i + 1, i, type, TYPE_signed));
+ }
+};
+SHAPE (binary_int_opt_single_n)
+
/* sv<t0>_t svfoo_<t0>(sv<t0>_t, sv<t0>_t, uint64_t)
where the final argument is an integer constant expression in the
@@ -1123,7 +1308,7 @@ struct binary_lane_def : public overloaded_base<0>
bool
check (function_checker &c) const override
{
- return c.require_immediate_lane_index (2);
+ return c.require_immediate_lane_index (2, 1);
}
};
SHAPE (binary_lane)
@@ -1148,7 +1333,7 @@ struct binary_long_lane_def : public overloaded_base<0>
type_suffix_index type, result_type;
if (!r.check_gp_argument (3, i, nargs)
|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
- || !r.require_matching_vector_type (i + 1, type)
+ || !r.require_matching_vector_type (i + 1, i, type)
|| !r.require_integer_immediate (i + 2)
|| (result_type = long_type_suffix (r, type)) == NUM_TYPE_SUFFIXES)
return error_mark_node;
@@ -1162,7 +1347,7 @@ struct binary_long_lane_def : public overloaded_base<0>
bool
check (function_checker &c) const override
{
- return c.require_immediate_lane_index (2);
+ return c.require_immediate_lane_index (2, 1);
}
};
SHAPE (binary_long_lane)
@@ -1305,6 +1490,41 @@ struct binary_opt_n_def : public overloaded_base<0>
};
SHAPE (binary_opt_n)
+/* Like binary_opt_n for single vectors. For tuples:
+
+ sv<t0>x<g>_t svfoo[_t0_g](sv<t0>x<g>_t, sv<t0>x<g>_t)
+ sv<t0>x<g>_t svfoo[_single_t0_g](sv<t0>x<g>_t, sv<t0>_t). */
+struct binary_opt_single_n_def : public overloaded_base<0>
+{
+ bool explicit_group_suffix_p () const override { return false; }
+
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "t0,t0,t0", group, MODE_none);
+ if (group.groups[0] == GROUP_none)
+ build_all (b, "v0,v0,s0", group, MODE_n);
+ else
+ build_all (b, "t0,t0,v0", group, MODE_single);
+ }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ unsigned int i, nargs;
+ sve_type type;
+ if (!r.check_gp_argument (2, i, nargs)
+ || !(type = r.infer_sve_type (i)))
+ return error_mark_node;
+
+ return (type.num_vectors == 1 && r.scalar_argument_p (i + 1)
+ ? r.finish_opt_n_resolution (i + 1, i, type.type)
+ : r.finish_opt_single_resolution (i + 1, i, type));
+ }
+};
+SHAPE (binary_opt_single_n)
+
/* svbool_t svfoo(svbool_t, svbool_t). */
struct binary_pred_def : public nonoverloaded_base
{
@@ -1356,6 +1576,33 @@ struct binary_scalar_def : public nonoverloaded_base
};
SHAPE (binary_scalar)
+/* sv<t0>x<g>_t svfoo[_single_t0_g](sv<t0>x<g>_t, sv<t0>_t). */
+struct binary_single_def : public overloaded_base<0>
+{
+ bool explicit_group_suffix_p () const override { return false; }
+
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "t0,t0,v0", group, MODE_single);
+ }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ sve_type type;
+ if (!r.check_num_arguments (2)
+ || !(type = r.infer_sve_type (0))
+ || !r.require_derived_vector_type (1, 0, type, r.SAME_TYPE_CLASS,
+ r.SAME_SIZE, 1))
+ return error_mark_node;
+
+ return r.resolve_to (MODE_single, type);
+ }
+};
+SHAPE (binary_single)
+
/* sv<t0:uint>_t svfoo[_t0](sv<t0>_t, sv<t0>_t).
i.e. a version of "binary" that returns unsigned integers. */
@@ -1571,6 +1818,197 @@ struct binary_wide_opt_n_def : public overloaded_base<0>
};
SHAPE (binary_wide_opt_n)
+/* void svfoo_t0[_t1]_g(uint64_t, svbool_t, svbool_t, sv<t1>x<g>_t,
+ sv<t1:int>x<g>_t)
+
+ where the first argument is a ZA tile. */
+struct binary_za_int_m_def : public binary_za_m_base<TYPE_signed>
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "_,su64,vp,vp,t1,ts1", group, MODE_none);
+ }
+};
+SHAPE (binary_za_int_m)
+
+/* void svfoo_t0[_t1]_g(uint64_t, svbool_t, svbool_t, sv<t1>x<g>_t,
+ sv<t1>x<g>_t)
+
+ where the first argument is a ZA tile. */
+struct binary_za_m_def : public binary_za_m_base<>
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ /* Allow the overloaded form to be specified seperately, with just
+ a single suffix. This is necessary for the 64-bit SME MOP intrinsics,
+ which have some forms dependent on FEAT_SME_I16I64 and some forms
+ dependent on FEAT_SME_F64F64. The resolver needs to be defined
+ for base SME. */
+ if (group.types[0][1] != NUM_TYPE_SUFFIXES)
+ build_all (b, "_,su64,vp,vp,t1,t1", group, MODE_none);
+ }
+};
+SHAPE (binary_za_m)
+
+/* void svfoo_lane_t0[_t1]_g(uint32_t, sv<t1>x<g>_t, sv<t1>_t, uint64_t)
+
+ where the first argument is a variable ZA slice and the final argument
+ indexes a single element in the preceding vector argument. */
+struct binary_za_slice_lane_def : public binary_za_slice_lane_base<>
+{
+ constexpr binary_za_slice_lane_def () : binary_za_slice_lane_base<> (1) {}
+};
+SHAPE (binary_za_slice_lane)
+
+/* void svfoo_t0[_t1]_g(uint32_t, sv<t1>x<g>_t, sv<t1:int>x<g>_t)
+ void svfoo[_single]_t0[_t1]_g(uint32_t, sv<t1>x<g>_t, sv<t1:int>_t).
+
+ where the first argument is a variable ZA slice. */
+struct binary_za_slice_int_opt_single_def
+ : public binary_za_slice_opt_single_base<TYPE_signed>
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "_,su32,t1,ts1", group, MODE_none);
+ build_all (b, "_,su32,t1,vs1", group, MODE_single);
+ }
+};
+SHAPE (binary_za_slice_int_opt_single)
+
+/* void svfoo_t0[_t1]_g(uint32_t, sv<t1>x<g>_t, sv<t1>x<g>_t)
+ void svfoo[_single]_t0[_t1]_g(uint32_t, sv<t1>x<g>_t, sv<t1>_t)
+
+ where the first argument is a variable ZA slice. */
+struct binary_za_slice_opt_single_def
+ : public binary_za_slice_opt_single_base<>
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "_,su32,t1,t1", group, MODE_none);
+ build_all (b, "_,su32,t1,v1", group, MODE_single);
+ }
+};
+SHAPE (binary_za_slice_opt_single)
+
+/* void svfoo_t0[_t1]_g(uint32_t, sv<t1>x<g>_t, sv<t1:uint>x<g>_t)
+ void svfoo[_single]_t0[_t1]_g(uint32_t, sv<t1>x<g>_t, sv<t1:uint>_t)
+
+ where the first argument is a variable ZA slice. */
+struct binary_za_slice_uint_opt_single_def
+ : public binary_za_slice_opt_single_base<TYPE_unsigned>
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "_,su32,t1,tu1", group, MODE_none);
+ build_all (b, "_,su32,t1,vu1", group, MODE_single);
+ }
+};
+SHAPE (binary_za_slice_uint_opt_single)
+
+/* void svfoo_t0[_t1]_g(uint64_t, svbool_t, svbool_t, sv<t1>x<g>_t,
+ sv<t1:uint>x<g>_t)
+
+ where the first argument is a ZA tile. */
+struct binary_za_uint_m_def : public binary_za_m_base<TYPE_unsigned>
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "_,su64,vp,vp,t1,tu1", group, MODE_none);
+ }
+};
+SHAPE (binary_za_uint_m)
+
+/* sv<t0>x<g>_t svfoo[_t0_t1_g](sv<t0>x<g>_t, sv<t0>x<g>_t). */
+struct binaryxn_def : public overloaded_base<0>
+{
+ bool explicit_group_suffix_p () const override { return false; }
+
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "t0,t0,t0", group, MODE_none);
+ }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ vector_type_index pred_type;
+ sve_type type;
+ if (!r.check_num_arguments (3)
+ || (pred_type = r.infer_predicate_type (0)) == NUM_VECTOR_TYPES
+ || !(type = r.infer_sve_type (1))
+ || !r.require_matching_predicate_type (pred_type, type)
+ || !r.require_matching_vector_type (2, 1, type))
+ return error_mark_node;
+
+ return r.resolve_to (r.mode_suffix_id, type);
+ }
+};
+SHAPE (binaryxn)
+
+/* bool svfoo(). */
+struct bool_inherent_def : public nonoverloaded_base
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ build_all (b, "sp", group, MODE_none);
+ }
+};
+SHAPE (bool_inherent)
+
+/* Either:
+
+ sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, sv<t0>_t)
+
+ for single vectors or:
+
+ sv<t0>x<g>_t svfoo[_single_t0_g](sv<t0>x<g>_t, sv<t0>_t, sv<t0>_t)
+
+ for tuples. */
+struct clamp_def : public overloaded_base<0>
+{
+ bool explicit_group_suffix_p () const override { return false; }
+
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "t0,t0,v0,v0", group,
+ group.groups[0] == GROUP_none ? MODE_none : MODE_single);
+ }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ sve_type type;
+ if (!r.check_num_arguments (3)
+ || !(type = r.infer_sve_type (0))
+ || !r.require_derived_vector_type (1, 0, type, r.SAME_TYPE_CLASS,
+ r.SAME_SIZE, 1)
+ || !r.require_derived_vector_type (2, 0, type, r.SAME_TYPE_CLASS,
+ r.SAME_SIZE, 1))
+ return error_mark_node;
+
+ auto mode = type.num_vectors == 1 ? MODE_none : MODE_single;
+ return r.resolve_to (mode, type);
+ }
+};
+SHAPE (clamp)
+
/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
<t0>_t svfoo[_n_t0](<t0>_t, sv<t0>_t). */
struct clast_def : public overloaded_base<0>
@@ -1603,7 +2041,7 @@ struct clast_def : public overloaded_base<0>
{
type_suffix_index type;
if ((type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
- || !r.require_matching_vector_type (i + 1, type))
+ || !r.require_matching_vector_type (i + 1, i, type))
return error_mark_node;
return r.resolve_to (MODE_none, type);
}
@@ -1676,7 +2114,7 @@ struct compare_ptr_def : public overloaded_base<0>
};
SHAPE (compare_ptr)
-/* svbool_t svfoo_t0[_t1](<t1>_t, <t1>_t)
+/* svboolx<g>_t svfoo_t0[_t1]_g(<t1>_t, <t1>_t)
where _t0 is a _b<bits> suffix that describes the predicate result.
There is no direct relationship between the element sizes of _t0
@@ -1687,7 +2125,7 @@ struct compare_scalar_def : public overloaded_base<1>
build (function_builder &b, const function_group_info &group) const override
{
b.add_overloaded_functions (group, MODE_none);
- build_all (b, "vp,s1,s1", group, MODE_none);
+ build_all (b, "tp,s1,s1", group, MODE_none);
}
tree
@@ -1700,11 +2138,47 @@ struct compare_scalar_def : public overloaded_base<1>
|| !r.require_matching_integer_scalar_type (i + 1, i, type))
return error_mark_node;
- return r.resolve_to (r.mode_suffix_id, r.type_suffix_ids[0], type);
+ return r.resolve_to (r.mode_suffix_id, r.type_suffix_ids[0], type,
+ r.group_suffix_id);
}
};
SHAPE (compare_scalar)
+/* svcount_t svfoo_t0[_t1](<t1>_t, <t1>_t, uint64_t)
+
+ where _t0 is a _c<bits> suffix that describes the predicate-as-counter
+ result. The final argument is an integer constant that specifies the
+ number of vectors (2 or 4). */
+struct compare_scalar_count_def : public overloaded_base<1>
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "v0,s1,s1,su64", group, MODE_none);
+ }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ unsigned int i, nargs;
+ type_suffix_index type;
+ if (!r.check_gp_argument (3, i, nargs)
+ || (type = r.infer_64bit_scalar_integer_pair (i)) == NUM_TYPE_SUFFIXES
+ || !r.require_integer_immediate (i + 2))
+ return error_mark_node;
+
+ return r.resolve_to (r.mode_suffix_id, r.type_suffix_ids[0], type);
+ }
+
+ bool
+ check (function_checker &c) const override
+ {
+ return c.require_immediate_either_or (2, 2, 4);
+ }
+};
+SHAPE (compare_scalar_count)
+
/* svbool_t svfoo[_t0](sv<t0>_t, svint64_t) (for signed t0)
svbool_t svfoo[_n_t0](sv<t0>_t, int64_t) (for signed t0)
svbool_t svfoo[_t0](sv<t0>_t, svuint64_t) (for unsigned t0)
@@ -1768,6 +2242,25 @@ struct count_pred_def : public nonoverloaded_base
};
SHAPE (count_pred)
+/* uint64_t svfoo_t0(sv<t0>_t, uint64_t)
+
+ where the final argument must be 2 or 4. */
+struct count_pred_c_def : public nonoverloaded_base
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ build_all (b, "su64,v0,su64", group, MODE_none);
+ }
+
+ bool
+ check (function_checker &c) const override
+ {
+ return c.require_immediate_either_or (1, 2, 4);
+ }
+};
+SHAPE (count_pred_c)
+
/* uint64_t svfoo[_t0](sv<t0>_t). */
struct count_vector_def : public overloaded_base<0>
{
@@ -1806,6 +2299,54 @@ struct create_def : public overloaded_base<0>
};
SHAPE (create)
+/* void svfoo_lane_t0[_t1]_g(uint32_t, sv<t1>x<g>_t, sv<t1:int>_t, uint64_t)
+
+ where the final argument indexes a <t0>-sized group of elements in the
+ preceding vector argument. */
+struct dot_za_slice_int_lane_def
+ : public binary_za_slice_lane_base<TYPE_signed>
+{
+ constexpr dot_za_slice_int_lane_def ()
+ : binary_za_slice_lane_base<TYPE_signed> (0) {}
+
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "_,su32,t1,vs1,su64", group, MODE_none);
+ }
+};
+SHAPE (dot_za_slice_int_lane)
+
+/* void svfoo_lane_t0[_t1]_g(uint32_t, sv<t1>x<g>_t, sv<t1>_t, uint64_t)
+
+ where the final argument indexes a <t0>-sized group of elements in the
+ preceding vector argument. */
+struct dot_za_slice_lane_def : public binary_za_slice_lane_base<>
+{
+ constexpr dot_za_slice_lane_def () : binary_za_slice_lane_base<> (0) {}
+};
+SHAPE (dot_za_slice_lane)
+
+/* void svfoo_lane_t0[_t1]_g(uint32_t, sv<t1>x<g>_t, sv<t1:uint>_t, uint64_t)
+
+ where the final argument indexes a <t0>-sized group of elements in the
+ preceding vector argument. */
+struct dot_za_slice_uint_lane_def
+ : public binary_za_slice_lane_base<TYPE_unsigned>
+{
+ constexpr dot_za_slice_uint_lane_def ()
+ : binary_za_slice_lane_base<TYPE_unsigned> (0) {}
+
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "_,su32,t1,vu1,su64", group, MODE_none);
+ }
+};
+SHAPE (dot_za_slice_uint_lane)
+
/* sv<t0>_t svfoo[_n]_t0(<t0>_t, ..., <t0>_t)
where there are enough arguments to fill 128 bits of data (or to
@@ -1857,6 +2398,24 @@ struct ext_def : public overloaded_base<0>
};
SHAPE (ext)
+/* svboolx<g>_t svfoo_t0_g(sv<t0>_t, sv<t0>_t, uint32_t). */
+struct extract_pred_def : public nonoverloaded_base
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ build_all (b, "tp,vc,su64", group, MODE_none);
+ }
+
+ bool
+ check (function_checker &c) const override
+ {
+ unsigned int size = c.vectors_per_tuple ();
+ return c.require_immediate_range (1, 0, 4 / size - 1);
+ }
+};
+SHAPE (extract_pred)
+
/* <t0>_t svfoo[_t0](<t0>_t, sv<t0>_t). */
struct fold_left_def : public overloaded_base<0>
{
@@ -1899,9 +2458,9 @@ struct get_def : public overloaded_base<0>
resolve (function_resolver &r) const override
{
unsigned int i, nargs;
- type_suffix_index type;
+ sve_type type;
if (!r.check_gp_argument (2, i, nargs)
- || (type = r.infer_tuple_type (i)) == NUM_TYPE_SUFFIXES
+ || !(type = r.infer_tuple_type (i))
|| !r.require_integer_immediate (i + 1))
return error_mark_node;
@@ -2050,8 +2609,91 @@ struct inherent_b_def : public overloaded_base<0>
};
SHAPE (inherent_b)
-/* sv<t0>[xN]_t svfoo[_t0](const <t0>_t *)
- sv<t0>[xN]_t svfoo_vnum[_t0](const <t0>_t *, int64_t). */
+/* void svfoo_t0(). */
+struct inherent_za_def : public nonoverloaded_base
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ build_all (b, "_", group, MODE_none);
+ }
+};
+SHAPE (inherent_za)
+
+/* void svfoo_zt(uint64_t)
+
+ where the argument must be zero. */
+struct inherent_zt_def : public nonoverloaded_base
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ build_all (b, "_,su64", group, MODE_none);
+ }
+
+ bool
+ check (function_checker &c) const override
+ {
+ return c.require_immediate_range (0, 0, 0);
+ }
+};
+SHAPE (inherent_zt)
+
+/* void svfoo_t0(uint64_t)
+
+ where the argument is an integer constant that specifies an 8-bit mask. */
+struct inherent_mask_za_def : public nonoverloaded_base
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ build_all (b, "_,su64", group, MODE_none);
+ }
+
+ bool
+ check (function_checker &c) const override
+ {
+ return c.require_immediate_range (0, 0, 255);
+ }
+};
+SHAPE (inherent_mask_za)
+
+/* void svfoo_t0(uint32_t, const void *)
+ void svfoo_vnum_t0(uint32_t, const void *, int64_t)
+
+ where the first argument is a variable ZA slice. */
+struct ldr_za_def : public nonoverloaded_base
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ build_all (b, "_,su32,al", group, MODE_none);
+ build_all (b, "_,su32,al,ss64", group, MODE_vnum);
+ }
+};
+SHAPE (ldr_za)
+
+/* void svfoo_zt(uint64_t, const void *)
+
+ where the first argument must be zero. */
+struct ldr_zt_def : public nonoverloaded_base
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ build_all (b, "_,su64,al", group, MODE_none);
+ }
+
+ bool
+ check (function_checker &c) const override
+ {
+ return c.require_immediate_range (0, 0, 0);
+ }
+};
+SHAPE (ldr_zt)
+
+/* sv<t0>[xN]_t svfoo[_t0]_g(const <t0>_t *)
+ sv<t0>[xN]_t svfoo_vnum[_t0]_g(const <t0>_t *, int64_t). */
struct load_def : public load_contiguous_base
{
void
@@ -2260,6 +2902,33 @@ struct load_replicate_def : public load_contiguous_base
};
SHAPE (load_replicate)
+/* void svfoo_t0(uint64_t, uint32_t, svbool_t, const void *)
+ void svfoo_vnum_t0(uint64_t, uint32_t, svbool_t, const void *, int64_t)
+
+ where the first two fields form a (ZA tile, slice) pair. */
+struct load_za_def : public nonoverloaded_base
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ build_all (b, "_,su64,su32,vp,al", group, MODE_none);
+ build_all (b, "_,su64,su32,vp,al,ss64", group, MODE_vnum);
+ }
+
+ bool
+ check (function_checker &c) const override
+ {
+ return c.require_immediate_range (0, 0, c.num_za_tiles () - 1);
+ }
+};
+SHAPE (load_za)
+
+using luti2_lane_zt_def = luti_lane_zt_base<2>;
+SHAPE (luti2_lane_zt)
+
+using luti4_lane_zt_def = luti_lane_zt_base<4>;
+SHAPE (luti4_lane_zt)
+
/* svbool_t svfoo(enum svpattern). */
struct pattern_pred_def : public nonoverloaded_base
{
@@ -2354,6 +3023,76 @@ struct rdffr_def : public nonoverloaded_base
};
SHAPE (rdffr)
+/* sv<t1>x<g>_t svfoo_t0_t1_g(uint64_t, uint32_t). */
+struct read_za_def : public nonoverloaded_base
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ build_all (b, "t1,su64,su32", group, MODE_none);
+ }
+
+ bool
+ check (function_checker &c) const override
+ {
+ return c.require_immediate_range (0, 0, c.num_za_tiles () - 1);
+ }
+};
+SHAPE (read_za)
+
+/* sv<t1>_t svfoo_t0[_t1](uint64_t, uint32_t)
+
+ where the first two fields form a (ZA tile, slice) pair. */
+struct read_za_m_def : public overloaded_base<1>
+{
+ bool
+ has_merge_argument_p (const function_instance &, unsigned int) const override
+ {
+ return true;
+ }
+
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "t1,su64,su32", group, MODE_none);
+ }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ gcc_assert (r.pred == PRED_m);
+ type_suffix_index type;
+ if (!r.check_num_arguments (4)
+ || (type = r.infer_vector_type (0)) == NUM_TYPE_SUFFIXES
+ || !r.require_vector_type (1, VECTOR_TYPE_svbool_t)
+ || !r.require_integer_immediate (2)
+ || !r.require_scalar_type (3, "uint32_t"))
+ return error_mark_node;
+
+ return r.resolve_to (r.mode_suffix_id, r.type_suffix_ids[0], type);
+ }
+
+ bool
+ check (function_checker &c) const override
+ {
+ gcc_assert (c.pred == PRED_m);
+ return c.require_immediate_range (1, 0, c.num_za_tiles () - 1);
+ }
+};
+SHAPE (read_za_m)
+
+/* sv<t1>x<g>_t svfoo_t0_t1_g(uint32_t). */
+struct read_za_slice_def : public nonoverloaded_base
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ build_all (b, "t1,su32", group, MODE_none);
+ }
+};
+SHAPE (read_za_slice)
+
/* <t0>_t svfoo[_t0](sv<t0>_t). */
struct reduction_def : public overloaded_base<0>
{
@@ -2395,6 +3134,45 @@ struct reduction_wide_def : public overloaded_base<0>
};
SHAPE (reduction_wide)
+/* sv<t0>x<g>_t svfoo_t0[_t1_g](sv<t1>x<g>_t)
+
+ where the target type <t0> must be specified explicitly but the source
+ type <t1> can be inferred. */
+struct reinterpret_def : public overloaded_base<1>
+{
+ bool explicit_group_suffix_p () const override { return false; }
+
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "t0,t1", group, MODE_none);
+ }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ sve_type type;
+ if (!r.check_num_arguments (1)
+ || !(type = r.infer_sve_type (0)))
+ return error_mark_node;
+
+ return r.resolve_to (r.mode_suffix_id, type);
+ }
+};
+SHAPE (reinterpret)
+
+/* sv<t0>_t svfoo_t0(sv<t0>_t, sv<t0>_t, uint32_t). */
+struct select_pred_def : public nonoverloaded_base
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ build_all (b, "v0,v0,vp,su32", group, MODE_none);
+ }
+};
+SHAPE (select_pred)
+
/* sv<t0>xN_t svfoo[_t0](sv<t0>xN_t, uint64_t, sv<t0>_t)
where the second argument is an integer constant expression in the
@@ -2412,9 +3190,9 @@ struct set_def : public overloaded_base<0>
resolve (function_resolver &r) const override
{
unsigned int i, nargs;
- type_suffix_index type;
+ sve_type type;
if (!r.check_gp_argument (3, i, nargs)
- || (type = r.infer_tuple_type (i)) == NUM_TYPE_SUFFIXES
+ || !(type = r.infer_tuple_type (i))
|| !r.require_integer_immediate (i + 1)
|| !r.require_derived_vector_type (i + 2, i, type))
return error_mark_node;
@@ -2564,6 +3342,42 @@ typedef shift_right_imm_narrow_wrapper<binary_imm_narrowt_base_unsigned, 2>
shift_right_imm_narrowt_to_uint_def;
SHAPE (shift_right_imm_narrowt_to_uint)
+/* sv<t0>_t svfoo[_n_t0])(sv<t0>_t, uint64_t)
+
+ where the final argument must be an integer constant expression in the
+ range [1, sizeof (<t0>_t) * 8]. */
+struct shift_right_imm_narrowxn_def : public overloaded_base<1>
+{
+ bool explicit_group_suffix_p () const override { return false; }
+
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_n);
+ build_all (b, "c0,c1,su64", group, MODE_n);
+ }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ sve_type type;
+ if (!r.check_num_arguments (2)
+ || !(type = r.infer_sve_type (0))
+ || !r.require_integer_immediate (1))
+ return error_mark_node;
+ return r.resolve_to (r.mode_suffix_id, type);
+ }
+
+ bool
+ check (function_checker &c) const override
+ {
+ unsigned int suffix = c.group_suffix_id == GROUP_x4 ? 1 : 0;
+ unsigned int bits = c.type_suffix (suffix).element_bits;
+ return c.require_immediate_range (1, 1, bits);
+ }
+};
+SHAPE (shift_right_imm_narrowxn)
+
/* void svfoo[_t0](<X>_t *, sv<t0>[xN]_t)
void svfoo_vnum[_t0](<X>_t *, int64_t, sv<t0>[xN]_t)
@@ -2587,11 +3401,11 @@ struct store_def : public overloaded_base<0>
gcc_assert (r.mode_suffix_id == MODE_none || vnum_p);
unsigned int i, nargs;
- type_suffix_index type;
+ sve_type type;
if (!r.check_gp_argument (vnum_p ? 3 : 2, i, nargs)
|| !r.require_pointer_type (i)
|| (vnum_p && !r.require_scalar_type (i + 1, "int64_t"))
- || ((type = r.infer_tuple_type (nargs - 1)) == NUM_TYPE_SUFFIXES))
+ || !(type = r.infer_tuple_type (nargs - 1)))
return error_mark_node;
return r.resolve_to (r.mode_suffix_id, type);
@@ -2694,6 +3508,92 @@ struct store_scatter_offset_restricted_def : public store_scatter_base
};
SHAPE (store_scatter_offset_restricted)
+/* void svfoo_t0(uint64_t, uint32_t, svbool_t, void *)
+ void svfoo_vnum_t0(uint64_t, uint32_t, svbool_t, void *, int64_t)
+
+ where the first two fields form a (ZA tile, slice) pair. */
+struct store_za_def : public nonoverloaded_base
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ build_all (b, "_,su64,su32,vp,as", group, MODE_none);
+ build_all (b, "_,su64,su32,vp,as,ss64", group, MODE_vnum);
+ }
+
+ bool
+ check (function_checker &c) const override
+ {
+ return c.require_immediate_range (0, 0, c.num_za_tiles () - 1);
+ }
+};
+SHAPE (store_za)
+
+/* void svfoo[_t0_g](<X>_t *, sv<t0>x<g>_t)
+ void svfoo_vnum[_t0_g](<X>_t *, int64_t, sv<t0>x<g>_t)
+
+ where <X> might be tied to <t0> (for non-truncating stores) or might
+ depend on the function base name (for truncating stores). */
+struct storexn_def : public store_def
+{
+ bool explicit_group_suffix_p () const override { return false; }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ bool vnum_p = r.mode_suffix_id == MODE_vnum;
+ gcc_assert (r.mode_suffix_id == MODE_none || vnum_p);
+
+ unsigned int nargs = vnum_p ? 4 : 3;
+ vector_type_index pred_type;
+ sve_type type;
+ if (!r.check_num_arguments (nargs)
+ || (pred_type = r.infer_predicate_type (0)) == NUM_VECTOR_TYPES
+ || !r.require_pointer_type (1)
+ || (vnum_p && !r.require_scalar_type (2, "int64_t"))
+ || !(type = r.infer_sve_type (nargs - 1))
+ || !r.require_matching_predicate_type (pred_type, type))
+ return error_mark_node;
+
+ return r.resolve_to (r.mode_suffix_id, type);
+ }
+};
+SHAPE (storexn)
+
+/* void svfoo_t0(uint32_t, void *)
+ void svfoo_vnum_t0(uint32_t, void *, int64_t)
+
+ where the first argument is a variable ZA slice. */
+struct str_za_def : public nonoverloaded_base
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ build_all (b, "_,su32,as", group, MODE_none);
+ build_all (b, "_,su32,as,ss64", group, MODE_vnum);
+ }
+};
+SHAPE (str_za)
+
+/* void svfoo_zt(uint64_t, void *)
+
+ where the first argument must be zero. */
+struct str_zt_def : public nonoverloaded_base
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ build_all (b, "_,su64,as", group, MODE_none);
+ }
+
+ bool
+ check (function_checker &c) const override
+ {
+ return c.require_immediate_range (0, 0, 0);
+ }
+};
+SHAPE (str_zt)
+
/* sv<t0>_t svfoo[_t0](sv<t0>xN_t, sv<t0:uint>_t). */
struct tbl_tuple_def : public overloaded_base<0>
{
@@ -2708,9 +3608,9 @@ struct tbl_tuple_def : public overloaded_base<0>
resolve (function_resolver &r) const override
{
unsigned int i, nargs;
- type_suffix_index type;
+ sve_type type;
if (!r.check_gp_argument (2, i, nargs)
- || (type = r.infer_tuple_type (i)) == NUM_TYPE_SUFFIXES
+ || !(type = r.infer_tuple_type (i))
|| !r.require_derived_vector_type (i + 1, i, type, TYPE_unsigned))
return error_mark_node;
@@ -2817,7 +3717,7 @@ struct ternary_lane_def : public overloaded_base<0>
bool
check (function_checker &c) const override
{
- return c.require_immediate_lane_index (3);
+ return c.require_immediate_lane_index (3, 2);
}
};
SHAPE (ternary_lane)
@@ -2845,7 +3745,7 @@ struct ternary_lane_rotate_def : public overloaded_base<0>
bool
check (function_checker &c) const override
{
- return (c.require_immediate_lane_index (3, 2)
+ return (c.require_immediate_lane_index (3, 2, 2)
&& c.require_immediate_one_of (4, 0, 90, 180, 270));
}
};
@@ -2868,7 +3768,7 @@ struct ternary_long_lane_def
bool
check (function_checker &c) const override
{
- return c.require_immediate_lane_index (3);
+ return c.require_immediate_lane_index (3, 2);
}
};
SHAPE (ternary_long_lane)
@@ -2915,20 +3815,49 @@ struct ternary_opt_n_def : public overloaded_base<0>
};
SHAPE (ternary_opt_n)
-/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:quarter>_t, sv<t0:quarter>_t, uint64_t)
+/* A choice between:
+
+ (1) sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:quarter>_t, sv<t0:quarter>_t,
+ uint64_t)
+
+ (2) sv<t0>_t svfoo[_t0_t1](sv<t0>_t, sv<t1>_t, sv<t1>_t, uint64_t)
where the final argument is an integer constant expression in the range
[0, 16 / sizeof (<t0>_t) - 1]. */
-struct ternary_qq_lane_def : public ternary_qq_lane_base<>
+struct ternary_qq_or_011_lane_def : public ternary_qq_lane_base<>
{
void
build (function_builder &b, const function_group_info &group) const override
{
b.add_overloaded_functions (group, MODE_none);
- build_all (b, "v0,v0,vq0,vq0,su64", group, MODE_none);
+ if (group.types[0][1] == NUM_TYPE_SUFFIXES)
+ build_all (b, "v0,v0,vq0,vq0,su64", group, MODE_none);
+ else
+ build_all (b, "v0,v0,v1,v1,su64", group, MODE_none);
+ }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ unsigned int i, nargs;
+ type_suffix_index type0, type1;
+ if (!r.check_gp_argument (4, i, nargs)
+ || (type0 = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
+ || (type1 = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES
+ || !r.require_matching_vector_type (i + 2, i + 1, type1)
+ || !r.require_integer_immediate (i + 3))
+ return error_mark_node;
+
+ if ((type_suffixes[type0].element_bits
+ == 4 * type_suffixes[type1].element_bits)
+ && type_suffixes[type0].tclass == type_suffixes[type1].tclass)
+ if (tree res = r.lookup_form (MODE_none, type0))
+ return res;
+
+ return r.resolve_to (r.mode_suffix_id, type0, type1);
}
};
-SHAPE (ternary_qq_lane)
+SHAPE (ternary_qq_or_011_lane)
/* svbool_t svfoo[_<t0>](sv<t0>_t, sv<t0:quarter>_t, sv<t0:quarter>_t,
uint64_t)
@@ -2965,30 +3894,70 @@ struct ternary_qq_lane_rotate_def : public overloaded_base<0>
bool
check (function_checker &c) const override
{
- return (c.require_immediate_lane_index (3, 4)
+ return (c.require_immediate_lane_index (3, 0)
&& c.require_immediate_one_of (4, 0, 90, 180, 270));
}
};
SHAPE (ternary_qq_lane_rotate)
-/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:quarter>_t, sv<t0:quarter>_t)
- sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0:quarter>_t, <t0:quarter>_t)
+/* A choice between:
- i.e. a version of the standard ternary shape ternary_opt_n in which
- the element type of the last two arguments is the quarter-sized
- equivalent of <t0>. */
-struct ternary_qq_opt_n_def
+ (1) sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:quarter>_t, sv<t0:quarter>_t)
+ sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0:quarter>_t, <t0:quarter>_t)
+
+ i.e. a version of the standard ternary shape ternary_opt_n in which
+ the element type of the last two arguments is the quarter-sized
+ equivalent of <t0>.
+
+ (2) sv<t0>_t svfoo[_t0_t1](sv<t0>_t, sv<t1>_t, sv<t1>_t)
+
+ where the element type of the last two arguments is specified
+ explicitly. */
+struct ternary_qq_opt_n_or_011_def
: public ternary_resize2_opt_n_base<function_resolver::QUARTER_SIZE>
{
void
build (function_builder &b, const function_group_info &group) const override
{
b.add_overloaded_functions (group, MODE_none);
- build_all (b, "v0,v0,vq0,vq0", group, MODE_none);
- build_all (b, "v0,v0,vq0,sq0", group, MODE_n);
+ if (group.types[0][1] == NUM_TYPE_SUFFIXES)
+ {
+ build_all (b, "v0,v0,vq0,vq0", group, MODE_none);
+ build_all (b, "v0,v0,vq0,sq0", group, MODE_n);
+ }
+ else
+ build_all (b, "v0,v0,v1,v1", group, MODE_none);
+ }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ unsigned int i, nargs;
+ type_suffix_index type0, type1;
+ if (!r.check_gp_argument (3, i, nargs)
+ || (type0 = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
+ || (type1 = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES
+ || !r.require_vector_or_scalar_type (i + 2))
+ return error_mark_node;
+
+ auto mode = r.scalar_argument_p (i + 2) ? MODE_n : MODE_none;
+ if (mode == MODE_none
+ && !r.require_matching_vector_type (i + 2, i + 1, type1))
+ return error_mark_node;
+
+ if ((type_suffixes[type0].element_bits
+ == 4 * type_suffixes[type1].element_bits)
+ && type_suffixes[type0].tclass == type_suffixes[type1].tclass)
+ if (tree res = r.lookup_form (mode, type0))
+ return res;
+
+ if (!r.require_nonscalar_type (i + 2))
+ return error_mark_node;
+
+ return r.resolve_to (r.mode_suffix_id, type0, type1);
}
};
-SHAPE (ternary_qq_opt_n)
+SHAPE (ternary_qq_opt_n_or_011)
/* svbool_t svfoo[_<t0>](sv<t0>_t, sv<t0:quarter>_t, sv<t0:quarter>_t,
uint64_t)
@@ -3103,7 +4072,7 @@ struct ternary_uint_def : public overloaded_base<0>
type_suffix_index type;
if (!r.check_gp_argument (3, i, nargs)
|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
- || !r.require_matching_vector_type (i + 1, type)
+ || !r.require_matching_vector_type (i + 1, i, type)
|| !r.require_derived_vector_type (i + 2, i, type, TYPE_unsigned))
return error_mark_node;
@@ -3198,7 +4167,7 @@ struct unary_def : public overloaded_base<0>
build (function_builder &b, const function_group_info &group) const override
{
b.add_overloaded_functions (group, MODE_none);
- build_all (b, "v0,v0", group, MODE_none);
+ build_all (b, "t0,t0", group, MODE_none);
}
tree
@@ -3219,7 +4188,7 @@ struct unary_convert_def : public overloaded_base<1>
build (function_builder &b, const function_group_info &group) const override
{
b.add_overloaded_functions (group, MODE_none);
- build_all (b, "v0,v1", group, MODE_none);
+ build_all (b, "c0,c1", group, MODE_none);
}
tree
@@ -3238,6 +4207,12 @@ SHAPE (unary_convert)
predicate. */
struct unary_convert_narrowt_def : public overloaded_base<1>
{
+ bool
+ has_merge_argument_p (const function_instance &, unsigned int) const override
+ {
+ return true;
+ }
+
void
build (function_builder &b, const function_group_info &group) const override
{
@@ -3254,6 +4229,38 @@ struct unary_convert_narrowt_def : public overloaded_base<1>
};
SHAPE (unary_convert_narrowt)
+/* sv<t0>x<g0>_t svfoo_t0[_t1_g](sv<t1>x<g1>_t)
+
+ where the target type <t0> must be specified explicitly but the
+ source type <t1> can be inferred.
+
+ Functions with a group suffix are unpredicated. For them:
+
+ - If <t0> is N times wider than <t1>, the return value has N times
+ more vectors than the argument.
+
+ - If <t1> is N times wider than <t0>, the argument has N times
+ more vectors than the return type. */
+struct unary_convertxn_def : public unary_convert_def
+{
+ bool explicit_group_suffix_p () const override { return false; }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ if (r.pred != PRED_none)
+ return unary_convert_def::resolve (r);
+
+ sve_type type;
+ if (!r.check_num_arguments (1)
+ || !(type = r.infer_sve_type (0)))
+ return error_mark_node;
+
+ return r.resolve_conversion (r.mode_suffix_id, type);
+ }
+};
+SHAPE (unary_convertxn)
+
/* sv<t0>_t svfoo[_t0](sv<t0:half>_t). */
struct unary_long_def : public overloaded_base<0>
{
@@ -3448,4 +4455,173 @@ struct unary_widen_def : public overloaded_base<0>
};
SHAPE (unary_widen)
+/* void svfoo_t0[_t1](uint64_t, svbool_t, svbool_t, sv<t1>_t)
+
+ where the first argument is a ZA tile. */
+struct unary_za_m_def : public overloaded_base<1>
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "_,su64,vp,vp,t1", group, MODE_none);
+ }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ type_suffix_index type;
+ if (!r.check_num_arguments (4)
+ || !r.require_integer_immediate (0)
+ || !r.require_vector_type (1, VECTOR_TYPE_svbool_t)
+ || !r.require_vector_type (2, VECTOR_TYPE_svbool_t)
+ || (type = r.infer_vector_type (3)) == NUM_TYPE_SUFFIXES)
+ return error_mark_node;
+
+ return r.resolve_to (r.mode_suffix_id, r.type_suffix_ids[0], type);
+ }
+
+ bool
+ check (function_checker &c) const override
+ {
+ return c.require_immediate_range (0, 0, c.num_za_tiles () - 1);
+ }
+};
+SHAPE (unary_za_m)
+
+/* void svfoo_t0[_t1]_g(uint32_t, sv<t1>x<g>_t). */
+struct unary_za_slice_def : public overloaded_base<1>
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ if (!za_group_is_pure_overload (group))
+ build_all (b, "_,su32,t1", group, MODE_none);
+ }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ sve_type type;
+ if (!r.check_num_arguments (2)
+ || !r.require_scalar_type (0, "uint32_t")
+ || !(type = r.infer_tuple_type (1)))
+ return error_mark_node;
+
+ return r.resolve_to (r.mode_suffix_id, type);
+ }
+};
+SHAPE (unary_za_slice)
+
+/* sv<t0>x<g>_t svfoo[_t0_g](sv<t0>x<g>_t). */
+struct unaryxn_def : public unary_def
+{
+ bool explicit_group_suffix_p () const override { return false; }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ if (r.pred != PRED_none)
+ return unary_def::resolve (r);
+
+ sve_type type;
+ if (!r.check_num_arguments (1)
+ || !(type = r.infer_sve_type (0)))
+ return error_mark_node;
+
+ return r.resolve_to (r.mode_suffix_id, type);
+ }
+};
+SHAPE (unaryxn)
+
+/* void svfoo_t0[_t1_g](uint64_t, uint32_t, sv<t1>x<g>_t). */
+struct write_za_def : public overloaded_base<1>
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "_,su64,su32,t1", group, MODE_none);
+ }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ sve_type type;
+ if (!r.check_num_arguments (3)
+ || !r.require_integer_immediate (0)
+ || !r.require_scalar_type (1, "uint32_t")
+ || !(type = r.infer_tuple_type (2)))
+ return error_mark_node;
+
+ return r.resolve_to (r.mode_suffix_id, type);
+ }
+
+ bool
+ check (function_checker &c) const override
+ {
+ return c.require_immediate_range (0, 0, c.num_za_tiles () - 1);
+ }
+};
+SHAPE (write_za)
+
+/* void svfoo_t0[_t1](uint64_t, uint32_t, svbool_t, sv<t1>_t)
+
+ where the first two fields form a (ZA tile, slice) pair. */
+struct write_za_m_def : public overloaded_base<1>
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "_,su64,su32,vp,t1", group, MODE_none);
+ }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ type_suffix_index type;
+ if (!r.check_num_arguments (4)
+ || !r.require_integer_immediate (0)
+ || !r.require_scalar_type (1, "uint32_t")
+ || !r.require_vector_type (2, VECTOR_TYPE_svbool_t)
+ || (type = r.infer_vector_type (3)) == NUM_TYPE_SUFFIXES)
+ return error_mark_node;
+
+ return r.resolve_to (r.mode_suffix_id, r.type_suffix_ids[0], type);
+ }
+
+ bool
+ check (function_checker &c) const override
+ {
+ return c.require_immediate_range (0, 0, c.num_za_tiles () - 1);
+ }
+};
+SHAPE (write_za_m)
+
+/* void svfoo_t0[_t1_g](uint32_t, sv<t1>x<g>_t). */
+struct write_za_slice_def : public overloaded_base<1>
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "_,su32,t1", group, MODE_none);
+ }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ sve_type type;
+ if (!r.check_num_arguments (2)
+ || !r.require_scalar_type (0, "uint32_t")
+ || !(type = r.infer_tuple_type (1)))
+ return error_mark_node;
+
+ return r.resolve_to (r.mode_suffix_id, type);
+ }
+};
+SHAPE (write_za_slice)
+
}
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-shapes.h b/gcc/config/aarch64/aarch64-sve-builtins-shapes.h
index 7483c1d..88af62d 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-shapes.h
+++ b/gcc/config/aarch64/aarch64-sve-builtins-shapes.h
@@ -40,6 +40,9 @@ namespace aarch64_sve
one in which the argument is the usual vector, and one in which it
is replaced by a scalar.
+ - an "_opt_single" suffix similarly says that the function can take
+ a vector or tuple argument, with the former having a "_single" suffix.
+
- "_int" and "_uint" replace the argument's element type with a
signed or unsigned integer of the same width. The suffixes above
then indicate whether this final argument is or might be a scalar.
@@ -75,6 +78,7 @@ namespace aarch64_sve
extern const function_shape *const adr_offset;
extern const function_shape *const binary;
extern const function_shape *const binary_int_opt_n;
+ extern const function_shape *const binary_int_opt_single_n;
extern const function_shape *const binary_lane;
extern const function_shape *const binary_long_lane;
extern const function_shape *const binary_long_opt_n;
@@ -82,9 +86,11 @@ namespace aarch64_sve
extern const function_shape *const binary_narrowb_opt_n;
extern const function_shape *const binary_narrowt_opt_n;
extern const function_shape *const binary_opt_n;
+ extern const function_shape *const binary_opt_single_n;
extern const function_shape *const binary_pred;
extern const function_shape *const binary_rotate;
extern const function_shape *const binary_scalar;
+ extern const function_shape *const binary_single;
extern const function_shape *const binary_to_uint;
extern const function_shape *const binary_uint;
extern const function_shape *const binary_uint_n;
@@ -93,19 +99,35 @@ namespace aarch64_sve
extern const function_shape *const binary_uint64_opt_n;
extern const function_shape *const binary_wide;
extern const function_shape *const binary_wide_opt_n;
+ extern const function_shape *const binary_za_int_m;
+ extern const function_shape *const binary_za_m;
+ extern const function_shape *const binary_za_slice_lane;
+ extern const function_shape *const binary_za_slice_int_opt_single;
+ extern const function_shape *const binary_za_slice_opt_single;
+ extern const function_shape *const binary_za_slice_uint_opt_single;
+ extern const function_shape *const binary_za_uint_m;
+ extern const function_shape *const binaryxn;
+ extern const function_shape *const bool_inherent;
+ extern const function_shape *const clamp;
extern const function_shape *const clast;
extern const function_shape *const compare;
extern const function_shape *const compare_opt_n;
extern const function_shape *const compare_ptr;
extern const function_shape *const compare_scalar;
+ extern const function_shape *const compare_scalar_count;
extern const function_shape *const compare_wide_opt_n;
extern const function_shape *const count_inherent;
extern const function_shape *const count_pat;
extern const function_shape *const count_pred;
+ extern const function_shape *const count_pred_c;
extern const function_shape *const count_vector;
extern const function_shape *const create;
+ extern const function_shape *const dot_za_slice_int_lane;
+ extern const function_shape *const dot_za_slice_lane;
+ extern const function_shape *const dot_za_slice_uint_lane;
extern const function_shape *const dupq;
extern const function_shape *const ext;
+ extern const function_shape *const extract_pred;
extern const function_shape *const fold_left;
extern const function_shape *const get;
extern const function_shape *const inc_dec;
@@ -114,6 +136,11 @@ namespace aarch64_sve
extern const function_shape *const inc_dec_pred_scalar;
extern const function_shape *const inherent;
extern const function_shape *const inherent_b;
+ extern const function_shape *const inherent_za;
+ extern const function_shape *const inherent_zt;
+ extern const function_shape *const inherent_mask_za;
+ extern const function_shape *const ldr_zt;
+ extern const function_shape *const ldr_za;
extern const function_shape *const load;
extern const function_shape *const load_ext;
extern const function_shape *const load_ext_gather_index;
@@ -124,6 +151,9 @@ namespace aarch64_sve
extern const function_shape *const load_gather_sv_restricted;
extern const function_shape *const load_gather_vs;
extern const function_shape *const load_replicate;
+ extern const function_shape *const load_za;
+ extern const function_shape *const luti2_lane_zt;
+ extern const function_shape *const luti4_lane_zt;
extern const function_shape *const mmla;
extern const function_shape *const pattern_pred;
extern const function_shape *const prefetch;
@@ -131,8 +161,13 @@ namespace aarch64_sve
extern const function_shape *const prefetch_gather_offset;
extern const function_shape *const ptest;
extern const function_shape *const rdffr;
+ extern const function_shape *const read_za;
+ extern const function_shape *const read_za_m;
+ extern const function_shape *const read_za_slice;
extern const function_shape *const reduction;
extern const function_shape *const reduction_wide;
+ extern const function_shape *const reinterpret;
+ extern const function_shape *const select_pred;
extern const function_shape *const set;
extern const function_shape *const setffr;
extern const function_shape *const shift_left_imm_long;
@@ -140,6 +175,7 @@ namespace aarch64_sve
extern const function_shape *const shift_right_imm;
extern const function_shape *const shift_right_imm_narrowb;
extern const function_shape *const shift_right_imm_narrowt;
+ extern const function_shape *const shift_right_imm_narrowxn;
extern const function_shape *const shift_right_imm_narrowb_to_uint;
extern const function_shape *const shift_right_imm_narrowt_to_uint;
extern const function_shape *const store;
@@ -147,6 +183,10 @@ namespace aarch64_sve
extern const function_shape *const store_scatter_index_restricted;
extern const function_shape *const store_scatter_offset;
extern const function_shape *const store_scatter_offset_restricted;
+ extern const function_shape *const store_za;
+ extern const function_shape *const storexn;
+ extern const function_shape *const str_za;
+ extern const function_shape *const str_zt;
extern const function_shape *const tbl_tuple;
extern const function_shape *const ternary_bfloat;
extern const function_shape *const ternary_bfloat_lane;
@@ -159,9 +199,9 @@ namespace aarch64_sve
extern const function_shape *const ternary_long_lane;
extern const function_shape *const ternary_long_opt_n;
extern const function_shape *const ternary_opt_n;
- extern const function_shape *const ternary_qq_lane;
+ extern const function_shape *const ternary_qq_or_011_lane;
extern const function_shape *const ternary_qq_lane_rotate;
- extern const function_shape *const ternary_qq_opt_n;
+ extern const function_shape *const ternary_qq_opt_n_or_011;
extern const function_shape *const ternary_qq_rotate;
extern const function_shape *const ternary_rotate;
extern const function_shape *const ternary_shift_left_imm;
@@ -174,6 +214,7 @@ namespace aarch64_sve
extern const function_shape *const unary;
extern const function_shape *const unary_convert;
extern const function_shape *const unary_convert_narrowt;
+ extern const function_shape *const unary_convertxn;
extern const function_shape *const unary_long;
extern const function_shape *const unary_n;
extern const function_shape *const unary_narrowb;
@@ -185,6 +226,12 @@ namespace aarch64_sve
extern const function_shape *const unary_to_uint;
extern const function_shape *const unary_uint;
extern const function_shape *const unary_widen;
+ extern const function_shape *const unary_za_m;
+ extern const function_shape *const unary_za_slice;
+ extern const function_shape *const unaryxn;
+ extern const function_shape *const write_za;
+ extern const function_shape *const write_za_m;
+ extern const function_shape *const write_za_slice;
}
}
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-sme.cc b/gcc/config/aarch64/aarch64-sve-builtins-sme.cc
new file mode 100644
index 0000000..8d06a72
--- /dev/null
+++ b/gcc/config/aarch64/aarch64-sve-builtins-sme.cc
@@ -0,0 +1,579 @@
+/* ACLE support for AArch64 SME.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "tree.h"
+#include "rtl.h"
+#include "tm_p.h"
+#include "memmodel.h"
+#include "insn-codes.h"
+#include "optabs.h"
+#include "recog.h"
+#include "expr.h"
+#include "basic-block.h"
+#include "function.h"
+#include "fold-const.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "gimplify.h"
+#include "explow.h"
+#include "emit-rtl.h"
+#include "aarch64-sve-builtins.h"
+#include "aarch64-sve-builtins-shapes.h"
+#include "aarch64-sve-builtins-base.h"
+#include "aarch64-sve-builtins-sme.h"
+#include "aarch64-sve-builtins-functions.h"
+
+using namespace aarch64_sve;
+
+namespace {
+
+class load_store_za_zt0_base : public function_base
+{
+public:
+ tree
+ memory_scalar_type (const function_instance &) const override
+ {
+ return void_type_node;
+ }
+};
+
+class read_write_za_base : public function_base
+{
+public:
+ constexpr read_write_za_base (int unspec) : m_unspec (unspec) {}
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ auto za_mode = e.vector_mode (0);
+ auto z_mode = e.tuple_mode (1);
+ auto icode = (za_mode == VNx1TImode
+ ? code_for_aarch64_sme (m_unspec, za_mode, z_mode)
+ : code_for_aarch64_sme (m_unspec, z_mode, z_mode));
+ return e.use_exact_insn (icode);
+ }
+
+ int m_unspec;
+};
+
+using load_za_base = add_call_properties<load_store_za_zt0_base,
+ CP_READ_MEMORY | CP_READ_ZA
+ | CP_WRITE_ZA>;
+
+using store_za_base = add_call_properties<load_store_za_zt0_base,
+ CP_WRITE_MEMORY | CP_READ_ZA>;
+
+/* E is a load or store intrinsic that accesses a ZA slice of mode MEM_MODE.
+ The intrinsic has a vnum parameter at index ARGNO. Return true if the
+ vnum argument is a constant that is a valid ZA offset for the underlying
+ instruction. */
+
+static bool
+has_in_range_vnum_arg (function_expander &e, machine_mode mem_mode,
+ unsigned int argno)
+{
+ return (e.mode_suffix_id == MODE_vnum
+ && CONST_INT_P (e.args[argno])
+ && UINTVAL (e.args[argno]) < 16 / GET_MODE_UNIT_SIZE (mem_mode));
+}
+
+/* E is a ZA load or store intrinsic that uses instruction ICODE. Add a
+ 32-bit operand that gives the total ZA slice. (The instruction hard-codes
+ the constant offset to 0, so there is no operand for that.)
+
+ Argument ARGNO is the intrinsic's slice argument. If the intrinsic is
+ a _vnum intrinsic, argument VNUM_ARGNO is the intrinsic's vnum operand,
+ which must be added to the slice argument. */
+
+static void
+add_load_store_slice_operand (function_expander &e, insn_code icode,
+ unsigned int argno, unsigned int vnum_argno)
+{
+ rtx base = e.args[argno];
+ if (e.mode_suffix_id == MODE_vnum)
+ {
+ rtx vnum = lowpart_subreg (SImode, e.args[vnum_argno], DImode);
+ base = simplify_gen_binary (PLUS, SImode, base, vnum);
+ }
+ e.add_input_operand (icode, base);
+}
+
+/* Add a memory operand for ZA LD1 or ST1 intrinsic E. BASE_ARGNO is
+ the index of the base argument. */
+
+static void
+add_load_store_operand (function_expander &e, unsigned int base_argno)
+{
+ auto mode = e.vector_mode (0);
+ rtx base = e.get_contiguous_base (mode, base_argno, base_argno + 1,
+ AARCH64_FL_SM_ON);
+ auto mem = gen_rtx_MEM (mode, force_reg (Pmode, base));
+ set_mem_align (mem, BITS_PER_UNIT);
+ e.add_fixed_operand (mem);
+}
+
+/* Expand ZA LDR or STR intrinsic E. There are two underlying instructions:
+
+ - BASE_CODE has a zero ZA slice offset
+ - VNUM_CODE has a constant operand for the ZA slice offset. */
+
+static rtx
+expand_ldr_str_za (function_expander &e, insn_code base_code,
+ insn_code vnum_code)
+{
+ if (has_in_range_vnum_arg (e, VNx16QImode, 2))
+ {
+ rtx mem_offset = aarch64_sme_vq_immediate (Pmode,
+ UINTVAL (e.args[2]) * 16,
+ AARCH64_ISA_MODE);
+ e.add_input_operand (vnum_code, e.args[0]);
+ e.add_input_operand (vnum_code, e.args[2]);
+ e.add_input_operand (vnum_code, e.args[1]);
+ e.add_input_operand (vnum_code, mem_offset);
+ return e.generate_insn (vnum_code);
+ }
+ else
+ {
+ rtx base = e.get_contiguous_base (VNx16QImode, 1, 2, AARCH64_FL_SM_ON);
+ add_load_store_slice_operand (e, base_code, 0, 2);
+ e.add_input_operand (base_code, base);
+ return e.generate_insn (base_code);
+ }
+}
+
+/* Use instruction ICODE to expand ZT0 load or store E. */
+
+static rtx
+expand_ldr_str_zt0 (function_expander &e, insn_code icode)
+{
+ rtx base = e.convert_to_pmode (e.args[1]);
+ rtx mem = gen_rtx_MEM (V8DImode, force_reg (Pmode, base));
+ e.add_fixed_operand (mem);
+ return e.generate_insn (icode);
+}
+
+/* Expand ZA LD1 or ST1 intrinsic E. UNSPEC is the load or store unspec.
+ IS_LOAD is true if E is a load, false if it is a store. */
+
+static rtx
+expand_ld1_st1 (function_expander &e, int unspec, bool is_load)
+{
+ bool is_vnum = has_in_range_vnum_arg (e, e.vector_mode (0), 4);
+ auto icode = (is_vnum
+ ? code_for_aarch64_sme_plus (unspec, e.vector_mode (0))
+ : code_for_aarch64_sme (unspec, e.vector_mode (0)));
+ if (!is_load)
+ add_load_store_operand (e, 3);
+ e.add_input_operand (icode, e.args[0]);
+ if (is_vnum)
+ {
+ e.add_input_operand (icode, e.args[1]);
+ e.add_input_operand (icode, e.args[4]);
+ }
+ else
+ add_load_store_slice_operand (e, icode, 1, 4);
+ e.add_input_operand (icode, e.args[2]);
+ if (is_load)
+ add_load_store_operand (e, 3);
+ return e.generate_insn (icode);
+}
+
+class arm_has_sme_impl : public function_base
+{
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ if (TARGET_SME)
+ return f.fold_to_cstu (1);
+ return nullptr;
+ }
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ if (TARGET_SME)
+ return const1_rtx;
+ emit_insn (gen_aarch64_get_sme_state ());
+ return expand_simple_binop (DImode, LSHIFTRT,
+ gen_rtx_REG (DImode, R0_REGNUM),
+ gen_int_mode (63, QImode),
+ e.possible_target, true, OPTAB_LIB_WIDEN);
+ }
+};
+
+class arm_in_streaming_mode_impl : public function_base
+{
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ if (TARGET_STREAMING)
+ return f.fold_to_cstu (1);
+ if (TARGET_NON_STREAMING)
+ return f.fold_to_cstu (0);
+ return nullptr;
+ }
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ if (TARGET_STREAMING)
+ return const1_rtx;
+
+ if (TARGET_NON_STREAMING)
+ return const0_rtx;
+
+ rtx reg;
+ if (TARGET_SME)
+ {
+ reg = gen_reg_rtx (DImode);
+ emit_insn (gen_aarch64_read_svcr (reg));
+ }
+ else
+ {
+ emit_insn (gen_aarch64_get_sme_state ());
+ reg = gen_rtx_REG (DImode, R0_REGNUM);
+ }
+ return expand_simple_binop (DImode, AND, reg, gen_int_mode (1, DImode),
+ e.possible_target, true, OPTAB_LIB_WIDEN);
+ }
+};
+
+/* Implements svcnts[bhwd]. */
+class svcnts_bhwd_impl : public function_base
+{
+public:
+ constexpr svcnts_bhwd_impl (machine_mode ref_mode) : m_ref_mode (ref_mode) {}
+
+ unsigned int
+ get_shift () const
+ {
+ return exact_log2 (GET_MODE_UNIT_SIZE (m_ref_mode));
+ }
+
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ if (TARGET_STREAMING)
+ return f.fold_to_cstu (GET_MODE_NUNITS (m_ref_mode));
+ return nullptr;
+ }
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ rtx cntsb = aarch64_sme_vq_immediate (DImode, 16, AARCH64_ISA_MODE);
+ auto shift = get_shift ();
+ if (!shift)
+ return cntsb;
+
+ return expand_simple_binop (DImode, LSHIFTRT, cntsb,
+ gen_int_mode (shift, QImode),
+ e.possible_target, true, OPTAB_LIB_WIDEN);
+ }
+
+ /* The mode of the vector associated with the [bhwd] suffix. */
+ machine_mode m_ref_mode;
+};
+
+class svld1_za_impl : public load_za_base
+{
+public:
+ constexpr svld1_za_impl (int unspec) : m_unspec (unspec) {}
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ return expand_ld1_st1 (e, m_unspec, true);
+ }
+
+ int m_unspec;
+};
+
+class svldr_za_impl : public load_za_base
+{
+public:
+ rtx
+ expand (function_expander &e) const override
+ {
+ return expand_ldr_str_za (e, CODE_FOR_aarch64_sme_ldr0,
+ code_for_aarch64_sme_ldrn (Pmode));
+ }
+};
+
+class svldr_zt_impl : public load_store_za_zt0_base
+{
+public:
+ unsigned int
+ call_properties (const function_instance &) const override
+ {
+ return CP_READ_MEMORY | CP_WRITE_ZT0;
+ }
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ return expand_ldr_str_zt0 (e, CODE_FOR_aarch64_sme_ldr_zt0);
+ }
+};
+
+class svluti_lane_zt_impl : public read_zt0<function_base>
+{
+public:
+ CONSTEXPR svluti_lane_zt_impl (unsigned int bits) : m_bits (bits) {}
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ auto mode = e.tuple_mode (0);
+ e.args.ordered_remove (0);
+ return e.use_exact_insn (code_for_aarch64_sme_lut (m_bits, mode));
+ }
+
+ unsigned int m_bits;
+};
+
+class svread_za_impl : public function_base
+{
+public:
+ unsigned int
+ call_properties (const function_instance &) const override
+ {
+ return CP_READ_ZA;
+ }
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ machine_mode mode = e.vectors_per_tuple () == 4 ? VNx8DImode : VNx4DImode;
+ return e.use_exact_insn (code_for_aarch64_sme_read (mode));
+ }
+};
+
+using svread_za_tile_impl = add_call_properties<read_write_za_base,
+ CP_READ_ZA>;
+
+class svst1_za_impl : public store_za_base
+{
+public:
+ constexpr svst1_za_impl (int unspec) : m_unspec (unspec) {}
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ return expand_ld1_st1 (e, m_unspec, false);
+ }
+
+ int m_unspec;
+};
+
+class svstr_za_impl : public store_za_base
+{
+public:
+ rtx
+ expand (function_expander &e) const override
+ {
+ return expand_ldr_str_za (e, CODE_FOR_aarch64_sme_str0,
+ code_for_aarch64_sme_strn (Pmode));
+ }
+};
+
+class svstr_zt_impl : public load_store_za_zt0_base
+{
+public:
+ unsigned int
+ call_properties (const function_instance &) const override
+ {
+ return CP_WRITE_MEMORY | CP_READ_ZT0;
+ }
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ return expand_ldr_str_zt0 (e, CODE_FOR_aarch64_sme_str_zt0);
+ }
+};
+
+class svsudot_za_impl : public read_write_za<function_base>
+{
+public:
+ rtx
+ expand (function_expander &e) const override
+ {
+ if (e.mode_suffix_id == MODE_single)
+ {
+ auto icode = code_for_aarch64_sme_single_sudot (e.vector_mode (0),
+ e.tuple_mode (1));
+ return e.use_exact_insn (icode);
+ }
+ std::swap (e.args[1], e.args[2]);
+ return e.use_exact_insn (code_for_aarch64_sme (UNSPEC_SME_USDOT,
+ e.vector_mode (0),
+ e.tuple_mode (1)));
+ }
+};
+
+class svundef_za_impl : public write_za<function_base>
+{
+public:
+ rtx
+ expand (function_expander &) const override
+ {
+ rtx target = gen_rtx_REG (VNx16QImode, ZA_REGNUM);
+ emit_clobber (copy_rtx (target));
+ return const0_rtx;
+ }
+};
+
+class svwrite_za_impl : public function_base
+{
+public:
+ unsigned int
+ call_properties (const function_instance &) const override
+ {
+ return CP_WRITE_ZA;
+ }
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ machine_mode mode = e.vectors_per_tuple () == 4 ? VNx8DImode : VNx4DImode;
+ e.args[1] = lowpart_subreg (mode, e.args[1], e.tuple_mode (1));
+ return e.use_exact_insn (code_for_aarch64_sme_write (mode));
+ }
+};
+
+using svwrite_za_tile_impl = add_call_properties<read_write_za_base,
+ CP_READ_ZA | CP_WRITE_ZA>;
+
+class svzero_mask_za_impl : public write_za<function_base>
+{
+public:
+ rtx
+ expand (function_expander &e) const override
+ {
+ return e.use_exact_insn (CODE_FOR_aarch64_sme_zero_za);
+ }
+};
+
+class svzero_za_impl : public write_za<function_base>
+{
+public:
+ rtx
+ expand (function_expander &) const override
+ {
+ emit_insn (gen_aarch64_sme_zero_za (gen_int_mode (0xff, SImode)));
+ return const0_rtx;
+ }
+};
+
+class svzero_zt_impl : public write_zt0<function_base>
+{
+public:
+ rtx
+ expand (function_expander &) const override
+ {
+ emit_insn (gen_aarch64_sme_zero_zt0 ());
+ return const0_rtx;
+ }
+};
+
+} /* end anonymous namespace */
+
+namespace aarch64_sve {
+
+FUNCTION (arm_has_sme, arm_has_sme_impl, )
+FUNCTION (arm_in_streaming_mode, arm_in_streaming_mode_impl, )
+FUNCTION (svadd_za, sme_1mode_function, (UNSPEC_SME_ADD, UNSPEC_SME_ADD,
+ UNSPEC_SME_FADD))
+FUNCTION (svadd_write_za, sme_1mode_function, (UNSPEC_SME_ADD_WRITE,
+ UNSPEC_SME_ADD_WRITE, -1))
+FUNCTION (svaddha_za, sme_1mode_function, (UNSPEC_SME_ADDHA,
+ UNSPEC_SME_ADDHA, -1))
+FUNCTION (svaddva_za, sme_1mode_function, (UNSPEC_SME_ADDVA,
+ UNSPEC_SME_ADDVA, -1))
+FUNCTION (svbmopa_za, sme_2mode_function, (-1, UNSPEC_SME_BMOPA, -1))
+FUNCTION (svbmops_za, sme_2mode_function, (-1, UNSPEC_SME_BMOPS, -1))
+FUNCTION (svcntsb, svcnts_bhwd_impl, (VNx16QImode))
+FUNCTION (svcntsd, svcnts_bhwd_impl, (VNx2DImode))
+FUNCTION (svcntsh, svcnts_bhwd_impl, (VNx8HImode))
+FUNCTION (svcntsw, svcnts_bhwd_impl, (VNx4SImode))
+FUNCTION (svdot_za, sme_2mode_function, (UNSPEC_SME_SDOT, UNSPEC_SME_UDOT,
+ UNSPEC_SME_FDOT))
+FUNCTION (svdot_lane_za, sme_2mode_lane_function, (UNSPEC_SME_SDOT,
+ UNSPEC_SME_UDOT,
+ UNSPEC_SME_FDOT))
+FUNCTION (svld1_hor_za, svld1_za_impl, (UNSPEC_SME_LD1_HOR))
+FUNCTION (svld1_ver_za, svld1_za_impl, (UNSPEC_SME_LD1_VER))
+FUNCTION (svldr_za, svldr_za_impl, )
+FUNCTION (svldr_zt, svldr_zt_impl, )
+FUNCTION (svluti2_lane_zt, svluti_lane_zt_impl, (2))
+FUNCTION (svluti4_lane_zt, svluti_lane_zt_impl, (4))
+FUNCTION (svmla_za, sme_2mode_function, (UNSPEC_SME_SMLA, UNSPEC_SME_UMLA,
+ UNSPEC_SME_FMLA))
+FUNCTION (svmla_lane_za, sme_2mode_lane_function, (UNSPEC_SME_SMLA,
+ UNSPEC_SME_UMLA,
+ UNSPEC_SME_FMLA))
+FUNCTION (svmls_za, sme_2mode_function, (UNSPEC_SME_SMLS, UNSPEC_SME_UMLS,
+ UNSPEC_SME_FMLS))
+FUNCTION (svmls_lane_za, sme_2mode_lane_function, (UNSPEC_SME_SMLS,
+ UNSPEC_SME_UMLS,
+ UNSPEC_SME_FMLS))
+FUNCTION (svmopa_za, sme_2mode_function, (UNSPEC_SME_SMOPA, UNSPEC_SME_UMOPA,
+ UNSPEC_SME_FMOPA))
+FUNCTION (svmops_za, sme_2mode_function, (UNSPEC_SME_SMOPS, UNSPEC_SME_UMOPS,
+ UNSPEC_SME_FMOPS))
+FUNCTION (svread_za, svread_za_impl,)
+FUNCTION (svread_hor_za, svread_za_tile_impl, (UNSPEC_SME_READ_HOR))
+FUNCTION (svread_ver_za, svread_za_tile_impl, (UNSPEC_SME_READ_VER))
+FUNCTION (svst1_hor_za, svst1_za_impl, (UNSPEC_SME_ST1_HOR))
+FUNCTION (svst1_ver_za, svst1_za_impl, (UNSPEC_SME_ST1_VER))
+FUNCTION (svstr_za, svstr_za_impl, )
+FUNCTION (svstr_zt, svstr_zt_impl, )
+FUNCTION (svsub_za, sme_1mode_function, (UNSPEC_SME_SUB, UNSPEC_SME_SUB,
+ UNSPEC_SME_FSUB))
+FUNCTION (svsub_write_za, sme_1mode_function, (UNSPEC_SME_SUB_WRITE,
+ UNSPEC_SME_SUB_WRITE, -1))
+FUNCTION (svsudot_za, svsudot_za_impl,)
+FUNCTION (svsudot_lane_za, sme_2mode_lane_function, (UNSPEC_SME_SUDOT, -1, -1))
+FUNCTION (svsuvdot_lane_za, sme_2mode_lane_function, (UNSPEC_SME_SUVDOT,
+ -1, -1))
+FUNCTION (svsumopa_za, sme_2mode_function, (UNSPEC_SME_SUMOPA, -1, -1))
+FUNCTION (svsumops_za, sme_2mode_function, (UNSPEC_SME_SUMOPS, -1, -1))
+FUNCTION (svundef_za, svundef_za_impl, )
+FUNCTION (svusdot_za, sme_2mode_function, (-1, UNSPEC_SME_USDOT, -1))
+FUNCTION (svusdot_lane_za, sme_2mode_lane_function, (-1, UNSPEC_SME_USDOT, -1))
+FUNCTION (svusvdot_lane_za, sme_2mode_lane_function, (-1, UNSPEC_SME_USVDOT,
+ -1))
+FUNCTION (svusmopa_za, sme_2mode_function, (-1, UNSPEC_SME_USMOPA, -1))
+FUNCTION (svusmops_za, sme_2mode_function, (-1, UNSPEC_SME_USMOPS, -1))
+FUNCTION (svvdot_lane_za, sme_2mode_lane_function, (UNSPEC_SME_SVDOT,
+ UNSPEC_SME_UVDOT,
+ UNSPEC_SME_FVDOT))
+FUNCTION (svwrite_za, svwrite_za_impl,)
+FUNCTION (svwrite_hor_za, svwrite_za_tile_impl, (UNSPEC_SME_WRITE_HOR))
+FUNCTION (svwrite_ver_za, svwrite_za_tile_impl, (UNSPEC_SME_WRITE_VER))
+FUNCTION (svzero_mask_za, svzero_mask_za_impl, )
+FUNCTION (svzero_za, svzero_za_impl, )
+FUNCTION (svzero_zt, svzero_zt_impl, )
+
+} /* end namespace aarch64_sve */
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-sme.def b/gcc/config/aarch64/aarch64-sve-builtins-sme.def
new file mode 100644
index 0000000..5f76d00
--- /dev/null
+++ b/gcc/config/aarch64/aarch64-sve-builtins-sme.def
@@ -0,0 +1,198 @@
+/* ACLE support for AArch64 SME.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#define REQUIRED_EXTENSIONS 0
+DEF_SVE_FUNCTION (arm_has_sme, bool_inherent, none, none)
+DEF_SVE_FUNCTION (arm_in_streaming_mode, bool_inherent, none, none)
+#undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS AARCH64_FL_SME
+DEF_SVE_FUNCTION (svcntsb, count_inherent, none, none)
+DEF_SVE_FUNCTION (svcntsd, count_inherent, none, none)
+DEF_SVE_FUNCTION (svcntsh, count_inherent, none, none)
+DEF_SVE_FUNCTION (svcntsw, count_inherent, none, none)
+DEF_SME_ZA_FUNCTION (svldr, ldr_za, za, none)
+DEF_SME_ZA_FUNCTION (svstr, str_za, za, none)
+DEF_SME_ZA_FUNCTION (svundef, inherent_za, za, none)
+DEF_SME_ZA_FUNCTION (svzero, inherent_za, za, none)
+DEF_SME_ZA_FUNCTION (svzero_mask, inherent_mask_za, za, none)
+#undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS AARCH64_FL_SME | AARCH64_FL_SM_ON
+DEF_SME_ZA_FUNCTION (svaddha, unary_za_m, za_s_integer, za_m)
+DEF_SME_ZA_FUNCTION (svaddva, unary_za_m, za_s_integer, za_m)
+DEF_SME_ZA_FUNCTION (svld1_hor, load_za, all_za, none)
+DEF_SME_ZA_FUNCTION (svld1_ver, load_za, all_za, none)
+DEF_SME_ZA_FUNCTION (svmopa, binary_za_m, mop_base, za_m)
+DEF_SME_ZA_FUNCTION (svmopa, binary_za_m, d_za, za_m)
+DEF_SME_ZA_FUNCTION (svmops, binary_za_m, mop_base, za_m)
+DEF_SME_ZA_FUNCTION (svmops, binary_za_m, d_za, za_m)
+DEF_SME_ZA_FUNCTION (svread_hor, read_za_m, za_all_data, m)
+DEF_SME_ZA_FUNCTION (svread_ver, read_za_m, za_all_data, m)
+DEF_SME_ZA_FUNCTION (svst1_hor, store_za, all_za, none)
+DEF_SME_ZA_FUNCTION (svst1_ver, store_za, all_za, none)
+DEF_SME_ZA_FUNCTION (svsumopa, binary_za_uint_m, mop_base_signed, za_m)
+DEF_SME_ZA_FUNCTION (svsumops, binary_za_uint_m, mop_base_signed, za_m)
+DEF_SME_ZA_FUNCTION (svusmopa, binary_za_int_m, mop_base_unsigned, za_m)
+DEF_SME_ZA_FUNCTION (svusmops, binary_za_int_m, mop_base_unsigned, za_m)
+DEF_SME_ZA_FUNCTION (svwrite_hor, write_za_m, za_all_data, za_m)
+DEF_SME_ZA_FUNCTION (svwrite_ver, write_za_m, za_all_data, za_m)
+#undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS (AARCH64_FL_SME \
+ | AARCH64_FL_SME_I16I64 \
+ | AARCH64_FL_SM_ON)
+DEF_SME_ZA_FUNCTION (svaddha, unary_za_m, za_d_integer, za_m)
+DEF_SME_ZA_FUNCTION (svaddva, unary_za_m, za_d_integer, za_m)
+DEF_SME_ZA_FUNCTION (svmopa, binary_za_m, mop_i16i64, za_m)
+DEF_SME_ZA_FUNCTION (svmops, binary_za_m, mop_i16i64, za_m)
+DEF_SME_ZA_FUNCTION (svsumopa, binary_za_uint_m, mop_i16i64_signed, za_m)
+DEF_SME_ZA_FUNCTION (svsumops, binary_za_uint_m, mop_i16i64_signed, za_m)
+DEF_SME_ZA_FUNCTION (svusmopa, binary_za_int_m, mop_i16i64_unsigned, za_m)
+DEF_SME_ZA_FUNCTION (svusmops, binary_za_int_m, mop_i16i64_unsigned, za_m)
+#undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS (AARCH64_FL_SME \
+ | AARCH64_FL_SME_F64F64 \
+ | AARCH64_FL_SM_ON)
+DEF_SME_ZA_FUNCTION (svmopa, binary_za_m, za_d_float, za_m)
+DEF_SME_ZA_FUNCTION (svmops, binary_za_m, za_d_float, za_m)
+#undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS AARCH64_FL_SME2
+DEF_SVE_FUNCTION (svldr_zt, ldr_zt, none, none)
+DEF_SVE_FUNCTION (svstr_zt, str_zt, none, none)
+DEF_SVE_FUNCTION (svzero_zt, inherent_zt, none, none)
+#undef REQUIRED_EXTENSIONS
+
+/* The d_za entries in this section just declare C _za64 overloads,
+ which will then be resolved to either an integer function or a
+ floating-point function. They are needed because the integer and
+ floating-point functions have different architecture requirements. */
+#define REQUIRED_EXTENSIONS AARCH64_FL_SME2 | AARCH64_FL_SM_ON
+DEF_SME_ZA_FUNCTION_GS (svadd, unary_za_slice, za_s_data, vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svadd, unary_za_slice, d_za, vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svadd_write, binary_za_slice_opt_single, za_s_integer,
+ vg1x24, none)
+DEF_SME_ZA_FUNCTION (svbmopa, binary_za_m, za_s_unsigned, za_m)
+DEF_SME_ZA_FUNCTION (svbmops, binary_za_m, za_s_unsigned, za_m)
+DEF_SME_ZA_FUNCTION_GS (svdot, binary_za_slice_opt_single, za_s_h_data,
+ vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svdot, binary_za_slice_opt_single, za_s_b_integer,
+ vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svdot_lane, dot_za_slice_lane, za_s_h_data,
+ vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svdot_lane, dot_za_slice_lane, za_s_b_integer,
+ vg1x24, none)
+DEF_SVE_FUNCTION_GS (svluti2_lane_zt, luti2_lane_zt, bhs_data, x124, none)
+DEF_SVE_FUNCTION_GS (svluti4_lane_zt, luti4_lane_zt, bhs_data, x12, none)
+DEF_SVE_FUNCTION_GS (svluti4_lane_zt, luti4_lane_zt, hs_data, x4, none)
+DEF_SME_ZA_FUNCTION_GS (svmla, binary_za_slice_opt_single, za_s_float,
+ vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svmla, binary_za_slice_opt_single, za_s_h_data,
+ vg2, none)
+DEF_SME_ZA_FUNCTION_GS (svmla, binary_za_slice_opt_single, za_s_b_integer,
+ vg4, none)
+DEF_SME_ZA_FUNCTION_GS (svmla_lane, binary_za_slice_lane, za_s_float,
+ vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svmla_lane, binary_za_slice_lane, za_s_h_data,
+ vg2, none)
+DEF_SME_ZA_FUNCTION_GS (svmla_lane, binary_za_slice_lane, za_s_b_integer,
+ vg4, none)
+DEF_SME_ZA_FUNCTION_GS (svmls, binary_za_slice_opt_single, za_s_float,
+ vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svmls, binary_za_slice_opt_single, za_s_h_data,
+ vg2, none)
+DEF_SME_ZA_FUNCTION_GS (svmls, binary_za_slice_opt_single, za_s_b_integer,
+ vg4, none)
+DEF_SME_ZA_FUNCTION_GS (svmls_lane, binary_za_slice_lane, za_s_float,
+ vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svmls_lane, binary_za_slice_lane, za_s_h_data,
+ vg2, none)
+DEF_SME_ZA_FUNCTION_GS (svmls_lane, binary_za_slice_lane, za_s_b_integer,
+ vg4, none)
+DEF_SME_ZA_FUNCTION (svmopa, binary_za_m, za_s_h_integer, za_m)
+DEF_SME_ZA_FUNCTION (svmops, binary_za_m, za_s_h_integer, za_m)
+DEF_SME_ZA_FUNCTION_GS (svread, read_za_slice, za_bhsd_data, vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svread_hor, read_za, za_bhsd_data, vg24, none)
+DEF_SME_ZA_FUNCTION_GS (svread_ver, read_za, za_bhsd_data, vg24, none)
+DEF_SME_ZA_FUNCTION_GS (svsub, unary_za_slice, za_s_data, vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svsub, unary_za_slice, d_za, vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svsub_write, binary_za_slice_opt_single, za_s_integer,
+ vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svsudot, binary_za_slice_uint_opt_single,
+ za_s_b_signed, vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svsudot_lane, dot_za_slice_uint_lane,
+ za_s_b_signed, vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svsuvdot_lane, dot_za_slice_uint_lane,
+ za_s_b_signed, vg1x4, none)
+DEF_SME_ZA_FUNCTION_GS (svusdot, binary_za_slice_int_opt_single,
+ za_s_b_unsigned, vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svusdot_lane, dot_za_slice_int_lane,
+ za_s_b_unsigned, vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svusvdot_lane, dot_za_slice_int_lane,
+ za_s_b_unsigned, vg1x4, none)
+DEF_SME_ZA_FUNCTION_GS (svvdot_lane, dot_za_slice_lane, za_s_h_data,
+ vg1x2, none)
+DEF_SME_ZA_FUNCTION_GS (svvdot_lane, dot_za_slice_lane, za_s_b_integer,
+ vg1x4, none)
+DEF_SME_ZA_FUNCTION_GS (svwrite, write_za_slice, za_bhsd_data, vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svwrite_hor, write_za, za_bhsd_data, vg24, none)
+DEF_SME_ZA_FUNCTION_GS (svwrite_ver, write_za, za_bhsd_data, vg24, none)
+#undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS (AARCH64_FL_SME2 \
+ | AARCH64_FL_SME_I16I64 \
+ | AARCH64_FL_SM_ON)
+DEF_SME_ZA_FUNCTION_GS (svadd, unary_za_slice, za_d_integer, vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svadd_write, binary_za_slice_opt_single, za_d_integer,
+ vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svdot, binary_za_slice_opt_single, za_d_h_integer,
+ vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svdot_lane, dot_za_slice_lane, za_d_h_integer,
+ vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svmla, binary_za_slice_opt_single, za_d_h_integer,
+ vg4, none)
+DEF_SME_ZA_FUNCTION_GS (svmla_lane, binary_za_slice_lane, za_d_h_integer,
+ vg4, none)
+DEF_SME_ZA_FUNCTION_GS (svmls, binary_za_slice_opt_single, za_d_h_integer,
+ vg4, none)
+DEF_SME_ZA_FUNCTION_GS (svmls_lane, binary_za_slice_lane, za_d_h_integer,
+ vg4, none)
+DEF_SME_ZA_FUNCTION_GS (svsub, unary_za_slice, za_d_integer, vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svsub_write, binary_za_slice_opt_single, za_d_integer,
+ vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svvdot_lane, dot_za_slice_lane, za_d_h_integer,
+ vg1x4, none)
+#undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS (AARCH64_FL_SME2 \
+ | AARCH64_FL_SME_F64F64 \
+ | AARCH64_FL_SM_ON)
+DEF_SME_ZA_FUNCTION_GS (svadd, unary_za_slice, za_d_float, vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svmla, binary_za_slice_opt_single, za_d_float,
+ vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svmla_lane, binary_za_slice_lane, za_d_float,
+ vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svmls, binary_za_slice_opt_single, za_d_float,
+ vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svmls_lane, binary_za_slice_lane, za_d_float,
+ vg1x24, none)
+DEF_SME_ZA_FUNCTION_GS (svsub, unary_za_slice, za_d_float, vg1x24, none)
+#undef REQUIRED_EXTENSIONS
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-sme.h b/gcc/config/aarch64/aarch64-sve-builtins-sme.h
new file mode 100644
index 0000000..69aca0f
--- /dev/null
+++ b/gcc/config/aarch64/aarch64-sve-builtins-sme.h
@@ -0,0 +1,83 @@
+/* ACLE support for AArch64 SME.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_AARCH64_SVE_BUILTINS_SME_H
+#define GCC_AARCH64_SVE_BUILTINS_SME_H
+
+namespace aarch64_sve
+{
+ namespace functions
+ {
+ extern const function_base *const arm_has_sme;
+ extern const function_base *const arm_in_streaming_mode;
+ extern const function_base *const svadd_za;
+ extern const function_base *const svadd_write_za;
+ extern const function_base *const svaddha_za;
+ extern const function_base *const svaddva_za;
+ extern const function_base *const svbmopa_za;
+ extern const function_base *const svbmops_za;
+ extern const function_base *const svcntsb;
+ extern const function_base *const svcntsd;
+ extern const function_base *const svcntsh;
+ extern const function_base *const svcntsw;
+ extern const function_base *const svdot_za;
+ extern const function_base *const svdot_lane_za;
+ extern const function_base *const svld1_hor_za;
+ extern const function_base *const svld1_ver_za;
+ extern const function_base *const svldr_za;
+ extern const function_base *const svldr_zt;
+ extern const function_base *const svluti2_lane_zt;
+ extern const function_base *const svluti4_lane_zt;
+ extern const function_base *const svmla_za;
+ extern const function_base *const svmla_lane_za;
+ extern const function_base *const svmls_za;
+ extern const function_base *const svmls_lane_za;
+ extern const function_base *const svmopa_za;
+ extern const function_base *const svmops_za;
+ extern const function_base *const svread_za;
+ extern const function_base *const svread_hor_za;
+ extern const function_base *const svread_ver_za;
+ extern const function_base *const svst1_hor_za;
+ extern const function_base *const svst1_ver_za;
+ extern const function_base *const svstr_za;
+ extern const function_base *const svstr_zt;
+ extern const function_base *const svsub_za;
+ extern const function_base *const svsub_write_za;
+ extern const function_base *const svsudot_za;
+ extern const function_base *const svsudot_lane_za;
+ extern const function_base *const svsuvdot_lane_za;
+ extern const function_base *const svsumopa_za;
+ extern const function_base *const svsumops_za;
+ extern const function_base *const svusdot_za;
+ extern const function_base *const svusdot_lane_za;
+ extern const function_base *const svusvdot_lane_za;
+ extern const function_base *const svusmopa_za;
+ extern const function_base *const svusmops_za;
+ extern const function_base *const svwrite_za;
+ extern const function_base *const svwrite_hor_za;
+ extern const function_base *const svwrite_ver_za;
+ extern const function_base *const svundef_za;
+ extern const function_base *const svvdot_lane_za;
+ extern const function_base *const svzero_mask_za;
+ extern const function_base *const svzero_za;
+ extern const function_base *const svzero_zt;
+ }
+}
+
+#endif
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-sve2.cc b/gcc/config/aarch64/aarch64-sve-builtins-sve2.cc
index 9e989fc..045e0d0 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-sve2.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins-sve2.cc
@@ -116,6 +116,39 @@ public:
}
};
+class svclamp_impl : public function_base
+{
+public:
+ rtx
+ expand (function_expander &e) const override
+ {
+ auto mode = e.tuple_mode (0);
+ insn_code icode;
+ if (e.type_suffix (0).float_p)
+ icode = (e.vectors_per_tuple () > 1
+ ? code_for_aarch64_sve_fclamp_single (mode)
+ : code_for_aarch64_sve_fclamp (mode));
+ else
+ {
+ auto max = e.type_suffix (0).unsigned_p ? UMAX : SMAX;
+ icode = (e.vectors_per_tuple () > 1
+ ? code_for_aarch64_sve_clamp_single (max, mode)
+ : code_for_aarch64_sve_clamp (max, mode));
+ }
+ return e.use_exact_insn (icode);
+ }
+};
+
+class svcvtn_impl : public function_base
+{
+public:
+ rtx
+ expand (function_expander &e) const override
+ {
+ return e.use_exact_insn (code_for_aarch64_sve_cvtn (e.result_mode ()));
+ }
+};
+
class svldnt1_gather_impl : public full_width_access
{
public:
@@ -188,6 +221,30 @@ public:
}
};
+class svpext_impl : public function_base
+{
+public:
+ rtx
+ expand (function_expander &e) const override
+ {
+ unsigned int bits = e.type_suffix (0).element_bits;
+ return e.use_exact_insn (e.vectors_per_tuple () == 2
+ ? code_for_aarch64_sve_pextx2 (bits)
+ : code_for_aarch64_sve_pext (bits));
+ }
+};
+
+class svpsel_impl : public function_base
+{
+public:
+ rtx
+ expand (function_expander &e) const override
+ {
+ unsigned int bits = e.type_suffix (0).element_bits;
+ return e.use_exact_insn (code_for_aarch64_sve_psel (bits));
+ }
+};
+
class svqcadd_impl : public function_base
{
public:
@@ -247,7 +304,7 @@ public:
that we can use for sensible shift amounts. */
function_instance instance ("svqshl", functions::svqshl,
shapes::binary_int_opt_n, MODE_n,
- f.type_suffix_ids, f.pred);
+ f.type_suffix_ids, GROUP_none, f.pred);
return f.redirect_call (instance);
}
else
@@ -255,8 +312,9 @@ public:
/* The saturation has no effect, and [SU]RSHL has immediate forms
that we can use for sensible shift amounts. */
function_instance instance ("svrshl", functions::svrshl,
- shapes::binary_int_opt_n, MODE_n,
- f.type_suffix_ids, f.pred);
+ shapes::binary_int_opt_single_n,
+ MODE_n, f.type_suffix_ids, GROUP_none,
+ f.pred);
return f.redirect_call (instance);
}
}
@@ -285,7 +343,7 @@ public:
-wi::to_wide (amount));
function_instance instance ("svasr", functions::svasr,
shapes::binary_uint_opt_n, MODE_n,
- f.type_suffix_ids, f.pred);
+ f.type_suffix_ids, GROUP_none, f.pred);
if (f.type_suffix (0).unsigned_p)
{
instance.base_name = "svlsr";
@@ -309,6 +367,9 @@ public:
gimple *
fold (gimple_folder &f) const override
{
+ if (f.vectors_per_tuple () > 1)
+ return nullptr;
+
if (tree amount = uniform_integer_cst_p (gimple_call_arg (f.call, 2)))
{
if (wi::to_widest (amount) >= 0)
@@ -317,7 +378,7 @@ public:
that we can use for sensible shift amounts. */
function_instance instance ("svlsl", functions::svlsl,
shapes::binary_uint_opt_n, MODE_n,
- f.type_suffix_ids, f.pred);
+ f.type_suffix_ids, GROUP_none, f.pred);
gcall *call = as_a <gcall *> (f.redirect_call (instance));
gimple_call_set_arg (call, 2, amount);
return call;
@@ -330,7 +391,7 @@ public:
-wi::to_wide (amount));
function_instance instance ("svrshr", functions::svrshr,
shapes::shift_right_imm, MODE_n,
- f.type_suffix_ids, f.pred);
+ f.type_suffix_ids, GROUP_none, f.pred);
gcall *call = as_a <gcall *> (f.redirect_call (instance));
gimple_call_set_arg (call, 2, amount);
return call;
@@ -349,7 +410,7 @@ public:
machine_mode mode = e.vector_mode (0);
if (e.pred == PRED_x
&& aarch64_sve_sqadd_sqsub_immediate_p (mode, e.args[2], false))
- return e.map_to_rtx_codes (UNKNOWN, US_PLUS, -1);
+ return e.map_to_rtx_codes (UNKNOWN, US_PLUS, -1, -1);
return e.map_to_unspecs (-1, UNSPEC_USQADD, -1);
}
};
@@ -412,6 +473,19 @@ public:
}
};
+class svunpk_impl : public function_base
+{
+public:
+ rtx
+ expand (function_expander &e) const override
+ {
+ optab op = (e.type_suffix (0).unsigned_p ? zext_optab : sext_optab);
+ insn_code icode = convert_optab_handler (op, e.result_mode (),
+ GET_MODE (e.args[0]));
+ return e.use_exact_insn (icode);
+ }
+};
+
class svuqadd_impl : public function_base
{
public:
@@ -474,13 +548,21 @@ FUNCTION (svaesmc, fixed_insn_function, (CODE_FOR_aarch64_sve2_aesmc))
FUNCTION (svbcax, CODE_FOR_MODE0 (aarch64_sve2_bcax),)
FUNCTION (svbdep, unspec_based_function, (UNSPEC_BDEP, UNSPEC_BDEP, -1))
FUNCTION (svbext, unspec_based_function, (UNSPEC_BEXT, UNSPEC_BEXT, -1))
+FUNCTION (svbfmlslb, fixed_insn_function, (CODE_FOR_aarch64_sve_bfmlslbvnx4sf))
+FUNCTION (svbfmlslb_lane, fixed_insn_function,
+ (CODE_FOR_aarch64_sve_bfmlslb_lanevnx4sf))
+FUNCTION (svbfmlslt, fixed_insn_function, (CODE_FOR_aarch64_sve_bfmlsltvnx4sf))
+FUNCTION (svbfmlslt_lane, fixed_insn_function,
+ (CODE_FOR_aarch64_sve_bfmlslt_lanevnx4sf))
FUNCTION (svbgrp, unspec_based_function, (UNSPEC_BGRP, UNSPEC_BGRP, -1))
FUNCTION (svbsl, CODE_FOR_MODE0 (aarch64_sve2_bsl),)
FUNCTION (svbsl1n, CODE_FOR_MODE0 (aarch64_sve2_bsl1n),)
FUNCTION (svbsl2n, CODE_FOR_MODE0 (aarch64_sve2_bsl2n),)
FUNCTION (svcdot, svcdot_impl,)
FUNCTION (svcdot_lane, svcdot_lane_impl,)
+FUNCTION (svclamp, svclamp_impl,)
FUNCTION (svcvtlt, unspec_based_function, (-1, -1, UNSPEC_COND_FCVTLT))
+FUNCTION (svcvtn, svcvtn_impl,)
FUNCTION (svcvtx, unspec_based_function, (-1, -1, UNSPEC_COND_FCVTX))
FUNCTION (svcvtxnt, CODE_FOR_MODE1 (aarch64_sve2_cvtxnt),)
FUNCTION (sveor3, CODE_FOR_MODE0 (aarch64_sve2_eor3),)
@@ -537,13 +619,19 @@ FUNCTION (svmullt_lane, unspec_based_lane_function, (UNSPEC_SMULLT,
UNSPEC_UMULLT, -1))
FUNCTION (svnbsl, CODE_FOR_MODE0 (aarch64_sve2_nbsl),)
FUNCTION (svnmatch, svmatch_svnmatch_impl, (UNSPEC_NMATCH))
+FUNCTION (svpext, svpext_impl,)
FUNCTION (svpmul, CODE_FOR_MODE0 (aarch64_sve2_pmul),)
FUNCTION (svpmullb, unspec_based_function, (-1, UNSPEC_PMULLB, -1))
FUNCTION (svpmullb_pair, unspec_based_function, (-1, UNSPEC_PMULLB_PAIR, -1))
FUNCTION (svpmullt, unspec_based_function, (-1, UNSPEC_PMULLT, -1))
FUNCTION (svpmullt_pair, unspec_based_function, (-1, UNSPEC_PMULLT_PAIR, -1))
+FUNCTION (svpsel, svpsel_impl,)
FUNCTION (svqabs, rtx_code_function, (SS_ABS, UNKNOWN, UNKNOWN))
FUNCTION (svqcadd, svqcadd_impl,)
+FUNCTION (svqcvt, integer_conversion, (UNSPEC_SQCVT, UNSPEC_SQCVTU,
+ UNSPEC_UQCVT, -1))
+FUNCTION (svqcvtn, integer_conversion, (UNSPEC_SQCVTN, UNSPEC_SQCVTUN,
+ UNSPEC_UQCVTN, -1))
FUNCTION (svqdmlalb, unspec_based_qadd_function, (UNSPEC_SQDMULLB, -1, -1))
FUNCTION (svqdmlalb_lane, unspec_based_qadd_lane_function, (UNSPEC_SQDMULLB,
-1, -1))
@@ -579,10 +667,16 @@ FUNCTION (svqrdmlsh, unspec_based_function, (UNSPEC_SQRDMLSH, -1, -1))
FUNCTION (svqrdmlsh_lane, unspec_based_lane_function, (UNSPEC_SQRDMLSH,
-1, -1))
FUNCTION (svqrshl, svqrshl_impl,)
+FUNCTION (svqrshr, unspec_based_uncond_function, (UNSPEC_SQRSHR,
+ UNSPEC_UQRSHR, -1, 1))
+FUNCTION (svqrshrn, unspec_based_uncond_function, (UNSPEC_SQRSHRN,
+ UNSPEC_UQRSHRN, -1, 1))
FUNCTION (svqrshrnb, unspec_based_function, (UNSPEC_SQRSHRNB,
UNSPEC_UQRSHRNB, -1))
FUNCTION (svqrshrnt, unspec_based_function, (UNSPEC_SQRSHRNT,
UNSPEC_UQRSHRNT, -1))
+FUNCTION (svqrshru, unspec_based_uncond_function, (UNSPEC_SQRSHRU, -1, -1, 1))
+FUNCTION (svqrshrun, unspec_based_uncond_function, (UNSPEC_SQRSHRUN, -1, -1, 1))
FUNCTION (svqrshrunb, unspec_based_function, (UNSPEC_SQRSHRUNB, -1, -1))
FUNCTION (svqrshrunt, unspec_based_function, (UNSPEC_SQRSHRUNT, -1, -1))
FUNCTION (svqshl, svqshl_impl,)
@@ -603,6 +697,8 @@ FUNCTION (svraddhnb, unspec_based_function, (UNSPEC_RADDHNB,
FUNCTION (svraddhnt, unspec_based_function, (UNSPEC_RADDHNT,
UNSPEC_RADDHNT, -1))
FUNCTION (svrax1, fixed_insn_function, (CODE_FOR_aarch64_sve2_rax1))
+FUNCTION (svrevd, unspec_based_function, (UNSPEC_REVD, UNSPEC_REVD,
+ UNSPEC_REVD))
FUNCTION (svrhadd, unspec_based_function, (UNSPEC_SRHADD, UNSPEC_URHADD, -1))
FUNCTION (svrshl, svrshl_impl,)
FUNCTION (svrshr, unspec_based_function, (UNSPEC_SRSHR, UNSPEC_URSHR, -1))
@@ -639,7 +735,12 @@ FUNCTION (svsubwb, unspec_based_function, (UNSPEC_SSUBWB, UNSPEC_USUBWB, -1))
FUNCTION (svsubwt, unspec_based_function, (UNSPEC_SSUBWT, UNSPEC_USUBWT, -1))
FUNCTION (svtbl2, svtbl2_impl,)
FUNCTION (svtbx, CODE_FOR_MODE0 (aarch64_sve2_tbx),)
+FUNCTION (svunpk, svunpk_impl,)
FUNCTION (svuqadd, svuqadd_impl,)
+FUNCTION (svuzp, multireg_permute, (UNSPEC_UZP))
+FUNCTION (svuzpq, multireg_permute, (UNSPEC_UZPQ))
+FUNCTION (svzip, multireg_permute, (UNSPEC_ZIP))
+FUNCTION (svzipq, multireg_permute, (UNSPEC_ZIPQ))
FUNCTION (svwhilege, while_comparison, (UNSPEC_WHILEGE, UNSPEC_WHILEHS))
FUNCTION (svwhilegt, while_comparison, (UNSPEC_WHILEGT, UNSPEC_WHILEHI))
FUNCTION (svwhilerw, svwhilerw_svwhilewr_impl, (UNSPEC_WHILERW))
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-sve2.def b/gcc/config/aarch64/aarch64-sve-builtins-sve2.def
index dd6d135..f37a5cc 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-sve2.def
+++ b/gcc/config/aarch64/aarch64-sve-builtins-sve2.def
@@ -17,7 +17,7 @@
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
-#define REQUIRED_EXTENSIONS AARCH64_FL_SVE2
+#define REQUIRED_EXTENSIONS AARCH64_FL_SVE | AARCH64_FL_SVE2
DEF_SVE_FUNCTION (svaba, ternary_opt_n, all_integer, none)
DEF_SVE_FUNCTION (svabalb, ternary_long_opt_n, hsd_integer, none)
DEF_SVE_FUNCTION (svabalt, ternary_long_opt_n, hsd_integer, none)
@@ -51,24 +51,9 @@ DEF_SVE_FUNCTION (sveor3, ternary_opt_n, all_integer, none)
DEF_SVE_FUNCTION (sveorbt, ternary_opt_n, all_integer, none)
DEF_SVE_FUNCTION (sveortb, ternary_opt_n, all_integer, none)
DEF_SVE_FUNCTION (svhadd, binary_opt_n, all_integer, mxz)
-DEF_SVE_FUNCTION (svhistcnt, binary_to_uint, sd_integer, z)
-DEF_SVE_FUNCTION (svhistseg, binary_to_uint, b_integer, none)
DEF_SVE_FUNCTION (svhsub, binary_opt_n, all_integer, mxz)
DEF_SVE_FUNCTION (svhsubr, binary_opt_n, all_integer, mxz)
-DEF_SVE_FUNCTION (svldnt1_gather, load_gather_sv_restricted, sd_data, implicit)
-DEF_SVE_FUNCTION (svldnt1_gather, load_gather_vs, sd_data, implicit)
-DEF_SVE_FUNCTION (svldnt1sb_gather, load_ext_gather_offset_restricted, sd_integer, implicit)
-DEF_SVE_FUNCTION (svldnt1sh_gather, load_ext_gather_offset_restricted, sd_integer, implicit)
-DEF_SVE_FUNCTION (svldnt1sh_gather, load_ext_gather_index_restricted, sd_integer, implicit)
-DEF_SVE_FUNCTION (svldnt1sw_gather, load_ext_gather_offset_restricted, d_integer, implicit)
-DEF_SVE_FUNCTION (svldnt1sw_gather, load_ext_gather_index_restricted, d_integer, implicit)
-DEF_SVE_FUNCTION (svldnt1ub_gather, load_ext_gather_offset_restricted, sd_integer, implicit)
-DEF_SVE_FUNCTION (svldnt1uh_gather, load_ext_gather_offset_restricted, sd_integer, implicit)
-DEF_SVE_FUNCTION (svldnt1uh_gather, load_ext_gather_index_restricted, sd_integer, implicit)
-DEF_SVE_FUNCTION (svldnt1uw_gather, load_ext_gather_offset_restricted, d_integer, implicit)
-DEF_SVE_FUNCTION (svldnt1uw_gather, load_ext_gather_index_restricted, d_integer, implicit)
DEF_SVE_FUNCTION (svlogb, unary_to_int, all_float, mxz)
-DEF_SVE_FUNCTION (svmatch, compare, bh_integer, implicit)
DEF_SVE_FUNCTION (svmaxp, binary, all_arith, mx)
DEF_SVE_FUNCTION (svmaxnmp, binary, all_float, mx)
DEF_SVE_FUNCTION (svmla_lane, ternary_lane, hsd_integer, none)
@@ -91,7 +76,6 @@ DEF_SVE_FUNCTION (svmullb_lane, binary_long_lane, sd_integer, none)
DEF_SVE_FUNCTION (svmullt, binary_long_opt_n, hsd_integer, none)
DEF_SVE_FUNCTION (svmullt_lane, binary_long_lane, sd_integer, none)
DEF_SVE_FUNCTION (svnbsl, ternary_opt_n, all_integer, none)
-DEF_SVE_FUNCTION (svnmatch, compare, bh_integer, implicit)
DEF_SVE_FUNCTION (svpmul, binary_opt_n, b_unsigned, none)
DEF_SVE_FUNCTION (svpmullb, binary_long_opt_n, hd_unsigned, none)
DEF_SVE_FUNCTION (svpmullb_pair, binary_opt_n, bs_unsigned, none)
@@ -110,7 +94,7 @@ DEF_SVE_FUNCTION (svqdmlslb_lane, ternary_long_lane, sd_signed, none)
DEF_SVE_FUNCTION (svqdmlslbt, ternary_long_opt_n, hsd_signed, none)
DEF_SVE_FUNCTION (svqdmlslt, ternary_long_opt_n, hsd_signed, none)
DEF_SVE_FUNCTION (svqdmlslt_lane, ternary_long_lane, sd_signed, none)
-DEF_SVE_FUNCTION (svqdmulh, binary_opt_n, all_signed, none)
+DEF_SVE_FUNCTION (svqdmulh, binary_opt_single_n, all_signed, none)
DEF_SVE_FUNCTION (svqdmulh_lane, binary_lane, hsd_signed, none)
DEF_SVE_FUNCTION (svqdmullb, binary_long_opt_n, hsd_signed, none)
DEF_SVE_FUNCTION (svqdmullb_lane, binary_long_lane, sd_signed, none)
@@ -147,7 +131,7 @@ DEF_SVE_FUNCTION (svraddhnt, binary_narrowt_opt_n, hsd_integer, none)
DEF_SVE_FUNCTION (svrecpe, unary, s_unsigned, mxz)
DEF_SVE_FUNCTION (svrhadd, binary_opt_n, all_integer, mxz)
DEF_SVE_FUNCTION (svrsqrte, unary, s_unsigned, mxz)
-DEF_SVE_FUNCTION (svrshl, binary_int_opt_n, all_integer, mxz)
+DEF_SVE_FUNCTION (svrshl, binary_int_opt_single_n, all_integer, mxz)
DEF_SVE_FUNCTION (svrshr, shift_right_imm, all_integer, mxz)
DEF_SVE_FUNCTION (svrshrnb, shift_right_imm_narrowb, hsd_integer, none)
DEF_SVE_FUNCTION (svrshrnt, shift_right_imm_narrowt, hsd_integer, none)
@@ -164,13 +148,6 @@ DEF_SVE_FUNCTION (svsli, ternary_shift_left_imm, all_integer, none)
DEF_SVE_FUNCTION (svsqadd, binary_int_opt_n, all_unsigned, mxz)
DEF_SVE_FUNCTION (svsra, ternary_shift_right_imm, all_integer, none)
DEF_SVE_FUNCTION (svsri, ternary_shift_right_imm, all_integer, none)
-DEF_SVE_FUNCTION (svstnt1_scatter, store_scatter_index_restricted, sd_data, implicit)
-DEF_SVE_FUNCTION (svstnt1_scatter, store_scatter_offset_restricted, sd_data, implicit)
-DEF_SVE_FUNCTION (svstnt1b_scatter, store_scatter_offset_restricted, sd_integer, implicit)
-DEF_SVE_FUNCTION (svstnt1h_scatter, store_scatter_index_restricted, sd_integer, implicit)
-DEF_SVE_FUNCTION (svstnt1h_scatter, store_scatter_offset_restricted, sd_integer, implicit)
-DEF_SVE_FUNCTION (svstnt1w_scatter, store_scatter_index_restricted, d_integer, implicit)
-DEF_SVE_FUNCTION (svstnt1w_scatter, store_scatter_offset_restricted, d_integer, implicit)
DEF_SVE_FUNCTION (svsubhnb, binary_narrowb_opt_n, hsd_integer, none)
DEF_SVE_FUNCTION (svsubhnt, binary_narrowt_opt_n, hsd_integer, none)
DEF_SVE_FUNCTION (svsublb, binary_long_opt_n, hsd_integer, none)
@@ -189,7 +166,38 @@ DEF_SVE_FUNCTION (svwhilewr, compare_ptr, all_data, none)
DEF_SVE_FUNCTION (svxar, ternary_shift_right_imm, all_integer, none)
#undef REQUIRED_EXTENSIONS
-#define REQUIRED_EXTENSIONS (AARCH64_FL_SVE2 | AARCH64_FL_SVE2_AES)
+#define REQUIRED_EXTENSIONS (AARCH64_FL_SVE \
+ | AARCH64_FL_SVE2 \
+ | AARCH64_FL_SM_OFF)
+DEF_SVE_FUNCTION (svhistcnt, binary_to_uint, sd_integer, z)
+DEF_SVE_FUNCTION (svhistseg, binary_to_uint, b_integer, none)
+DEF_SVE_FUNCTION (svldnt1_gather, load_gather_sv_restricted, sd_data, implicit)
+DEF_SVE_FUNCTION (svldnt1_gather, load_gather_vs, sd_data, implicit)
+DEF_SVE_FUNCTION (svldnt1sb_gather, load_ext_gather_offset_restricted, sd_integer, implicit)
+DEF_SVE_FUNCTION (svldnt1sh_gather, load_ext_gather_offset_restricted, sd_integer, implicit)
+DEF_SVE_FUNCTION (svldnt1sh_gather, load_ext_gather_index_restricted, sd_integer, implicit)
+DEF_SVE_FUNCTION (svldnt1sw_gather, load_ext_gather_offset_restricted, d_integer, implicit)
+DEF_SVE_FUNCTION (svldnt1sw_gather, load_ext_gather_index_restricted, d_integer, implicit)
+DEF_SVE_FUNCTION (svldnt1ub_gather, load_ext_gather_offset_restricted, sd_integer, implicit)
+DEF_SVE_FUNCTION (svldnt1uh_gather, load_ext_gather_offset_restricted, sd_integer, implicit)
+DEF_SVE_FUNCTION (svldnt1uh_gather, load_ext_gather_index_restricted, sd_integer, implicit)
+DEF_SVE_FUNCTION (svldnt1uw_gather, load_ext_gather_offset_restricted, d_integer, implicit)
+DEF_SVE_FUNCTION (svldnt1uw_gather, load_ext_gather_index_restricted, d_integer, implicit)
+DEF_SVE_FUNCTION (svmatch, compare, bh_integer, implicit)
+DEF_SVE_FUNCTION (svnmatch, compare, bh_integer, implicit)
+DEF_SVE_FUNCTION (svstnt1_scatter, store_scatter_index_restricted, sd_data, implicit)
+DEF_SVE_FUNCTION (svstnt1_scatter, store_scatter_offset_restricted, sd_data, implicit)
+DEF_SVE_FUNCTION (svstnt1b_scatter, store_scatter_offset_restricted, sd_integer, implicit)
+DEF_SVE_FUNCTION (svstnt1h_scatter, store_scatter_index_restricted, sd_integer, implicit)
+DEF_SVE_FUNCTION (svstnt1h_scatter, store_scatter_offset_restricted, sd_integer, implicit)
+DEF_SVE_FUNCTION (svstnt1w_scatter, store_scatter_index_restricted, d_integer, implicit)
+DEF_SVE_FUNCTION (svstnt1w_scatter, store_scatter_offset_restricted, d_integer, implicit)
+#undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS (AARCH64_FL_SVE \
+ | AARCH64_FL_SVE2 \
+ | AARCH64_FL_SVE2_AES \
+ | AARCH64_FL_SM_OFF)
DEF_SVE_FUNCTION (svaesd, binary, b_unsigned, none)
DEF_SVE_FUNCTION (svaese, binary, b_unsigned, none)
DEF_SVE_FUNCTION (svaesmc, unary, b_unsigned, none)
@@ -198,17 +206,96 @@ DEF_SVE_FUNCTION (svpmullb_pair, binary_opt_n, d_unsigned, none)
DEF_SVE_FUNCTION (svpmullt_pair, binary_opt_n, d_unsigned, none)
#undef REQUIRED_EXTENSIONS
-#define REQUIRED_EXTENSIONS (AARCH64_FL_SVE2 | AARCH64_FL_SVE2_BITPERM)
+#define REQUIRED_EXTENSIONS (AARCH64_FL_SVE \
+ | AARCH64_FL_SVE2 \
+ | AARCH64_FL_SVE2_BITPERM \
+ | AARCH64_FL_SM_OFF)
DEF_SVE_FUNCTION (svbdep, binary_opt_n, all_unsigned, none)
DEF_SVE_FUNCTION (svbext, binary_opt_n, all_unsigned, none)
DEF_SVE_FUNCTION (svbgrp, binary_opt_n, all_unsigned, none)
#undef REQUIRED_EXTENSIONS
-#define REQUIRED_EXTENSIONS (AARCH64_FL_SVE2 | AARCH64_FL_SVE2_SHA3)
+#define REQUIRED_EXTENSIONS (AARCH64_FL_SVE \
+ | AARCH64_FL_SVE2 \
+ | AARCH64_FL_SVE2_SHA3 \
+ | AARCH64_FL_SM_OFF)
DEF_SVE_FUNCTION (svrax1, binary, d_integer, none)
#undef REQUIRED_EXTENSIONS
-#define REQUIRED_EXTENSIONS (AARCH64_FL_SVE2 | AARCH64_FL_SVE2_SM4)
+#define REQUIRED_EXTENSIONS (AARCH64_FL_SVE \
+ | AARCH64_FL_SVE2 \
+ | AARCH64_FL_SVE2_SM4 \
+ | AARCH64_FL_SM_OFF)
DEF_SVE_FUNCTION (svsm4e, binary, s_unsigned, none)
DEF_SVE_FUNCTION (svsm4ekey, binary, s_unsigned, none)
#undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS (AARCH64_FL_SVE \
+ | AARCH64_FL_SVE2 \
+ | AARCH64_FL_SME \
+ | AARCH64_FL_SM_ON)
+DEF_SVE_FUNCTION (svclamp, clamp, all_integer, none)
+DEF_SVE_FUNCTION (svpsel, select_pred, all_pred_count, none)
+DEF_SVE_FUNCTION (svrevd, unary, all_data, mxz)
+#undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS (AARCH64_FL_SVE \
+ | AARCH64_FL_SVE2 \
+ | AARCH64_FL_SME2 \
+ | AARCH64_FL_SM_ON)
+DEF_SVE_FUNCTION_GS (svadd, binary_single, all_integer, x24, none)
+DEF_SVE_FUNCTION (svbfmlslb, ternary_bfloat_opt_n, s_float, none)
+DEF_SVE_FUNCTION (svbfmlslb_lane, ternary_bfloat_lane, s_float, none)
+DEF_SVE_FUNCTION (svbfmlslt, ternary_bfloat_opt_n, s_float, none)
+DEF_SVE_FUNCTION (svbfmlslt_lane, ternary_bfloat_lane, s_float, none)
+DEF_SVE_FUNCTION (svclamp, clamp, all_float, none)
+DEF_SVE_FUNCTION_GS (svclamp, clamp, all_arith, x24, none)
+DEF_SVE_FUNCTION (svcntp, count_pred_c, all_count, none)
+DEF_SVE_FUNCTION_GS (svcvt, unary_convertxn, cvt_h_s_float, x2, none)
+DEF_SVE_FUNCTION_GS (svcvt, unary_convertxn, cvt_s_s, x24, none)
+DEF_SVE_FUNCTION_GS (svcvtn, unary_convertxn, cvt_h_s_float, x2, none)
+DEF_SVE_FUNCTION (svdot, ternary_qq_opt_n_or_011, s_narrow_fsu, none)
+DEF_SVE_FUNCTION (svdot_lane, ternary_qq_or_011_lane, s_narrow_fsu, none)
+DEF_SVE_FUNCTION_GS (svld1, load, all_data, x24, implicit)
+DEF_SVE_FUNCTION_GS (svldnt1, load, all_data, x24, implicit)
+DEF_SVE_FUNCTION_GS (svmax, binary_opt_single_n, all_arith, x24, none)
+DEF_SVE_FUNCTION_GS (svmaxnm, binary_opt_single_n, all_float, x24, none)
+DEF_SVE_FUNCTION_GS (svmin, binary_opt_single_n, all_arith, x24, none)
+DEF_SVE_FUNCTION_GS (svminnm, binary_opt_single_n, all_float, x24, none)
+DEF_SVE_FUNCTION_GS (svpext, extract_pred, all_count, x12, none)
+DEF_SVE_FUNCTION (svptrue, inherent, all_count, none)
+DEF_SVE_FUNCTION_GS (svqcvt, unary_convertxn, qcvt_x2, x2, none)
+DEF_SVE_FUNCTION_GS (svqcvt, unary_convertxn, qcvt_x4, x4, none)
+DEF_SVE_FUNCTION_GS (svqcvtn, unary_convertxn, qcvt_x2, x2, none)
+DEF_SVE_FUNCTION_GS (svqcvtn, unary_convertxn, qcvt_x4, x4, none)
+DEF_SVE_FUNCTION_GS (svqdmulh, binary_opt_single_n, all_signed, x24, none)
+DEF_SVE_FUNCTION_GS (svqrshr, shift_right_imm_narrowxn, qrshr_x2, x2, none)
+DEF_SVE_FUNCTION_GS (svqrshr, shift_right_imm_narrowxn, qrshr_x4, x4, none)
+DEF_SVE_FUNCTION_GS (svqrshrn, shift_right_imm_narrowxn, qrshr_x2, x2, none)
+DEF_SVE_FUNCTION_GS (svqrshrn, shift_right_imm_narrowxn, qrshr_x4, x4, none)
+DEF_SVE_FUNCTION_GS (svqrshru, shift_right_imm_narrowxn, qrshru_x2, x2, none)
+DEF_SVE_FUNCTION_GS (svqrshru, shift_right_imm_narrowxn, qrshru_x4, x4, none)
+DEF_SVE_FUNCTION_GS (svqrshrun, shift_right_imm_narrowxn, qrshru_x2, x2, none)
+DEF_SVE_FUNCTION_GS (svqrshrun, shift_right_imm_narrowxn, qrshru_x4, x4, none)
+DEF_SVE_FUNCTION_GS (svrinta, unaryxn, s_float, x24, none)
+DEF_SVE_FUNCTION_GS (svrintm, unaryxn, s_float, x24, none)
+DEF_SVE_FUNCTION_GS (svrintn, unaryxn, s_float, x24, none)
+DEF_SVE_FUNCTION_GS (svrintp, unaryxn, s_float, x24, none)
+DEF_SVE_FUNCTION_GS (svrshl, binary_int_opt_single_n, all_integer, x24, none)
+DEF_SVE_FUNCTION_GS (svsel, binaryxn, all_data, x24, implicit)
+DEF_SVE_FUNCTION_GS (svst1, storexn, all_data, x24, implicit)
+DEF_SVE_FUNCTION_GS (svstnt1, storexn, all_data, x24, implicit)
+DEF_SVE_FUNCTION_GS (svunpk, unary_convertxn, bhs_widen, x24, none)
+DEF_SVE_FUNCTION_GS (svuzp, unaryxn, all_data, x24, none)
+DEF_SVE_FUNCTION_GS (svuzpq, unaryxn, all_data, x24, none)
+DEF_SVE_FUNCTION_GS (svwhilege, compare_scalar, while_x, x2, none)
+DEF_SVE_FUNCTION (svwhilege, compare_scalar_count, while_x_c, none)
+DEF_SVE_FUNCTION_GS (svwhilegt, compare_scalar, while_x, x2, none)
+DEF_SVE_FUNCTION (svwhilegt, compare_scalar_count, while_x_c, none)
+DEF_SVE_FUNCTION_GS (svwhilele, compare_scalar, while_x, x2, none)
+DEF_SVE_FUNCTION (svwhilele, compare_scalar_count, while_x_c, none)
+DEF_SVE_FUNCTION_GS (svwhilelt, compare_scalar, while_x, x2, none)
+DEF_SVE_FUNCTION (svwhilelt, compare_scalar_count, while_x_c, none)
+DEF_SVE_FUNCTION_GS (svzip, unaryxn, all_data, x24, none)
+DEF_SVE_FUNCTION_GS (svzipq, unaryxn, all_data, x24, none)
+#undef REQUIRED_EXTENSIONS
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-sve2.h b/gcc/config/aarch64/aarch64-sve-builtins-sve2.h
index 1cd4477..24ee612 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-sve2.h
+++ b/gcc/config/aarch64/aarch64-sve-builtins-sve2.h
@@ -47,13 +47,20 @@ namespace aarch64_sve
extern const function_base *const svbcax;
extern const function_base *const svbdep;
extern const function_base *const svbext;
+ extern const function_base *const svbfmlslb;
+ extern const function_base *const svbfmlslb_lane;
+ extern const function_base *const svbfmlslt;
+ extern const function_base *const svbfmlslt_lane;
extern const function_base *const svbgrp;
extern const function_base *const svbsl;
extern const function_base *const svbsl1n;
extern const function_base *const svbsl2n;
extern const function_base *const svcdot;
extern const function_base *const svcdot_lane;
+ extern const function_base *const svclamp;
+ extern const function_base *const svcntp;
extern const function_base *const svcvtlt;
+ extern const function_base *const svcvtn;
extern const function_base *const svcvtx;
extern const function_base *const svcvtxnt;
extern const function_base *const sveor3;
@@ -93,13 +100,17 @@ namespace aarch64_sve
extern const function_base *const svmullt_lane;
extern const function_base *const svnbsl;
extern const function_base *const svnmatch;
+ extern const function_base *const svpext;
extern const function_base *const svpmul;
extern const function_base *const svpmullb;
extern const function_base *const svpmullb_pair;
extern const function_base *const svpmullt;
extern const function_base *const svpmullt_pair;
+ extern const function_base *const svpsel;
extern const function_base *const svqabs;
extern const function_base *const svqcadd;
+ extern const function_base *const svqcvt;
+ extern const function_base *const svqcvtn;
extern const function_base *const svqdmlalb;
extern const function_base *const svqdmlalb_lane;
extern const function_base *const svqdmlalbt;
@@ -126,8 +137,12 @@ namespace aarch64_sve
extern const function_base *const svqrdmlsh;
extern const function_base *const svqrdmlsh_lane;
extern const function_base *const svqrshl;
+ extern const function_base *const svqrshr;
+ extern const function_base *const svqrshrn;
extern const function_base *const svqrshrnb;
extern const function_base *const svqrshrnt;
+ extern const function_base *const svqrshru;
+ extern const function_base *const svqrshrun;
extern const function_base *const svqrshrunb;
extern const function_base *const svqrshrunt;
extern const function_base *const svqshl;
@@ -144,6 +159,7 @@ namespace aarch64_sve
extern const function_base *const svraddhnb;
extern const function_base *const svraddhnt;
extern const function_base *const svrax1;
+ extern const function_base *const svrevd;
extern const function_base *const svrhadd;
extern const function_base *const svrshl;
extern const function_base *const svrshr;
@@ -178,7 +194,12 @@ namespace aarch64_sve
extern const function_base *const svsubwt;
extern const function_base *const svtbl2;
extern const function_base *const svtbx;
+ extern const function_base *const svunpk;
extern const function_base *const svuqadd;
+ extern const function_base *const svuzp;
+ extern const function_base *const svuzpq;
+ extern const function_base *const svzip;
+ extern const function_base *const svzipq;
extern const function_base *const svwhilege;
extern const function_base *const svwhilegt;
extern const function_base *const svwhilerw;
diff --git a/gcc/config/aarch64/aarch64-sve-builtins.cc b/gcc/config/aarch64/aarch64-sve-builtins.cc
index 161a14e..15fa590 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins.cc
@@ -51,6 +51,7 @@
#include "aarch64-sve-builtins.h"
#include "aarch64-sve-builtins-base.h"
#include "aarch64-sve-builtins-sve2.h"
+#include "aarch64-sve-builtins-sme.h"
#include "aarch64-sve-builtins-shapes.h"
namespace aarch64_sve {
@@ -112,6 +113,7 @@ static const char *const pred_suffixes[NUM_PREDS + 1] = {
"_m",
"_x",
"_z",
+ "_m",
""
};
@@ -136,12 +138,35 @@ CONSTEXPR const type_suffix_info type_suffixes[NUM_TYPE_SUFFIXES + 1] = {
TYPE_##CLASS == TYPE_signed || TYPE_##CLASS == TYPE_unsigned, \
TYPE_##CLASS == TYPE_unsigned, \
TYPE_##CLASS == TYPE_float, \
+ TYPE_##CLASS != TYPE_bool, \
TYPE_##CLASS == TYPE_bool, \
+ false, \
+ 0, \
+ MODE },
+#define DEF_SME_ZA_SUFFIX(NAME, BITS, MODE) \
+ { "_" #NAME, \
+ NUM_VECTOR_TYPES, \
+ NUM_TYPE_CLASSES, \
+ BITS, \
+ BITS / BITS_PER_UNIT, \
+ false, \
+ false, \
+ false, \
+ false, \
+ false, \
+ true, \
0, \
MODE },
#include "aarch64-sve-builtins.def"
{ "", NUM_VECTOR_TYPES, TYPE_bool, 0, 0, false, false, false, false,
- 0, VOIDmode }
+ false, false, 0, VOIDmode }
+};
+
+CONSTEXPR const group_suffix_info group_suffixes[] = {
+#define DEF_SVE_GROUP_SUFFIX(NAME, VG, VECTORS_PER_TUPLE) \
+ { "_" #NAME, VG, VECTORS_PER_TUPLE },
+#include "aarch64-sve-builtins.def"
+ { "", 0, 1 }
};
/* Define a TYPES_<combination> macro for each combination of type
@@ -159,6 +184,16 @@ CONSTEXPR const type_suffix_info type_suffixes[NUM_TYPE_SUFFIXES + 1] = {
#define TYPES_all_pred(S, D) \
S (b8), S (b16), S (b32), S (b64)
+/* _c8 _c16 _c32 _c64. */
+#define TYPES_all_count(S, D) \
+ S (c8), S (c16), S (c32), S (c64)
+
+/* _b8 _b16 _b32 _b64
+ _c8 _c16 _c32 _c64. */
+#define TYPES_all_pred_count(S, D) \
+ TYPES_all_pred (S, D), \
+ TYPES_all_count (S, D)
+
/* _f16 _f32 _f64. */
#define TYPES_all_float(S, D) \
S (f16), S (f32), S (f64)
@@ -198,6 +233,10 @@ CONSTEXPR const type_suffix_info type_suffixes[NUM_TYPE_SUFFIXES + 1] = {
#define TYPES_b(S, D) \
S (b)
+/* _c only. */
+#define TYPES_c(S, D) \
+ S (c)
+
/* _u8. */
#define TYPES_b_unsigned(S, D) \
S (u8)
@@ -229,6 +268,19 @@ CONSTEXPR const type_suffix_info type_suffixes[NUM_TYPE_SUFFIXES + 1] = {
#define TYPES_bhs_integer(S, D) \
TYPES_bhs_signed (S, D), TYPES_bhs_unsigned (S, D)
+/* _bf16
+ _f16 _f32
+ _s8 _s16 _s32
+ _u8 _u16 _u32. */
+#define TYPES_bhs_data(S, D) \
+ S (bf16), S (f16), S (f32), TYPES_bhs_integer (S, D)
+
+/* _s16_s8 _s32_s16 _s64_s32
+ _u16_u8 _u32_u16 _u64_u32. */
+#define TYPES_bhs_widen(S, D) \
+ D (s16, s8), D (s32, s16), D (s64, s32), \
+ D (u16, u8), D (u32, u16), D (u64, u32)
+
/* _s16
_u16. */
#define TYPES_h_integer(S, D) \
@@ -247,6 +299,13 @@ CONSTEXPR const type_suffix_info type_suffixes[NUM_TYPE_SUFFIXES + 1] = {
#define TYPES_hs_float(S, D) \
S (f16), S (f32)
+/* _bf16
+ _f16 _f32
+ _s16 _s32
+ _u16 _u32. */
+#define TYPES_hs_data(S, D) \
+ S (bf16), S (f16), S (f32), TYPES_hs_integer (S, D)
+
/* _u16 _u64. */
#define TYPES_hd_unsigned(S, D) \
S (u16), S (u64)
@@ -358,6 +417,10 @@ CONSTEXPR const type_suffix_info type_suffixes[NUM_TYPE_SUFFIXES + 1] = {
#define TYPES_cvt_bfloat(S, D) \
D (bf16, f32)
+/* { _bf16 _f16 } x _f32. */
+#define TYPES_cvt_h_s_float(S, D) \
+ D (bf16, f32), D (f16, f32)
+
/* _f32_f16
_f64_f32. */
#define TYPES_cvt_long(S, D) \
@@ -372,6 +435,15 @@ CONSTEXPR const type_suffix_info type_suffixes[NUM_TYPE_SUFFIXES + 1] = {
#define TYPES_cvt_narrow(S, D) \
D (f16, f32), TYPES_cvt_narrow_s (S, D)
+/* { _s32 _u32 } x _f32
+
+ _f32 x { _s32 _u32 }. */
+#define TYPES_cvt_s_s(S, D) \
+ D (s32, f32), \
+ D (u32, f32), \
+ D (f32, s32), \
+ D (f32, u32)
+
/* { _s32 _s64 } x { _b8 _b16 _b32 _b64 }
{ _u32 _u64 }. */
#define TYPES_inc_dec_n1(D, A) \
@@ -382,6 +454,55 @@ CONSTEXPR const type_suffix_info type_suffixes[NUM_TYPE_SUFFIXES + 1] = {
TYPES_inc_dec_n1 (D, u32), \
TYPES_inc_dec_n1 (D, u64)
+/* { _s16 _u16 } x _s32
+
+ { _u16 } x _u32. */
+#define TYPES_qcvt_x2(S, D) \
+ D (s16, s32), \
+ D (u16, u32), \
+ D (u16, s32)
+
+/* { _s8 _u8 } x _s32
+
+ { _u8 } x _u32
+
+ { _s16 _u16 } x _s64
+
+ { _u16 } x _u64. */
+#define TYPES_qcvt_x4(S, D) \
+ D (s8, s32), \
+ D (u8, u32), \
+ D (u8, s32), \
+ D (s16, s64), \
+ D (u16, u64), \
+ D (u16, s64)
+
+/* _s16_s32
+ _u16_u32. */
+#define TYPES_qrshr_x2(S, D) \
+ D (s16, s32), \
+ D (u16, u32)
+
+/* _u16_s32. */
+#define TYPES_qrshru_x2(S, D) \
+ D (u16, s32)
+
+/* _s8_s32
+ _s16_s64
+ _u8_u32
+ _u16_u64. */
+#define TYPES_qrshr_x4(S, D) \
+ D (s8, s32), \
+ D (s16, s64), \
+ D (u8, u32), \
+ D (u16, u64)
+
+/* _u8_s32
+ _u16_s64. */
+#define TYPES_qrshru_x4(S, D) \
+ D (u8, s32), \
+ D (u16, s64)
+
/* { _bf16 } { _bf16 }
{ _f16 _f32 _f64 } { _f16 _f32 _f64 }
{ _s8 _s16 _s32 _s64 } x { _s8 _s16 _s32 _s64 }
@@ -405,6 +526,12 @@ CONSTEXPR const type_suffix_info type_suffixes[NUM_TYPE_SUFFIXES + 1] = {
TYPES_reinterpret1 (D, u32), \
TYPES_reinterpret1 (D, u64)
+/* _b_c
+ _c_b. */
+#define TYPES_reinterpret_b(S, D) \
+ D (b, c), \
+ D (c, b)
+
/* { _b8 _b16 _b32 _b64 } x { _s32 _s64 }
{ _u32 _u64 } */
#define TYPES_while1(D, bn) \
@@ -415,6 +542,136 @@ CONSTEXPR const type_suffix_info type_suffixes[NUM_TYPE_SUFFIXES + 1] = {
TYPES_while1 (D, b32), \
TYPES_while1 (D, b64)
+/* { _b8 _b16 _b32 _b64 } x { _s64 }
+ { _u64 } */
+#define TYPES_while_x(S, D) \
+ D (b8, s64), D (b8, u64), \
+ D (b16, s64), D (b16, u64), \
+ D (b32, s64), D (b32, u64), \
+ D (b64, s64), D (b64, u64)
+
+/* { _c8 _c16 _c32 _c64 } x { _s64 }
+ { _u64 } */
+#define TYPES_while_x_c(S, D) \
+ D (c8, s64), D (c8, u64), \
+ D (c16, s64), D (c16, u64), \
+ D (c32, s64), D (c32, u64), \
+ D (c64, s64), D (c64, u64)
+
+/* _f32_f16
+ _s32_s16
+ _u32_u16. */
+#define TYPES_s_narrow_fsu(S, D) \
+ D (f32, f16), D (s32, s16), D (u32, u16)
+
+/* _za8 _za16 _za32 _za64 _za128. */
+#define TYPES_all_za(S, D) \
+ S (za8), S (za16), S (za32), S (za64), S (za128)
+
+/* _za64. */
+#define TYPES_d_za(S, D) \
+ S (za64)
+
+/* { _za8 } x { _s8 _u8 }
+
+ { _za16 } x { _bf16 _f16 _s16 _u16 }
+
+ { _za32 } x { _f32 _s32 _u32 }
+
+ { _za64 } x { _f64 _s64 _u64 }. */
+#define TYPES_za_bhsd_data(S, D) \
+ D (za8, s8), D (za8, u8), \
+ D (za16, bf16), D (za16, f16), D (za16, s16), D (za16, u16), \
+ D (za32, f32), D (za32, s32), D (za32, u32), \
+ D (za64, f64), D (za64, s64), D (za64, u64)
+
+/* Likewise, plus:
+
+ { _za128 } x { _bf16 }
+ { _f16 _f32 _f64 }
+ { _s8 _s16 _s32 _s64 }
+ { _u8 _u16 _u32 _u64 }. */
+
+#define TYPES_za_all_data(S, D) \
+ TYPES_za_bhsd_data (S, D), \
+ TYPES_reinterpret1 (D, za128)
+
+/* _za32_s8. */
+#define TYPES_za_s_b_signed(S, D) \
+ D (za32, s8)
+
+/* _za32_u8. */
+#define TYPES_za_s_b_unsigned(S, D) \
+ D (za32, u8)
+
+/* _za32 x { _s8 _u8 }. */
+#define TYPES_za_s_b_integer(S, D) \
+ D (za32, s8), D (za32, u8)
+
+/* _za32 x { _s16 _u16 }. */
+#define TYPES_za_s_h_integer(S, D) \
+ D (za32, s16), D (za32, u16)
+
+/* _za32 x { _bf16 _f16 _s16 _u16 }. */
+#define TYPES_za_s_h_data(S, D) \
+ D (za32, bf16), D (za32, f16), D (za32, s16), D (za32, u16)
+
+/* _za32_u32. */
+#define TYPES_za_s_unsigned(S, D) \
+ D (za32, u32)
+
+/* _za32 x { _s32 _u32 }. */
+#define TYPES_za_s_integer(S, D) \
+ D (za32, s32), D (za32, u32)
+
+/* _za32_f32. */
+#define TYPES_za_s_float(S, D) \
+ D (za32, f32)
+
+/* _za32 x { _f32 _s32 _u32 }. */
+#define TYPES_za_s_data(S, D) \
+ D (za32, f32), D (za32, s32), D (za32, u32)
+
+/* _za64 x { _s16 _u16 }. */
+#define TYPES_za_d_h_integer(S, D) \
+ D (za64, s16), D (za64, u16)
+
+/* _za64_f64. */
+#define TYPES_za_d_float(S, D) \
+ D (za64, f64)
+
+/* _za64 x { _s64 _u64 }. */
+#define TYPES_za_d_integer(S, D) \
+ D (za64, s64), D (za64, u64)
+
+/* _za32 x { _s8 _u8 _bf16 _f16 _f32 }. */
+#define TYPES_mop_base(S, D) \
+ D (za32, s8), D (za32, u8), D (za32, bf16), D (za32, f16), D (za32, f32)
+
+/* _za32_s8. */
+#define TYPES_mop_base_signed(S, D) \
+ D (za32, s8)
+
+/* _za32_u8. */
+#define TYPES_mop_base_unsigned(S, D) \
+ D (za32, u8)
+
+/* _za64 x { _s16 _u16 }. */
+#define TYPES_mop_i16i64(S, D) \
+ D (za64, s16), D (za64, u16)
+
+/* _za64_s16. */
+#define TYPES_mop_i16i64_signed(S, D) \
+ D (za64, s16)
+
+/* _za64_u16. */
+#define TYPES_mop_i16i64_unsigned(S, D) \
+ D (za64, u16)
+
+/* _za. */
+#define TYPES_za(S, D) \
+ S (za)
+
/* Describe a pair of type suffixes in which only the first is used. */
#define DEF_VECTOR_TYPE(X) { TYPE_SUFFIX_ ## X, NUM_TYPE_SUFFIXES }
@@ -437,6 +694,8 @@ static const type_suffix_pair types_none[] = {
/* Create an array for each TYPES_<combination> macro above. */
DEF_SVE_TYPES_ARRAY (all_pred);
+DEF_SVE_TYPES_ARRAY (all_count);
+DEF_SVE_TYPES_ARRAY (all_pred_count);
DEF_SVE_TYPES_ARRAY (all_float);
DEF_SVE_TYPES_ARRAY (all_signed);
DEF_SVE_TYPES_ARRAY (all_float_and_signed);
@@ -452,10 +711,14 @@ DEF_SVE_TYPES_ARRAY (bs_unsigned);
DEF_SVE_TYPES_ARRAY (bhs_signed);
DEF_SVE_TYPES_ARRAY (bhs_unsigned);
DEF_SVE_TYPES_ARRAY (bhs_integer);
+DEF_SVE_TYPES_ARRAY (bhs_data);
+DEF_SVE_TYPES_ARRAY (bhs_widen);
+DEF_SVE_TYPES_ARRAY (c);
DEF_SVE_TYPES_ARRAY (h_integer);
DEF_SVE_TYPES_ARRAY (hs_signed);
DEF_SVE_TYPES_ARRAY (hs_integer);
DEF_SVE_TYPES_ARRAY (hs_float);
+DEF_SVE_TYPES_ARRAY (hs_data);
DEF_SVE_TYPES_ARRAY (hd_unsigned);
DEF_SVE_TYPES_ARRAY (hsd_signed);
DEF_SVE_TYPES_ARRAY (hsd_integer);
@@ -476,12 +739,95 @@ DEF_SVE_TYPES_ARRAY (d_integer);
DEF_SVE_TYPES_ARRAY (d_data);
DEF_SVE_TYPES_ARRAY (cvt);
DEF_SVE_TYPES_ARRAY (cvt_bfloat);
+DEF_SVE_TYPES_ARRAY (cvt_h_s_float);
DEF_SVE_TYPES_ARRAY (cvt_long);
DEF_SVE_TYPES_ARRAY (cvt_narrow_s);
DEF_SVE_TYPES_ARRAY (cvt_narrow);
+DEF_SVE_TYPES_ARRAY (cvt_s_s);
DEF_SVE_TYPES_ARRAY (inc_dec_n);
+DEF_SVE_TYPES_ARRAY (qcvt_x2);
+DEF_SVE_TYPES_ARRAY (qcvt_x4);
+DEF_SVE_TYPES_ARRAY (qrshr_x2);
+DEF_SVE_TYPES_ARRAY (qrshr_x4);
+DEF_SVE_TYPES_ARRAY (qrshru_x2);
+DEF_SVE_TYPES_ARRAY (qrshru_x4);
DEF_SVE_TYPES_ARRAY (reinterpret);
+DEF_SVE_TYPES_ARRAY (reinterpret_b);
DEF_SVE_TYPES_ARRAY (while);
+DEF_SVE_TYPES_ARRAY (while_x);
+DEF_SVE_TYPES_ARRAY (while_x_c);
+DEF_SVE_TYPES_ARRAY (s_narrow_fsu);
+DEF_SVE_TYPES_ARRAY (all_za);
+DEF_SVE_TYPES_ARRAY (d_za);
+DEF_SVE_TYPES_ARRAY (za_bhsd_data);
+DEF_SVE_TYPES_ARRAY (za_all_data);
+DEF_SVE_TYPES_ARRAY (za_s_b_signed);
+DEF_SVE_TYPES_ARRAY (za_s_b_unsigned);
+DEF_SVE_TYPES_ARRAY (za_s_b_integer);
+DEF_SVE_TYPES_ARRAY (za_s_h_integer);
+DEF_SVE_TYPES_ARRAY (za_s_h_data);
+DEF_SVE_TYPES_ARRAY (za_s_unsigned);
+DEF_SVE_TYPES_ARRAY (za_s_integer);
+DEF_SVE_TYPES_ARRAY (za_s_float);
+DEF_SVE_TYPES_ARRAY (za_s_data);
+DEF_SVE_TYPES_ARRAY (za_d_h_integer);
+DEF_SVE_TYPES_ARRAY (za_d_float);
+DEF_SVE_TYPES_ARRAY (za_d_integer);
+DEF_SVE_TYPES_ARRAY (mop_base);
+DEF_SVE_TYPES_ARRAY (mop_base_signed);
+DEF_SVE_TYPES_ARRAY (mop_base_unsigned);
+DEF_SVE_TYPES_ARRAY (mop_i16i64);
+DEF_SVE_TYPES_ARRAY (mop_i16i64_signed);
+DEF_SVE_TYPES_ARRAY (mop_i16i64_unsigned);
+DEF_SVE_TYPES_ARRAY (za);
+
+static const group_suffix_index groups_none[] = {
+ GROUP_none, NUM_GROUP_SUFFIXES
+};
+
+static const group_suffix_index groups_x2[] = { GROUP_x2, NUM_GROUP_SUFFIXES };
+
+static const group_suffix_index groups_x12[] = {
+ GROUP_none, GROUP_x2, NUM_GROUP_SUFFIXES
+};
+
+static const group_suffix_index groups_x4[] = { GROUP_x4, NUM_GROUP_SUFFIXES };
+
+static const group_suffix_index groups_x24[] = {
+ GROUP_x2, GROUP_x4, NUM_GROUP_SUFFIXES
+};
+
+static const group_suffix_index groups_x124[] = {
+ GROUP_none, GROUP_x2, GROUP_x4, NUM_GROUP_SUFFIXES
+};
+
+static const group_suffix_index groups_x1234[] = {
+ GROUP_none, GROUP_x2, GROUP_x3, GROUP_x4, NUM_GROUP_SUFFIXES
+};
+
+static const group_suffix_index groups_vg1x2[] = {
+ GROUP_vg1x2, NUM_GROUP_SUFFIXES
+};
+
+static const group_suffix_index groups_vg1x4[] = {
+ GROUP_vg1x4, NUM_GROUP_SUFFIXES
+};
+
+static const group_suffix_index groups_vg1x24[] = {
+ GROUP_vg1x2, GROUP_vg1x4, NUM_GROUP_SUFFIXES
+};
+
+static const group_suffix_index groups_vg2[] = {
+ GROUP_vg2x1, GROUP_vg2x2, GROUP_vg2x4, NUM_GROUP_SUFFIXES
+};
+
+static const group_suffix_index groups_vg4[] = {
+ GROUP_vg4x1, GROUP_vg4x2, GROUP_vg4x4, NUM_GROUP_SUFFIXES
+};
+
+static const group_suffix_index groups_vg24[] = {
+ GROUP_vg2, GROUP_vg4, NUM_GROUP_SUFFIXES
+};
/* Used by functions that have no governing predicate. */
static const predication_index preds_none[] = { PRED_none, NUM_PREDS };
@@ -490,6 +836,9 @@ static const predication_index preds_none[] = { PRED_none, NUM_PREDS };
explicit suffix. */
static const predication_index preds_implicit[] = { PRED_implicit, NUM_PREDS };
+/* Used by functions that only support "_m" predication. */
+static const predication_index preds_m[] = { PRED_m, NUM_PREDS };
+
/* Used by functions that allow merging and "don't care" predication,
but are not suitable for predicated MOVPRFX. */
static const predication_index preds_mx[] = {
@@ -521,17 +870,23 @@ static const predication_index preds_z_or_none[] = {
/* Used by (mostly predicate) functions that only support "_z" predication. */
static const predication_index preds_z[] = { PRED_z, NUM_PREDS };
+/* Used by SME instructions that always merge into ZA. */
+static const predication_index preds_za_m[] = { PRED_za_m, NUM_PREDS };
+
/* A list of all SVE ACLE functions. */
static CONSTEXPR const function_group_info function_groups[] = {
-#define DEF_SVE_FUNCTION(NAME, SHAPE, TYPES, PREDS) \
- { #NAME, &functions::NAME, &shapes::SHAPE, types_##TYPES, preds_##PREDS, \
- REQUIRED_EXTENSIONS | AARCH64_FL_SVE },
+#define DEF_SVE_FUNCTION_GS(NAME, SHAPE, TYPES, GROUPS, PREDS) \
+ { #NAME, &functions::NAME, &shapes::SHAPE, types_##TYPES, groups_##GROUPS, \
+ preds_##PREDS, REQUIRED_EXTENSIONS },
+#define DEF_SME_ZA_FUNCTION_GS(NAME, SHAPE, TYPES, GROUPS, PREDS) \
+ { #NAME, &functions::NAME##_za, &shapes::SHAPE, types_##TYPES, \
+ groups_##GROUPS, preds_##PREDS, (REQUIRED_EXTENSIONS | AARCH64_FL_ZA_ON) },
#include "aarch64-sve-builtins.def"
};
/* The scalar type associated with each vector type. */
-extern GTY(()) tree scalar_types[NUM_VECTOR_TYPES];
-tree scalar_types[NUM_VECTOR_TYPES];
+extern GTY(()) tree scalar_types[NUM_VECTOR_TYPES + 1];
+tree scalar_types[NUM_VECTOR_TYPES + 1];
/* The single-predicate and single-vector types, with their built-in
"__SV..._t" name. Allow an index of NUM_VECTOR_TYPES, which always
@@ -639,7 +994,7 @@ find_type_suffix_for_scalar_type (const_tree type)
/* A linear search should be OK here, since the code isn't hot and
the number of types is only small. */
for (unsigned int suffix_i = 0; suffix_i < NUM_TYPE_SUFFIXES; ++suffix_i)
- if (!type_suffixes[suffix_i].bool_p)
+ if (type_suffixes[suffix_i].vector_p)
{
vector_type_index vector_i = type_suffixes[suffix_i].vector_type;
if (matches_type_p (scalar_types[vector_i], type))
@@ -648,6 +1003,29 @@ find_type_suffix_for_scalar_type (const_tree type)
return NUM_TYPE_SUFFIXES;
}
+/* Return the implicit group suffix for intrinsics that operate on NVECTORS
+ vectors. */
+static group_suffix_index
+num_vectors_to_group (unsigned int nvectors)
+{
+ switch (nvectors)
+ {
+ case 1: return GROUP_none;
+ case 2: return GROUP_x2;
+ case 3: return GROUP_x3;
+ case 4: return GROUP_x4;
+ }
+ gcc_unreachable ();
+}
+
+/* Return the vector type associated with TYPE. */
+static tree
+get_vector_type (sve_type type)
+{
+ auto vector_type = type_suffixes[type.type].vector_type;
+ return acle_vector_types[type.num_vectors - 1][vector_type];
+}
+
/* Report an error against LOCATION that the user has tried to use
function FNDECL when extension EXTENSION is disabled. */
static void
@@ -700,6 +1078,27 @@ check_required_extensions (location_t location, tree fndecl,
if (missing_extensions == 0)
return check_required_registers (location, fndecl);
+ if (missing_extensions & AARCH64_FL_SM_OFF)
+ {
+ error_at (location, "ACLE function %qD cannot be called when"
+ " SME streaming mode is enabled", fndecl);
+ return false;
+ }
+
+ if (missing_extensions & AARCH64_FL_SM_ON)
+ {
+ error_at (location, "ACLE function %qD can only be called when"
+ " SME streaming mode is enabled", fndecl);
+ return false;
+ }
+
+ if (missing_extensions & AARCH64_FL_ZA_ON)
+ {
+ error_at (location, "ACLE function %qD can only be called from"
+ " a function that has %qs state", fndecl, "za");
+ return false;
+ }
+
static const struct {
aarch64_feature_flags flag;
const char *name;
@@ -735,9 +1134,13 @@ report_out_of_range (location_t location, tree fndecl, unsigned int argno,
HOST_WIDE_INT actual, HOST_WIDE_INT min,
HOST_WIDE_INT max)
{
- error_at (location, "passing %wd to argument %d of %qE, which expects"
- " a value in the range [%wd, %wd]", actual, argno + 1, fndecl,
- min, max);
+ if (min == max)
+ error_at (location, "passing %wd to argument %d of %qE, which expects"
+ " the value %wd", actual, argno + 1, fndecl, min);
+ else
+ error_at (location, "passing %wd to argument %d of %qE, which expects"
+ " a value in the range [%wd, %wd]", actual, argno + 1, fndecl,
+ min, max);
}
/* Report that LOCATION has a call to FNDECL in which argument ARGNO has
@@ -788,6 +1191,7 @@ function_instance::hash () const
h.add_int (mode_suffix_id);
h.add_int (type_suffix_ids[0]);
h.add_int (type_suffix_ids[1]);
+ h.add_int (group_suffix_id);
h.add_int (pred);
return h.end ();
}
@@ -823,7 +1227,7 @@ function_instance::reads_global_state_p () const
return true;
/* Handle direct reads of global state. */
- return flags & (CP_READ_MEMORY | CP_READ_FFR);
+ return flags & (CP_READ_MEMORY | CP_READ_FFR | CP_READ_ZA | CP_READ_ZT0);
}
/* Return true if calls to the function could modify some form of
@@ -844,7 +1248,7 @@ function_instance::modifies_global_state_p () const
return true;
/* Handle direct modifications of global state. */
- return flags & (CP_WRITE_MEMORY | CP_WRITE_FFR);
+ return flags & (CP_WRITE_MEMORY | CP_WRITE_FFR | CP_WRITE_ZA | CP_WRITE_ZT0);
}
/* Return true if calls to the function could raise a signal. */
@@ -876,8 +1280,8 @@ registered_function_hasher::equal (value_type value, const compare_type &key)
return value->instance == key;
}
-sve_switcher::sve_switcher ()
- : aarch64_simd_switcher (AARCH64_FL_F16 | AARCH64_FL_SVE)
+sve_switcher::sve_switcher (aarch64_feature_flags flags)
+ : aarch64_simd_switcher (AARCH64_FL_F16 | AARCH64_FL_SVE | flags)
{
/* Changing the ISA flags and have_regs_of_mode should be enough here.
We shouldn't need to pay the compile-time cost of a full target
@@ -933,6 +1337,10 @@ char *
function_builder::get_name (const function_instance &instance,
bool overloaded_p)
{
+ /* __arm_* functions are listed as arm_*, so that the associated GCC
+ code is not in the implementation namespace. */
+ if (strncmp (instance.base_name, "arm_", 4) == 0)
+ append_name ("__");
append_name (instance.base_name);
if (overloaded_p)
switch (instance.displacement_units ())
@@ -957,6 +1365,8 @@ function_builder::get_name (const function_instance &instance,
for (unsigned int i = 0; i < 2; ++i)
if (!overloaded_p || instance.shape->explicit_type_suffix_p (i))
append_name (instance.type_suffix (i).string);
+ if (!overloaded_p || instance.shape->explicit_group_suffix_p ())
+ append_name (instance.group_suffix ().string);
append_name (pred_suffixes[instance.pred]);
return finish_name ();
}
@@ -968,12 +1378,73 @@ add_attribute (const char *name, tree attrs)
return tree_cons (get_identifier (name), NULL_TREE, attrs);
}
-/* Return the appropriate function attributes for INSTANCE. */
+/* Add attribute NS::NAME to ATTRS. */
+static tree
+add_attribute (const char *ns, const char *name, tree value, tree attrs)
+{
+ return tree_cons (build_tree_list (get_identifier (ns),
+ get_identifier (name)),
+ value, attrs);
+}
+
+/* Attribute arm::NAME describes shared state that is an input if IS_IN
+ and an output if IS_OUT. Check whether a call with call properties
+ CALL_FLAGS needs such an attribute. Add it to in-progress attribute
+ list ATTRS if so. Return the new attribute list. */
+static tree
+add_shared_state_attribute (const char *name, bool is_in, bool is_out,
+ unsigned int call_flags, tree attrs)
+{
+ struct state_flag_info
+ {
+ const char *name;
+ unsigned int read_flag;
+ unsigned int write_flag;
+ };
+ static state_flag_info state_flags[] =
+ {
+ { "za", CP_READ_ZA, CP_WRITE_ZA },
+ { "zt0", CP_READ_ZT0, CP_WRITE_ZT0 }
+ };
+
+ tree args = NULL_TREE;
+ for (const auto &state_flag : state_flags)
+ {
+ auto all_flags = state_flag.read_flag | state_flag.write_flag;
+ auto these_flags = ((is_in ? state_flag.read_flag : 0)
+ | (is_out ? state_flag.write_flag : 0));
+ if ((call_flags & all_flags) == these_flags)
+ {
+ tree value = build_string (strlen (state_flag.name) + 1,
+ state_flag.name);
+ args = tree_cons (NULL_TREE, value, args);
+ }
+ }
+ if (args)
+ attrs = add_attribute ("arm", name, args, attrs);
+ return attrs;
+}
+
+/* Return the appropriate function attributes for INSTANCE, which requires
+ the feature flags in REQUIRED_EXTENSIONS. */
tree
-function_builder::get_attributes (const function_instance &instance)
+function_builder::get_attributes (const function_instance &instance,
+ aarch64_feature_flags required_extensions)
{
tree attrs = NULL_TREE;
+ if (required_extensions & AARCH64_FL_SM_ON)
+ attrs = add_attribute ("arm", "streaming", NULL_TREE, attrs);
+ else if (!(required_extensions & AARCH64_FL_SM_OFF))
+ attrs = add_attribute ("arm", "streaming_compatible", NULL_TREE, attrs);
+
+ attrs = add_shared_state_attribute ("in", true, false,
+ instance.call_properties (), attrs);
+ attrs = add_shared_state_attribute ("out", false, true,
+ instance.call_properties (), attrs);
+ attrs = add_shared_state_attribute ("inout", true, true,
+ instance.call_properties (), attrs);
+
if (!instance.modifies_global_state_p ())
{
if (instance.reads_global_state_p ())
@@ -1049,7 +1520,7 @@ add_unique_function (const function_instance &instance,
tree fntype = build_function_type_array (return_type,
argument_types.length (),
argument_types.address ());
- tree attrs = get_attributes (instance);
+ tree attrs = get_attributes (instance, required_extensions);
registered_function &rfn = add_function (instance, name, fntype, attrs,
required_extensions, false, false);
@@ -1066,7 +1537,7 @@ add_unique_function (const function_instance &instance,
if (strcmp (name, overload_name) != 0)
{
/* Attribute lists shouldn't be shared. */
- tree attrs = get_attributes (instance);
+ tree attrs = get_attributes (instance, required_extensions);
bool placeholder_p = !(m_direct_overloads || force_direct_overloads);
add_function (instance, overload_name, fntype, attrs,
required_extensions, false, placeholder_p);
@@ -1113,19 +1584,30 @@ void
function_builder::add_overloaded_functions (const function_group_info &group,
mode_suffix_index mode)
{
- unsigned int explicit_type0 = (*group.shape)->explicit_type_suffix_p (0);
- unsigned int explicit_type1 = (*group.shape)->explicit_type_suffix_p (1);
- for (unsigned int pi = 0; group.preds[pi] != NUM_PREDS; ++pi)
+ bool explicit_type0 = (*group.shape)->explicit_type_suffix_p (0);
+ bool explicit_type1 = (*group.shape)->explicit_type_suffix_p (1);
+ bool explicit_group = (*group.shape)->explicit_group_suffix_p ();
+ auto add_function = [&](const type_suffix_pair &types,
+ group_suffix_index group_suffix_id,
+ unsigned int pi)
{
+ function_instance instance (group.base_name, *group.base,
+ *group.shape, mode, types,
+ group_suffix_id, group.preds[pi]);
+ add_overloaded_function (instance, group.required_extensions);
+ };
+
+ auto add_group_suffix = [&](group_suffix_index group_suffix_id,
+ unsigned int pi)
+ {
+ if (mode == MODE_single
+ && group_suffixes[group_suffix_id].vectors_per_tuple == 1)
+ return;
+
if (!explicit_type0 && !explicit_type1)
- {
- /* Deal with the common case in which there is one overloaded
- function for all type combinations. */
- function_instance instance (group.base_name, *group.base,
- *group.shape, mode, types_none[0],
- group.preds[pi]);
- add_overloaded_function (instance, group.required_extensions);
- }
+ /* Deal with the common case in which there is one overloaded
+ function for all type combinations. */
+ add_function (types_none[0], group_suffix_id, pi);
else
for (unsigned int ti = 0; group.types[ti][0] != NUM_TYPE_SUFFIXES;
++ti)
@@ -1136,12 +1618,16 @@ function_builder::add_overloaded_functions (const function_group_info &group,
explicit_type0 ? group.types[ti][0] : NUM_TYPE_SUFFIXES,
explicit_type1 ? group.types[ti][1] : NUM_TYPE_SUFFIXES
};
- function_instance instance (group.base_name, *group.base,
- *group.shape, mode, types,
- group.preds[pi]);
- add_overloaded_function (instance, group.required_extensions);
+ add_function (types, group_suffix_id, pi);
}
- }
+ };
+
+ for (unsigned int pi = 0; group.preds[pi] != NUM_PREDS; ++pi)
+ if (explicit_group)
+ for (unsigned int gi = 0; group.groups[gi] != NUM_GROUP_SUFFIXES; ++gi)
+ add_group_suffix (group.groups[gi], pi);
+ else
+ add_group_suffix (GROUP_none, pi);
}
/* Register all the functions in GROUP. */
@@ -1165,13 +1651,6 @@ function_resolver::function_resolver (location_t location,
{
}
-/* Return the vector type associated with type suffix TYPE. */
-tree
-function_resolver::get_vector_type (type_suffix_index type)
-{
- return acle_vector_types[0][type_suffixes[type].vector_type];
-}
-
/* Return the <stdint.h> name associated with TYPE. Using the <stdint.h>
name should be more user-friendly than the underlying canonical type,
since it makes the signedness and bitwidth explicit. */
@@ -1202,10 +1681,78 @@ function_resolver::scalar_argument_p (unsigned int i)
|| SCALAR_FLOAT_TYPE_P (type));
}
-/* Report that the function has no form that takes type suffix TYPE.
+/* Report that argument ARGNO was expected to have NUM_VECTORS vectors.
+ TYPE is the type that ARGNO actually has. */
+void
+function_resolver::report_incorrect_num_vectors (unsigned int argno,
+ sve_type type,
+ unsigned int num_vectors)
+{
+ if (num_vectors == 1)
+ error_at (location, "passing %qT to argument %d of %qE, which"
+ " expects a single SVE vector rather than a tuple",
+ get_vector_type (type), argno + 1, fndecl);
+ else if (type.num_vectors == 1
+ && type.type != TYPE_SUFFIX_b)
+ /* num_vectors is always != 1, so the singular isn't needed. */
+ error_n (location, num_vectors, "%qT%d%qE%d",
+ "passing single vector %qT to argument %d"
+ " of %qE, which expects a tuple of %d vectors",
+ get_vector_type (type), argno + 1, fndecl, num_vectors);
+ else
+ /* num_vectors is always != 1, so the singular isn't needed. */
+ error_n (location, num_vectors, "%qT%d%qE%d",
+ "passing %qT to argument %d of %qE, which"
+ " expects a tuple of %d vectors", get_vector_type (type),
+ argno + 1, fndecl, num_vectors);
+}
+
+/* Report that arguments FIRST_ARGNO and ARGNO have different numbers
+ of vectors, but are required to have the same number of vectors.
+ FIRST_TYPE and TYPE are the types that arguments FIRST_ARGNO and
+ ARGNO actually have. */
+void
+function_resolver::report_mismatched_num_vectors (unsigned int first_argno,
+ sve_type first_type,
+ unsigned int argno,
+ sve_type type)
+{
+ /* If the tuple size is implied by the group suffix, and if the first
+ type had the right number of vectors, treat argument ARGNO as being
+ individually wrong, rather than wrong in relation to FIRST_ARGNO. */
+ if (group_suffix_id != GROUP_none
+ && first_type.num_vectors == vectors_per_tuple ())
+ {
+ report_incorrect_num_vectors (argno, type, first_type.num_vectors);
+ return;
+ }
+
+ /* Make sure that FIRST_TYPE itself is sensible before using it
+ as a basis for an error message. */
+ if (resolve_to (mode_suffix_id, first_type) == error_mark_node)
+ return;
+
+ if (type.num_vectors != 1 && first_type.num_vectors == 1)
+ error_at (location, "passing tuple %qT to argument %d of %qE after"
+ " passing single vector %qT to argument %d",
+ get_vector_type (type), argno + 1, fndecl,
+ get_vector_type (first_type), first_argno + 1);
+ else if (type.num_vectors == 1 && first_type.num_vectors != 1)
+ error_at (location, "passing single vector %qT to argument %d"
+ " of %qE after passing tuple %qT to argument %d",
+ get_vector_type (type), argno + 1, fndecl,
+ get_vector_type (first_type), first_argno + 1);
+ else
+ error_at (location, "passing mismatched tuple types %qT and %qT"
+ " to arguments %d and %d of %qE",
+ get_vector_type (first_type), get_vector_type (type),
+ first_argno + 1, argno + 1, fndecl);
+}
+
+/* Report that the function has no form that takes type TYPE.
Return error_mark_node. */
tree
-function_resolver::report_no_such_form (type_suffix_index type)
+function_resolver::report_no_such_form (sve_type type)
{
error_at (location, "%qE has no form that takes %qT arguments",
fndecl, get_vector_type (type));
@@ -1213,41 +1760,129 @@ function_resolver::report_no_such_form (type_suffix_index type)
}
/* Silently check whether there is an instance of the function with the
- mode suffix given by MODE and the type suffixes given by TYPE0 and TYPE1.
- Return its function decl if so, otherwise return null. */
+ mode suffix given by MODE, the type suffixes given by TYPE0 and TYPE1,
+ and the group suffix given by GROUP. Return its function decl if so,
+ otherwise return null. */
tree
function_resolver::lookup_form (mode_suffix_index mode,
type_suffix_index type0,
- type_suffix_index type1)
+ type_suffix_index type1,
+ group_suffix_index group)
{
type_suffix_pair types = { type0, type1 };
- function_instance instance (base_name, base, shape, mode, types, pred);
+ function_instance instance (base_name, base, shape, mode, types,
+ group, pred);
registered_function *rfn
= function_table->find_with_hash (instance, instance.hash ());
return rfn ? rfn->decl : NULL_TREE;
}
-/* Resolve the function to one with the mode suffix given by MODE and the
- type suffixes given by TYPE0 and TYPE1. Return its function decl on
- success, otherwise report an error and return error_mark_node. */
+/* Silently check whether there is an instance of the function that has the
+ mode suffix given by MODE and the type and group suffixes implied by TYPE.
+ If the overloaded function has an explicit first type suffix (like
+ conversions do), TYPE describes the implicit second type suffix.
+ Otherwise, TYPE describes the only type suffix.
+
+ Return the decl of the function if it exists, otherwise return null. */
+tree
+function_resolver::lookup_form (mode_suffix_index mode, sve_type type)
+{
+ type_suffix_index type0 = type_suffix_ids[0];
+ type_suffix_index type1 = type_suffix_ids[1];
+ (type0 == NUM_TYPE_SUFFIXES ? type0 : type1) = type.type;
+
+ group_suffix_index group = group_suffix_id;
+ if (group == GROUP_none && type.num_vectors != vectors_per_tuple ())
+ group = num_vectors_to_group (type.num_vectors);
+
+ return lookup_form (mode, type0, type1, group);
+}
+
+/* Resolve the function to one with the mode suffix given by MODE, the
+ type suffixes given by TYPE0 and TYPE1, and group suffix given by
+ GROUP. Return its function decl on success, otherwise report an
+ error and return error_mark_node. */
tree
function_resolver::resolve_to (mode_suffix_index mode,
type_suffix_index type0,
- type_suffix_index type1)
+ type_suffix_index type1,
+ group_suffix_index group)
{
- tree res = lookup_form (mode, type0, type1);
+ tree res = lookup_form (mode, type0, type1, group);
if (!res)
{
if (type1 == NUM_TYPE_SUFFIXES)
return report_no_such_form (type0);
if (type0 == type_suffix_ids[0])
return report_no_such_form (type1);
- /* To be filled in when we have other cases. */
- gcc_unreachable ();
+ error_at (location, "%qE has no form that takes %qT and %qT arguments",
+ fndecl, get_vector_type (type0), get_vector_type (type1));
+ return error_mark_node;
}
return res;
}
+/* Resolve the function to one that has the suffixes associated with MODE
+ and TYPE; see lookup_form for how TYPE is interpreted. Return the
+ function decl on success, otherwise report an error and return
+ error_mark_node. */
+tree
+function_resolver::resolve_to (mode_suffix_index mode, sve_type type)
+{
+ if (tree res = lookup_form (mode, type))
+ return res;
+
+ return report_no_such_form (type);
+}
+
+/* Like resolve_to, but used for a conversion function with the following
+ properties:
+
+ - The function has an explicit first type suffix.
+ - The elements of the argument (which has type TYPE) might be narrower
+ or wider than the elements of the return type.
+ - The return type has enough vectors to represent the converted value
+ of every element.
+ - The group suffix describes the wider of the argument type and the
+ return type. */
+tree
+function_resolver::resolve_conversion (mode_suffix_index mode, sve_type type)
+{
+ auto ret_type = type_suffix_ids[0];
+ unsigned int num_ret_vectors = (type.num_vectors
+ * type_suffixes[ret_type].element_bits
+ / type_suffixes[type.type].element_bits);
+ if (num_ret_vectors == 1
+ || num_ret_vectors == 2
+ || num_ret_vectors == 4)
+ {
+ unsigned int num_vectors = MAX (num_ret_vectors, type.num_vectors);
+ if (tree res = lookup_form (mode, { type.type, num_vectors }))
+ return res;
+ }
+ return report_no_such_form (type);
+}
+
+/* Require argument ARGNO to be an svbool_t or svcount_t predicate.
+ Return its type on success, otherwise report an error and return
+ NUM_VECTOR_TYPES. */
+vector_type_index
+function_resolver::infer_predicate_type (unsigned int argno)
+{
+ tree actual = get_argument_type (argno);
+ if (actual == error_mark_node)
+ return NUM_VECTOR_TYPES;
+
+ for (auto index : { VECTOR_TYPE_svbool_t, VECTOR_TYPE_svcount_t })
+ if (matches_type_p (acle_vector_types[0][index], actual))
+ return index;
+
+ error_at (location, "passing %qT to argument %d of %qE, which expects"
+ " an %qs or %qs", actual, argno + 1, fndecl, "svbool_t",
+ "svcount_t");
+ return NUM_VECTOR_TYPES;
+}
+
/* Require argument ARGNO to be a 32-bit or 64-bit scalar integer type.
Return the associated type suffix on success, otherwise report an
error and return NUM_TYPE_SUFFIXES. */
@@ -1278,6 +1913,50 @@ function_resolver::infer_integer_scalar_type (unsigned int argno)
return NUM_TYPE_SUFFIXES;
}
+/* Return arguments ARGNO and ARGNO + 1 to be 64-bit scalar integers
+ of the same signedness, or be a combination that converts unambiguously
+ to such a pair. Return the associated type suffix if they are,
+ otherwise report an error and return NUM_TYPE_SUFFIXES. */
+type_suffix_index
+function_resolver::infer_64bit_scalar_integer_pair (unsigned int argno)
+{
+ /* Require two scalar integers, with one having 64 bits and the other
+ one being no bigger. */
+ tree types[] = { get_argument_type (argno), get_argument_type (argno + 1) };
+ if (!INTEGRAL_TYPE_P (types[0])
+ || !INTEGRAL_TYPE_P (types[1])
+ || MAX (TYPE_PRECISION (types[0]), TYPE_PRECISION (types[1])) != 64)
+ {
+ error_at (location, "passing %qT and %qT to arguments %d and %d of %qE,"
+ " which expects a pair of 64-bit integers", types[0], types[1],
+ argno + 1, argno + 2, fndecl);
+ return NUM_TYPE_SUFFIXES;
+ }
+
+ /* Allow signed integers smaller than int64_t to be paired with an int64_t.
+ Allow unsigned integers smaller than uint64_t to be paired with any
+ 64-bit integer. */
+ for (int i = 0; i < 2; ++i)
+ {
+ if (TYPE_PRECISION (types[i]) != 64)
+ continue;
+
+ if (TYPE_UNSIGNED (types[1 - i]) != TYPE_UNSIGNED (types[i]))
+ {
+ if (TYPE_PRECISION (types[1 - i]) == 64)
+ continue;
+ if (!TYPE_UNSIGNED (types[1 - i]))
+ continue;
+ }
+ return TYPE_UNSIGNED (types[i]) ? TYPE_SUFFIX_u64 : TYPE_SUFFIX_s64;
+ }
+
+ error_at (location, "passing mismatched integer types %qT and %qT"
+ " to arguments %d and %d of %qE", types[0], types[1],
+ argno + 1, argno + 2, fndecl);
+ return NUM_TYPE_SUFFIXES;
+}
+
/* Require argument ARGNO to be a pointer to a scalar type that has a
corresponding type suffix. Return that type suffix on success,
otherwise report an error and return NUM_TYPE_SUFFIXES.
@@ -1322,57 +2001,65 @@ function_resolver::infer_pointer_type (unsigned int argno,
return type;
}
-/* Require argument ARGNO to be a single vector or a tuple of NUM_VECTORS
- vectors; NUM_VECTORS is 1 for the former. Return the associated type
- suffix on success, using TYPE_SUFFIX_b for predicates. Report an error
- and return NUM_TYPE_SUFFIXES on failure. */
-type_suffix_index
-function_resolver::infer_vector_or_tuple_type (unsigned int argno,
- unsigned int num_vectors)
+/* If TYPE is an SVE predicate or vector type, or a tuple of such a type,
+ return the associated sve_type, otherwise return an invalid sve_type. */
+static sve_type
+find_sve_type (const_tree type)
{
- tree actual = get_argument_type (argno);
- if (actual == error_mark_node)
- return NUM_TYPE_SUFFIXES;
-
/* A linear search should be OK here, since the code isn't hot and
the number of types is only small. */
for (unsigned int size_i = 0; size_i < MAX_TUPLE_SIZE; ++size_i)
for (unsigned int suffix_i = 0; suffix_i < NUM_TYPE_SUFFIXES; ++suffix_i)
{
vector_type_index type_i = type_suffixes[suffix_i].vector_type;
- tree type = acle_vector_types[size_i][type_i];
- if (type && matches_type_p (type, actual))
- {
- if (size_i + 1 == num_vectors)
- return type_suffix_index (suffix_i);
-
- if (num_vectors == 1)
- error_at (location, "passing %qT to argument %d of %qE, which"
- " expects a single SVE vector rather than a tuple",
- actual, argno + 1, fndecl);
- else if (size_i == 0 && type_i != VECTOR_TYPE_svbool_t)
- /* num_vectors is always != 1, so the singular isn't needed. */
- error_n (location, num_vectors, "%qT%d%qE%d",
- "passing single vector %qT to argument %d"
- " of %qE, which expects a tuple of %d vectors",
- actual, argno + 1, fndecl, num_vectors);
- else
- /* num_vectors is always != 1, so the singular isn't needed. */
- error_n (location, num_vectors, "%qT%d%qE%d",
- "passing %qT to argument %d of %qE, which"
- " expects a tuple of %d vectors", actual, argno + 1,
- fndecl, num_vectors);
- return NUM_TYPE_SUFFIXES;
- }
+ tree this_type = acle_vector_types[size_i][type_i];
+ if (this_type && matches_type_p (this_type, type))
+ return { type_suffix_index (suffix_i), size_i + 1 };
}
- if (num_vectors == 1)
+ return {};
+}
+
+/* Require argument ARGNO to be an SVE type (i.e. something that can be
+ represented by sve_type). Return the (valid) type if it is, otherwise
+ report an error and return an invalid type. */
+sve_type
+function_resolver::infer_sve_type (unsigned int argno)
+{
+ tree actual = get_argument_type (argno);
+ if (actual == error_mark_node)
+ return {};
+
+ if (sve_type type = find_sve_type (actual))
+ return type;
+
+ if (scalar_argument_p (argno))
error_at (location, "passing %qT to argument %d of %qE, which"
- " expects an SVE vector type", actual, argno + 1, fndecl);
+ " expects an SVE type rather than a scalar type",
+ actual, argno + 1, fndecl);
else
error_at (location, "passing %qT to argument %d of %qE, which"
- " expects an SVE tuple type", actual, argno + 1, fndecl);
- return NUM_TYPE_SUFFIXES;
+ " expects an SVE type",
+ actual, argno + 1, fndecl);
+ return {};
+}
+
+/* Require argument ARGNO to be a single vector or a tuple of NUM_VECTORS
+ vectors; NUM_VECTORS is 1 for the former. Return the associated type
+ on success. Report an error on failure. */
+sve_type
+function_resolver::infer_vector_or_tuple_type (unsigned int argno,
+ unsigned int num_vectors)
+{
+ auto type = infer_sve_type (argno);
+ if (!type)
+ return type;
+
+ if (type.num_vectors == num_vectors)
+ return type;
+
+ report_incorrect_num_vectors (argno, type, num_vectors);
+ return {};
}
/* Require argument ARGNO to have some form of vector type. Return the
@@ -1381,7 +2068,9 @@ function_resolver::infer_vector_or_tuple_type (unsigned int argno,
type_suffix_index
function_resolver::infer_vector_type (unsigned int argno)
{
- return infer_vector_or_tuple_type (argno, 1);
+ if (auto type = infer_vector_or_tuple_type (argno, 1))
+ return type.type;
+ return NUM_TYPE_SUFFIXES;
}
/* Like infer_vector_type, but also require the type to be integral. */
@@ -1446,15 +2135,45 @@ function_resolver::infer_sd_vector_type (unsigned int argno)
/* If the function operates on tuples of vectors, require argument ARGNO to be
a tuple with the appropriate number of vectors, otherwise require it to be
- a single vector. Return the associated type suffix on success, using
- TYPE_SUFFIX_b for predicates. Report an error and return NUM_TYPE_SUFFIXES
+ a single vector. Return the associated type on success. Report an error
on failure. */
-type_suffix_index
+sve_type
function_resolver::infer_tuple_type (unsigned int argno)
{
return infer_vector_or_tuple_type (argno, vectors_per_tuple ());
}
+/* PRED_TYPE is the type of a governing predicate argument and DATA_TYPE
+ is the type of an argument that it predicates. Require the two types
+ to "agree": svcount_t must be used for multiple vectors and svbool_t
+ for single vectors.
+
+ Return true if they do agree, otherwise report an error and
+ return false. */
+bool function_resolver::
+require_matching_predicate_type (vector_type_index pred_type,
+ sve_type data_type)
+{
+ if (pred_type == VECTOR_TYPE_svbool_t && data_type.num_vectors == 1)
+ return true;
+
+ if (pred_type == VECTOR_TYPE_svcount_t && data_type.num_vectors != 1)
+ return true;
+
+ /* Make sure that FIRST_TYPE itself is sensible before using it
+ as a basis for an error message. */
+ if (resolve_to (mode_suffix_id, data_type) == error_mark_node)
+ return false;
+
+ if (data_type.num_vectors > 1)
+ error_at (location, "operations on multiple vectors must be predicated"
+ " by %qs rather than %qs", "svcount_t", "svbool_t");
+ else
+ error_at (location, "operations on single vectors must be predicated"
+ " by %qs rather than %qs", "svbool_t", "svcount_t");
+ return false;
+}
+
/* Require argument ARGNO to be a vector or scalar argument. Return true
if it is, otherwise report an appropriate error. */
bool
@@ -1495,29 +2214,37 @@ function_resolver::require_vector_type (unsigned int argno,
return true;
}
-/* Like require_vector_type, but TYPE is inferred from previous arguments
+/* Like require_vector_type, but TYPE is inferred from argument FIRST_ARGNO
rather than being a fixed part of the function signature. This changes
the nature of the error messages. */
bool
function_resolver::require_matching_vector_type (unsigned int argno,
- type_suffix_index type)
+ unsigned int first_argno,
+ sve_type type)
{
- type_suffix_index new_type = infer_vector_type (argno);
- if (new_type == NUM_TYPE_SUFFIXES)
+ sve_type new_type = infer_sve_type (argno);
+ if (!new_type)
return false;
+ if (type.num_vectors != new_type.num_vectors)
+ {
+ report_mismatched_num_vectors (first_argno, type, argno, new_type);
+ return false;
+ }
+
if (type != new_type)
{
error_at (location, "passing %qT to argument %d of %qE, but"
- " previous arguments had type %qT",
+ " argument %d had type %qT",
get_vector_type (new_type), argno + 1, fndecl,
- get_vector_type (type));
+ first_argno + 1, get_vector_type (type));
return false;
}
return true;
}
-/* Require argument ARGNO to be a vector type with the following properties:
+/* Require argument ARGNO to be a vector or tuple type with the following
+ properties:
- the type class must be the same as FIRST_TYPE's if EXPECTED_TCLASS
is SAME_TYPE_CLASS, otherwise it must be EXPECTED_TCLASS itself.
@@ -1529,6 +2256,9 @@ function_resolver::require_matching_vector_type (unsigned int argno,
- a quarter of FIRST_TYPE's if EXPECTED_BITS == QUARTER_SIZE
- EXPECTED_BITS itself otherwise
+ - the number of vectors must be the same as FIRST_TYPE's if
+ EXPECTED_NUM_VECTORS is zero, otherwise it must be EXPECTED_NUM_VECTORS.
+
Return true if the argument has the required type, otherwise report
an appropriate error.
@@ -1546,40 +2276,45 @@ function_resolver::require_matching_vector_type (unsigned int argno,
bool function_resolver::
require_derived_vector_type (unsigned int argno,
unsigned int first_argno,
- type_suffix_index first_type,
+ sve_type first_type,
type_class_index expected_tclass,
- unsigned int expected_bits)
+ unsigned int expected_bits,
+ unsigned int expected_num_vectors)
{
/* If the type needs to match FIRST_ARGNO exactly, use the preferred
- error message for that case. The VECTOR_TYPE_P test excludes tuple
- types, which we handle below instead. */
- bool both_vectors_p = VECTOR_TYPE_P (get_argument_type (first_argno));
- if (both_vectors_p
- && expected_tclass == SAME_TYPE_CLASS
- && expected_bits == SAME_SIZE)
+ error message for that case. */
+ if (expected_tclass == SAME_TYPE_CLASS
+ && expected_bits == SAME_SIZE
+ && expected_num_vectors == 0)
{
/* There's no need to resolve this case out of order. */
gcc_assert (argno > first_argno);
- return require_matching_vector_type (argno, first_type);
+ return require_matching_vector_type (argno, first_argno, first_type);
}
/* Use FIRST_TYPE to get the expected type class and element size. */
+ auto &first_type_suffix = type_suffixes[first_type.type];
type_class_index orig_expected_tclass = expected_tclass;
if (expected_tclass == NUM_TYPE_CLASSES)
- expected_tclass = type_suffixes[first_type].tclass;
+ expected_tclass = first_type_suffix.tclass;
unsigned int orig_expected_bits = expected_bits;
if (expected_bits == SAME_SIZE)
- expected_bits = type_suffixes[first_type].element_bits;
+ expected_bits = first_type_suffix.element_bits;
else if (expected_bits == HALF_SIZE)
- expected_bits = type_suffixes[first_type].element_bits / 2;
+ expected_bits = first_type_suffix.element_bits / 2;
else if (expected_bits == QUARTER_SIZE)
- expected_bits = type_suffixes[first_type].element_bits / 4;
+ expected_bits = first_type_suffix.element_bits / 4;
+
+ unsigned int orig_expected_num_vectors = expected_num_vectors;
+ if (expected_num_vectors == 0)
+ expected_num_vectors = first_type.num_vectors;
/* If the expected type doesn't depend on FIRST_TYPE at all,
just check for the fixed choice of vector type. */
if (expected_tclass == orig_expected_tclass
- && expected_bits == orig_expected_bits)
+ && expected_bits == orig_expected_bits
+ && orig_expected_num_vectors == 1)
{
const type_suffix_info &expected_suffix
= type_suffixes[find_type_suffix (expected_tclass, expected_bits)];
@@ -1588,13 +2323,44 @@ require_derived_vector_type (unsigned int argno,
/* Require the argument to be some form of SVE vector type,
without being specific about the type of vector we want. */
- type_suffix_index actual_type = infer_vector_type (argno);
- if (actual_type == NUM_TYPE_SUFFIXES)
+ sve_type actual_type = infer_sve_type (argno);
+ if (!actual_type)
return false;
+ if (actual_type.num_vectors != expected_num_vectors)
+ {
+ if (orig_expected_num_vectors == 0)
+ report_mismatched_num_vectors (first_argno, first_type,
+ argno, actual_type);
+ else
+ report_incorrect_num_vectors (argno, actual_type,
+ expected_num_vectors);
+ return false;
+ }
+
+ if (orig_expected_tclass == SAME_TYPE_CLASS
+ && orig_expected_bits == SAME_SIZE)
+ {
+ if (actual_type.type == first_type.type)
+ return true;
+
+ if (first_type.num_vectors > 1)
+ error_at (location, "passing %qT to argument %d of %qE, but"
+ " argument %d was a tuple of %qT",
+ get_vector_type (actual_type), argno + 1, fndecl,
+ first_argno + 1, get_vector_type (first_type.type));
+ else
+ error_at (location, "passing %qT to argument %d of %qE, but"
+ " argument %d had type %qT",
+ get_vector_type (actual_type), argno + 1, fndecl,
+ first_argno + 1, get_vector_type (first_type));
+ return false;
+ }
+
/* Exit now if we got the right type. */
- bool tclass_ok_p = (type_suffixes[actual_type].tclass == expected_tclass);
- bool size_ok_p = (type_suffixes[actual_type].element_bits == expected_bits);
+ auto &actual_type_suffix = type_suffixes[actual_type.type];
+ bool tclass_ok_p = (actual_type_suffix.tclass == expected_tclass);
+ bool size_ok_p = (actual_type_suffix.element_bits == expected_bits);
if (tclass_ok_p && size_ok_p)
return true;
@@ -1602,10 +2368,16 @@ require_derived_vector_type (unsigned int argno,
size requirement, without having to refer to FIRST_TYPE. */
if (!size_ok_p && expected_bits == orig_expected_bits)
{
- error_at (location, "passing %qT to argument %d of %qE, which"
- " expects a vector of %d-bit elements",
- get_vector_type (actual_type), argno + 1, fndecl,
- expected_bits);
+ if (expected_num_vectors == 1)
+ error_at (location, "passing %qT to argument %d of %qE, which"
+ " expects a vector of %d-bit elements",
+ get_vector_type (actual_type), argno + 1, fndecl,
+ expected_bits);
+ else
+ error_at (location, "passing %qT to argument %d of %qE, which"
+ " expects vectors of %d-bit elements",
+ get_vector_type (actual_type), argno + 1, fndecl,
+ expected_bits);
return false;
}
@@ -1614,16 +2386,30 @@ require_derived_vector_type (unsigned int argno,
translation work for other type classes. */
if (!tclass_ok_p && orig_expected_tclass == TYPE_signed)
{
- error_at (location, "passing %qT to argument %d of %qE, which"
- " expects a vector of signed integers",
- get_vector_type (actual_type), argno + 1, fndecl);
+ if (expected_num_vectors == 1)
+ error_at (location, "passing %qT to argument %d of %qE, which"
+ " expects a vector of signed integers",
+ get_vector_type (actual_type), argno + 1, fndecl);
+ else
+ /* Translation note: could also be written "expects a tuple of
+ signed integer vectors". */
+ error_at (location, "passing %qT to argument %d of %qE, which"
+ " expects vectors of signed integers",
+ get_vector_type (actual_type), argno + 1, fndecl);
return false;
}
if (!tclass_ok_p && orig_expected_tclass == TYPE_unsigned)
{
- error_at (location, "passing %qT to argument %d of %qE, which"
- " expects a vector of unsigned integers",
- get_vector_type (actual_type), argno + 1, fndecl);
+ if (expected_num_vectors == 1)
+ error_at (location, "passing %qT to argument %d of %qE, which"
+ " expects a vector of unsigned integers",
+ get_vector_type (actual_type), argno + 1, fndecl);
+ else
+ /* Translation note: could also be written "expects a tuple of
+ unsigned integer vectors". */
+ error_at (location, "passing %qT to argument %d of %qE, which"
+ " expects vectors of unsigned integers",
+ get_vector_type (actual_type), argno + 1, fndecl);
return false;
}
@@ -1634,7 +2420,7 @@ require_derived_vector_type (unsigned int argno,
/* If the arguments have consistent type classes, but a link between
the sizes has been broken, try to describe the error in those terms. */
- if (both_vectors_p && tclass_ok_p && orig_expected_bits == SAME_SIZE)
+ if (tclass_ok_p && orig_expected_bits == SAME_SIZE)
{
if (argno < first_argno)
{
@@ -1651,11 +2437,10 @@ require_derived_vector_type (unsigned int argno,
/* Likewise in reverse: look for cases in which the sizes are consistent
but a link between the type classes has been broken. */
- if (both_vectors_p
- && size_ok_p
+ if (size_ok_p
&& orig_expected_tclass == SAME_TYPE_CLASS
- && type_suffixes[first_type].integer_p
- && type_suffixes[actual_type].integer_p)
+ && first_type_suffix.integer_p
+ && actual_type_suffix.integer_p)
{
if (argno < first_argno)
{
@@ -1712,9 +2497,28 @@ function_resolver::require_scalar_type (unsigned int argno,
{
if (!scalar_argument_p (argno))
{
+ if (expected)
+ error_at (location, "passing %qT to argument %d of %qE, which"
+ " expects %qs", get_argument_type (argno), argno + 1,
+ fndecl, expected);
+ return false;
+ }
+ return true;
+}
+
+/* Require argument ARGNO to be a nonscalar type, given that it has already
+ passed require_vector_or_scalar_type. Return true if it is, otherwise
+ report an error. This is used when two sets of instructions share the
+ same overloaded function and one accepts scalars while the other
+ doesn't. */
+bool
+function_resolver::require_nonscalar_type (unsigned int argno)
+{
+ if (scalar_argument_p (argno))
+ {
error_at (location, "passing %qT to argument %d of %qE, which"
- " expects %qs", get_argument_type (argno), argno + 1,
- fndecl, expected);
+ " does not accept scalars for this combination of arguments",
+ get_argument_type (argno), argno + 1, fndecl);
return false;
}
return true;
@@ -2141,14 +2945,15 @@ bool
function_resolver::check_gp_argument (unsigned int nops,
unsigned int &i, unsigned int &nargs)
{
+ gcc_assert (pred != PRED_za_m);
i = 0;
if (pred != PRED_none)
{
/* Unary merge operations should use resolve_unary instead. */
- gcc_assert (nops != 1 || pred != PRED_m);
+ gcc_assert (!shape->has_merge_argument_p (*this, nops));
nargs = nops + 1;
if (!check_num_arguments (nargs)
- || !require_vector_type (i, VECTOR_TYPE_svbool_t))
+ || !require_vector_type (i, gp_type_index ()))
return false;
i += 1;
}
@@ -2218,6 +3023,58 @@ finish_opt_n_resolution (unsigned int argno, unsigned int first_argno,
return resolve_to (mode_suffix_id, inferred_type);
}
+/* Finish resolving a function whose final argument can be a tuple
+ or a vector, with the function having an implicit "_single" suffix
+ in the latter case. This "_single" form might only exist for certain
+ type suffixes.
+
+ ARGNO is the index of the final argument. The inferred type suffix
+ was obtained from argument FIRST_ARGNO, which has type FIRST_TYPE.
+ EXPECTED_TCLASS gives the expected type class for the final tuple
+ or vector.
+
+ Return the function decl of the resolved function on success,
+ otherwise report a suitable error and return error_mark_node. */
+tree function_resolver::
+finish_opt_single_resolution (unsigned int argno, unsigned int first_argno,
+ sve_type first_type,
+ type_class_index expected_tclass)
+{
+ sve_type new_type = infer_sve_type (argno);
+ if (!new_type)
+ return error_mark_node;
+
+ /* If the type is a tuple, require it to match the group suffix. */
+ unsigned int num_vectors = vectors_per_tuple ();
+ if (num_vectors != 1
+ && new_type.num_vectors != 1
+ && new_type.num_vectors != num_vectors)
+ {
+ report_incorrect_num_vectors (argno, new_type, num_vectors);
+ return error_mark_node;
+ }
+
+ auto expected_num_vectors = (new_type.num_vectors == 1 ? 1 : 0);
+ if (!require_derived_vector_type (argno, first_argno, first_type,
+ expected_tclass, SAME_SIZE,
+ expected_num_vectors))
+ return error_mark_node;
+
+ if (new_type.num_vectors == 1 && first_type.num_vectors > 1)
+ {
+ if (tree single_form = lookup_form (MODE_single, first_type))
+ return single_form;
+
+ if (resolve_to (mode_suffix_id, first_type) != error_mark_node)
+ error_at (location, "passing %qT to argument %d of %qE, but its"
+ " %qT form does not accept single vectors",
+ get_vector_type (new_type), argno + 1, fndecl,
+ get_vector_type (first_type));
+ return error_mark_node;
+ }
+ return resolve_to (mode_suffix_id, first_type);
+}
+
/* Resolve a (possibly predicated) unary function. If the function uses
merge predication or if TREAT_AS_MERGE_P is true, there is an extra
vector argument before the governing predicate that specifies the
@@ -2248,7 +3105,7 @@ function_resolver::resolve_unary (type_class_index merge_tclass,
so we can use normal left-to-right resolution. */
if ((type = infer_vector_type (0)) == NUM_TYPE_SUFFIXES
|| !require_vector_type (1, VECTOR_TYPE_svbool_t)
- || !require_matching_vector_type (2, type))
+ || !require_matching_vector_type (2, 0, type))
return error_mark_node;
}
else
@@ -2293,9 +3150,9 @@ function_resolver::resolve_uniform (unsigned int nops, unsigned int nimm)
|| (type = infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
return error_mark_node;
- i += 1;
+ unsigned int first_arg = i++;
for (; i < nargs - nimm; ++i)
- if (!require_matching_vector_type (i, type))
+ if (!require_matching_vector_type (i, first_arg, type))
return error_mark_node;
for (; i < nargs; ++i)
@@ -2324,7 +3181,7 @@ function_resolver::resolve_uniform_opt_n (unsigned int nops)
unsigned int first_arg = i++;
for (; i < nargs - 1; ++i)
- if (!require_matching_vector_type (i, type))
+ if (!require_matching_vector_type (i, first_arg, type))
return error_mark_node;
return finish_opt_n_resolution (i, first_arg, type);
@@ -2346,9 +3203,7 @@ function_checker::function_checker (location_t location,
unsigned int nargs, tree *args)
: function_call_info (location, instance, fndecl),
m_fntype (fntype), m_nargs (nargs), m_args (args),
- /* We don't have to worry about unary _m operations here, since they
- never have arguments that need checking. */
- m_base_arg (pred != PRED_none ? 1 : 0)
+ m_base_arg (pred != PRED_none && pred != PRED_za_m ? 1 : 0)
{
}
@@ -2404,7 +3259,7 @@ function_checker::require_immediate_either_or (unsigned int rel_argno,
if (actual != value0 && actual != value1)
{
- report_neither_nor (location, fndecl, argno, actual, 90, 270);
+ report_neither_nor (location, fndecl, argno, actual, value0, value1);
return false;
}
@@ -2440,20 +3295,26 @@ function_checker::require_immediate_enum (unsigned int rel_argno, tree type)
return false;
}
-/* Check that argument REL_ARGNO is suitable for indexing argument
- REL_ARGNO - 1, in groups of GROUP_SIZE elements. REL_ARGNO counts
- from the end of the predication arguments. */
+/* The intrinsic conceptually divides vector argument REL_VEC_ARGNO into
+ groups of GROUP_SIZE elements. Return true if argument REL_ARGNO is
+ a suitable constant index for selecting one of these groups. The
+ selection happens within a 128-bit quadword, rather than the whole vector.
+
+ REL_ARGNO and REL_VEC_ARGNO count from the end of the predication
+ arguments. */
bool
function_checker::require_immediate_lane_index (unsigned int rel_argno,
+ unsigned int rel_vec_argno,
unsigned int group_size)
{
unsigned int argno = m_base_arg + rel_argno;
if (!argument_exists_p (argno))
return true;
- /* Get the type of the previous argument. tree_argument_type wants a
- 1-based number, whereas ARGNO is 0-based. */
- machine_mode mode = TYPE_MODE (type_argument_type (m_fntype, argno));
+ /* Get the type of the vector argument. tree_argument_type wants a
+ 1-based number, whereas VEC_ARGNO is 0-based. */
+ unsigned int vec_argno = m_base_arg + rel_vec_argno;
+ machine_mode mode = TYPE_MODE (type_argument_type (m_fntype, vec_argno + 1));
gcc_assert (VECTOR_MODE_P (mode));
unsigned int nlanes = 128 / (group_size * GET_MODE_UNIT_BITSIZE (mode));
return require_immediate_range (rel_argno, 0, nlanes - 1);
@@ -2541,6 +3402,37 @@ function_checker::check ()
return shape->check (*this);
}
+/* Return true if V is a vector constant and if, for every in-range integer I,
+ element STEP*I is equal to element 0. */
+bool
+vector_cst_all_same (tree v, unsigned int step)
+{
+ if (TREE_CODE (v) != VECTOR_CST)
+ return false;
+
+ /* VECTOR_CST_NELTS_PER_PATTERN applies to any multiple of
+ VECTOR_CST_NPATTERNS. */
+ unsigned int lcm = least_common_multiple (step, VECTOR_CST_NPATTERNS (v));
+ unsigned int nelts = lcm * VECTOR_CST_NELTS_PER_PATTERN (v);
+ tree first_el = VECTOR_CST_ENCODED_ELT (v, 0);
+ for (unsigned int i = 0; i < nelts; i += step)
+ if (!operand_equal_p (VECTOR_CST_ENCODED_ELT (v, i), first_el, 0))
+ return false;
+
+ return true;
+}
+
+/* Return true if V is a constant predicate that acts as a ptrue when
+ predicating STEP-byte elements. */
+bool
+is_ptrue (tree v, unsigned int step)
+{
+ return (TREE_CODE (v) == VECTOR_CST
+ && TYPE_MODE (TREE_TYPE (v)) == VNx16BImode
+ && integer_nonzerop (VECTOR_CST_ENCODED_ELT (v, 0))
+ && vector_cst_all_same (v, step));
+}
+
gimple_folder::gimple_folder (const function_instance &instance, tree fndecl,
gimple_stmt_iterator *gsi_in, gcall *call_in)
: function_call_info (gimple_location (call_in), instance, fndecl),
@@ -2615,6 +3507,37 @@ gimple_folder::redirect_call (const function_instance &instance)
return call;
}
+/* Redirect _z and _m calls to _x functions if the predicate is all-true.
+ This allows us to use unpredicated instructions, where available. */
+gimple *
+gimple_folder::redirect_pred_x ()
+{
+ if (pred != PRED_z && pred != PRED_m)
+ return nullptr;
+
+ if (gimple_call_num_args (call) < 2)
+ return nullptr;
+
+ tree lhs_type = TREE_TYPE (TREE_TYPE (fndecl));
+ tree arg0_type = type_argument_type (TREE_TYPE (fndecl), 1);
+ tree arg1_type = type_argument_type (TREE_TYPE (fndecl), 2);
+ if (!VECTOR_TYPE_P (lhs_type)
+ || !VECTOR_TYPE_P (arg0_type)
+ || !VECTOR_TYPE_P (arg1_type))
+ return nullptr;
+
+ auto lhs_step = element_precision (lhs_type);
+ auto rhs_step = element_precision (arg1_type);
+ auto step = MAX (lhs_step, rhs_step);
+ if (!multiple_p (step, BITS_PER_UNIT)
+ || !is_ptrue (gimple_call_arg (call, 0), step / BITS_PER_UNIT))
+ return nullptr;
+
+ function_instance instance (*this);
+ instance.pred = PRED_x;
+ return redirect_call (instance);
+}
+
/* Fold the call to constant VAL. */
gimple *
gimple_folder::fold_to_cstu (poly_uint64 val)
@@ -2687,6 +3610,10 @@ gimple_folder::fold ()
if (!lhs && TREE_TYPE (gimple_call_fntype (call)) != void_type_node)
return NULL;
+ /* First try some simplifications that are common to many functions. */
+ if (auto *call = redirect_pred_x ())
+ return call;
+
return base->fold (*this);
}
@@ -2702,7 +3629,7 @@ function_expander::function_expander (const function_instance &instance,
insn_code
function_expander::direct_optab_handler (optab op, unsigned int suffix_i)
{
- return ::direct_optab_handler (op, vector_mode (suffix_i));
+ return ::direct_optab_handler (op, tuple_mode (suffix_i));
}
/* Choose between signed and unsigned direct optabs SIGNED_OP and
@@ -2741,21 +3668,51 @@ function_expander::convert_to_pmode (rtx x)
}
/* Return the base address for a contiguous load or store function.
- MEM_MODE is the mode of the addressed memory. */
+ MEM_MODE is the mode of the addressed memory, BASE_ARGNO is
+ the index of the base argument, and VNUM_ARGNO is the index of
+ the vnum offset argument (if any). VL_ISA_MODE is AARCH64_FL_SM_ON
+ if the vnum argument is a factor of the SME vector length, 0 if it
+ is a factor of the current prevailing vector length. */
rtx
-function_expander::get_contiguous_base (machine_mode mem_mode)
+function_expander::get_contiguous_base (machine_mode mem_mode,
+ unsigned int base_argno,
+ unsigned int vnum_argno,
+ aarch64_feature_flags vl_isa_mode)
{
- rtx base = convert_to_pmode (args[1]);
+ rtx base = convert_to_pmode (args[base_argno]);
if (mode_suffix_id == MODE_vnum)
{
- /* Use the size of the memory mode for extending loads and truncating
- stores. Use the size of a full vector for non-extending loads
- and non-truncating stores (including svld[234] and svst[234]). */
- poly_int64 size = ordered_min (GET_MODE_SIZE (mem_mode),
- BYTES_PER_SVE_VECTOR);
- rtx offset = gen_int_mode (size, Pmode);
- offset = simplify_gen_binary (MULT, Pmode, args[2], offset);
- base = simplify_gen_binary (PLUS, Pmode, base, offset);
+ rtx vnum = args[vnum_argno];
+ if (vnum != const0_rtx)
+ {
+ /* Use the size of the memory mode for extending loads and truncating
+ stores. Use the size of a full vector for non-extending loads
+ and non-truncating stores (including svld[234] and svst[234]). */
+ poly_int64 size = ordered_min (GET_MODE_SIZE (mem_mode),
+ BYTES_PER_SVE_VECTOR);
+ rtx offset;
+ if ((vl_isa_mode & AARCH64_FL_SM_ON)
+ && !TARGET_STREAMING
+ && !size.is_constant ())
+ {
+ gcc_assert (known_eq (size, BYTES_PER_SVE_VECTOR));
+ if (CONST_INT_P (vnum) && IN_RANGE (INTVAL (vnum), -32, 31))
+ offset = aarch64_sme_vq_immediate (Pmode, INTVAL (vnum) * 16,
+ AARCH64_ISA_MODE);
+ else
+ {
+ offset = aarch64_sme_vq_immediate (Pmode, 16,
+ AARCH64_ISA_MODE);
+ offset = simplify_gen_binary (MULT, Pmode, vnum, offset);
+ }
+ }
+ else
+ {
+ offset = gen_int_mode (size, Pmode);
+ offset = simplify_gen_binary (MULT, Pmode, vnum, offset);
+ }
+ base = simplify_gen_binary (PLUS, Pmode, base, offset);
+ }
}
return base;
}
@@ -2783,7 +3740,7 @@ function_expander::get_fallback_value (machine_mode mode, unsigned int nops,
gcc_assert (pred == PRED_m || pred == PRED_x);
if (merge_argno == DEFAULT_MERGE_ARGNO)
- merge_argno = nops == 1 && pred == PRED_m ? 0 : 1;
+ merge_argno = shape->has_merge_argument_p (*this, nops) ? 0 : 1;
if (merge_argno == 0)
return args[argno++];
@@ -2796,7 +3753,7 @@ function_expander::get_fallback_value (machine_mode mode, unsigned int nops,
rtx
function_expander::get_reg_target ()
{
- machine_mode target_mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (fndecl)));
+ machine_mode target_mode = result_mode ();
if (!possible_target || GET_MODE (possible_target) != target_mode)
possible_target = gen_reg_rtx (target_mode);
return possible_target;
@@ -2843,11 +3800,18 @@ function_expander::add_input_operand (insn_code icode, rtx x)
machine_mode mode = operand.mode;
if (mode == VOIDmode)
{
- /* The only allowable use of VOIDmode is the wildcard
- aarch64_any_register_operand, which is used to avoid
- combinatorial explosion in the reinterpret patterns. */
- gcc_assert (operand.predicate == aarch64_any_register_operand);
- mode = GET_MODE (x);
+ /* The only allowable uses of VOIDmode are:
+
+ - the wildcard aarch64_any_register_operand, which is used
+ to avoid combinatorial explosion in the reinterpret patterns
+
+ - pmode_register_operand, which always has mode Pmode. */
+ if (operand.predicate == aarch64_any_register_operand)
+ mode = GET_MODE (x);
+ else if (operand.predicate == pmode_register_operand)
+ mode = Pmode;
+ else
+ gcc_unreachable ();
}
else if (!VECTOR_MODE_P (GET_MODE (x)) && VECTOR_MODE_P (mode))
x = expand_vector_broadcast (mode, x);
@@ -2862,7 +3826,7 @@ function_expander::add_input_operand (insn_code icode, rtx x)
/* Add an integer operand with value X to the instruction. */
void
-function_expander::add_integer_operand (HOST_WIDE_INT x)
+function_expander::add_integer_operand (poly_int64 x)
{
m_ops.safe_grow (m_ops.length () + 1, true);
create_integer_operand (&m_ops.last (), x);
@@ -3100,7 +4064,8 @@ function_expander::use_pred_x_insn (insn_code icode)
has_float_operand_p = true;
}
- if (has_float_operand_p)
+ if (has_float_operand_p
+ && insn_data[icode].n_operands > (int) nops + 2)
{
/* Add a flag that indicates whether unpredicated instructions
are allowed. */
@@ -3233,7 +4198,8 @@ function_expander::use_contiguous_store_insn (insn_code icode)
- CODE_FOR_SINT for signed integers
- CODE_FOR_UINT for unsigned integers
- - UNSPEC_FOR_FP for floating-point values
+ - UNSPEC_FOR_COND_FP for predicated floating-point
+ - UNSPEC_FOR_UNCOND_FP for unpredicated floating-point
and where <code_optab> is like <optab>, but uses CODE_FOR_SINT instead
of UNSPEC_FOR_FP for floating-point values.
@@ -3243,13 +4209,24 @@ function_expander::use_contiguous_store_insn (insn_code icode)
rtx
function_expander::map_to_rtx_codes (rtx_code code_for_sint,
rtx_code code_for_uint,
- int unspec_for_fp,
+ int unspec_for_cond_fp,
+ int unspec_for_uncond_fp,
unsigned int merge_argno)
{
- machine_mode mode = vector_mode (0);
+ machine_mode mode = tuple_mode (0);
rtx_code code = (type_suffix (0).unsigned_p ? code_for_uint : code_for_sint);
insn_code icode;
+ if (mode_suffix_id == MODE_single)
+ {
+ gcc_assert (pred == PRED_none);
+ if (type_suffix (0).integer_p)
+ icode = code_for_aarch64_sve_single (code, mode);
+ else
+ icode = code_for_aarch64_sve_single (unspec_for_uncond_fp, mode);
+ return use_exact_insn (icode);
+ }
+
/* Handle predicate logic operations, which always use _z predication. */
if (type_suffix (0).tclass == TYPE_bool)
{
@@ -3264,7 +4241,7 @@ function_expander::map_to_rtx_codes (rtx_code code_for_sint,
if (type_suffix (0).integer_p)
icode = maybe_code_for_aarch64_pred (code, mode);
else
- icode = maybe_code_for_aarch64_pred (unspec_for_fp, mode);
+ icode = maybe_code_for_aarch64_pred (unspec_for_cond_fp, mode);
if (icode != CODE_FOR_nothing)
return use_pred_x_insn (icode);
}
@@ -3273,7 +4250,10 @@ function_expander::map_to_rtx_codes (rtx_code code_for_sint,
Floating-point operations conventionally use the signed rtx code. */
if (pred == PRED_none || pred == PRED_x)
{
- icode = direct_optab_handler (code_to_optab (code), 0);
+ if (type_suffix (0).float_p && unspec_for_uncond_fp >= 0)
+ icode = maybe_code_for_aarch64_sve (unspec_for_uncond_fp, mode);
+ else
+ icode = direct_optab_handler (code_to_optab (code), 0);
if (icode == CODE_FOR_nothing)
icode = code_for_aarch64_sve (code, mode);
return use_unpred_insn (icode);
@@ -3283,7 +4263,7 @@ function_expander::map_to_rtx_codes (rtx_code code_for_sint,
if (type_suffix (0).integer_p)
icode = code_for_cond (code, mode);
else
- icode = code_for_cond (unspec_for_fp, mode);
+ icode = code_for_cond (unspec_for_cond_fp, mode);
return use_cond_insn (icode, merge_argno);
}
@@ -3309,11 +4289,17 @@ rtx
function_expander::map_to_unspecs (int unspec_for_sint, int unspec_for_uint,
int unspec_for_fp, unsigned int merge_argno)
{
- machine_mode mode = vector_mode (0);
+ machine_mode mode = tuple_mode (0);
int unspec = (!type_suffix (0).integer_p ? unspec_for_fp
: type_suffix (0).unsigned_p ? unspec_for_uint
: unspec_for_sint);
+ if (mode_suffix_id == MODE_single)
+ {
+ gcc_assert (pred == PRED_none);
+ return use_exact_insn (code_for_aarch64_sve_single (unspec, mode));
+ }
+
if (pred == PRED_x)
{
insn_code icode = maybe_code_for_aarch64_pred (unspec, mode);
@@ -3344,6 +4330,49 @@ function_expander::expand ()
return base->expand (*this);
}
+/* Return a structure type that contains a single field of type FIELD_TYPE.
+ The field is called __val, but that's an internal detail rather than
+ an exposed part of the API. */
+static tree
+wrap_type_in_struct (tree field_type)
+{
+ tree field = build_decl (input_location, FIELD_DECL,
+ get_identifier ("__val"), field_type);
+ tree struct_type = lang_hooks.types.make_type (RECORD_TYPE);
+ DECL_FIELD_CONTEXT (field) = struct_type;
+ TYPE_FIELDS (struct_type) = field;
+ make_type_sizeless (struct_type);
+ layout_type (struct_type);
+ return struct_type;
+}
+
+/* Register a built-in TYPE_DECL called NAME for TYPE. This is used/needed
+ when TYPE is a structure type. */
+static void
+register_type_decl (tree type, const char *name)
+{
+ tree decl = build_decl (input_location, TYPE_DECL,
+ get_identifier (name), type);
+ TYPE_NAME (type) = decl;
+ TYPE_STUB_DECL (type) = decl;
+ lang_hooks.decls.pushdecl (decl);
+ /* ??? Undo the effect of set_underlying_type for C. The C frontend
+ doesn't recognize DECL as a built-in because (as intended) the decl has
+ a real location instead of BUILTINS_LOCATION. The frontend therefore
+ treats the decl like a normal C "typedef struct foo foo;", expecting
+ the type for tag "struct foo" to have a dummy unnamed TYPE_DECL instead
+ of the named one we attached above. It then sets DECL_ORIGINAL_TYPE
+ on the supposedly unnamed decl, creating a circularity that upsets
+ dwarf2out.
+
+ We don't want to follow the normal C model and create "struct foo"
+ tags for tuple types since (a) the types are supposed to be opaque
+ and (b) they couldn't be defined as a real struct anyway. Treating
+ the TYPE_DECLs as "typedef struct foo foo;" without creating
+ "struct foo" would lead to confusing error messages. */
+ DECL_ORIGINAL_TYPE (decl) = NULL_TREE;
+}
+
/* Register the built-in SVE ABI types, such as __SVBool_t. */
static void
register_builtin_types ()
@@ -3354,48 +4383,63 @@ register_builtin_types ()
for (unsigned int i = 0; i < NUM_VECTOR_TYPES; ++i)
{
- tree eltype = scalar_types[i];
tree vectype;
unsigned int num_zr = 0, num_pr = 0;
- if (eltype == boolean_type_node)
+ if (vector_type_index (i) == VECTOR_TYPE_svcount_t)
{
- vectype = build_truth_vector_type_for_mode (BYTES_PER_SVE_VECTOR,
- VNx16BImode);
- gcc_assert (TYPE_MODE (vectype) == VNx16BImode
- && TYPE_MODE (vectype) == TYPE_MODE_RAW (vectype)
- && TYPE_ALIGN (vectype) == 16
- && known_eq (wi::to_poly_offset (TYPE_SIZE (vectype)),
- BYTES_PER_SVE_VECTOR));
+ vectype = abi_vector_types[VECTOR_TYPE_svbool_t];
+ vectype = wrap_type_in_struct (vectype);
num_pr = 1;
}
else
{
- scalar_mode elmode = SCALAR_TYPE_MODE (eltype);
- unsigned int elbytes = GET_MODE_SIZE (elmode);
- poly_uint64 nunits = exact_div (BYTES_PER_SVE_VECTOR, elbytes);
- machine_mode mode
- = aarch64_sve_data_mode (elmode, nunits).require ();
- vectype = build_vector_type_for_mode (eltype, mode);
- gcc_assert (VECTOR_MODE_P (TYPE_MODE (vectype))
- && TYPE_MODE (vectype) == mode
- && TYPE_MODE_RAW (vectype) == mode
- && TYPE_ALIGN (vectype) == 128
- && known_eq (wi::to_poly_offset (TYPE_SIZE (vectype)),
- BITS_PER_SVE_VECTOR));
- num_zr = 1;
+ tree eltype = scalar_types[i];
+ if (eltype == boolean_type_node)
+ {
+ vectype = build_truth_vector_type_for_mode (BYTES_PER_SVE_VECTOR,
+ VNx16BImode);
+ num_pr = 1;
+ }
+ else
+ {
+ scalar_mode elmode = SCALAR_TYPE_MODE (eltype);
+ unsigned int elbytes = GET_MODE_SIZE (elmode);
+ poly_uint64 nunits = exact_div (BYTES_PER_SVE_VECTOR, elbytes);
+ machine_mode mode
+ = aarch64_sve_data_mode (elmode, nunits).require ();
+ vectype = build_vector_type_for_mode (eltype, mode);
+ auto size = wi::to_poly_offset (TYPE_SIZE (vectype));
+ gcc_assert (VECTOR_MODE_P (TYPE_MODE (vectype))
+ && TYPE_MODE (vectype) == mode
+ && TYPE_MODE_RAW (vectype) == mode
+ && TYPE_ALIGN (vectype) == 128
+ && known_eq (size, BITS_PER_SVE_VECTOR));
+ num_zr = 1;
+ }
+ vectype = build_distinct_type_copy (vectype);
+ gcc_assert (vectype == TYPE_MAIN_VARIANT (vectype));
+ SET_TYPE_STRUCTURAL_EQUALITY (vectype);
+ TYPE_ARTIFICIAL (vectype) = 1;
+ TYPE_INDIVISIBLE_P (vectype) = 1;
+ make_type_sizeless (vectype);
+ }
+ if (num_pr)
+ {
+ auto size = wi::to_poly_offset (TYPE_SIZE (vectype));
+ gcc_assert (TYPE_MODE (vectype) == VNx16BImode
+ && TYPE_MODE (vectype) == TYPE_MODE_RAW (vectype)
+ && TYPE_ALIGN (vectype) == 16
+ && known_eq (size, BYTES_PER_SVE_VECTOR));
}
- vectype = build_distinct_type_copy (vectype);
- gcc_assert (vectype == TYPE_MAIN_VARIANT (vectype));
- SET_TYPE_STRUCTURAL_EQUALITY (vectype);
- TYPE_ARTIFICIAL (vectype) = 1;
- TYPE_INDIVISIBLE_P (vectype) = 1;
add_sve_type_attribute (vectype, num_zr, num_pr,
vector_types[i].mangled_name,
vector_types[i].acle_name);
- make_type_sizeless (vectype);
abi_vector_types[i] = vectype;
- lang_hooks.types.register_builtin_type (vectype,
- vector_types[i].abi_name);
+ if (TREE_CODE (vectype) == RECORD_TYPE)
+ register_type_decl (vectype, vector_types[i].abi_name);
+ else
+ lang_hooks.types.register_builtin_type (vectype,
+ vector_types[i].abi_name);
}
}
@@ -3407,7 +4451,10 @@ init_builtins ()
sve_switcher sve;
register_builtin_types ();
if (in_lto_p)
- handle_arm_sve_h ();
+ {
+ handle_arm_sve_h ();
+ handle_arm_sme_h ();
+ }
}
/* Register vector type TYPE under its arm_sve.h name. */
@@ -3436,7 +4483,8 @@ register_vector_type (vector_type_index type)
static void
register_tuple_type (unsigned int num_vectors, vector_type_index type)
{
- tree tuple_type = lang_hooks.types.make_type (RECORD_TYPE);
+ tree vector_type = acle_vector_types[0][type];
+ bool is_pred = GET_MODE_CLASS (TYPE_MODE (vector_type)) == MODE_VECTOR_BOOL;
/* Work out the structure name. */
char buffer[sizeof ("svbfloat16x4_t")];
@@ -3458,43 +4506,21 @@ register_tuple_type (unsigned int num_vectors, vector_type_index type)
Using arrays simplifies the handling of svget and svset for variable
arguments. */
- tree vector_type = acle_vector_types[0][type];
tree array_type = build_array_type_nelts (vector_type, num_vectors);
gcc_assert (VECTOR_MODE_P (TYPE_MODE (array_type))
&& TYPE_MODE_RAW (array_type) == TYPE_MODE (array_type)
- && TYPE_ALIGN (array_type) == 128);
+ && TYPE_ALIGN (array_type) == (is_pred ? 16 : 128));
- tree field = build_decl (input_location, FIELD_DECL,
- get_identifier ("__val"), array_type);
- DECL_FIELD_CONTEXT (field) = tuple_type;
- TYPE_FIELDS (tuple_type) = field;
- add_sve_type_attribute (tuple_type, num_vectors, 0, NULL, buffer);
- make_type_sizeless (tuple_type);
- layout_type (tuple_type);
+ tree tuple_type = wrap_type_in_struct (array_type);
+ if (is_pred)
+ add_sve_type_attribute (tuple_type, 0, num_vectors, NULL, buffer);
+ else
+ add_sve_type_attribute (tuple_type, num_vectors, 0, NULL, buffer);
gcc_assert (VECTOR_MODE_P (TYPE_MODE (tuple_type))
&& TYPE_MODE_RAW (tuple_type) == TYPE_MODE (tuple_type)
- && TYPE_ALIGN (tuple_type) == 128);
-
- tree decl = build_decl (input_location, TYPE_DECL,
- get_identifier (buffer), tuple_type);
- TYPE_NAME (tuple_type) = decl;
- TYPE_STUB_DECL (tuple_type) = decl;
- lang_hooks.decls.pushdecl (decl);
- /* ??? Undo the effect of set_underlying_type for C. The C frontend
- doesn't recognize DECL as a built-in because (as intended) the decl has
- a real location instead of BUILTINS_LOCATION. The frontend therefore
- treats the decl like a normal C "typedef struct foo foo;", expecting
- the type for tag "struct foo" to have a dummy unnamed TYPE_DECL instead
- of the named one we attached above. It then sets DECL_ORIGINAL_TYPE
- on the supposedly unnamed decl, creating a circularity that upsets
- dwarf2out.
+ && TYPE_ALIGN (tuple_type) == TYPE_ALIGN (array_type));
- We don't want to follow the normal C model and create "struct foo"
- tags for tuple types since (a) the types are supposed to be opaque
- and (b) they couldn't be defined as a real struct anyway. Treating
- the TYPE_DECLs as "typedef struct foo foo;" without creating
- "struct foo" would lead to confusing error messages. */
- DECL_ORIGINAL_TYPE (decl) = NULL_TREE;
+ register_type_decl (tuple_type, buffer);
acle_vector_types[num_vectors - 1][type] = tuple_type;
}
@@ -3544,9 +4570,10 @@ handle_arm_sve_h ()
{
vector_type_index type = vector_type_index (type_i);
register_vector_type (type);
- if (type != VECTOR_TYPE_svbool_t)
+ if (type != VECTOR_TYPE_svcount_t)
for (unsigned int count = 2; count <= MAX_TUPLE_SIZE; ++count)
- register_tuple_type (count, type);
+ if (type != VECTOR_TYPE_svbool_t || count == 2)
+ register_tuple_type (count, type);
}
/* Define the enums. */
@@ -3557,7 +4584,8 @@ handle_arm_sve_h ()
function_table = new hash_table<registered_function_hasher> (1023);
function_builder builder;
for (unsigned int i = 0; i < ARRAY_SIZE (function_groups); ++i)
- builder.register_function_group (function_groups[i]);
+ if (!(function_groups[i].required_extensions & AARCH64_FL_SME))
+ builder.register_function_group (function_groups[i]);
}
/* Return the function decl with SVE function subcode CODE, or error_mark_node
@@ -3570,6 +4598,33 @@ builtin_decl (unsigned int code, bool)
return (*registered_functions)[code]->decl;
}
+/* Implement #pragma GCC aarch64 "arm_sme.h". */
+void
+handle_arm_sme_h ()
+{
+ if (!function_table)
+ {
+ error ("%qs defined without first defining %qs",
+ "arm_sme.h", "arm_sve.h");
+ return;
+ }
+
+ static bool initialized_p;
+ if (initialized_p)
+ {
+ error ("duplicate definition of %qs", "arm_sme.h");
+ return;
+ }
+ initialized_p = true;
+
+ sme_switcher sme;
+
+ function_builder builder;
+ for (unsigned int i = 0; i < ARRAY_SIZE (function_groups); ++i)
+ if (function_groups[i].required_extensions & AARCH64_FL_SME)
+ builder.register_function_group (function_groups[i]);
+}
+
/* If we're implementing manual overloading, check whether the SVE
function with subcode CODE is overloaded, and if so attempt to
determine the corresponding non-overloaded function. The call
diff --git a/gcc/config/aarch64/aarch64-sve-builtins.def b/gcc/config/aarch64/aarch64-sve-builtins.def
index 534f6e6..23ef788 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins.def
+++ b/gcc/config/aarch64/aarch64-sve-builtins.def
@@ -29,11 +29,35 @@
#define DEF_SVE_TYPE_SUFFIX(A, B, C, D, E)
#endif
+#ifndef DEF_SME_ZA_SUFFIX
+#define DEF_SME_ZA_SUFFIX(A, B, C)
+#endif
+
+#ifndef DEF_SVE_GROUP_SUFFIX
+#define DEF_SVE_GROUP_SUFFIX(A, B, C)
+#endif
+
+#ifndef DEF_SVE_FUNCTION_GS
+#define DEF_SVE_FUNCTION_GS(A, B, C, D, E)
+#endif
+
#ifndef DEF_SVE_FUNCTION
-#define DEF_SVE_FUNCTION(A, B, C, D)
+#define DEF_SVE_FUNCTION(NAME, SHAPE, TYPES, PREDS) \
+ DEF_SVE_FUNCTION_GS (NAME, SHAPE, TYPES, none, PREDS)
+#endif
+
+#ifndef DEF_SME_ZA_FUNCTION_GS
+#define DEF_SME_ZA_FUNCTION_GS(NAME, SHAPE, TYPES, GROUP, PREDS) \
+ DEF_SVE_FUNCTION_GS(NAME, SHAPE, TYPES, GROUP, PREDS)
+#endif
+
+#ifndef DEF_SME_ZA_FUNCTION
+#define DEF_SME_ZA_FUNCTION(NAME, SHAPE, TYPES, PREDS) \
+ DEF_SME_ZA_FUNCTION_GS (NAME, SHAPE, TYPES, none, PREDS)
#endif
DEF_SVE_MODE (n, none, none, none)
+DEF_SVE_MODE (single, none, none, none)
DEF_SVE_MODE (index, none, none, elements)
DEF_SVE_MODE (offset, none, none, bytes)
DEF_SVE_MODE (s32index, none, svint32_t, elements)
@@ -61,6 +85,7 @@ DEF_SVE_MODE (u64offset, none, svuint64_t, bytes)
DEF_SVE_MODE (vnum, none, none, vectors)
DEF_SVE_TYPE (svbool_t, 10, __SVBool_t, boolean_type_node)
+DEF_SVE_TYPE (svcount_t, 11, __SVCount_t, boolean_type_node)
DEF_SVE_TYPE (svbfloat16_t, 14, __SVBfloat16_t, bfloat16_type_node)
DEF_SVE_TYPE (svfloat16_t, 13, __SVFloat16_t, aarch64_fp16_type_node)
DEF_SVE_TYPE (svfloat32_t, 13, __SVFloat32_t, float_type_node)
@@ -83,6 +108,11 @@ DEF_SVE_TYPE_SUFFIX (b16, svbool_t, bool, 16, VNx8BImode)
DEF_SVE_TYPE_SUFFIX (b32, svbool_t, bool, 32, VNx4BImode)
DEF_SVE_TYPE_SUFFIX (b64, svbool_t, bool, 64, VNx2BImode)
DEF_SVE_TYPE_SUFFIX (bf16, svbfloat16_t, bfloat, 16, VNx8BFmode)
+DEF_SVE_TYPE_SUFFIX (c, svcount_t, count, 8, VNx16BImode)
+DEF_SVE_TYPE_SUFFIX (c8, svcount_t, count, 8, VNx16BImode)
+DEF_SVE_TYPE_SUFFIX (c16, svcount_t, count, 16, VNx16BImode)
+DEF_SVE_TYPE_SUFFIX (c32, svcount_t, count, 32, VNx16BImode)
+DEF_SVE_TYPE_SUFFIX (c64, svcount_t, count, 64, VNx16BImode)
DEF_SVE_TYPE_SUFFIX (f16, svfloat16_t, float, 16, VNx8HFmode)
DEF_SVE_TYPE_SUFFIX (f32, svfloat32_t, float, 32, VNx4SFmode)
DEF_SVE_TYPE_SUFFIX (f64, svfloat64_t, float, 64, VNx2DFmode)
@@ -95,10 +125,40 @@ DEF_SVE_TYPE_SUFFIX (u16, svuint16_t, unsigned, 16, VNx8HImode)
DEF_SVE_TYPE_SUFFIX (u32, svuint32_t, unsigned, 32, VNx4SImode)
DEF_SVE_TYPE_SUFFIX (u64, svuint64_t, unsigned, 64, VNx2DImode)
+/* Associate _za with bytes. This is needed for svldr_vnum_za and
+ svstr_vnum_za, whose ZA offset can be in the range [0, 15], as for za8. */
+DEF_SME_ZA_SUFFIX (za, 8, VNx16QImode)
+
+DEF_SME_ZA_SUFFIX (za8, 8, VNx16QImode)
+DEF_SME_ZA_SUFFIX (za16, 16, VNx8HImode)
+DEF_SME_ZA_SUFFIX (za32, 32, VNx4SImode)
+DEF_SME_ZA_SUFFIX (za64, 64, VNx2DImode)
+DEF_SME_ZA_SUFFIX (za128, 128, VNx1TImode)
+
+DEF_SVE_GROUP_SUFFIX (x2, 0, 2)
+DEF_SVE_GROUP_SUFFIX (x3, 0, 3)
+DEF_SVE_GROUP_SUFFIX (x4, 0, 4)
+DEF_SVE_GROUP_SUFFIX (vg1x2, 1, 2)
+DEF_SVE_GROUP_SUFFIX (vg1x4, 1, 4)
+DEF_SVE_GROUP_SUFFIX (vg2, 2, 2)
+DEF_SVE_GROUP_SUFFIX (vg2x1, 2, 1)
+DEF_SVE_GROUP_SUFFIX (vg2x2, 2, 2)
+DEF_SVE_GROUP_SUFFIX (vg2x4, 2, 4)
+DEF_SVE_GROUP_SUFFIX (vg4, 4, 4)
+DEF_SVE_GROUP_SUFFIX (vg4x1, 4, 1)
+DEF_SVE_GROUP_SUFFIX (vg4x2, 4, 2)
+DEF_SVE_GROUP_SUFFIX (vg4x4, 4, 4)
+
#include "aarch64-sve-builtins-base.def"
#include "aarch64-sve-builtins-sve2.def"
+#include "aarch64-sve-builtins-sme.def"
+#undef DEF_SME_ZA_FUNCTION
#undef DEF_SVE_FUNCTION
+#undef DEF_SME_ZA_FUNCTION_GS
+#undef DEF_SVE_FUNCTION_GS
+#undef DEF_SVE_GROUP_SUFFIX
+#undef DEF_SME_ZA_SUFFIX
#undef DEF_SVE_TYPE_SUFFIX
#undef DEF_SVE_TYPE
#undef DEF_SVE_MODE
diff --git a/gcc/config/aarch64/aarch64-sve-builtins.h b/gcc/config/aarch64/aarch64-sve-builtins.h
index a301570..e67c465 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins.h
+++ b/gcc/config/aarch64/aarch64-sve-builtins.h
@@ -97,6 +97,10 @@ const unsigned int CP_PREFETCH_MEMORY = 1U << 3;
const unsigned int CP_WRITE_MEMORY = 1U << 4;
const unsigned int CP_READ_FFR = 1U << 5;
const unsigned int CP_WRITE_FFR = 1U << 6;
+const unsigned int CP_READ_ZA = 1U << 7;
+const unsigned int CP_WRITE_ZA = 1U << 8;
+const unsigned int CP_READ_ZT0 = 1U << 9;
+const unsigned int CP_WRITE_ZT0 = 1U << 10;
/* Enumerates the SVE predicate and (data) vector types, together called
"vector types" for brevity. */
@@ -142,15 +146,21 @@ enum predication_index
/* Zero predication: set inactive lanes of the vector result to zero. */
PRED_z,
+ /* Merging predication for SME's ZA: merge into slices of the array
+ instead of overwriting the whole slices. */
+ PRED_za_m,
+
NUM_PREDS
};
/* Classifies element types, based on type suffixes with the bit count
- removed. */
+ removed. "count" isn't really an element type, but we pretend it is
+ for consistency. */
enum type_class_index
{
TYPE_bool,
TYPE_bfloat,
+ TYPE_count,
TYPE_float,
TYPE_signed,
TYPE_unsigned,
@@ -176,10 +186,23 @@ enum type_suffix_index
{
#define DEF_SVE_TYPE_SUFFIX(NAME, ACLE_TYPE, CLASS, BITS, MODE) \
TYPE_SUFFIX_ ## NAME,
+#define DEF_SME_ZA_SUFFIX(NAME, BITS, MODE) \
+ TYPE_SUFFIX_ ## NAME,
#include "aarch64-sve-builtins.def"
NUM_TYPE_SUFFIXES
};
+/* Enumerates the possible group suffixes. Each suffix combines two
+ optional pieces of information: the vector group size in a ZA index,
+ and the number of vectors in the largest tuple argument. */
+enum group_suffix_index
+{
+#define DEF_SVE_GROUP_SUFFIX(NAME, VG, VECTORS_PER_TUPLE) GROUP_##NAME,
+#include "aarch64-sve-builtins.def"
+ GROUP_none,
+ NUM_GROUP_SUFFIXES
+};
+
/* Combines two type suffixes. */
typedef enum type_suffix_index type_suffix_pair[2];
@@ -229,14 +252,67 @@ struct type_suffix_info
unsigned int unsigned_p : 1;
/* True if the suffix is for a floating-point type. */
unsigned int float_p : 1;
+ /* True if the suffix is for a vector type (integer or float). */
+ unsigned int vector_p : 1;
/* True if the suffix is for a boolean type. */
unsigned int bool_p : 1;
- unsigned int spare : 12;
+ /* True if the suffix is for SME's ZA. */
+ unsigned int za_p : 1;
+ unsigned int spare : 10;
/* The associated vector or predicate mode. */
machine_mode vector_mode : 16;
};
+/* Static information about a group suffix. */
+struct group_suffix_info
+{
+ /* The suffix string itself. */
+ const char *string;
+
+ /* If the suffix describes a vector group in a ZA index, this is the
+ size of that group, otherwise it is zero. */
+ unsigned int vg;
+
+ /* The number of vectors in the largest (or only) tuple argument,
+ or 1 if the suffix does not convey this information. */
+ unsigned int vectors_per_tuple;
+};
+
+/* Represents an SVE vector, predicate, tuple of vectors, or tuple of
+ predicates. There is also a representation of "no type"/"invalid type". */
+struct sve_type
+{
+ sve_type () = default;
+ sve_type (type_suffix_index type) : type (type), num_vectors (1) {}
+ sve_type (type_suffix_index type, unsigned int num_vectors)
+ : type (type), num_vectors (num_vectors) {}
+
+ /* Return true if the type is valid. */
+ explicit operator bool () const { return type != NUM_TYPE_SUFFIXES; }
+
+ bool operator== (const sve_type &) const;
+ bool operator!= (const sve_type &x) const { return !operator== (x); }
+
+ /* This is one of:
+
+ - TYPE_SUFFIX_b for svbool_t-based types
+ - TYPE_SUFFIX_c for svcount_t-based types
+ - the type suffix of a data element for SVE data vectors and tuples
+ - NUM_TYPE_SUFFIXES for invalid types. */
+ type_suffix_index type = NUM_TYPE_SUFFIXES;
+
+ /* If the type is a tuple, this is the number of vectors in the tuple,
+ otherwise it is 1. */
+ unsigned int num_vectors = 1;
+};
+
+inline bool
+sve_type::operator== (const sve_type &other) const
+{
+ return type == other.type && num_vectors == other.num_vectors;
+}
+
/* Static information about a set of functions. */
struct function_group_info
{
@@ -251,14 +327,16 @@ struct function_group_info
shapes. */
const function_shape *const *shape;
- /* A list of the available type suffixes, and of the available predication
- types. The function supports every combination of the two.
+ /* A list of the available type suffixes, group suffixes, and predication
+ types. The function supports every combination of the three.
+
+ The list of type suffixes is terminated by two NUM_TYPE_SUFFIXES.
+ It is lexicographically ordered based on the index value.
- The list of type suffixes is terminated by two NUM_TYPE_SUFFIXES
- while the list of predication types is terminated by NUM_PREDS.
- The list of type suffixes is lexicographically ordered based
- on the index value. */
+ The list of group suffixes is terminated by NUM_GROUP_SUFFIXES
+ and the list of predication types is terminated by NUM_PREDS. */
const type_suffix_pair *types;
+ const group_suffix_index *groups;
const predication_index *preds;
/* The architecture extensions that the functions require, as a set of
@@ -273,7 +351,8 @@ class GTY((user)) function_instance
public:
function_instance (const char *, const function_base *,
const function_shape *, mode_suffix_index,
- const type_suffix_pair &, predication_index);
+ const type_suffix_pair &, group_suffix_index,
+ predication_index);
bool operator== (const function_instance &) const;
bool operator!= (const function_instance &) const;
@@ -284,6 +363,9 @@ public:
bool modifies_global_state_p () const;
bool could_trap_p () const;
+ vector_type_index gp_type_index () const;
+ tree gp_type () const;
+
unsigned int vectors_per_tuple () const;
tree memory_scalar_type () const;
machine_mode memory_vector_mode () const;
@@ -293,22 +375,27 @@ public:
tree displacement_vector_type () const;
units_index displacement_units () const;
+ unsigned int num_za_tiles () const;
+
const type_suffix_info &type_suffix (unsigned int) const;
+ const group_suffix_info &group_suffix () const;
+
tree scalar_type (unsigned int) const;
tree vector_type (unsigned int) const;
tree tuple_type (unsigned int) const;
- unsigned int elements_per_vq (unsigned int i) const;
+ unsigned int elements_per_vq (unsigned int) const;
machine_mode vector_mode (unsigned int) const;
+ machine_mode tuple_mode (unsigned int) const;
machine_mode gp_mode (unsigned int) const;
- /* The properties of the function. (The explicit "enum"s are required
- for gengtype.) */
+ /* The properties of the function. */
const char *base_name;
const function_base *base;
const function_shape *shape;
- enum mode_suffix_index mode_suffix_id;
+ mode_suffix_index mode_suffix_id;
type_suffix_pair type_suffix_ids;
- enum predication_index pred;
+ group_suffix_index group_suffix_id;
+ predication_index pred;
};
class registered_function;
@@ -335,7 +422,7 @@ private:
char *get_name (const function_instance &, bool);
- tree get_attributes (const function_instance &);
+ tree get_attributes (const function_instance &, aarch64_feature_flags);
registered_function &add_function (const function_instance &,
const char *, tree, tree,
@@ -382,38 +469,51 @@ public:
function_resolver (location_t, const function_instance &, tree,
vec<tree, va_gc> &);
- tree get_vector_type (type_suffix_index);
const char *get_scalar_type_name (type_suffix_index);
tree get_argument_type (unsigned int);
bool scalar_argument_p (unsigned int);
- tree report_no_such_form (type_suffix_index);
+ void report_incorrect_num_vectors (unsigned int, sve_type, unsigned int);
+ void report_mismatched_num_vectors (unsigned int, sve_type,
+ unsigned int, sve_type);
+
+ tree report_no_such_form (sve_type);
tree lookup_form (mode_suffix_index,
type_suffix_index = NUM_TYPE_SUFFIXES,
- type_suffix_index = NUM_TYPE_SUFFIXES);
+ type_suffix_index = NUM_TYPE_SUFFIXES,
+ group_suffix_index = GROUP_none);
+ tree lookup_form (mode_suffix_index, sve_type);
tree resolve_to (mode_suffix_index,
type_suffix_index = NUM_TYPE_SUFFIXES,
- type_suffix_index = NUM_TYPE_SUFFIXES);
+ type_suffix_index = NUM_TYPE_SUFFIXES,
+ group_suffix_index = GROUP_none);
+ tree resolve_to (mode_suffix_index, sve_type);
+ tree resolve_conversion (mode_suffix_index, sve_type);
+ vector_type_index infer_predicate_type (unsigned int);
type_suffix_index infer_integer_scalar_type (unsigned int);
+ type_suffix_index infer_64bit_scalar_integer_pair (unsigned int);
type_suffix_index infer_pointer_type (unsigned int, bool = false);
- type_suffix_index infer_vector_or_tuple_type (unsigned int, unsigned int);
+ sve_type infer_sve_type (unsigned int);
+ sve_type infer_vector_or_tuple_type (unsigned int, unsigned int);
type_suffix_index infer_vector_type (unsigned int);
type_suffix_index infer_integer_vector_type (unsigned int);
type_suffix_index infer_unsigned_vector_type (unsigned int);
type_suffix_index infer_sd_vector_type (unsigned int);
- type_suffix_index infer_tuple_type (unsigned int);
+ sve_type infer_tuple_type (unsigned int);
bool require_vector_or_scalar_type (unsigned int);
+ bool require_matching_predicate_type (vector_type_index, sve_type);
bool require_vector_type (unsigned int, vector_type_index);
- bool require_matching_vector_type (unsigned int, type_suffix_index);
- bool require_derived_vector_type (unsigned int, unsigned int,
- type_suffix_index,
+ bool require_matching_vector_type (unsigned int, unsigned int, sve_type);
+ bool require_derived_vector_type (unsigned int, unsigned int, sve_type,
type_class_index = SAME_TYPE_CLASS,
- unsigned int = SAME_SIZE);
+ unsigned int = SAME_SIZE,
+ unsigned int = 1);
bool require_scalar_type (unsigned int, const char *);
+ bool require_nonscalar_type (unsigned int);
bool require_pointer_type (unsigned int);
bool require_matching_integer_scalar_type (unsigned int, unsigned int,
type_suffix_index);
@@ -442,6 +542,8 @@ public:
type_class_index = SAME_TYPE_CLASS,
unsigned int = SAME_SIZE,
type_suffix_index = NUM_TYPE_SUFFIXES);
+ tree finish_opt_single_resolution (unsigned int, unsigned int, sve_type,
+ type_class_index = SAME_TYPE_CLASS);
tree resolve ();
@@ -463,7 +565,8 @@ public:
bool require_immediate_either_or (unsigned int, HOST_WIDE_INT,
HOST_WIDE_INT);
bool require_immediate_enum (unsigned int, tree);
- bool require_immediate_lane_index (unsigned int, unsigned int = 1);
+ bool require_immediate_lane_index (unsigned int, unsigned int,
+ unsigned int = 1);
bool require_immediate_one_of (unsigned int, HOST_WIDE_INT, HOST_WIDE_INT,
HOST_WIDE_INT, HOST_WIDE_INT);
bool require_immediate_range (unsigned int, HOST_WIDE_INT, HOST_WIDE_INT);
@@ -500,6 +603,8 @@ public:
tree load_store_cookie (tree);
gimple *redirect_call (const function_instance &);
+ gimple *redirect_pred_x ();
+
gimple *fold_to_cstu (poly_uint64);
gimple *fold_to_pfalse ();
gimple *fold_to_ptrue ();
@@ -528,10 +633,13 @@ public:
insn_code direct_optab_handler_for_sign (optab, optab, unsigned int = 0,
machine_mode = E_VOIDmode);
+ machine_mode result_mode () const;
+
bool overlaps_input_p (rtx);
rtx convert_to_pmode (rtx);
- rtx get_contiguous_base (machine_mode);
+ rtx get_contiguous_base (machine_mode, unsigned int = 1, unsigned int = 2,
+ aarch64_feature_flags = 0);
rtx get_fallback_value (machine_mode, unsigned int,
unsigned int, unsigned int &);
rtx get_reg_target ();
@@ -539,7 +647,7 @@ public:
void add_output_operand (insn_code);
void add_input_operand (insn_code, rtx);
- void add_integer_operand (HOST_WIDE_INT);
+ void add_integer_operand (poly_int64);
void add_mem_operand (machine_mode, rtx);
void add_address_operand (rtx);
void add_fixed_operand (rtx);
@@ -560,7 +668,7 @@ public:
rtx use_contiguous_prefetch_insn (insn_code);
rtx use_contiguous_store_insn (insn_code);
- rtx map_to_rtx_codes (rtx_code, rtx_code, int,
+ rtx map_to_rtx_codes (rtx_code, rtx_code, int, int,
unsigned int = DEFAULT_MERGE_ARGNO);
rtx map_to_unspecs (int, int, int, unsigned int = DEFAULT_MERGE_ARGNO);
@@ -591,7 +699,7 @@ public:
/* If the function operates on tuples of vectors, return the number
of vectors in the tuples, otherwise return 1. */
- virtual unsigned int vectors_per_tuple () const { return 1; }
+ virtual unsigned int vectors_per_tuple (const function_instance &) const;
/* If the function addresses memory, return the type of a single
scalar memory element. */
@@ -636,8 +744,16 @@ public:
class function_shape
{
public:
+ virtual bool has_merge_argument_p (const function_instance &,
+ unsigned int) const;
+
virtual bool explicit_type_suffix_p (unsigned int) const = 0;
+ /* True if the group suffix is present in overloaded names.
+ This isn't meaningful for pre-SME intrinsics, and true is
+ more common than false, so provide a default definition. */
+ virtual bool explicit_group_suffix_p () const { return true; }
+
/* Define all functions associated with the given group. */
virtual void build (function_builder &,
const function_group_info &) const = 0;
@@ -656,7 +772,7 @@ public:
class sve_switcher : public aarch64_simd_switcher
{
public:
- sve_switcher ();
+ sve_switcher (aarch64_feature_flags = 0);
~sve_switcher ();
private:
@@ -664,20 +780,24 @@ private:
bool m_old_have_regs_of_mode[MAX_MACHINE_MODE];
};
+/* Extends sve_switch enough for defining arm_sme.h. */
+class sme_switcher : public sve_switcher
+{
+public:
+ sme_switcher () : sve_switcher (AARCH64_FL_SME) {}
+};
+
extern const type_suffix_info type_suffixes[NUM_TYPE_SUFFIXES + 1];
extern const mode_suffix_info mode_suffixes[MODE_none + 1];
+extern const group_suffix_info group_suffixes[NUM_GROUP_SUFFIXES];
-extern tree scalar_types[NUM_VECTOR_TYPES];
+extern tree scalar_types[NUM_VECTOR_TYPES + 1];
extern tree acle_vector_types[MAX_TUPLE_SIZE][NUM_VECTOR_TYPES + 1];
extern tree acle_svpattern;
extern tree acle_svprfop;
-/* Return the ACLE type svbool_t. */
-inline tree
-get_svbool_t (void)
-{
- return acle_vector_types[0][VECTOR_TYPE_svbool_t];
-}
+bool vector_cst_all_same (tree, unsigned int);
+bool is_ptrue (tree, unsigned int);
/* Try to find a mode with the given mode_suffix_info fields. Return the
mode on success or MODE_none on failure. */
@@ -725,9 +845,11 @@ function_instance (const char *base_name_in,
const function_shape *shape_in,
mode_suffix_index mode_suffix_id_in,
const type_suffix_pair &type_suffix_ids_in,
+ group_suffix_index group_suffix_id_in,
predication_index pred_in)
: base_name (base_name_in), base (base_in), shape (shape_in),
- mode_suffix_id (mode_suffix_id_in), pred (pred_in)
+ mode_suffix_id (mode_suffix_id_in), group_suffix_id (group_suffix_id_in),
+ pred (pred_in)
{
memcpy (type_suffix_ids, type_suffix_ids_in, sizeof (type_suffix_ids));
}
@@ -738,9 +860,10 @@ function_instance::operator== (const function_instance &other) const
return (base == other.base
&& shape == other.shape
&& mode_suffix_id == other.mode_suffix_id
- && pred == other.pred
&& type_suffix_ids[0] == other.type_suffix_ids[0]
- && type_suffix_ids[1] == other.type_suffix_ids[1]);
+ && type_suffix_ids[1] == other.type_suffix_ids[1]
+ && group_suffix_id == other.group_suffix_id
+ && pred == other.pred);
}
inline bool
@@ -749,12 +872,30 @@ function_instance::operator!= (const function_instance &other) const
return !operator== (other);
}
+/* Return the index of the type that should be used as the governing
+ predicate of this function. */
+inline vector_type_index
+function_instance::gp_type_index () const
+{
+ if (group_suffix ().vectors_per_tuple > 1)
+ return VECTOR_TYPE_svcount_t;
+ return VECTOR_TYPE_svbool_t;
+}
+
+/* Return the type that should be used as the governing predicate of
+ this function. */
+inline tree
+function_instance::gp_type () const
+{
+ return acle_vector_types[0][gp_type_index ()];
+}
+
/* If the function operates on tuples of vectors, return the number
of vectors in the tuples, otherwise return 1. */
inline unsigned int
function_instance::vectors_per_tuple () const
{
- return base->vectors_per_tuple ();
+ return base->vectors_per_tuple (*this);
}
/* If the function addresses memory, return the type of a single
@@ -797,6 +938,16 @@ function_instance::displacement_vector_type () const
return acle_vector_types[0][mode_suffix ().displacement_vector_type];
}
+/* Return the number of ZA tiles associated with the _za<N> suffix
+ (which is always the first type suffix). */
+inline unsigned int
+function_instance::num_za_tiles () const
+{
+ auto &suffix = type_suffix (0);
+ gcc_checking_assert (suffix.za_p);
+ return suffix.element_bytes;
+}
+
/* If the function takes a vector or scalar displacement, return the units
in which the displacement is measured, otherwise return UNITS_none. */
inline units_index
@@ -812,6 +963,13 @@ function_instance::type_suffix (unsigned int i) const
return type_suffixes[type_suffix_ids[i]];
}
+/* Return information about the function's group suffix. */
+inline const group_suffix_info &
+function_instance::group_suffix () const
+{
+ return group_suffixes[group_suffix_id];
+}
+
/* Return the scalar type associated with type suffix I. */
inline tree
function_instance::scalar_type (unsigned int i) const
@@ -851,11 +1009,24 @@ function_instance::vector_mode (unsigned int i) const
return type_suffix (i).vector_mode;
}
+/* Return the mode of tuple_type (I). */
+inline machine_mode
+function_instance::tuple_mode (unsigned int i) const
+{
+ if (group_suffix ().vectors_per_tuple > 1)
+ return TYPE_MODE (tuple_type (i));
+ return vector_mode (i);
+}
+
/* Return the mode of the governing predicate to use when operating on
type suffix I. */
inline machine_mode
function_instance::gp_mode (unsigned int i) const
{
+ /* Multi-vector operations are predicated on an svcount_t, which has
+ mode VNx16BI. */
+ if (group_suffix ().vectors_per_tuple > 1)
+ return VNx16BImode;
return aarch64_sve_pred_mode (type_suffix (i).element_bytes).require ();
}
@@ -877,6 +1048,29 @@ function_base::call_properties (const function_instance &instance) const
return flags;
}
+inline unsigned int
+function_base::vectors_per_tuple (const function_instance &instance) const
+{
+ return instance.group_suffix ().vectors_per_tuple;
+}
+
+/* Return true if INSTANCE (which has NARGS arguments) has an initial
+ vector argument whose only purpose is to specify the values of
+ inactive lanes. */
+inline bool
+function_shape::has_merge_argument_p (const function_instance &instance,
+ unsigned int nargs) const
+{
+ return nargs == 1 && instance.pred == PRED_m;
+}
+
+/* Return the mode of the result of a call. */
+inline machine_mode
+function_expander::result_mode () const
+{
+ return TYPE_MODE (TREE_TYPE (TREE_TYPE (fndecl)));
+}
+
}
#endif
diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index cfadac4..d911f65 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -33,6 +33,7 @@
;; ---- Moves of single vectors
;; ---- Moves of multiple vectors
;; ---- Moves of predicates
+;; ---- Moves of multiple predicates
;; ---- Moves relating to the FFR
;;
;; == Loads
@@ -787,8 +788,8 @@
;; This is equivalent to a subreg on little-endian targets but not for
;; big-endian; see the comment at the head of the file for details.
(define_expand "@aarch64_sve_reinterpret<mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand")
- (unspec:SVE_ALL
+ [(set (match_operand:SVE_ALL_STRUCT 0 "register_operand")
+ (unspec:SVE_ALL_STRUCT
[(match_operand 1 "aarch64_any_register_operand")]
UNSPEC_REINTERPRET))]
"TARGET_SVE"
@@ -805,8 +806,8 @@
;; A pattern for handling type punning on big-endian targets. We use a
;; special predicate for operand 1 to reduce the number of patterns.
(define_insn_and_split "*aarch64_sve_reinterpret<mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w")
- (unspec:SVE_ALL
+ [(set (match_operand:SVE_ALL_STRUCT 0 "register_operand" "=w")
+ (unspec:SVE_ALL_STRUCT
[(match_operand 1 "aarch64_any_register_operand" "w")]
UNSPEC_REINTERPRET))]
"TARGET_SVE"
@@ -1070,6 +1071,27 @@
)
;; -------------------------------------------------------------------------
+;; ---- Moves of multiple predicates
+;; -------------------------------------------------------------------------
+
+(define_insn_and_split "movvnx32bi"
+ [(set (match_operand:VNx32BI 0 "nonimmediate_operand")
+ (match_operand:VNx32BI 1 "aarch64_mov_operand"))]
+ "TARGET_SVE"
+ {@ [ cons: =0 , 1 ]
+ [ Upa , Upa ] #
+ [ Upa , m ] #
+ [ m , Upa ] #
+ }
+ "&& reload_completed"
+ [(const_int 0)]
+ {
+ aarch64_split_double_move (operands[0], operands[1], VNx16BImode);
+ DONE;
+ }
+)
+
+;; -------------------------------------------------------------------------
;; ---- Moves relating to the FFR
;; -------------------------------------------------------------------------
;; RDFFR
@@ -1086,7 +1108,7 @@
(match_operand:VNx16BI 0 "aarch64_simd_reg_or_minus_one"))
(set (reg:VNx16BI FFRT_REGNUM)
(unspec:VNx16BI [(match_dup 0)] UNSPEC_WRFFR))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [ cons: 0 ]
[ Dm ] setffr
[ Upa ] wrffr\t%0.b
@@ -1128,7 +1150,7 @@
(define_insn "aarch64_rdffr"
[(set (match_operand:VNx16BI 0 "register_operand" "=Upa")
(reg:VNx16BI FFRT_REGNUM))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
"rdffr\t%0.b"
)
@@ -1138,7 +1160,7 @@
(and:VNx16BI
(reg:VNx16BI FFRT_REGNUM)
(match_operand:VNx16BI 1 "register_operand" "Upa")))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
"rdffr\t%0.b, %1/z"
)
@@ -1154,7 +1176,7 @@
(match_dup 1))]
UNSPEC_PTEST))
(clobber (match_scratch:VNx16BI 0 "=Upa"))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
"rdffrs\t%0.b, %1/z"
)
@@ -1168,7 +1190,7 @@
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_PTEST))
(clobber (match_scratch:VNx16BI 0 "=Upa"))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
"rdffrs\t%0.b, %1/z"
)
@@ -1187,7 +1209,7 @@
(and:VNx16BI
(reg:VNx16BI FFRT_REGNUM)
(match_dup 1)))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
"rdffrs\t%0.b, %1/z"
)
@@ -1202,7 +1224,7 @@
UNSPEC_PTEST))
(set (match_operand:VNx16BI 0 "register_operand" "=Upa")
(reg:VNx16BI FFRT_REGNUM))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
"rdffrs\t%0.b, %1/z"
)
@@ -1244,7 +1266,7 @@
;; - LD4W
;; -------------------------------------------------------------------------
-;; Predicated LD1.
+;; Predicated LD1 (single).
(define_insn "maskload<mode><vpred>"
[(set (match_operand:SVE_ALL 0 "register_operand" "=w")
(unspec:SVE_ALL
@@ -1255,6 +1277,17 @@
"ld1<Vesize>\t%0.<Vctype>, %2/z, %1"
)
+;; Predicated LD1 (multi), with a count as predicate.
+(define_insn "@aarch64_ld1<mode>"
+ [(set (match_operand:SVE_FULLx24 0 "aligned_register_operand" "=Uw<vector_count>")
+ (unspec:SVE_FULLx24
+ [(match_operand:VNx16BI 2 "register_operand" "Uph")
+ (match_operand:SVE_FULLx24 1 "memory_operand" "m")]
+ UNSPEC_LD1_SVE_COUNT))]
+ "TARGET_SME2 && TARGET_STREAMING"
+ "ld1<Vesize>\t%0, %K2/z, %1"
+)
+
;; Unpredicated LD[234].
(define_expand "vec_load_lanes<mode><vsingle>"
[(set (match_operand:SVE_STRUCT 0 "register_operand")
@@ -1332,7 +1365,7 @@
(match_operand:SVE_FULL 1 "aarch64_sve_ld<fn>f1_operand" "Ut<fn>")
(reg:VNx16BI FFRT_REGNUM)]
SVE_LDFF1_LDNF1))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
"ld<fn>f1<Vesize>\t%0.<Vetype>, %2/z, %1"
)
@@ -1366,7 +1399,9 @@
(reg:VNx16BI FFRT_REGNUM)]
SVE_LDFF1_LDNF1))]
UNSPEC_PRED_X))]
- "TARGET_SVE && (~<SVE_HSDI:narrower_mask> & <SVE_PARTIAL_I:self_mask>) == 0"
+ "TARGET_SVE
+ && TARGET_NON_STREAMING
+ && (~<SVE_HSDI:narrower_mask> & <SVE_PARTIAL_I:self_mask>) == 0"
"ld<fn>f1<ANY_EXTEND:s><SVE_PARTIAL_I:Vesize>\t%0.<SVE_HSDI:Vctype>, %2/z, %1"
"&& !CONSTANT_P (operands[3])"
{
@@ -1384,7 +1419,7 @@
;; - LDNT1W
;; -------------------------------------------------------------------------
-;; Predicated contiguous non-temporal load.
+;; Predicated contiguous non-temporal load (single).
(define_insn "@aarch64_ldnt1<mode>"
[(set (match_operand:SVE_FULL 0 "register_operand" "=w")
(unspec:SVE_FULL
@@ -1395,6 +1430,17 @@
"ldnt1<Vesize>\t%0.<Vetype>, %2/z, %1"
)
+;; Predicated contiguous non-temporal load (multi).
+(define_insn "@aarch64_ldnt1<mode>"
+ [(set (match_operand:SVE_FULLx24 0 "aligned_register_operand" "=Uw<vector_count>")
+ (unspec:SVE_FULLx24
+ [(match_operand:VNx16BI 2 "register_operand" "Uph")
+ (match_operand:SVE_FULLx24 1 "memory_operand" "m")]
+ UNSPEC_LDNT1_SVE_COUNT))]
+ "TARGET_SVE"
+ "ldnt1<Vesize>\t%0, %K2/z, %1"
+)
+
;; -------------------------------------------------------------------------
;; ---- Normal gather loads
;; -------------------------------------------------------------------------
@@ -1414,7 +1460,7 @@
(match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{
operands[5] = aarch64_ptrue_reg (<VPRED>mode);
}
@@ -1432,7 +1478,7 @@
(match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [cons: =0, 1, 2, 3, 4, 5 ]
[&w, Z, w, Ui1, Ui1, Upl] ld1<Vesize>\t%0.s, %5/z, [%2.s]
[?w, Z, 0, Ui1, Ui1, Upl] ^
@@ -1461,7 +1507,7 @@
(match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [cons: =0, 1, 2, 3, 4, 5]
[&w, Z, w, i, Ui1, Upl] ld1<Vesize>\t%0.d, %5/z, [%2.d]
[?w, Z, 0, i, Ui1, Upl] ^
@@ -1489,7 +1535,7 @@
(match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [cons: =0, 1, 2, 3, 4, 5]
[&w, rk, w, i, Ui1, Upl ] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, <su>xtw]
[?w, rk, 0, i, Ui1, Upl ] ^
@@ -1519,7 +1565,7 @@
(match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [cons: =0, 1, 2, 3, 4, 5]
[&w, rk, w, i, Ui1, Upl ] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw]
[?w, rk, 0, i, Ui1, Upl ] ^
@@ -1546,7 +1592,7 @@
(match_operand:DI 4 "aarch64_gather_scale_operand_<Vesize>")
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [cons: =0, 1, 2, 3, 4, 5]
[&w, rk, w, i, Ui1, Upl ] ld1<Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw]
[?w, rk, 0, i, Ui1, Upl ] ^
@@ -1583,7 +1629,9 @@
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
UNSPEC_PRED_X))]
- "TARGET_SVE && (~<SVE_4HSI:narrower_mask> & <SVE_4BHI:self_mask>) == 0"
+ "TARGET_SVE
+ && TARGET_NON_STREAMING
+ && (~<SVE_4HSI:narrower_mask> & <SVE_4BHI:self_mask>) == 0"
{@ [cons: =0, 1, 2, 3, 4, 5, 6]
[&w, Z, w, Ui1, Ui1, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_4BHI:Vesize>\t%0.s, %5/z, [%2.s]
[?w, Z, 0, Ui1, Ui1, Upl, UplDnm] ^
@@ -1620,7 +1668,9 @@
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
UNSPEC_PRED_X))]
- "TARGET_SVE && (~<SVE_2HSDI:narrower_mask> & <SVE_2BHSI:self_mask>) == 0"
+ "TARGET_SVE
+ && TARGET_NON_STREAMING
+ && (~<SVE_2HSDI:narrower_mask> & <SVE_2BHSI:self_mask>) == 0"
{@ [cons: =0, 1, 2, 3, 4, 5, 6]
[&w, Z, w, i, Ui1, Upl, UplDnm] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%2.d]
[?w, Z, 0, i, Ui1, Upl, UplDnm] ^
@@ -1656,7 +1706,9 @@
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
UNSPEC_PRED_X))]
- "TARGET_SVE && (~<SVE_2HSDI:narrower_mask> & <SVE_2BHSI:self_mask>) == 0"
+ "TARGET_SVE
+ && TARGET_NON_STREAMING
+ && (~<SVE_2HSDI:narrower_mask> & <SVE_2BHSI:self_mask>) == 0"
{@ [cons: =0, 1, 2, 3, 4, 5]
[&w, rk, w, i, Ui1, Upl ] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, <ANY_EXTEND2:su>xtw]
[?w, rk, 0, i, Ui1, Upl ] ^
@@ -1691,7 +1743,9 @@
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
UNSPEC_PRED_X))]
- "TARGET_SVE && (~<SVE_2HSDI:narrower_mask> & <SVE_2BHSI:self_mask>) == 0"
+ "TARGET_SVE
+ && TARGET_NON_STREAMING
+ && (~<SVE_2HSDI:narrower_mask> & <SVE_2BHSI:self_mask>) == 0"
{@ [cons: =0, 1, 2, 3, 4, 5]
[&w, rk, w, i, Ui1, Upl ] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw]
[?w, rk, 0, i, Ui1, Upl ] ^
@@ -1723,7 +1777,9 @@
(mem:BLK (scratch))]
UNSPEC_LD1_GATHER))]
UNSPEC_PRED_X))]
- "TARGET_SVE && (~<SVE_2HSDI:narrower_mask> & <SVE_2BHSI:self_mask>) == 0"
+ "TARGET_SVE
+ && TARGET_NON_STREAMING
+ && (~<SVE_2HSDI:narrower_mask> & <SVE_2BHSI:self_mask>) == 0"
{@ [cons: =0, 1, 2, 3, 4, 5]
[&w, rk, w, i, Ui1, Upl ] ld1<ANY_EXTEND:s><SVE_2BHSI:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw]
[?w, rk, 0, i, Ui1, Upl ] ^
@@ -1757,7 +1813,7 @@
(mem:BLK (scratch))
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [cons: =0, 1, 2, 3, 4, 5 ]
[&w, Z, w, i, Ui1, Upl] ldff1w\t%0.s, %5/z, [%2.s]
[?w, Z, 0, i, Ui1, Upl] ^
@@ -1787,7 +1843,7 @@
(mem:BLK (scratch))
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [cons: =0, 1, 2, 3, 4, 5 ]
[&w, Z, w, i, Ui1, Upl ] ldff1d\t%0.d, %5/z, [%2.d]
[?w, Z, 0, i, Ui1, Upl ] ^
@@ -1817,7 +1873,7 @@
(mem:BLK (scratch))
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [cons: =0, 1, 2, 3, 4, 5]
[&w, rk, w, i, Ui1, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d, sxtw]
[?w, rk, 0, i, Ui1, Upl ] ^
@@ -1844,7 +1900,7 @@
(mem:BLK (scratch))
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [cons: =0, 1, 2, 3, 4, 5]
[&w, rk, w, i, Ui1, Upl ] ldff1d\t%0.d, %5/z, [%1, %2.d, uxtw]
[?w, rk, 0, i, Ui1, Upl ] ^
@@ -1882,7 +1938,7 @@
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
UNSPEC_PRED_X))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [cons: =0, 1, 2, 3, 4, 5, 6]
[&w, Z, w, i, Ui1, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx4_NARROW:Vesize>\t%0.s, %5/z, [%2.s]
[?w, Z, 0, i, Ui1, Upl, UplDnm] ^
@@ -1920,7 +1976,7 @@
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
UNSPEC_PRED_X))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [cons: =0, 1, 2, 3, 4, 5, 6]
[&w, Z, w, i, Ui1, Upl, UplDnm] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%2.d]
[?w, Z, 0, i, Ui1, Upl, UplDnm] ^
@@ -1958,7 +2014,7 @@
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
UNSPEC_PRED_X))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [cons: =0, 1, 2, 3, 4, 5]
[&w, rk, w, i, Ui1, Upl ] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, sxtw]
[?w, rk, 0, i, Ui1, Upl ] ^
@@ -1990,7 +2046,7 @@
(reg:VNx16BI FFRT_REGNUM)]
UNSPEC_LDFF1_GATHER))]
UNSPEC_PRED_X))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [cons: =0, 1, 2, 3, 4, 5]
[&w, rk, w, i, Ui1, Upl ] ldff1<ANY_EXTEND:s><VNx2_NARROW:Vesize>\t%0.d, %5/z, [%1, %2.d, uxtw]
[?w, rk, 0, i, Ui1, Upl ] ^
@@ -2068,7 +2124,7 @@
UNSPEC_SVE_PREFETCH_GATHER)
(match_operand:DI 7 "const_int_operand")
(match_operand:DI 8 "const_int_operand"))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{
static const char *const insns[][2] = {
"prf<SVE_FULL_I:Vesize>", "%0, [%2.s]",
@@ -2097,7 +2153,7 @@
UNSPEC_SVE_PREFETCH_GATHER)
(match_operand:DI 7 "const_int_operand")
(match_operand:DI 8 "const_int_operand"))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{
static const char *const insns[][2] = {
"prf<SVE_FULL_I:Vesize>", "%0, [%2.d]",
@@ -2128,7 +2184,7 @@
UNSPEC_SVE_PREFETCH_GATHER)
(match_operand:DI 7 "const_int_operand")
(match_operand:DI 8 "const_int_operand"))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{
static const char *const insns[][2] = {
"prfb", "%0, [%1, %2.d, sxtw]",
@@ -2158,7 +2214,7 @@
UNSPEC_SVE_PREFETCH_GATHER)
(match_operand:DI 7 "const_int_operand")
(match_operand:DI 8 "const_int_operand"))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{
static const char *const insns[][2] = {
"prfb", "%0, [%1, %2.d, uxtw]",
@@ -2195,7 +2251,7 @@
;; - ST4W
;; -------------------------------------------------------------------------
-;; Predicated ST1.
+;; Predicated ST1 (single).
(define_insn "maskstore<mode><vpred>"
[(set (match_operand:SVE_ALL 0 "memory_operand" "+m")
(unspec:SVE_ALL
@@ -2207,6 +2263,17 @@
"st1<Vesize>\t%1.<Vctype>, %2, %0"
)
+(define_insn "@aarch64_st1<mode>"
+ [(set (match_operand:SVE_FULLx24 0 "memory_operand" "+m")
+ (unspec:SVE_FULLx24
+ [(match_operand:VNx16BI 2 "register_operand" "Uph")
+ (match_operand:SVE_FULLx24 1 "aligned_register_operand" "Uw<vector_count>")
+ (match_dup 0)]
+ UNSPEC_ST1_SVE_COUNT))]
+ "TARGET_SME2 && TARGET_STREAMING"
+ "st1<Vesize>\t%1, %K2, %0"
+)
+
;; Unpredicated ST[234]. This is always a full update, so the dependence
;; on the old value of the memory location (via (match_dup 0)) is redundant.
;; There doesn't seem to be any obvious benefit to treating the all-true
@@ -2306,6 +2373,17 @@
"stnt1<Vesize>\t%1.<Vetype>, %2, %0"
)
+(define_insn "@aarch64_stnt1<mode>"
+ [(set (match_operand:SVE_FULLx24 0 "memory_operand" "+m")
+ (unspec:SVE_FULLx24
+ [(match_operand:VNx16BI 2 "register_operand" "Uph")
+ (match_operand:SVE_FULLx24 1 "aligned_register_operand" "Uw<vector_count>")
+ (match_dup 0)]
+ UNSPEC_STNT1_SVE_COUNT))]
+ "TARGET_SME2 && TARGET_STREAMING"
+ "stnt1<Vesize>\t%1, %K2, %0"
+)
+
;; -------------------------------------------------------------------------
;; ---- Normal scatter stores
;; -------------------------------------------------------------------------
@@ -2325,7 +2403,7 @@
(match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>")
(match_operand:SVE_24 4 "register_operand")]
UNSPEC_ST1_SCATTER))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{
operands[5] = aarch64_ptrue_reg (<VPRED>mode);
}
@@ -2343,7 +2421,7 @@
(match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>")
(match_operand:SVE_4 4 "register_operand")]
UNSPEC_ST1_SCATTER))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [ cons: 0 , 1 , 2 , 3 , 4 , 5 ]
[ Z , w , Ui1 , Ui1 , w , Upl ] st1<Vesize>\t%4.s, %5, [%1.s]
[ vgw , w , Ui1 , Ui1 , w , Upl ] st1<Vesize>\t%4.s, %5, [%1.s, #%0]
@@ -2366,7 +2444,7 @@
(match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>")
(match_operand:SVE_2 4 "register_operand")]
UNSPEC_ST1_SCATTER))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [ cons: 0 , 1 , 3 , 4 , 5 ]
[ Z , w , Ui1 , w , Upl ] st1<Vesize>\t%4.d, %5, [%1.d]
[ vgd , w , Ui1 , w , Upl ] st1<Vesize>\t%4.d, %5, [%1.d, #%0]
@@ -2390,7 +2468,7 @@
(match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>")
(match_operand:SVE_2 4 "register_operand")]
UNSPEC_ST1_SCATTER))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [ cons: 0 , 1 , 3 , 4 , 5 ]
[ rk , w , Ui1 , w , Upl ] st1<Vesize>\t%4.d, %5, [%0, %1.d, <su>xtw]
[ rk , w , i , w , Upl ] st1<Vesize>\t%4.d, %5, [%0, %1.d, <su>xtw %p3]
@@ -2418,7 +2496,7 @@
(match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>")
(match_operand:SVE_2 4 "register_operand")]
UNSPEC_ST1_SCATTER))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [ cons: 0 , 1 , 3 , 4 , 5 ]
[ rk , w , Ui1 , w , Upl ] st1<Vesize>\t%4.d, %5, [%0, %1.d, sxtw]
[ rk , w , i , w , Upl ] st1<Vesize>\t%4.d, %5, [%0, %1.d, sxtw %p3]
@@ -2443,7 +2521,7 @@
(match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>")
(match_operand:SVE_2 4 "register_operand")]
UNSPEC_ST1_SCATTER))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [ cons: 0 , 1 , 3 , 4 , 5 ]
[ rk , w , Ui1 , w , Upl ] st1<Vesize>\t%4.d, %5, [%0, %1.d, uxtw]
[ rk , w , i , w , Upl ] st1<Vesize>\t%4.d, %5, [%0, %1.d, uxtw %p3]
@@ -2472,7 +2550,7 @@
(truncate:VNx4_NARROW
(match_operand:VNx4_WIDE 4 "register_operand"))]
UNSPEC_ST1_SCATTER))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [ cons: 1 , 2 , 4 , 5 ]
[ w , Ui1 , w , Upl ] st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%1.s]
[ w , Ui1 , w , Upl ] st1<VNx4_NARROW:Vesize>\t%4.s, %5, [%1.s, #%0]
@@ -2496,7 +2574,7 @@
(truncate:VNx2_NARROW
(match_operand:VNx2_WIDE 4 "register_operand"))]
UNSPEC_ST1_SCATTER))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [ cons: 1 , 4 , 5 ]
[ w , w , Upl ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%1.d]
[ w , w , Upl ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%1.d, #%0]
@@ -2522,7 +2600,7 @@
(truncate:VNx2_NARROW
(match_operand:VNx2_WIDE 4 "register_operand"))]
UNSPEC_ST1_SCATTER))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [ cons: 0 , 1 , 4 , 5 ]
[ rk , w , w , Upl ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, sxtw]
[ rk , w , w , Upl ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, sxtw %p3]
@@ -2547,7 +2625,7 @@
(truncate:VNx2_NARROW
(match_operand:VNx2_WIDE 4 "register_operand"))]
UNSPEC_ST1_SCATTER))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [ cons: 0 , 1 , 4 , 5 ]
[ rk , w , w , Upl ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, uxtw]
[ rk , w , w , Upl ] st1<VNx2_NARROW:Vesize>\t%4.d, %5, [%0, %1.d, uxtw %p3]
@@ -2727,7 +2805,7 @@
(match_operand:OI 1 "aarch64_sve_ld1ro_operand_<Vesize>"
"UO<Vesize>")]
UNSPEC_LD1RO))]
- "TARGET_SVE_F64MM"
+ "TARGET_SVE_F64MM && TARGET_NON_STREAMING"
{
operands[1] = gen_rtx_MEM (<VEL>mode, XEXP (operands[1], 0));
return "ld1ro<Vesize>\t%0.<Vetype>, %2/z, %1";
@@ -3971,7 +4049,7 @@
[(match_operand:SVE_FULL_SDI 1 "register_operand" "w")
(match_operand:SVE_FULL_SDI 2 "register_operand" "w")]
UNSPEC_ADR))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
"adr\t%0.<Vetype>, [%1.<Vetype>, %2.<Vetype>]"
)
@@ -3987,7 +4065,7 @@
(match_operand:VNx2DI 2 "register_operand" "w")))]
UNSPEC_PRED_X)]
UNSPEC_ADR))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
"adr\t%0.d, [%1.d, %2.d, sxtw]"
"&& !CONSTANT_P (operands[3])"
{
@@ -4004,7 +4082,7 @@
(match_operand:VNx2DI 2 "register_operand" "w")
(match_operand:VNx2DI 3 "aarch64_sve_uxtw_immediate"))]
UNSPEC_ADR))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
"adr\t%0.d, [%1.d, %2.d, uxtw]"
)
@@ -4016,7 +4094,7 @@
(match_operand:VNx2DI 2 "register_operand" "w")
(match_operand:VNx2DI 3 "aarch64_sve_uxtw_immediate"))
(match_operand:VNx2DI 1 "register_operand" "w")))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
"adr\t%0.d, [%1.d, %2.d, uxtw]"
)
@@ -4031,7 +4109,7 @@
(match_operand:SVE_FULL_SDI 3 "const_1_to_3_operand"))]
UNSPEC_PRED_X)
(match_operand:SVE_FULL_SDI 1 "register_operand")))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{
operands[4] = CONSTM1_RTX (<VPRED>mode);
}
@@ -4047,7 +4125,7 @@
(match_operand:SVE_24I 3 "const_1_to_3_operand"))]
UNSPEC_PRED_X)
(match_operand:SVE_24I 1 "register_operand" "w")))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
"adr\t%0.<Vctype>, [%1.<Vctype>, %2.<Vctype>, lsl %3]"
"&& !CONSTANT_P (operands[4])"
{
@@ -4071,7 +4149,7 @@
(match_operand:VNx2DI 3 "const_1_to_3_operand"))]
UNSPEC_PRED_X)
(match_operand:VNx2DI 1 "register_operand" "w")))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
"adr\t%0.d, [%1.d, %2.d, sxtw %3]"
"&& (!CONSTANT_P (operands[4]) || !CONSTANT_P (operands[5]))"
{
@@ -4092,7 +4170,7 @@
(match_operand:VNx2DI 3 "const_1_to_3_operand"))]
UNSPEC_PRED_X)
(match_operand:VNx2DI 1 "register_operand" "w")))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
"adr\t%0.d, [%1.d, %2.d, uxtw %3]"
"&& !CONSTANT_P (operands[5])"
{
@@ -7099,21 +7177,25 @@
)
;; Four-element integer dot-product by selected lanes with accumulation.
-(define_insn "@aarch64_<sur>dot_prod_lane<vsi2qi>"
+(define_insn "@aarch64_<sur>dot_prod_lane<SVE_FULL_SDI:mode><SVE_FULL_BHI:mode>"
[(set (match_operand:SVE_FULL_SDI 0 "register_operand")
(plus:SVE_FULL_SDI
(unspec:SVE_FULL_SDI
- [(match_operand:<VSI2QI> 1 "register_operand")
- (unspec:<VSI2QI>
- [(match_operand:<VSI2QI> 2 "register_operand")
+ [(match_operand:SVE_FULL_BHI 1 "register_operand")
+ (unspec:SVE_FULL_BHI
+ [(match_operand:SVE_FULL_BHI 2 "register_operand")
(match_operand:SI 3 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)]
DOTPROD)
(match_operand:SVE_FULL_SDI 4 "register_operand")))]
- "TARGET_SVE"
- {@ [ cons: =0 , 1 , 2 , 4 ; attrs: movprfx ]
- [ w , w , <sve_lane_con> , 0 ; * ] <sur>dot\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>[%3]
- [ ?&w , w , <sve_lane_con> , w ; yes ] movprfx\t%0, %4\;<sur>dot\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>[%3]
+ "TARGET_SVE
+ && (<SVE_FULL_SDI:elem_bits> == <SVE_FULL_BHI:elem_bits> * 4
+ || (TARGET_STREAMING_SME2
+ && <SVE_FULL_SDI:elem_bits> == 32
+ && <SVE_FULL_BHI:elem_bits> == 16))"
+ {@ [ cons: =0 , 1 , 2 , 4 ; attrs: movprfx ]
+ [ w , w , <SVE_FULL_SDI:sve_lane_con> , 0 ; * ] <sur>dot\t%0.<SVE_FULL_SDI:Vetype>, %1.<SVE_FULL_BHI:Vetype>, %2.<SVE_FULL_BHI:Vetype>[%3]
+ [ ?&w , w , <SVE_FULL_SDI:sve_lane_con> , w ; yes ] movprfx\t%0, %4\;<sur>dot\t%0.<SVE_FULL_SDI:Vetype>, %1.<SVE_FULL_BHI:Vetype>, %2.<SVE_FULL_BHI:Vetype>[%3]
}
)
@@ -7132,13 +7214,13 @@
}
)
-(define_insn "@aarch64_<sur>dot_prod_lane<vsi2qi>"
+(define_insn "@aarch64_<sur>dot_prod_lane<VNx4SI_ONLY:mode><VNx16QI_ONLY:mode>"
[(set (match_operand:VNx4SI_ONLY 0 "register_operand")
(plus:VNx4SI_ONLY
(unspec:VNx4SI_ONLY
- [(match_operand:<VSI2QI> 1 "register_operand")
- (unspec:<VSI2QI>
- [(match_operand:<VSI2QI> 2 "register_operand")
+ [(match_operand:VNx16QI_ONLY 1 "register_operand")
+ (unspec:VNx16QI_ONLY
+ [(match_operand:VNx16QI_ONLY 2 "register_operand")
(match_operand:SI 3 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)]
DOTPROD_I8MM)
@@ -7197,7 +7279,7 @@
(match_operand:<VSI2QI> 3 "register_operand")]
MATMUL)
(match_operand:VNx4SI_ONLY 1 "register_operand")))]
- "TARGET_SVE_I8MM"
+ "TARGET_SVE_I8MM && TARGET_NON_STREAMING"
{@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
[ w , 0 , w , w ; * ] <sur>mmla\t%0.s, %2.b, %3.b
[ ?&w , w , w , w ; yes ] movprfx\t%0, %1\;<sur>mmla\t%0.s, %2.b, %3.b
@@ -7724,6 +7806,8 @@
;; - BFDOT (BF16)
;; - BFMLALB (BF16)
;; - BFMLALT (BF16)
+;; - BFMLSLB (SME2)
+;; - BFMLSLT (SME2)
;; - BFMMLA (BF16)
;; -------------------------------------------------------------------------
@@ -7772,7 +7856,7 @@
(match_operand:SVE_MATMULF 3 "register_operand")
(match_operand:SVE_MATMULF 1 "register_operand")]
FMMLA))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
[ w , 0 , w , w ; * ] <sve_fp_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
[ ?&w , w , w , w ; yes ] movprfx\t%0, %1\;<sve_fp_op>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
@@ -8205,11 +8289,18 @@
;; - WHILEWR (SVE2)
;; -------------------------------------------------------------------------
+(define_constants [
+ (SVE_WHILE_B 0)
+ (SVE_WHILE_B_X2 1)
+ (SVE_WHILE_C 2)
+])
+
;; Set element I of the result if (cmp (plus operand1 J) operand2) is
;; true for all J in [0, I].
(define_insn "@while_<while_optab_cmp><GPI:mode><PRED_ALL:mode>"
[(set (match_operand:PRED_ALL 0 "register_operand" "=Upa")
- (unspec:PRED_ALL [(match_operand:GPI 1 "aarch64_reg_or_zero" "rZ")
+ (unspec:PRED_ALL [(const_int SVE_WHILE_B)
+ (match_operand:GPI 1 "aarch64_reg_or_zero" "rZ")
(match_operand:GPI 2 "aarch64_reg_or_zero" "rZ")]
SVE_WHILE))
(clobber (reg:CC_NZC CC_REGNUM))]
@@ -8227,12 +8318,14 @@
(match_operand 4)
(const_int SVE_KNOWN_PTRUE)
(unspec:PRED_ALL
- [(match_operand:GPI 1 "aarch64_reg_or_zero" "rZ")
+ [(const_int SVE_WHILE_B)
+ (match_operand:GPI 1 "aarch64_reg_or_zero" "rZ")
(match_operand:GPI 2 "aarch64_reg_or_zero" "rZ")]
SVE_WHILE)]
UNSPEC_PTEST))
(set (match_operand:PRED_ALL 0 "register_operand" "=Upa")
- (unspec:PRED_ALL [(match_dup 1)
+ (unspec:PRED_ALL [(const_int SVE_WHILE_B)
+ (match_dup 1)
(match_dup 2)]
SVE_WHILE))]
"TARGET_SVE"
@@ -8254,7 +8347,8 @@
(match_operand 4)
(const_int SVE_KNOWN_PTRUE)
(unspec:PRED_ALL
- [(match_operand:GPI 1 "aarch64_reg_or_zero" "rZ")
+ [(const_int SVE_WHILE_B)
+ (match_operand:GPI 1 "aarch64_reg_or_zero" "rZ")
(match_operand:GPI 2 "aarch64_reg_or_zero" "rZ")]
SVE_WHILE)]
UNSPEC_PTEST))
@@ -8841,7 +8935,7 @@
(match_operand:<VEL> 1 "register_operand")
(match_operand:SVE_FULL_F 2 "register_operand")]
UNSPEC_FADDA))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{
operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
@@ -8854,7 +8948,7 @@
(match_operand:<VEL> 1 "register_operand" "0")
(match_operand:SVE_FULL_F 2 "register_operand" "w")]
UNSPEC_FADDA))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
"fadda\t%<Vetype>0, %3, %<Vetype>0, %2.<Vetype>"
)
@@ -8908,7 +9002,7 @@
[(match_operand:<VPRED> 1 "register_operand" "Upl")
(match_operand:SVE_FULL_SD 2 "register_operand" "w")]
UNSPEC_SVE_COMPACT))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
"compact\t%0.<Vetype>, %1, %2.<Vetype>"
)
diff --git a/gcc/config/aarch64/aarch64-sve2.md b/gcc/config/aarch64/aarch64-sve2.md
index ffa964d..29c41ca 100644
--- a/gcc/config/aarch64/aarch64-sve2.md
+++ b/gcc/config/aarch64/aarch64-sve2.md
@@ -25,12 +25,24 @@
;; ---- Non-temporal gather loads
;; ---- Non-temporal scatter stores
;;
+;; == Predicate manipulation
+;; ---- [PRED] Predicate-as-counter PTRUE
+;; ---- [PRED] Predicate extraction
+;; ---- [PRED] Predicate selection
+;; ---- [PRED] Predicate count
+;;
+;; == Uniform unary arithmnetic
+;; ---- [FP] Multi-register unary operations
+;;
;; == Uniform binary arithmnetic
+;; ---- [INT] Multi-register operations
+;; ---- [INT] Clamp to minimum/maximum
;; ---- [INT] Multiplication
;; ---- [INT] Scaled high-part multiplication
;; ---- [INT] General binary arithmetic that maps to unspecs
;; ---- [INT] Saturating binary arithmetic
;; ---- [INT] Saturating left shifts
+;; ---- [FP] Clamp to minimum/maximum
;;
;; == Uniform ternary arithmnetic
;; ---- [INT] General ternary arithmetic that maps to unspecs
@@ -42,16 +54,20 @@
;; ---- [INT] Sum of absolute differences
;;
;; == Extending arithmetic
+;; ---- [INT] Multi-register widening conversions
;; ---- [INT] Wide binary arithmetic
;; ---- [INT] Long binary arithmetic
;; ---- [INT] Long left shifts
;; ---- [INT] Long binary arithmetic with accumulation
+;; ---- [FP] Multi-register operations
;; ---- [FP] Long multiplication with accumulation
;;
;; == Narrowing arithnetic
;; ---- [INT] Narrowing unary arithmetic
+;; ---- [INT] Multi-vector narrowing unary arithmetic
;; ---- [INT] Narrowing binary arithmetic
;; ---- [INT] Narrowing right shifts
+;; ---- [INT] Multi-vector narrowing right shifts
;;
;; == Pairwise arithmetic
;; ---- [INT] Pairwise arithmetic
@@ -66,14 +82,23 @@
;; == Conversions
;; ---- [FP<-FP] Widening conversions
;; ---- [FP<-FP] Narrowing conversions
+;; ---- [FP<-FP] Multi-vector narrowing conversions
+;; ---- [FP<-INT] Multi-vector conversions
+;; ---- [INT<-FP] Multi-vector conversions
;;
;; == Other arithmetic
;; ---- [INT] Reciprocal approximation
;; ---- [INT<-FP] Base-2 logarithm
;; ---- [INT] Polynomial multiplication
;;
+;; == Comparisons and selects
+;; ---- [INT,FP] Select based on predicates as counters
+;; ---- [INT] While tests
+;;
;; == Permutation
+;; ---- [INT,FP] Reversal
;; ---- [INT,FP] General permutes
+;; ---- [INT,FP] Multi-register permutes
;; ---- [INT] Optional bit-permute extensions
;;
;; == General
@@ -109,7 +134,7 @@
(match_operand:<V_INT_EQUIV> 3 "register_operand")
(mem:BLK (scratch))]
UNSPEC_LDNT1_GATHER))]
- "TARGET_SVE2"
+ "TARGET_SVE2 && TARGET_NON_STREAMING"
{@ [cons: =0, 1, 2, 3]
[&w, Upl, Z, w ] ldnt1<Vesize>\t%0.<Vetype>, %1/z, [%3.<Vetype>]
[?w, Upl, Z, 0 ] ^
@@ -132,6 +157,7 @@
UNSPEC_LDNT1_GATHER))]
UNSPEC_PRED_X))]
"TARGET_SVE2
+ && TARGET_NON_STREAMING
&& (~<SVE_FULL_SDI:narrower_mask> & <SVE_PARTIAL_I:self_mask>) == 0"
{@ [cons: =0, 1, 2, 3, 4]
[&w, Upl, Z, w, UplDnm] ldnt1<ANY_EXTEND:s><SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_SDI:Vetype>, %1/z, [%3.<SVE_FULL_SDI:Vetype>]
@@ -165,7 +191,7 @@
(match_operand:SVE_FULL_SD 3 "register_operand")]
UNSPEC_STNT1_SCATTER))]
- "TARGET_SVE"
+ "TARGET_SVE && TARGET_NON_STREAMING"
{@ [ cons: 0 , 1 , 2 , 3 ]
[ Upl , Z , w , w ] stnt1<Vesize>\t%3.<Vetype>, %0, [%2.<Vetype>]
[ Upl , r , w , w ] stnt1<Vesize>\t%3.<Vetype>, %0, [%2.<Vetype>, %1]
@@ -183,6 +209,7 @@
(match_operand:SVE_FULL_SDI 3 "register_operand"))]
UNSPEC_STNT1_SCATTER))]
"TARGET_SVE2
+ && TARGET_NON_STREAMING
&& (~<SVE_FULL_SDI:narrower_mask> & <SVE_PARTIAL_I:self_mask>) == 0"
{@ [ cons: 0 , 1 , 2 , 3 ]
[ Upl , Z , w , w ] stnt1<SVE_PARTIAL_I:Vesize>\t%3.<SVE_FULL_SDI:Vetype>, %0, [%2.<SVE_FULL_SDI:Vetype>]
@@ -191,10 +218,256 @@
)
;; =========================================================================
+;; == Predicate manipulation
+;; =========================================================================
+
+;; -------------------------------------------------------------------------
+;; ---- [PRED] Predicate-as-counter PTRUE
+;; -------------------------------------------------------------------------
+;; - PTRUE (predicate-as-counter form)
+;; -------------------------------------------------------------------------
+
+(define_insn "@aarch64_sve_ptrue_c<BHSD_BITS>"
+ [(set (match_operand:VNx16BI 0 "register_operand" "=Uph")
+ (unspec:VNx16BI [(const_int BHSD_BITS)] UNSPEC_PTRUE_C))]
+ "TARGET_STREAMING_SME2"
+ "ptrue\t%K0.<bits_etype>"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- [PRED] Predicate extraction
+;; -------------------------------------------------------------------------
+;; Includes
+;; - PEXT
+;; -------------------------------------------------------------------------
+
+(define_insn "@aarch64_sve_pext<BHSD_BITS>"
+ [(set (match_operand:VNx16BI 0 "register_operand" "=Upa")
+ (unspec:VNx16BI
+ [(match_operand:VNx16BI 1 "register_operand" "Uph")
+ (match_operand:DI 2 "const_int_operand")
+ (const_int BHSD_BITS)]
+ UNSPEC_PEXT))]
+ "TARGET_STREAMING_SME2"
+ "pext\t%0.<bits_etype>, %K1[%2]"
+)
+
+(define_insn "@aarch64_sve_pext<BHSD_BITS>x2"
+ [(set (match_operand:VNx32BI 0 "register_operand" "=Up2")
+ (unspec:VNx32BI
+ [(match_operand:VNx16BI 1 "register_operand" "Uph")
+ (match_operand:DI 2 "const_int_operand")
+ (const_int BHSD_BITS)]
+ UNSPEC_PEXTx2))]
+ "TARGET_STREAMING_SME2"
+ "pext\t{%S0.<bits_etype>, %T0.<bits_etype>}, %K1[%2]"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- [PRED] Predicate selection
+;; -------------------------------------------------------------------------
+;; Includes
+;; - PSEL
+;; -------------------------------------------------------------------------
+
+(define_insn "@aarch64_sve_psel<BHSD_BITS>"
+ [(set (match_operand:VNx16BI 0 "register_operand" "=Upa")
+ (unspec:VNx16BI
+ [(match_operand:VNx16BI 1 "register_operand" "Upa")
+ (match_operand:VNx16BI 2 "register_operand" "Upa")
+ (match_operand:SI 3 "register_operand" "Ucj")
+ (const_int BHSD_BITS)]
+ UNSPEC_PSEL))]
+ "TARGET_STREAMING_SME2"
+ "psel\t%0, %1, %2.<bits_etype>[%w3, 0]"
+)
+
+(define_insn "*aarch64_sve_psel<BHSD_BITS>_plus"
+ [(set (match_operand:VNx16BI 0 "register_operand" "=Upa")
+ (unspec:VNx16BI
+ [(match_operand:VNx16BI 1 "register_operand" "Upa")
+ (match_operand:VNx16BI 2 "register_operand" "Upa")
+ (plus:SI
+ (match_operand:SI 3 "register_operand" "Ucj")
+ (match_operand:SI 4 "const_int_operand"))
+ (const_int BHSD_BITS)]
+ UNSPEC_PSEL))]
+ "TARGET_STREAMING_SME2
+ && UINTVAL (operands[4]) < 128 / <BHSD_BITS>"
+ "psel\t%0, %1, %2.<bits_etype>[%w3, %4]"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- [PRED] Predicate count
+;; -------------------------------------------------------------------------
+;; Includes
+;; - CNTP (predicate as counter)
+;; -------------------------------------------------------------------------
+
+(define_insn "@aarch64_sve_cntp_c<BHSD_BITS>"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI
+ [(match_operand:VNx16BI 1 "register_operand" "Upa")
+ (match_operand:DI 2 "const_int_operand")
+ (const_int BHSD_BITS)]
+ UNSPEC_CNTP_C))]
+ "TARGET_STREAMING_SME2"
+ "cntp\t%x0, %K1.<bits_etype>, vlx%2"
+)
+
+;; =========================================================================
+;; == Uniform unary arithmnetic
+;; =========================================================================
+
+;; -------------------------------------------------------------------------
+;; ---- [FP] Multi-register unary operations
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - FRINTA
+;; - FRINTM
+;; - FRINTN
+;; - FRINTP
+;; -------------------------------------------------------------------------
+
+(define_insn "<frint_pattern><mode>2"
+ [(set (match_operand:SVE_SFx24 0 "aligned_register_operand" "=Uw<vector_count>")
+ (unspec:SVE_SFx24
+ [(match_operand:SVE_SFx24 1 "aligned_register_operand" "Uw<vector_count>")]
+ SVE2_SFx24_UNARY))]
+ "TARGET_STREAMING_SME2"
+ "frint<frint_suffix>\t%0, %1"
+)
+
+;; =========================================================================
;; == Uniform binary arithmnetic
;; =========================================================================
;; -------------------------------------------------------------------------
+;; ---- [INT] Multi-register operations
+;; -------------------------------------------------------------------------
+;; Includes the multi-register forms of:
+;; - ADD
+;; - SMAX
+;; - SMIN
+;; - SQMULH
+;; - SRSHL
+;; - UMAX
+;; - UMIN
+;; - URSHL
+;; -------------------------------------------------------------------------
+
+(define_expand "<optab><mode>3"
+ [(set (match_operand:SVE_Ix24 0 "aligned_register_operand" "=Uw<vector_count>")
+ (SVE_INT_BINARY_MULTI:SVE_Ix24
+ (match_operand:SVE_Ix24 1 "aligned_register_operand" "Uw<vector_count>")
+ (match_operand:SVE_Ix24 2 "aligned_register_operand" "Uw<vector_count>")))]
+ "TARGET_STREAMING_SME2"
+)
+
+(define_insn "*<optab><mode>3"
+ [(set (match_operand:SVE_Ix24 0 "aligned_register_operand" "=Uw<vector_count>")
+ (SVE_INT_BINARY_MULTI:SVE_Ix24
+ (match_operand:SVE_Ix24 1 "aligned_register_operand" "%0")
+ (match_operand:SVE_Ix24 2 "aligned_register_operand" "Uw<vector_count>")))]
+ "TARGET_STREAMING_SME2"
+ "<sve_int_op>\t%0, %0, %2"
+)
+
+(define_insn "@aarch64_sve_single_<optab><mode>"
+ [(set (match_operand:SVE_Ix24 0 "aligned_register_operand" "=Uw<vector_count>")
+ (SVE_INT_BINARY_SINGLE:SVE_Ix24
+ (match_operand:SVE_Ix24 1 "aligned_register_operand" "0")
+ (vec_duplicate:SVE_Ix24
+ (match_operand:<VSINGLE> 2 "register_operand" "x"))))]
+ "TARGET_STREAMING_SME2"
+ "<sve_int_op>\t%0, %0, %2.<Vetype>"
+)
+
+(define_insn "@aarch64_sve_<sve_int_op><mode>"
+ [(set (match_operand:SVE_Ix24 0 "aligned_register_operand" "=Uw<vector_count>")
+ (unspec:SVE_Ix24
+ [(match_operand:SVE_Ix24 1 "aligned_register_operand" "%0")
+ (match_operand:SVE_Ix24 2 "aligned_register_operand" "Uw<vector_count>")]
+ SVE_INT_BINARY_MULTI))]
+ "TARGET_STREAMING_SME2"
+ "<sve_int_op>\t%0, %0, %2"
+)
+
+(define_insn "@aarch64_sve_single_<sve_int_op><mode>"
+ [(set (match_operand:SVE_Ix24 0 "aligned_register_operand" "=Uw<vector_count>")
+ (unspec:SVE_Ix24
+ [(match_operand:SVE_Ix24 1 "aligned_register_operand" "0")
+ (vec_duplicate:SVE_Ix24
+ (match_operand:<VSINGLE> 2 "register_operand" "x"))]
+ SVE_INT_BINARY_MULTI))]
+ "TARGET_STREAMING_SME2"
+ "<sve_int_op>\t%0, %0, %2.<Vetype>"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- [INT] Clamp to minimum/maximum
+;; -------------------------------------------------------------------------
+;; - SCLAMP
+;; - UCLAMP
+;; -------------------------------------------------------------------------
+
+;; The minimum is applied after the maximum, which matters if the maximum
+;; bound is (unexpectedly) less than the minimum bound.
+(define_insn "@aarch64_sve_<su>clamp<mode>"
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
+ (<max_opp>:SVE_FULL_I
+ (USMAX:SVE_FULL_I
+ (match_operand:SVE_FULL_I 1 "register_operand")
+ (match_operand:SVE_FULL_I 2 "register_operand"))
+ (match_operand:SVE_FULL_I 3 "register_operand")))]
+ "TARGET_STREAMING_SME"
+ {@ [cons: =0, 1, 2, 3; attrs: movprfx]
+ [ w, %0, w, w; * ] <su>clamp\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
+ [ ?&w, w, w, w; yes ] movprfx\t%0, %1\;<su>clamp\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
+ }
+)
+
+(define_insn_and_split "*aarch64_sve_<su>clamp<mode>_x"
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
+ (unspec:SVE_FULL_I
+ [(match_operand 4)
+ (<max_opp>:SVE_FULL_I
+ (unspec:SVE_FULL_I
+ [(match_operand 5)
+ (USMAX:SVE_FULL_I
+ (match_operand:SVE_FULL_I 1 "register_operand")
+ (match_operand:SVE_FULL_I 2 "register_operand"))]
+ UNSPEC_PRED_X)
+ (match_operand:SVE_FULL_I 3 "register_operand"))]
+ UNSPEC_PRED_X))]
+ "TARGET_STREAMING_SME"
+ {@ [cons: =0, 1, 2, 3; attrs: movprfx]
+ [ w, %0, w, w; * ] #
+ [ ?&w, w, w, w; yes ] #
+ }
+ "&& true"
+ [(set (match_dup 0)
+ (<max_opp>:SVE_FULL_I
+ (USMAX:SVE_FULL_I
+ (match_dup 1)
+ (match_dup 2))
+ (match_dup 3)))]
+)
+
+(define_insn "@aarch64_sve_<su>clamp_single<mode>"
+ [(set (match_operand:SVE_Ix24 0 "register_operand" "=Uw<vector_count>")
+ (<max_opp>:SVE_Ix24
+ (USMAX:SVE_Ix24
+ (match_operand:SVE_Ix24 1 "register_operand" "0")
+ (vec_duplicate:SVE_Ix24
+ (match_operand:<VSINGLE> 2 "register_operand" "w")))
+ (vec_duplicate:SVE_Ix24
+ (match_operand:<VSINGLE> 3 "register_operand" "w"))))]
+ "TARGET_STREAMING_SME2"
+ "<su>clamp\t%0, %2.<Vetype>, %3.<Vetype>"
+)
+
+;; -------------------------------------------------------------------------
;; ---- [INT] Multiplication
;; -------------------------------------------------------------------------
;; Includes the lane and unpredicated forms of:
@@ -687,6 +960,74 @@
[(set_attr "movprfx" "yes")]
)
+;; -------------------------------------------------------------------------
+;; ---- [FP] Clamp to minimum/maximum
+;; -------------------------------------------------------------------------
+;; - FCLAMP
+;; -------------------------------------------------------------------------
+
+;; The minimum is applied after the maximum, which matters if the maximum
+;; bound is (unexpectedly) less than the minimum bound.
+(define_insn "@aarch64_sve_fclamp<mode>"
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
+ (unspec:SVE_FULL_F
+ [(unspec:SVE_FULL_F
+ [(match_operand:SVE_FULL_F 1 "register_operand")
+ (match_operand:SVE_FULL_F 2 "register_operand")]
+ UNSPEC_FMAXNM)
+ (match_operand:SVE_FULL_F 3 "register_operand")]
+ UNSPEC_FMINNM))]
+ "TARGET_STREAMING_SME"
+ {@ [cons: =0, 1, 2, 3; attrs: movprfx]
+ [ w, %0, w, w; * ] fclamp\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
+ [ ?&w, w, w, w; yes ] movprfx\t%0, %1\;fclamp\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
+ }
+)
+
+(define_insn_and_split "*aarch64_sve_fclamp<mode>_x"
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
+ (unspec:SVE_FULL_F
+ [(match_operand 4)
+ (const_int SVE_RELAXED_GP)
+ (unspec:SVE_FULL_F
+ [(match_operand 5)
+ (const_int SVE_RELAXED_GP)
+ (match_operand:SVE_FULL_F 1 "register_operand")
+ (match_operand:SVE_FULL_F 2 "register_operand")]
+ UNSPEC_COND_FMAXNM)
+ (match_operand:SVE_FULL_F 3 "register_operand")]
+ UNSPEC_COND_FMINNM))]
+ "TARGET_STREAMING_SME"
+ {@ [cons: =0, 1, 2, 3; attrs: movprfx]
+ [ w, %0, w, w; * ] #
+ [ ?&w, w, w, w; yes ] #
+ }
+ "&& true"
+ [(set (match_dup 0)
+ (unspec:SVE_FULL_F
+ [(unspec:SVE_FULL_F
+ [(match_dup 1)
+ (match_dup 2)]
+ UNSPEC_FMAXNM)
+ (match_dup 3)]
+ UNSPEC_FMINNM))]
+)
+
+(define_insn "@aarch64_sve_fclamp_single<mode>"
+ [(set (match_operand:SVE_Fx24 0 "register_operand" "=Uw<vector_count>")
+ (unspec:SVE_Fx24
+ [(unspec:SVE_Fx24
+ [(match_operand:SVE_Fx24 1 "register_operand" "0")
+ (vec_duplicate:SVE_Fx24
+ (match_operand:<VSINGLE> 2 "register_operand" "w"))]
+ UNSPEC_FMAXNM)
+ (vec_duplicate:SVE_Fx24
+ (match_operand:<VSINGLE> 3 "register_operand" "w"))]
+ UNSPEC_FMINNM))]
+ "TARGET_STREAMING_SME2"
+ "fclamp\t%0, %2.<Vetype>, %3.<Vetype>"
+)
+
;; =========================================================================
;; == Uniform ternary arithmnetic
;; =========================================================================
@@ -1255,6 +1596,30 @@
;; =========================================================================
;; -------------------------------------------------------------------------
+;; ---- [INT] Multi-register widening conversions
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - SUNPK
+;; - UUNPK
+;; -------------------------------------------------------------------------
+
+(define_insn "<optab><mode><v2xwide>2"
+ [(set (match_operand:<V2XWIDE> 0 "aligned_register_operand" "=Uw2")
+ (ANY_EXTEND:<V2XWIDE>
+ (match_operand:SVE_FULL_BHSI 1 "register_operand" "w")))]
+ "TARGET_STREAMING_SME2"
+ "<su>unpk\t%0, %1.<Vetype>"
+)
+
+(define_insn "<optab><mode><v2xwide>2"
+ [(set (match_operand:<V2XWIDE> 0 "aligned_register_operand" "=Uw4")
+ (ANY_EXTEND:<V2XWIDE>
+ (match_operand:SVE_FULL_BHSIx2 1 "aligned_register_operand" "Uw2")))]
+ "TARGET_STREAMING_SME2"
+ "<su>unpk\t%0, %1"
+)
+
+;; -------------------------------------------------------------------------
;; ---- [INT] Wide binary arithmetic
;; -------------------------------------------------------------------------
;; Includes:
@@ -1355,6 +1720,7 @@
;; Includes:
;; - SABALB
;; - SABALT
+;; - SDOT (SME2 or SVE2p1)
;; - SMLALB
;; - SMLALT
;; - SMLSLB
@@ -1367,6 +1733,7 @@
;; - SQDMLSLT
;; - UABALB
;; - UABALT
+;; - UDOT (SME2 or SVE2p1)
;; - UMLALB
;; - UMLALT
;; - UMLSLB
@@ -1512,10 +1879,68 @@
[ ?&w , w , w , <sve_lane_con> ; yes ] movprfx\t%0, %1\;<sve_int_qsub_op>\t%0.<Vetype>, %2.<Ventype>, %3.<Ventype>[%4]
}
)
+
+;; Two-way dot-product.
+(define_insn "@aarch64_sve_<sur>dotvnx4sivnx8hi"
+ [(set (match_operand:VNx4SI 0 "register_operand")
+ (plus:VNx4SI
+ (unspec:VNx4SI
+ [(match_operand:VNx8HI 1 "register_operand")
+ (match_operand:VNx8HI 2 "register_operand")]
+ DOTPROD)
+ (match_operand:VNx4SI 3 "register_operand")))]
+ "TARGET_STREAMING_SME2"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , w , w , 0 ; * ] <sur>dot\t%0.s, %1.h, %2.h
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %3\;<sur>dot\t%0.s, %1.h, %2.h
+ }
+)
+
+;; -------------------------------------------------------------------------
+;; ---- [FP] Multi-register operations
+;; -------------------------------------------------------------------------
+;; Includes the multi-register forms of:
+;; - FMAX
+;; - FMAXNM
+;; - FMIN
+;; - FMINNM
+;; -------------------------------------------------------------------------
+
+(define_expand "@aarch64_sve_<maxmin_uns_op><mode>"
+ [(set (match_operand:SVE_Fx24 0 "aligned_register_operand" "=Uw<vector_count>")
+ (unspec:SVE_Fx24
+ [(match_operand:SVE_Fx24 1 "aligned_register_operand" "Uw<vector_count>")
+ (match_operand:SVE_Fx24 2 "aligned_register_operand" "Uw<vector_count>")]
+ SVE_FP_BINARY_MULTI))]
+ "TARGET_STREAMING_SME2"
+)
+
+(define_insn "*aarch64_sve_<maxmin_uns_op><mode>"
+ [(set (match_operand:SVE_Fx24 0 "aligned_register_operand" "=Uw<vector_count>")
+ (unspec:SVE_Fx24
+ [(match_operand:SVE_Fx24 1 "aligned_register_operand" "%0")
+ (match_operand:SVE_Fx24 2 "aligned_register_operand" "Uw<vector_count>")]
+ SVE_FP_BINARY_MULTI))]
+ "TARGET_STREAMING_SME2"
+ "<maxmin_uns_op>\t%0, %0, %2"
+)
+
+(define_insn "@aarch64_sve_single_<maxmin_uns_op><mode>"
+ [(set (match_operand:SVE_Fx24 0 "aligned_register_operand" "=Uw<vector_count>")
+ (unspec:SVE_Fx24
+ [(match_operand:SVE_Fx24 1 "aligned_register_operand" "0")
+ (vec_duplicate:SVE_Fx24
+ (match_operand:<VSINGLE> 2 "register_operand" "x"))]
+ SVE_FP_BINARY_MULTI))]
+ "TARGET_STREAMING_SME2"
+ "<maxmin_uns_op>\t%0, %0, %2.<Vetype>"
+)
+
;; -------------------------------------------------------------------------
;; ---- [FP] Long multiplication with accumulation
;; -------------------------------------------------------------------------
;; Includes:
+;; - FDOT (SME2 or SVE2p1)
;; - FMLALB
;; - FMLALT
;; - FMLSLB
@@ -1553,6 +1978,40 @@
}
)
+;; Two-way dot-product.
+(define_insn "aarch64_sve_fdotvnx4sfvnx8hf"
+ [(set (match_operand:VNx4SF 0 "register_operand")
+ (plus:VNx4SF
+ (unspec:VNx4SF
+ [(match_operand:VNx8HF 1 "register_operand")
+ (match_operand:VNx8HF 2 "register_operand")]
+ UNSPEC_FDOT)
+ (match_operand:VNx4SF 3 "register_operand")))]
+ "TARGET_STREAMING_SME2"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , w , w , 0 ; * ] fdot\t%0.s, %1.h, %2.h
+ [ ?&w , w , w , w ; yes ] movprfx\t%0, %3\;fdot\t%0.s, %1.h, %2.h
+ }
+)
+
+(define_insn "aarch64_fdot_prod_lanevnx4sfvnx8hf"
+ [(set (match_operand:VNx4SF 0 "register_operand")
+ (plus:VNx4SF
+ (unspec:VNx4SF
+ [(match_operand:VNx8HF 1 "register_operand")
+ (unspec:VNx8HF
+ [(match_operand:VNx8HF 2 "register_operand")
+ (match_operand:SI 3 "const_int_operand")]
+ UNSPEC_SVE_LANE_SELECT)]
+ UNSPEC_FDOT)
+ (match_operand:VNx4SF 4 "register_operand")))]
+ "TARGET_STREAMING_SME2"
+ {@ [ cons: =0 , 1 , 2 , 4 ; attrs: movprfx ]
+ [ w , w , y , 0 ; * ] fdot\t%0.s, %1.h, %2.h[%3]
+ [ ?&w , w , y , w ; yes ] movprfx\t%0, %4\;fdot\t%0.s, %1.h, %2.h[%3]
+ }
+)
+
;; =========================================================================
;; == Narrowing arithnetic
;; =========================================================================
@@ -1590,6 +2049,43 @@
)
;; -------------------------------------------------------------------------
+;; ---- [INT] Multi-vector narrowing unary arithmetic
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - SQCVT
+;; - SQCVTN
+;; - UQCVT
+;; - UQCVTN
+;; -------------------------------------------------------------------------
+
+(define_insn "@aarch64_sve_<optab><VNx16QI_ONLY:mode><VNx16SI_ONLY:mode>"
+ [(set (match_operand:VNx16QI_ONLY 0 "register_operand" "=w")
+ (unspec:VNx16QI_ONLY
+ [(match_operand:VNx16SI_ONLY 1 "aligned_register_operand" "Uw<vector_count>")]
+ SVE_QCVTxN))]
+ "TARGET_SME2 && TARGET_STREAMING"
+ "<optab>\t%0.b, %1"
+)
+
+(define_insn "@aarch64_sve_<optab><VNx8HI_ONLY:mode><VNx8SI_ONLY:mode>"
+ [(set (match_operand:VNx8HI_ONLY 0 "register_operand" "=w")
+ (unspec:VNx8HI_ONLY
+ [(match_operand:VNx8SI_ONLY 1 "aligned_register_operand" "Uw<vector_count>")]
+ SVE_QCVTxN))]
+ "TARGET_SME2 && TARGET_STREAMING"
+ "<optab>\t%0.h, %1"
+)
+
+(define_insn "@aarch64_sve_<optab><VNx8HI_ONLY:mode><VNx8DI_ONLY:mode>"
+ [(set (match_operand:VNx8HI_ONLY 0 "register_operand" "=w")
+ (unspec:VNx8HI_ONLY
+ [(match_operand:VNx8DI_ONLY 1 "aligned_register_operand" "Uw<vector_count>")]
+ SVE_QCVTxN))]
+ "TARGET_SME2 && TARGET_STREAMING"
+ "<optab>\t%0.h, %1"
+)
+
+;; -------------------------------------------------------------------------
;; ---- [INT] Narrowing binary arithmetic
;; -------------------------------------------------------------------------
;; Includes:
@@ -1687,6 +2183,20 @@
"<sve_int_op>\t%0.<Ventype>, %2.<Vetype>, #%3"
)
+;; -------------------------------------------------------------------------
+;; ---- [INT] Multi-vector narrowing right shifts
+;; -------------------------------------------------------------------------
+
+(define_insn "@aarch64_sve_<sve_int_op><mode>"
+ [(set (match_operand:<VNARROW> 0 "register_operand" "=w")
+ (unspec:<VNARROW>
+ [(match_operand:SVE_FULL_SIx2_SDIx4 1 "register_operand" "Uw<vector_count>")
+ (match_operand:DI 2 "const_int_operand")]
+ SVE2_INT_SHIFT_IMM_NARROWxN))]
+ "TARGET_STREAMING_SME2"
+ "<sve_int_op>\t%0.<Ventype>, %1, #%2"
+)
+
;; =========================================================================
;; == Pairwise arithmetic
;; =========================================================================
@@ -2160,6 +2670,57 @@
"fcvtxnt\t%0.<Ventype>, %2/m, %3.<Vetype>"
)
+;; -------------------------------------------------------------------------
+;; ---- [FP<-FP] Multi-vector narrowing conversions
+;; -------------------------------------------------------------------------
+;; Includes the multi-register forms of:
+;; - BFCVT
+;; - BFCVTN
+;; - FCVT
+;; - FCVTN
+;; -------------------------------------------------------------------------
+
+(define_insn "truncvnx8sf<mode>2"
+ [(set (match_operand:SVE_FULL_HF 0 "register_operand" "=w")
+ (float_truncate:SVE_FULL_HF
+ (match_operand:VNx8SF 1 "aligned_register_operand" "Uw2")))]
+ "TARGET_STREAMING_SME2"
+ "<b>fcvt\t%0.h, %1"
+)
+
+(define_insn "@aarch64_sve_cvtn<mode>"
+ [(set (match_operand:SVE_FULL_HF 0 "register_operand" "=w")
+ (unspec:SVE_FULL_HF
+ [(match_operand:VNx8SF 1 "aligned_register_operand" "Uw2")]
+ UNSPEC_FCVTN))]
+ "TARGET_STREAMING_SME2"
+ "<b>fcvtn\t%0.h, %1"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- [FP<-INT] Multi-vector conversions
+;; -------------------------------------------------------------------------
+
+(define_insn "<optab><v_int_equiv><mode>2"
+ [(set (match_operand:SVE_SFx24 0 "aligned_register_operand" "=Uw<vector_count>")
+ (FLOATUORS:SVE_SFx24
+ (match_operand:<V_INT_EQUIV> 1 "aligned_register_operand" "Uw<vector_count>")))]
+ "TARGET_STREAMING_SME2"
+ "<su_optab>cvtf\t%0, %1"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- [INT<-FP] Multi-vector conversions
+;; -------------------------------------------------------------------------
+
+(define_insn "<optab><mode><v_int_equiv>2"
+ [(set (match_operand:<V_INT_EQUIV> 0 "aligned_register_operand" "=Uw<vector_count>")
+ (FIXUORS:<V_INT_EQUIV>
+ (match_operand:SVE_SFx24 1 "aligned_register_operand" "Uw<vector_count>")))]
+ "TARGET_STREAMING_SME2"
+ "fcvtz<su>\t%0, %1"
+)
+
;; =========================================================================
;; == Other arithmetic
;; =========================================================================
@@ -2356,10 +2917,108 @@
)
;; =========================================================================
+;; == Comparisons and selects
+;; =========================================================================
+
+;; -------------------------------------------------------------------------
+;; ---- [INT,FP] Select based on predicates as counters
+;; -------------------------------------------------------------------------
+
+(define_insn "@aarch64_sve_sel<mode>"
+ [(set (match_operand:SVE_FULLx24 0 "register_operand" "=Uw<vector_count>")
+ (unspec:SVE_FULLx24
+ [(match_operand:<VPRED> 3 "register_operand" "Uph")
+ (match_operand:SVE_FULLx24 1 "aligned_register_operand" "Uw<vector_count>")
+ (match_operand:SVE_FULLx24 2 "aligned_register_operand" "Uw<vector_count>")]
+ UNSPEC_SEL))]
+ "TARGET_STREAMING_SME2"
+ "sel\t%0, %K3, %1, %2"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- [INT] While tests
+;; -------------------------------------------------------------------------
+;; Includes the x2 and count versions of:
+;; - WHILEGE
+;; - WHILEGT
+;; - WHILEHI
+;; - WHILEHS
+;; - WHILELE
+;; - WHILELO
+;; - WHILELS
+;; - WHILELT
+;; -------------------------------------------------------------------------
+
+(define_insn "@aarch64_sve_while<while_optab_cmp>_b<BHSD_BITS>_x2"
+ [(set (match_operand:VNx32BI 0 "register_operand" "=Up2")
+ (unspec:VNx32BI
+ [(const_int SVE_WHILE_B_X2)
+ (match_operand:DI 1 "aarch64_reg_or_zero" "rZ")
+ (match_operand:DI 2 "aarch64_reg_or_zero" "rZ")
+ (const_int BHSD_BITS)]
+ SVE_WHILE_ORDER))
+ (clobber (reg:CC_NZC CC_REGNUM))]
+ "TARGET_STREAMING_SME2"
+ "while<cmp_op>\t{%S0.<bits_etype>, %T0.<bits_etype>}, %x1, %x2"
+)
+
+(define_insn "@aarch64_sve_while<while_optab_cmp>_c<BHSD_BITS>"
+ [(set (match_operand:VNx16BI 0 "register_operand" "=Uph")
+ (unspec:VNx16BI
+ [(const_int SVE_WHILE_C)
+ (match_operand:DI 1 "aarch64_reg_or_zero" "rZ")
+ (match_operand:DI 2 "aarch64_reg_or_zero" "rZ")
+ (const_int BHSD_BITS)
+ (match_operand:DI 3 "const_int_operand")]
+ SVE_WHILE_ORDER))
+ (clobber (reg:CC_NZC CC_REGNUM))]
+ "TARGET_STREAMING_SME2"
+ "while<cmp_op>\t%K0.<bits_etype>, %x1, %x2, vlx%3"
+)
+
+;; =========================================================================
;; == Permutation
;; =========================================================================
;; -------------------------------------------------------------------------
+;; ---- [INT,FP] Reversal
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - REVD
+;; -------------------------------------------------------------------------
+
+(define_insn "@aarch64_pred_<optab><mode>"
+ [(set (match_operand:SVE_FULL 0 "register_operand")
+ (unspec:SVE_FULL
+ [(match_operand:VNx2BI 1 "register_operand")
+ (unspec:SVE_FULL
+ [(match_operand:SVE_FULL 2 "register_operand")]
+ UNSPEC_REVD_ONLY)]
+ UNSPEC_PRED_X))]
+ "TARGET_STREAMING_SME"
+ {@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
+ [ w , Upl , 0 ; * ] revd\t%0.q, %1/m, %2.q
+ [ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;revd\t%0.q, %1/m, %2.q
+ }
+)
+
+(define_insn "@cond_<optab><mode>"
+ [(set (match_operand:SVE_FULL 0 "register_operand")
+ (unspec:SVE_FULL
+ [(match_operand:VNx2BI 1 "register_operand")
+ (unspec:SVE_FULL
+ [(match_operand:SVE_FULL 2 "register_operand")]
+ UNSPEC_REVD_ONLY)
+ (match_operand:SVE_FULL 3 "register_operand")]
+ UNSPEC_SEL))]
+ "TARGET_STREAMING_SME"
+ {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+ [ w , Upl , w , 0 ; * ] revd\t%0.q, %1/m, %2.q
+ [ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;revd\t%0.q, %1/m, %2.q
+ }
+)
+
+;; -------------------------------------------------------------------------
;; ---- [INT,FP] General permutes
;; -------------------------------------------------------------------------
;; Includes:
@@ -2391,6 +3050,52 @@
)
;; -------------------------------------------------------------------------
+;; ---- [INT,FP] Multi-register permutes
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - ZIP
+;; - UZP
+;; -------------------------------------------------------------------------
+
+(define_insn "@aarch64_sve_<optab><mode>"
+ [(set (match_operand:SVE_FULLx2 0 "aligned_register_operand" "=Uw2")
+ (unspec:SVE_FULLx2
+ [(match_operand:<VSINGLE> 1 "register_operand" "w")
+ (match_operand:<VSINGLE> 2 "register_operand" "w")]
+ SVE2_x24_PERMUTE))]
+ "TARGET_STREAMING_SME2"
+ "<perm_insn>\t%0, %1.<Vetype>, %2.<Vetype>"
+)
+
+(define_insn "@aarch64_sve_<optab><mode>"
+ [(set (match_operand:SVE_FULLx2 0 "aligned_register_operand" "=Uw2")
+ (unspec:SVE_FULLx2
+ [(match_operand:<VSINGLE> 1 "register_operand" "w")
+ (match_operand:<VSINGLE> 2 "register_operand" "w")]
+ SVE2_x24_PERMUTEQ))]
+ "TARGET_STREAMING_SME2"
+ "<perm_insn>\t{%S0.q - %T0.q}, %1.q, %2.q"
+)
+
+(define_insn "@aarch64_sve_<optab><mode>"
+ [(set (match_operand:SVE_FULLx4 0 "aligned_register_operand" "=Uw4")
+ (unspec:SVE_FULLx4
+ [(match_operand:SVE_FULLx4 1 "aligned_register_operand" "Uw4")]
+ SVE2_x24_PERMUTE))]
+ "TARGET_STREAMING_SME2"
+ "<perm_insn>\t%0, %1"
+)
+
+(define_insn "@aarch64_sve_<optab><mode>"
+ [(set (match_operand:SVE_FULLx4 0 "aligned_register_operand" "=Uw4")
+ (unspec:SVE_FULLx4
+ [(match_operand:SVE_FULLx4 1 "aligned_register_operand" "Uw4")]
+ SVE2_x24_PERMUTEQ))]
+ "TARGET_STREAMING_SME2"
+ "<perm_insn>\t{%S0.q - %V0.q}, {%S1.q - %V1.q}"
+)
+
+;; -------------------------------------------------------------------------
;; ---- [INT] Optional bit-permute extensions
;; -------------------------------------------------------------------------
;; Includes:
@@ -2469,7 +3174,7 @@
(match_operand:SVE_FULL_SDI 2 "register_operand" "w")
(match_operand:SVE_FULL_SDI 3 "register_operand" "w")]
UNSPEC_HISTCNT))]
- "TARGET_SVE2"
+ "TARGET_SVE2 && TARGET_NON_STREAMING"
"histcnt\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.<Vetype>"
)
@@ -2479,7 +3184,7 @@
[(match_operand:VNx16QI_ONLY 1 "register_operand" "w")
(match_operand:VNx16QI_ONLY 2 "register_operand" "w")]
UNSPEC_HISTSEG))]
- "TARGET_SVE2"
+ "TARGET_SVE2 && TARGET_NON_STREAMING"
"histseg\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
)
@@ -2503,7 +3208,7 @@
SVE2_MATCH)]
UNSPEC_PRED_Z))
(clobber (reg:CC_NZC CC_REGNUM))]
- "TARGET_SVE2"
+ "TARGET_SVE2 && TARGET_NON_STREAMING"
"<sve_int_op>\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.<Vetype>"
)
@@ -2534,6 +3239,7 @@
SVE2_MATCH)]
UNSPEC_PRED_Z))]
"TARGET_SVE2
+ && TARGET_NON_STREAMING
&& aarch64_sve_same_pred_for_ptest_p (&operands[4], &operands[6])"
"<sve_int_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.<Vetype>"
"&& !rtx_equal_p (operands[4], operands[6])"
@@ -2561,6 +3267,7 @@
UNSPEC_PTEST))
(clobber (match_scratch:<VPRED> 0 "=Upa"))]
"TARGET_SVE2
+ && TARGET_NON_STREAMING
&& aarch64_sve_same_pred_for_ptest_p (&operands[4], &operands[6])"
"<sve_int_op>\t%0.<Vetype>, %1/z, %2.<Vetype>, %3.<Vetype>"
"&& !rtx_equal_p (operands[4], operands[6])"
diff --git a/gcc/config/aarch64/aarch64-sys-regs.def b/gcc/config/aarch64/aarch64-sys-regs.def
new file mode 100644
index 0000000..d24a245
--- /dev/null
+++ b/gcc/config/aarch64/aarch64-sys-regs.def
@@ -0,0 +1,1064 @@
+/* aarch64-system-regs.def -- AArch64 opcode support.
+ Copyright (C) 2009-2023 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of the GNU opcodes library.
+
+ This library is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ It is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING3. If not,
+ see <http://www.gnu.org/licenses/>. */
+
+/* Array of system registers and their associated arch features.
+
+ This file is also used by GCC. Where necessary, any updates should
+ be made in Binutils and the updated file copied across to GCC, such
+ that the two projects are kept in sync at all times.
+
+ Before using #include to read this file, define a macro:
+
+ SYSREG (name, encoding, flags, features)
+
+ The NAME is the system register name, as recognized by the
+ assembler. ENCODING provides the necessary information for the binary
+ encoding of the system register. The FLAGS field is a bitmask of
+ relevant behavior information pertaining to the particular register.
+ For example: is it read/write-only? does it alias another register?
+ The FEATURES field maps onto ISA flags and specifies the architectural
+ feature requirements of the system register. */
+
+ SYSREG ("accdata_el1", CPENC (3,0,13,0,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("actlr_el1", CPENC (3,0,1,0,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("actlr_el2", CPENC (3,4,1,0,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("actlr_el3", CPENC (3,6,1,0,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("afsr0_el1", CPENC (3,0,5,1,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("afsr0_el12", CPENC (3,5,5,1,0), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("afsr0_el2", CPENC (3,4,5,1,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("afsr0_el3", CPENC (3,6,5,1,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("afsr1_el1", CPENC (3,0,5,1,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("afsr1_el12", CPENC (3,5,5,1,1), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("afsr1_el2", CPENC (3,4,5,1,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("afsr1_el3", CPENC (3,6,5,1,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("aidr_el1", CPENC (3,1,0,0,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("allint", CPENC (3,0,4,3,0), F_ARCHEXT, AARCH64_FEATURE (V8_8A))
+ SYSREG ("amair_el1", CPENC (3,0,10,3,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("amair_el12", CPENC (3,5,10,3,0), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("amair_el2", CPENC (3,4,10,3,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("amair_el3", CPENC (3,6,10,3,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("amcfgr_el0", CPENC (3,3,13,2,1), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amcg1idr_el0", CPENC (3,3,13,2,6), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amcgcr_el0", CPENC (3,3,13,2,2), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amcntenclr0_el0", CPENC (3,3,13,2,4), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amcntenclr1_el0", CPENC (3,3,13,3,0), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amcntenset0_el0", CPENC (3,3,13,2,5), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amcntenset1_el0", CPENC (3,3,13,3,1), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amcr_el0", CPENC (3,3,13,2,0), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevcntr00_el0", CPENC (3,3,13,4,0), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevcntr01_el0", CPENC (3,3,13,4,1), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevcntr02_el0", CPENC (3,3,13,4,2), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevcntr03_el0", CPENC (3,3,13,4,3), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevcntr10_el0", CPENC (3,3,13,12,0), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevcntr110_el0", CPENC (3,3,13,13,2), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevcntr111_el0", CPENC (3,3,13,13,3), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevcntr112_el0", CPENC (3,3,13,13,4), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevcntr113_el0", CPENC (3,3,13,13,5), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevcntr114_el0", CPENC (3,3,13,13,6), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevcntr115_el0", CPENC (3,3,13,13,7), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevcntr11_el0", CPENC (3,3,13,12,1), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevcntr12_el0", CPENC (3,3,13,12,2), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevcntr13_el0", CPENC (3,3,13,12,3), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevcntr14_el0", CPENC (3,3,13,12,4), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevcntr15_el0", CPENC (3,3,13,12,5), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevcntr16_el0", CPENC (3,3,13,12,6), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevcntr17_el0", CPENC (3,3,13,12,7), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevcntr18_el0", CPENC (3,3,13,13,0), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevcntr19_el0", CPENC (3,3,13,13,1), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevcntvoff00_el2", CPENC (3,4,13,8,0), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff010_el2", CPENC (3,4,13,9,2), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff011_el2", CPENC (3,4,13,9,3), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff012_el2", CPENC (3,4,13,9,4), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff013_el2", CPENC (3,4,13,9,5), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff014_el2", CPENC (3,4,13,9,6), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff015_el2", CPENC (3,4,13,9,7), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff01_el2", CPENC (3,4,13,8,1), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff02_el2", CPENC (3,4,13,8,2), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff03_el2", CPENC (3,4,13,8,3), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff04_el2", CPENC (3,4,13,8,4), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff05_el2", CPENC (3,4,13,8,5), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff06_el2", CPENC (3,4,13,8,6), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff07_el2", CPENC (3,4,13,8,7), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff08_el2", CPENC (3,4,13,9,0), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff09_el2", CPENC (3,4,13,9,1), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff10_el2", CPENC (3,4,13,10,0), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff110_el2", CPENC (3,4,13,11,2), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff111_el2", CPENC (3,4,13,11,3), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff112_el2", CPENC (3,4,13,11,4), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff113_el2", CPENC (3,4,13,11,5), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff114_el2", CPENC (3,4,13,11,6), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff115_el2", CPENC (3,4,13,11,7), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff11_el2", CPENC (3,4,13,10,1), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff12_el2", CPENC (3,4,13,10,2), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff13_el2", CPENC (3,4,13,10,3), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff14_el2", CPENC (3,4,13,10,4), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff15_el2", CPENC (3,4,13,10,5), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff16_el2", CPENC (3,4,13,10,6), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff17_el2", CPENC (3,4,13,10,7), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff18_el2", CPENC (3,4,13,11,0), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevcntvoff19_el2", CPENC (3,4,13,11,1), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("amevtyper00_el0", CPENC (3,3,13,6,0), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevtyper01_el0", CPENC (3,3,13,6,1), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevtyper02_el0", CPENC (3,3,13,6,2), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevtyper03_el0", CPENC (3,3,13,6,3), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevtyper10_el0", CPENC (3,3,13,14,0), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevtyper110_el0", CPENC (3,3,13,15,2), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevtyper111_el0", CPENC (3,3,13,15,3), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevtyper112_el0", CPENC (3,3,13,15,4), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevtyper113_el0", CPENC (3,3,13,15,5), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevtyper114_el0", CPENC (3,3,13,15,6), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevtyper115_el0", CPENC (3,3,13,15,7), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevtyper11_el0", CPENC (3,3,13,14,1), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevtyper12_el0", CPENC (3,3,13,14,2), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevtyper13_el0", CPENC (3,3,13,14,3), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevtyper14_el0", CPENC (3,3,13,14,4), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevtyper15_el0", CPENC (3,3,13,14,5), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevtyper16_el0", CPENC (3,3,13,14,6), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevtyper17_el0", CPENC (3,3,13,14,7), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevtyper18_el0", CPENC (3,3,13,15,0), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amevtyper19_el0", CPENC (3,3,13,15,1), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("amuserenr_el0", CPENC (3,3,13,2,3), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("apdakeyhi_el1", CPENC (3,0,2,2,1), F_ARCHEXT, AARCH64_FEATURE (V8_3A))
+ SYSREG ("apdakeylo_el1", CPENC (3,0,2,2,0), F_ARCHEXT, AARCH64_FEATURE (V8_3A))
+ SYSREG ("apdbkeyhi_el1", CPENC (3,0,2,2,3), F_ARCHEXT, AARCH64_FEATURE (V8_3A))
+ SYSREG ("apdbkeylo_el1", CPENC (3,0,2,2,2), F_ARCHEXT, AARCH64_FEATURE (V8_3A))
+ SYSREG ("apgakeyhi_el1", CPENC (3,0,2,3,1), F_ARCHEXT, AARCH64_FEATURE (V8_3A))
+ SYSREG ("apgakeylo_el1", CPENC (3,0,2,3,0), F_ARCHEXT, AARCH64_FEATURE (V8_3A))
+ SYSREG ("apiakeyhi_el1", CPENC (3,0,2,1,1), F_ARCHEXT, AARCH64_FEATURE (V8_3A))
+ SYSREG ("apiakeylo_el1", CPENC (3,0,2,1,0), F_ARCHEXT, AARCH64_FEATURE (V8_3A))
+ SYSREG ("apibkeyhi_el1", CPENC (3,0,2,1,3), F_ARCHEXT, AARCH64_FEATURE (V8_3A))
+ SYSREG ("apibkeylo_el1", CPENC (3,0,2,1,2), F_ARCHEXT, AARCH64_FEATURE (V8_3A))
+ SYSREG ("brbcr_el1", CPENC (2,1,9,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("brbcr_el12", CPENC (2,5,9,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("brbcr_el2", CPENC (2,4,9,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("brbfcr_el1", CPENC (2,1,9,0,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("brbidr0_el1", CPENC (2,1,9,2,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf0_el1", CPENC (2,1,8,0,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf10_el1", CPENC (2,1,8,10,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf11_el1", CPENC (2,1,8,11,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf12_el1", CPENC (2,1,8,12,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf13_el1", CPENC (2,1,8,13,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf14_el1", CPENC (2,1,8,14,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf15_el1", CPENC (2,1,8,15,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf16_el1", CPENC (2,1,8,0,4), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf17_el1", CPENC (2,1,8,1,4), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf18_el1", CPENC (2,1,8,2,4), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf19_el1", CPENC (2,1,8,3,4), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf1_el1", CPENC (2,1,8,1,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf20_el1", CPENC (2,1,8,4,4), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf21_el1", CPENC (2,1,8,5,4), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf22_el1", CPENC (2,1,8,6,4), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf23_el1", CPENC (2,1,8,7,4), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf24_el1", CPENC (2,1,8,8,4), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf25_el1", CPENC (2,1,8,9,4), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf26_el1", CPENC (2,1,8,10,4), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf27_el1", CPENC (2,1,8,11,4), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf28_el1", CPENC (2,1,8,12,4), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf29_el1", CPENC (2,1,8,13,4), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf2_el1", CPENC (2,1,8,2,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf30_el1", CPENC (2,1,8,14,4), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf31_el1", CPENC (2,1,8,15,4), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf3_el1", CPENC (2,1,8,3,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf4_el1", CPENC (2,1,8,4,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf5_el1", CPENC (2,1,8,5,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf6_el1", CPENC (2,1,8,6,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf7_el1", CPENC (2,1,8,7,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf8_el1", CPENC (2,1,8,8,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinf9_el1", CPENC (2,1,8,9,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbinfinj_el1", CPENC (2,1,9,1,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc0_el1", CPENC (2,1,8,0,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc10_el1", CPENC (2,1,8,10,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc11_el1", CPENC (2,1,8,11,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc12_el1", CPENC (2,1,8,12,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc13_el1", CPENC (2,1,8,13,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc14_el1", CPENC (2,1,8,14,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc15_el1", CPENC (2,1,8,15,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc16_el1", CPENC (2,1,8,0,5), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc17_el1", CPENC (2,1,8,1,5), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc18_el1", CPENC (2,1,8,2,5), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc19_el1", CPENC (2,1,8,3,5), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc1_el1", CPENC (2,1,8,1,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc20_el1", CPENC (2,1,8,4,5), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc21_el1", CPENC (2,1,8,5,5), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc22_el1", CPENC (2,1,8,6,5), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc23_el1", CPENC (2,1,8,7,5), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc24_el1", CPENC (2,1,8,8,5), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc25_el1", CPENC (2,1,8,9,5), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc26_el1", CPENC (2,1,8,10,5), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc27_el1", CPENC (2,1,8,11,5), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc28_el1", CPENC (2,1,8,12,5), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc29_el1", CPENC (2,1,8,13,5), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc2_el1", CPENC (2,1,8,2,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc30_el1", CPENC (2,1,8,14,5), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc31_el1", CPENC (2,1,8,15,5), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc3_el1", CPENC (2,1,8,3,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc4_el1", CPENC (2,1,8,4,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc5_el1", CPENC (2,1,8,5,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc6_el1", CPENC (2,1,8,6,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc7_el1", CPENC (2,1,8,7,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc8_el1", CPENC (2,1,8,8,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrc9_el1", CPENC (2,1,8,9,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbsrcinj_el1", CPENC (2,1,9,1,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt0_el1", CPENC (2,1,8,0,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt10_el1", CPENC (2,1,8,10,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt11_el1", CPENC (2,1,8,11,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt12_el1", CPENC (2,1,8,12,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt13_el1", CPENC (2,1,8,13,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt14_el1", CPENC (2,1,8,14,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt15_el1", CPENC (2,1,8,15,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt16_el1", CPENC (2,1,8,0,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt17_el1", CPENC (2,1,8,1,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt18_el1", CPENC (2,1,8,2,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt19_el1", CPENC (2,1,8,3,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt1_el1", CPENC (2,1,8,1,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt20_el1", CPENC (2,1,8,4,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt21_el1", CPENC (2,1,8,5,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt22_el1", CPENC (2,1,8,6,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt23_el1", CPENC (2,1,8,7,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt24_el1", CPENC (2,1,8,8,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt25_el1", CPENC (2,1,8,9,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt26_el1", CPENC (2,1,8,10,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt27_el1", CPENC (2,1,8,11,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt28_el1", CPENC (2,1,8,12,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt29_el1", CPENC (2,1,8,13,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt2_el1", CPENC (2,1,8,2,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt30_el1", CPENC (2,1,8,14,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt31_el1", CPENC (2,1,8,15,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt3_el1", CPENC (2,1,8,3,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt4_el1", CPENC (2,1,8,4,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt5_el1", CPENC (2,1,8,5,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt6_el1", CPENC (2,1,8,6,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt7_el1", CPENC (2,1,8,7,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt8_el1", CPENC (2,1,8,8,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgt9_el1", CPENC (2,1,8,9,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("brbtgtinj_el1", CPENC (2,1,9,1,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("brbts_el1", CPENC (2,1,9,0,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ccsidr2_el1", CPENC (3,1,0,0,2), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (V8_3A))
+ SYSREG ("ccsidr_el1", CPENC (3,1,0,0,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("clidr_el1", CPENC (3,1,0,0,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("cntfrq_el0", CPENC (3,3,14,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("cnthctl_el2", CPENC (3,4,14,1,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("cnthp_ctl_el2", CPENC (3,4,14,2,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("cnthp_cval_el2", CPENC (3,4,14,2,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("cnthp_tval_el2", CPENC (3,4,14,2,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("cnthps_ctl_el2", CPENC (3,4,14,5,1), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("cnthps_cval_el2", CPENC (3,4,14,5,2), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("cnthps_tval_el2", CPENC (3,4,14,5,0), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("cnthv_ctl_el2", CPENC (3,4,14,3,1), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("cnthv_cval_el2", CPENC (3,4,14,3,2), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("cnthv_tval_el2", CPENC (3,4,14,3,0), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("cnthvs_ctl_el2", CPENC (3,4,14,4,1), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("cnthvs_cval_el2", CPENC (3,4,14,4,2), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("cnthvs_tval_el2", CPENC (3,4,14,4,0), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("cntkctl_el1", CPENC (3,0,14,1,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("cntkctl_el12", CPENC (3,5,14,1,0), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("cntp_ctl_el0", CPENC (3,3,14,2,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("cntp_ctl_el02", CPENC (3,5,14,2,1), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("cntp_cval_el0", CPENC (3,3,14,2,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("cntp_cval_el02", CPENC (3,5,14,2,2), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("cntp_tval_el0", CPENC (3,3,14,2,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("cntp_tval_el02", CPENC (3,5,14,2,0), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("cntpct_el0", CPENC (3,3,14,0,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("cntpctss_el0", CPENC (3,3,14,0,5), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("cntpoff_el2", CPENC (3,4,14,0,6), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("cntps_ctl_el1", CPENC (3,7,14,2,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("cntps_cval_el1", CPENC (3,7,14,2,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("cntps_tval_el1", CPENC (3,7,14,2,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("cntv_ctl_el0", CPENC (3,3,14,3,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("cntv_ctl_el02", CPENC (3,5,14,3,1), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("cntv_cval_el0", CPENC (3,3,14,3,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("cntv_cval_el02", CPENC (3,5,14,3,2), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("cntv_tval_el0", CPENC (3,3,14,3,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("cntv_tval_el02", CPENC (3,5,14,3,0), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("cntvct_el0", CPENC (3,3,14,0,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("cntvctss_el0", CPENC (3,3,14,0,6), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("cntvoff_el2", CPENC (3,4,14,0,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("contextidr_el1", CPENC (3,0,13,0,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("contextidr_el12", CPENC (3,5,13,0,1), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("contextidr_el2", CPENC (3,4,13,0,1), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("cpacr_el1", CPENC (3,0,1,0,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("cpacr_el12", CPENC (3,5,1,0,2), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("cptr_el2", CPENC (3,4,1,1,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("cptr_el3", CPENC (3,6,1,1,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("csrcr_el0", CPENC (2,3,8,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("csrcr_el1", CPENC (2,0,8,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("csrcr_el12", CPENC (2,5,8,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("csrcr_el2", CPENC (2,4,8,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("csridr_el0", CPENC (2,3,8,0,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("csrptr_el0", CPENC (2,3,8,0,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("csrptr_el1", CPENC (2,0,8,0,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("csrptr_el12", CPENC (2,5,8,0,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("csrptr_el2", CPENC (2,4,8,0,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("csrptridx_el0", CPENC (2,3,8,0,3), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("csrptridx_el1", CPENC (2,0,8,0,3), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("csrptridx_el2", CPENC (2,4,8,0,3), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("csselr_el1", CPENC (3,2,0,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ctr_el0", CPENC (3,3,0,0,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("currentel", CPENC (3,0,4,2,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("dacr32_el2", CPENC (3,4,3,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("daif", CPENC (3,3,4,2,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgauthstatus_el1", CPENC (2,0,7,14,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbcr0_el1", CPENC (2,0,0,0,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbcr10_el1", CPENC (2,0,0,10,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbcr11_el1", CPENC (2,0,0,11,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbcr12_el1", CPENC (2,0,0,12,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbcr13_el1", CPENC (2,0,0,13,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbcr14_el1", CPENC (2,0,0,14,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbcr15_el1", CPENC (2,0,0,15,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbcr1_el1", CPENC (2,0,0,1,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbcr2_el1", CPENC (2,0,0,2,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbcr3_el1", CPENC (2,0,0,3,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbcr4_el1", CPENC (2,0,0,4,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbcr5_el1", CPENC (2,0,0,5,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbcr6_el1", CPENC (2,0,0,6,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbcr7_el1", CPENC (2,0,0,7,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbcr8_el1", CPENC (2,0,0,8,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbcr9_el1", CPENC (2,0,0,9,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbvr0_el1", CPENC (2,0,0,0,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbvr10_el1", CPENC (2,0,0,10,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbvr11_el1", CPENC (2,0,0,11,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbvr12_el1", CPENC (2,0,0,12,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbvr13_el1", CPENC (2,0,0,13,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbvr14_el1", CPENC (2,0,0,14,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbvr15_el1", CPENC (2,0,0,15,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbvr1_el1", CPENC (2,0,0,1,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbvr2_el1", CPENC (2,0,0,2,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbvr3_el1", CPENC (2,0,0,3,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbvr4_el1", CPENC (2,0,0,4,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbvr5_el1", CPENC (2,0,0,5,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbvr6_el1", CPENC (2,0,0,6,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbvr7_el1", CPENC (2,0,0,7,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbvr8_el1", CPENC (2,0,0,8,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgbvr9_el1", CPENC (2,0,0,9,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgclaimclr_el1", CPENC (2,0,7,9,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgclaimset_el1", CPENC (2,0,7,8,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgdtr_el0", CPENC (2,3,0,4,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgdtrrx_el0", CPENC (2,3,0,5,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("dbgdtrtx_el0", CPENC (2,3,0,5,0), F_REG_WRITE, AARCH64_NO_FEATURES)
+ SYSREG ("dbgprcr_el1", CPENC (2,0,1,4,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgvcr32_el2", CPENC (2,4,0,7,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwcr0_el1", CPENC (2,0,0,0,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwcr10_el1", CPENC (2,0,0,10,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwcr11_el1", CPENC (2,0,0,11,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwcr12_el1", CPENC (2,0,0,12,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwcr13_el1", CPENC (2,0,0,13,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwcr14_el1", CPENC (2,0,0,14,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwcr15_el1", CPENC (2,0,0,15,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwcr1_el1", CPENC (2,0,0,1,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwcr2_el1", CPENC (2,0,0,2,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwcr3_el1", CPENC (2,0,0,3,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwcr4_el1", CPENC (2,0,0,4,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwcr5_el1", CPENC (2,0,0,5,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwcr6_el1", CPENC (2,0,0,6,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwcr7_el1", CPENC (2,0,0,7,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwcr8_el1", CPENC (2,0,0,8,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwcr9_el1", CPENC (2,0,0,9,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwvr0_el1", CPENC (2,0,0,0,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwvr10_el1", CPENC (2,0,0,10,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwvr11_el1", CPENC (2,0,0,11,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwvr12_el1", CPENC (2,0,0,12,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwvr13_el1", CPENC (2,0,0,13,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwvr14_el1", CPENC (2,0,0,14,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwvr15_el1", CPENC (2,0,0,15,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwvr1_el1", CPENC (2,0,0,1,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwvr2_el1", CPENC (2,0,0,2,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwvr3_el1", CPENC (2,0,0,3,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwvr4_el1", CPENC (2,0,0,4,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwvr5_el1", CPENC (2,0,0,5,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwvr6_el1", CPENC (2,0,0,6,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwvr7_el1", CPENC (2,0,0,7,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwvr8_el1", CPENC (2,0,0,8,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dbgwvr9_el1", CPENC (2,0,0,9,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dczid_el0", CPENC (3,3,0,0,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("disr_el1", CPENC (3,0,12,1,1), F_ARCHEXT, AARCH64_FEATURE (RAS))
+ SYSREG ("dit", CPENC (3,3,4,2,5), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("dlr_el0", CPENC (3,3,4,5,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("dspsr_el0", CPENC (3,3,4,5,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("elr_el1", CPENC (3,0,4,0,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("elr_el12", CPENC (3,5,4,0,1), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("elr_el2", CPENC (3,4,4,0,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("elr_el3", CPENC (3,6,4,0,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("erridr_el1", CPENC (3,0,5,3,0), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (RAS))
+ SYSREG ("errselr_el1", CPENC (3,0,5,3,1), F_ARCHEXT, AARCH64_FEATURE (RAS))
+ SYSREG ("erxaddr_el1", CPENC (3,0,5,4,3), F_ARCHEXT, AARCH64_FEATURE (RAS))
+ SYSREG ("erxctlr_el1", CPENC (3,0,5,4,1), F_ARCHEXT, AARCH64_FEATURE (RAS))
+ SYSREG ("erxfr_el1", CPENC (3,0,5,4,0), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (RAS))
+ SYSREG ("erxmisc0_el1", CPENC (3,0,5,5,0), F_ARCHEXT, AARCH64_FEATURE (RAS))
+ SYSREG ("erxmisc1_el1", CPENC (3,0,5,5,1), F_ARCHEXT, AARCH64_FEATURE (RAS))
+ SYSREG ("erxmisc2_el1", CPENC (3,0,5,5,2), F_ARCHEXT, AARCH64_FEATURE (RAS))
+ SYSREG ("erxmisc3_el1", CPENC (3,0,5,5,3), F_ARCHEXT, AARCH64_FEATURE (RAS))
+ SYSREG ("erxpfgcdn_el1", CPENC (3,0,5,4,6), F_ARCHEXT, AARCH64_FEATURE (RAS))
+ SYSREG ("erxpfgctl_el1", CPENC (3,0,5,4,5), F_ARCHEXT, AARCH64_FEATURE (RAS))
+ SYSREG ("erxpfgf_el1", CPENC (3,0,5,4,4), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (RAS))
+ SYSREG ("erxstatus_el1", CPENC (3,0,5,4,2), F_ARCHEXT, AARCH64_FEATURE (RAS))
+ SYSREG ("esr_el1", CPENC (3,0,5,2,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("esr_el12", CPENC (3,5,5,2,0), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("esr_el2", CPENC (3,4,5,2,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("esr_el3", CPENC (3,6,5,2,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("far_el1", CPENC (3,0,6,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("far_el12", CPENC (3,5,6,0,0), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("far_el2", CPENC (3,4,6,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("far_el3", CPENC (3,6,6,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("fpcr", CPENC (3,3,4,4,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("fpexc32_el2", CPENC (3,4,5,3,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("fpsr", CPENC (3,3,4,4,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("gcr_el1", CPENC (3,0,1,0,6), F_ARCHEXT, AARCH64_FEATURE (MEMTAG))
+ SYSREG ("gmid_el1", CPENC (3,1,0,0,4), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (MEMTAG))
+ SYSREG ("gpccr_el3", CPENC (3,6,2,1,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("gptbr_el3", CPENC (3,6,2,1,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("hacr_el2", CPENC (3,4,1,1,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("hafgrtr_el2", CPENC (3,4,3,1,6), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("hcr_el2", CPENC (3,4,1,1,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("hcrx_el2", CPENC (3,4,1,2,2), F_ARCHEXT, AARCH64_FEATURE (V8_7A))
+ SYSREG ("hdfgrtr_el2", CPENC (3,4,3,1,4), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("hdfgwtr_el2", CPENC (3,4,3,1,5), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("hfgitr_el2", CPENC (3,4,1,1,6), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("hfgrtr_el2", CPENC (3,4,1,1,4), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("hfgwtr_el2", CPENC (3,4,1,1,5), F_ARCHEXT, AARCH64_FEATURE (V8_6A))
+ SYSREG ("hpfar_el2", CPENC (3,4,6,0,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("hstr_el2", CPENC (3,4,1,1,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("icc_ap0r0_el1", CPENC (3,0,12,8,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("icc_ap0r1_el1", CPENC (3,0,12,8,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("icc_ap0r2_el1", CPENC (3,0,12,8,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("icc_ap0r3_el1", CPENC (3,0,12,8,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("icc_ap1r0_el1", CPENC (3,0,12,9,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("icc_ap1r1_el1", CPENC (3,0,12,9,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("icc_ap1r2_el1", CPENC (3,0,12,9,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("icc_ap1r3_el1", CPENC (3,0,12,9,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("icc_asgi1r_el1", CPENC (3,0,12,11,6), F_REG_WRITE, AARCH64_NO_FEATURES)
+ SYSREG ("icc_bpr0_el1", CPENC (3,0,12,8,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("icc_bpr1_el1", CPENC (3,0,12,12,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("icc_ctlr_el1", CPENC (3,0,12,12,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("icc_ctlr_el3", CPENC (3,6,12,12,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("icc_dir_el1", CPENC (3,0,12,11,1), F_REG_WRITE, AARCH64_NO_FEATURES)
+ SYSREG ("icc_eoir0_el1", CPENC (3,0,12,8,1), F_REG_WRITE, AARCH64_NO_FEATURES)
+ SYSREG ("icc_eoir1_el1", CPENC (3,0,12,12,1), F_REG_WRITE, AARCH64_NO_FEATURES)
+ SYSREG ("icc_hppir0_el1", CPENC (3,0,12,8,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("icc_hppir1_el1", CPENC (3,0,12,12,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("icc_iar0_el1", CPENC (3,0,12,8,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("icc_iar1_el1", CPENC (3,0,12,12,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("icc_igrpen0_el1", CPENC (3,0,12,12,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("icc_igrpen1_el1", CPENC (3,0,12,12,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("icc_igrpen1_el3", CPENC (3,6,12,12,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("icc_nmiar1_el1", CPENC (3,0,12,9,5), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (V8_8A))
+ SYSREG ("icc_pmr_el1", CPENC (3,0,4,6,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("icc_rpr_el1", CPENC (3,0,12,11,3), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("icc_sgi0r_el1", CPENC (3,0,12,11,7), F_REG_WRITE, AARCH64_NO_FEATURES)
+ SYSREG ("icc_sgi1r_el1", CPENC (3,0,12,11,5), F_REG_WRITE, AARCH64_NO_FEATURES)
+ SYSREG ("icc_sre_el1", CPENC (3,0,12,12,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("icc_sre_el2", CPENC (3,4,12,9,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("icc_sre_el3", CPENC (3,6,12,12,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_ap0r0_el2", CPENC (3,4,12,8,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_ap0r1_el2", CPENC (3,4,12,8,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_ap0r2_el2", CPENC (3,4,12,8,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_ap0r3_el2", CPENC (3,4,12,8,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_ap1r0_el2", CPENC (3,4,12,9,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_ap1r1_el2", CPENC (3,4,12,9,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_ap1r2_el2", CPENC (3,4,12,9,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_ap1r3_el2", CPENC (3,4,12,9,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_eisr_el2", CPENC (3,4,12,11,3), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("ich_elrsr_el2", CPENC (3,4,12,11,5), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("ich_hcr_el2", CPENC (3,4,12,11,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_lr0_el2", CPENC (3,4,12,12,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_lr10_el2", CPENC (3,4,12,13,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_lr11_el2", CPENC (3,4,12,13,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_lr12_el2", CPENC (3,4,12,13,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_lr13_el2", CPENC (3,4,12,13,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_lr14_el2", CPENC (3,4,12,13,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_lr15_el2", CPENC (3,4,12,13,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_lr1_el2", CPENC (3,4,12,12,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_lr2_el2", CPENC (3,4,12,12,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_lr3_el2", CPENC (3,4,12,12,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_lr4_el2", CPENC (3,4,12,12,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_lr5_el2", CPENC (3,4,12,12,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_lr6_el2", CPENC (3,4,12,12,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_lr7_el2", CPENC (3,4,12,12,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_lr8_el2", CPENC (3,4,12,13,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_lr9_el2", CPENC (3,4,12,13,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_misr_el2", CPENC (3,4,12,11,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("ich_vmcr_el2", CPENC (3,4,12,11,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ich_vtr_el2", CPENC (3,4,12,11,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_aa64afr0_el1", CPENC (3,0,0,5,4), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_aa64afr1_el1", CPENC (3,0,0,5,5), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_aa64dfr0_el1", CPENC (3,0,0,5,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_aa64dfr1_el1", CPENC (3,0,0,5,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_aa64isar0_el1", CPENC (3,0,0,6,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_aa64isar1_el1", CPENC (3,0,0,6,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_aa64isar2_el1", CPENC (3,0,0,6,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_aa64mmfr0_el1", CPENC (3,0,0,7,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_aa64mmfr1_el1", CPENC (3,0,0,7,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_aa64mmfr2_el1", CPENC (3,0,0,7,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_aa64pfr0_el1", CPENC (3,0,0,4,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_aa64pfr1_el1", CPENC (3,0,0,4,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_aa64smfr0_el1", CPENC (3,0,0,4,5), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (SME))
+ SYSREG ("id_aa64zfr0_el1", CPENC (3,0,0,4,4), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (SVE))
+ SYSREG ("id_afr0_el1", CPENC (3,0,0,1,3), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_dfr0_el1", CPENC (3,0,0,1,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_dfr1_el1", CPENC (3,0,0,3,5), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_isar0_el1", CPENC (3,0,0,2,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_isar1_el1", CPENC (3,0,0,2,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_isar2_el1", CPENC (3,0,0,2,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_isar3_el1", CPENC (3,0,0,2,3), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_isar4_el1", CPENC (3,0,0,2,4), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_isar5_el1", CPENC (3,0,0,2,5), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_isar6_el1", CPENC (3,0,0,2,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_mmfr0_el1", CPENC (3,0,0,1,4), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_mmfr1_el1", CPENC (3,0,0,1,5), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_mmfr2_el1", CPENC (3,0,0,1,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_mmfr3_el1", CPENC (3,0,0,1,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_mmfr4_el1", CPENC (3,0,0,2,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_mmfr5_el1", CPENC (3,0,0,3,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_pfr0_el1", CPENC (3,0,0,1,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_pfr1_el1", CPENC (3,0,0,1,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("id_pfr2_el1", CPENC (3,0,0,3,4), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (ID_PFR2))
+ SYSREG ("ifsr32_el2", CPENC (3,4,5,0,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("isr_el1", CPENC (3,0,12,1,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("lorc_el1", CPENC (3,0,10,4,3), F_ARCHEXT, AARCH64_FEATURE (LOR))
+ SYSREG ("lorea_el1", CPENC (3,0,10,4,1), F_ARCHEXT, AARCH64_FEATURE (LOR))
+ SYSREG ("lorid_el1", CPENC (3,0,10,4,7), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (LOR))
+ SYSREG ("lorn_el1", CPENC (3,0,10,4,2), F_ARCHEXT, AARCH64_FEATURE (LOR))
+ SYSREG ("lorsa_el1", CPENC (3,0,10,4,0), F_ARCHEXT, AARCH64_FEATURE (LOR))
+ SYSREG ("mair_el1", CPENC (3,0,10,2,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mair_el12", CPENC (3,5,10,2,0), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("mair_el2", CPENC (3,4,10,2,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mair_el3", CPENC (3,6,10,2,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mdccint_el1", CPENC (2,0,0,2,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mdccsr_el0", CPENC (2,3,0,1,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("mdcr_el2", CPENC (3,4,1,1,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mdcr_el3", CPENC (3,6,1,3,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mdrar_el1", CPENC (2,0,1,0,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("mdscr_el1", CPENC (2,0,0,2,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mecid_a0_el2", CPENC (3,4,10,8,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mecid_a1_el2", CPENC (3,4,10,8,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mecid_p0_el2", CPENC (3,4,10,8,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mecid_p1_el2", CPENC (3,4,10,8,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mecid_rl_a_el3", CPENC (3,6,10,10,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mecidr_el2", CPENC (3,4,10,8,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("mfar_el3", CPENC (3,6,6,0,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("midr_el1", CPENC (3,0,0,0,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("mpam0_el1", CPENC (3,0,10,5,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mpam1_el1", CPENC (3,0,10,5,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mpam1_el12", CPENC (3,5,10,5,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mpam2_el2", CPENC (3,4,10,5,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mpam3_el3", CPENC (3,6,10,5,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mpamhcr_el2", CPENC (3,4,10,4,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mpamidr_el1", CPENC (3,0,10,4,4), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("mpamsm_el1", CPENC (3,0,10,5,3), F_ARCHEXT, AARCH64_FEATURE (SME))
+ SYSREG ("mpamvpm0_el2", CPENC (3,4,10,6,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mpamvpm1_el2", CPENC (3,4,10,6,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mpamvpm2_el2", CPENC (3,4,10,6,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mpamvpm3_el2", CPENC (3,4,10,6,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mpamvpm4_el2", CPENC (3,4,10,6,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mpamvpm5_el2", CPENC (3,4,10,6,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mpamvpm6_el2", CPENC (3,4,10,6,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mpamvpm7_el2", CPENC (3,4,10,6,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mpamvpmv_el2", CPENC (3,4,10,4,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("mpidr_el1", CPENC (3,0,0,0,5), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("mpuir_el1", CPENC (3,0,0,0,4), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("mpuir_el2", CPENC (3,4,0,0,4), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("mvfr0_el1", CPENC (3,0,0,3,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("mvfr1_el1", CPENC (3,0,0,3,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("mvfr2_el1", CPENC (3,0,0,3,2), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("nzcv", CPENC (3,3,4,2,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("osdlr_el1", CPENC (2,0,1,3,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("osdtrrx_el1", CPENC (2,0,0,0,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("osdtrtx_el1", CPENC (2,0,0,3,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("oseccr_el1", CPENC (2,0,0,6,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("oslar_el1", CPENC (2,0,1,0,4), F_REG_WRITE, AARCH64_NO_FEATURES)
+ SYSREG ("oslsr_el1", CPENC (2,0,1,1,4), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("pan", CPENC (3,0,4,2,3), F_ARCHEXT, AARCH64_FEATURE (PAN))
+ SYSREG ("par_el1", CPENC (3,0,7,4,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmbidr_el1", CPENC (3,0,9,10,7), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (PROFILE))
+ SYSREG ("pmblimitr_el1", CPENC (3,0,9,10,0), F_ARCHEXT, AARCH64_FEATURE (PROFILE))
+ SYSREG ("pmbptr_el1", CPENC (3,0,9,10,1), F_ARCHEXT, AARCH64_FEATURE (PROFILE))
+ SYSREG ("pmbsr_el1", CPENC (3,0,9,10,3), F_ARCHEXT, AARCH64_FEATURE (PROFILE))
+ SYSREG ("pmccfiltr_el0", CPENC (3,3,14,15,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmccntr_el0", CPENC (3,3,9,13,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmceid0_el0", CPENC (3,3,9,12,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("pmceid1_el0", CPENC (3,3,9,12,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("pmcntenclr_el0", CPENC (3,3,9,12,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmcntenset_el0", CPENC (3,3,9,12,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmcr_el0", CPENC (3,3,9,12,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr0_el0", CPENC (3,3,14,8,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr10_el0", CPENC (3,3,14,9,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr11_el0", CPENC (3,3,14,9,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr12_el0", CPENC (3,3,14,9,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr13_el0", CPENC (3,3,14,9,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr14_el0", CPENC (3,3,14,9,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr15_el0", CPENC (3,3,14,9,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr16_el0", CPENC (3,3,14,10,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr17_el0", CPENC (3,3,14,10,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr18_el0", CPENC (3,3,14,10,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr19_el0", CPENC (3,3,14,10,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr1_el0", CPENC (3,3,14,8,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr20_el0", CPENC (3,3,14,10,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr21_el0", CPENC (3,3,14,10,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr22_el0", CPENC (3,3,14,10,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr23_el0", CPENC (3,3,14,10,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr24_el0", CPENC (3,3,14,11,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr25_el0", CPENC (3,3,14,11,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr26_el0", CPENC (3,3,14,11,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr27_el0", CPENC (3,3,14,11,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr28_el0", CPENC (3,3,14,11,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr29_el0", CPENC (3,3,14,11,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr2_el0", CPENC (3,3,14,8,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr30_el0", CPENC (3,3,14,11,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr3_el0", CPENC (3,3,14,8,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr4_el0", CPENC (3,3,14,8,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr5_el0", CPENC (3,3,14,8,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr6_el0", CPENC (3,3,14,8,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr7_el0", CPENC (3,3,14,8,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr8_el0", CPENC (3,3,14,9,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevcntr9_el0", CPENC (3,3,14,9,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper0_el0", CPENC (3,3,14,12,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper10_el0", CPENC (3,3,14,13,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper11_el0", CPENC (3,3,14,13,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper12_el0", CPENC (3,3,14,13,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper13_el0", CPENC (3,3,14,13,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper14_el0", CPENC (3,3,14,13,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper15_el0", CPENC (3,3,14,13,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper16_el0", CPENC (3,3,14,14,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper17_el0", CPENC (3,3,14,14,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper18_el0", CPENC (3,3,14,14,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper19_el0", CPENC (3,3,14,14,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper1_el0", CPENC (3,3,14,12,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper20_el0", CPENC (3,3,14,14,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper21_el0", CPENC (3,3,14,14,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper22_el0", CPENC (3,3,14,14,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper23_el0", CPENC (3,3,14,14,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper24_el0", CPENC (3,3,14,15,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper25_el0", CPENC (3,3,14,15,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper26_el0", CPENC (3,3,14,15,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper27_el0", CPENC (3,3,14,15,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper28_el0", CPENC (3,3,14,15,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper29_el0", CPENC (3,3,14,15,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper2_el0", CPENC (3,3,14,12,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper30_el0", CPENC (3,3,14,15,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper3_el0", CPENC (3,3,14,12,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper4_el0", CPENC (3,3,14,12,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper5_el0", CPENC (3,3,14,12,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper6_el0", CPENC (3,3,14,12,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper7_el0", CPENC (3,3,14,12,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper8_el0", CPENC (3,3,14,13,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmevtyper9_el0", CPENC (3,3,14,13,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmintenclr_el1", CPENC (3,0,9,14,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmintenset_el1", CPENC (3,0,9,14,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmmir_el1", CPENC (3,0,9,14,6), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("pmovsclr_el0", CPENC (3,3,9,12,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmovsset_el0", CPENC (3,3,9,14,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmscr_el1", CPENC (3,0,9,9,0), F_ARCHEXT, AARCH64_FEATURE (PROFILE))
+ SYSREG ("pmscr_el12", CPENC (3,5,9,9,0), F_ARCHEXT, AARCH64_FEATURE (PROFILE))
+ SYSREG ("pmscr_el2", CPENC (3,4,9,9,0), F_ARCHEXT, AARCH64_FEATURE (PROFILE))
+ SYSREG ("pmselr_el0", CPENC (3,3,9,12,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmsevfr_el1", CPENC (3,0,9,9,5), F_ARCHEXT, AARCH64_FEATURE (PROFILE))
+ SYSREG ("pmsfcr_el1", CPENC (3,0,9,9,4), F_ARCHEXT, AARCH64_FEATURE (PROFILE))
+ SYSREG ("pmsicr_el1", CPENC (3,0,9,9,2), F_ARCHEXT, AARCH64_FEATURE (PROFILE))
+ SYSREG ("pmsidr_el1", CPENC (3,0,9,9,7), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (PROFILE))
+ SYSREG ("pmsirr_el1", CPENC (3,0,9,9,3), F_ARCHEXT, AARCH64_FEATURE (PROFILE))
+ SYSREG ("pmslatfr_el1", CPENC (3,0,9,9,6), F_ARCHEXT, AARCH64_FEATURE (PROFILE))
+ SYSREG ("pmsnevfr_el1", CPENC (3,0,9,9,1), F_ARCHEXT, AARCH64_FEATURE (V8_7A))
+ SYSREG ("pmswinc_el0", CPENC (3,3,9,12,4), F_REG_WRITE, AARCH64_NO_FEATURES)
+ SYSREG ("pmuserenr_el0", CPENC (3,3,9,14,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmxevcntr_el0", CPENC (3,3,9,13,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("pmxevtyper_el0", CPENC (3,3,9,13,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("prbar10_el1", CPENC (3,0,6,13,0), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar10_el2", CPENC (3,4,6,13,0), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar11_el1", CPENC (3,0,6,13,4), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar11_el2", CPENC (3,4,6,13,4), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar12_el1", CPENC (3,0,6,14,0), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar12_el2", CPENC (3,4,6,14,0), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar13_el1", CPENC (3,0,6,14,4), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar13_el2", CPENC (3,4,6,14,4), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar14_el1", CPENC (3,0,6,15,0), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar14_el2", CPENC (3,4,6,15,0), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar15_el1", CPENC (3,0,6,15,4), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar15_el2", CPENC (3,4,6,15,4), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar1_el1", CPENC (3,0,6,8,4), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar1_el2", CPENC (3,4,6,8,4), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar2_el1", CPENC (3,0,6,9,0), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar2_el2", CPENC (3,4,6,9,0), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar3_el1", CPENC (3,0,6,9,4), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar3_el2", CPENC (3,4,6,9,4), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar4_el1", CPENC (3,0,6,10,0), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar4_el2", CPENC (3,4,6,10,0), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar5_el1", CPENC (3,0,6,10,4), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar5_el2", CPENC (3,4,6,10,4), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar6_el1", CPENC (3,0,6,11,0), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar6_el2", CPENC (3,4,6,11,0), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar7_el1", CPENC (3,0,6,11,4), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar7_el2", CPENC (3,4,6,11,4), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar8_el1", CPENC (3,0,6,12,0), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar8_el2", CPENC (3,4,6,12,0), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar9_el1", CPENC (3,0,6,12,4), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar9_el2", CPENC (3,4,6,12,4), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar_el1", CPENC (3,0,6,8,0), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prbar_el2", CPENC (3,4,6,8,0), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prenr_el1", CPENC (3,0,6,1,1), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prenr_el2", CPENC (3,4,6,1,1), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar10_el1", CPENC (3,0,6,13,1), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar10_el2", CPENC (3,4,6,13,1), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar11_el1", CPENC (3,0,6,13,5), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar11_el2", CPENC (3,4,6,13,5), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar12_el1", CPENC (3,0,6,14,1), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar12_el2", CPENC (3,4,6,14,1), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar13_el1", CPENC (3,0,6,14,5), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar13_el2", CPENC (3,4,6,14,5), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar14_el1", CPENC (3,0,6,15,1), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar14_el2", CPENC (3,4,6,15,1), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar15_el1", CPENC (3,0,6,15,5), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar15_el2", CPENC (3,4,6,15,5), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar1_el1", CPENC (3,0,6,8,5), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar1_el2", CPENC (3,4,6,8,5), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar2_el1", CPENC (3,0,6,9,1), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar2_el2", CPENC (3,4,6,9,1), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar3_el1", CPENC (3,0,6,9,5), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar3_el2", CPENC (3,4,6,9,5), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar4_el1", CPENC (3,0,6,10,1), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar4_el2", CPENC (3,4,6,10,1), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar5_el1", CPENC (3,0,6,10,5), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar5_el2", CPENC (3,4,6,10,5), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar6_el1", CPENC (3,0,6,11,1), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar6_el2", CPENC (3,4,6,11,1), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar7_el1", CPENC (3,0,6,11,5), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar7_el2", CPENC (3,4,6,11,5), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar8_el1", CPENC (3,0,6,12,1), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar8_el2", CPENC (3,4,6,12,1), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar9_el1", CPENC (3,0,6,12,5), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar9_el2", CPENC (3,4,6,12,5), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar_el1", CPENC (3,0,6,8,1), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prlar_el2", CPENC (3,4,6,8,1), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prselr_el1", CPENC (3,0,6,2,1), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("prselr_el2", CPENC (3,4,6,2,1), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("revidr_el1", CPENC (3,0,0,0,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("rgsr_el1", CPENC (3,0,1,0,5), F_ARCHEXT, AARCH64_FEATURE (MEMTAG))
+ SYSREG ("rmr_el1", CPENC (3,0,12,0,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("rmr_el2", CPENC (3,4,12,0,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("rmr_el3", CPENC (3,6,12,0,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("rndr", CPENC (3,3,2,4,0), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (RNG))
+ SYSREG ("rndrrs", CPENC (3,3,2,4,1), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (RNG))
+ SYSREG ("rvbar_el1", CPENC (3,0,12,0,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("rvbar_el2", CPENC (3,4,12,0,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("rvbar_el3", CPENC (3,6,12,0,1), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("scr_el3", CPENC (3,6,1,1,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("sctlr_el1", CPENC (3,0,1,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("sctlr_el12", CPENC (3,5,1,0,0), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("sctlr_el2", CPENC (3,4,1,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("sctlr_el3", CPENC (3,6,1,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("scxtnum_el0", CPENC (3,3,13,0,7), F_ARCHEXT, AARCH64_FEATURE (SCXTNUM))
+ SYSREG ("scxtnum_el1", CPENC (3,0,13,0,7), F_ARCHEXT, AARCH64_FEATURE (SCXTNUM))
+ SYSREG ("scxtnum_el12", CPENC (3,5,13,0,7), F_ARCHEXT, AARCH64_FEATURE (SCXTNUM))
+ SYSREG ("scxtnum_el2", CPENC (3,4,13,0,7), F_ARCHEXT, AARCH64_FEATURE (SCXTNUM))
+ SYSREG ("scxtnum_el3", CPENC (3,6,13,0,7), F_ARCHEXT, AARCH64_FEATURE (SCXTNUM))
+ SYSREG ("sder32_el2", CPENC (3,4,1,3,1), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("sder32_el3", CPENC (3,6,1,1,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("smcr_el1", CPENC (3,0,1,2,6), F_ARCHEXT, AARCH64_FEATURE (SME))
+ SYSREG ("smcr_el12", CPENC (3,5,1,2,6), F_ARCHEXT, AARCH64_FEATURE (SME))
+ SYSREG ("smcr_el2", CPENC (3,4,1,2,6), F_ARCHEXT, AARCH64_FEATURE (SME))
+ SYSREG ("smcr_el3", CPENC (3,6,1,2,6), F_ARCHEXT, AARCH64_FEATURE (SME))
+ SYSREG ("smidr_el1", CPENC (3,1,0,0,6), F_REG_READ|F_ARCHEXT, AARCH64_FEATURE (SME))
+ SYSREG ("smpri_el1", CPENC (3,0,1,2,4), F_ARCHEXT, AARCH64_FEATURE (SME))
+ SYSREG ("smprimap_el2", CPENC (3,4,1,2,5), F_ARCHEXT, AARCH64_FEATURE (SME))
+ SYSREG ("sp_el0", CPENC (3,0,4,1,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("sp_el1", CPENC (3,4,4,1,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("sp_el2", CPENC (3,6,4,1,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("spsel", CPENC (3,0,4,2,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("spsr_abt", CPENC (3,4,4,3,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("spsr_el1", CPENC (3,0,4,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("spsr_el12", CPENC (3,5,4,0,0), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("spsr_el2", CPENC (3,4,4,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("spsr_el3", CPENC (3,6,4,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("spsr_fiq", CPENC (3,4,4,3,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("spsr_hyp", CPENC (3,4,4,0,0), F_DEPRECATED, AARCH64_NO_FEATURES)
+ SYSREG ("spsr_irq", CPENC (3,4,4,3,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("spsr_svc", CPENC (3,0,4,0,0), F_DEPRECATED, AARCH64_NO_FEATURES)
+ SYSREG ("spsr_und", CPENC (3,4,4,3,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ssbs", CPENC (3,3,4,2,6), F_ARCHEXT, AARCH64_FEATURE (SSBS))
+ SYSREG ("svcr", CPENC (3,3,4,2,2), F_ARCHEXT, AARCH64_FEATURE (SME))
+ SYSREG ("tco", CPENC (3,3,4,2,7), F_ARCHEXT, AARCH64_FEATURE (MEMTAG))
+ SYSREG ("tcr_el1", CPENC (3,0,2,0,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("tcr_el12", CPENC (3,5,2,0,2), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("tcr_el2", CPENC (3,4,2,0,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("tcr_el3", CPENC (3,6,2,0,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("teecr32_el1", CPENC (2,2,0,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("teehbr32_el1", CPENC (2,2,1,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("tfsr_el1", CPENC (3,0,5,6,0), F_ARCHEXT, AARCH64_FEATURE (MEMTAG))
+ SYSREG ("tfsr_el12", CPENC (3,5,5,6,0), F_ARCHEXT, AARCH64_FEATURE (MEMTAG))
+ SYSREG ("tfsr_el2", CPENC (3,4,5,6,0), F_ARCHEXT, AARCH64_FEATURE (MEMTAG))
+ SYSREG ("tfsr_el3", CPENC (3,6,5,6,0), F_ARCHEXT, AARCH64_FEATURE (MEMTAG))
+ SYSREG ("tfsre0_el1", CPENC (3,0,5,6,1), F_ARCHEXT, AARCH64_FEATURE (MEMTAG))
+ SYSREG ("tpidr2_el0", CPENC (3,3,13,0,5), F_ARCHEXT, AARCH64_FEATURE (SME))
+ SYSREG ("tpidr_el0", CPENC (3,3,13,0,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("tpidr_el1", CPENC (3,0,13,0,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("tpidr_el2", CPENC (3,4,13,0,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("tpidr_el3", CPENC (3,6,13,0,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("tpidrro_el0", CPENC (3,3,13,0,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trbbaser_el1", CPENC (3,0,9,11,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trbidr_el1", CPENC (3,0,9,11,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trblimitr_el1", CPENC (3,0,9,11,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trbmar_el1", CPENC (3,0,9,11,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trbptr_el1", CPENC (3,0,9,11,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trbsr_el1", CPENC (3,0,9,11,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trbtrg_el1", CPENC (3,0,9,11,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacatr0", CPENC (2,1,2,0,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacatr1", CPENC (2,1,2,2,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacatr10", CPENC (2,1,2,4,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacatr11", CPENC (2,1,2,6,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacatr12", CPENC (2,1,2,8,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacatr13", CPENC (2,1,2,10,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacatr14", CPENC (2,1,2,12,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacatr15", CPENC (2,1,2,14,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacatr2", CPENC (2,1,2,4,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacatr3", CPENC (2,1,2,6,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacatr4", CPENC (2,1,2,8,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacatr5", CPENC (2,1,2,10,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacatr6", CPENC (2,1,2,12,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacatr7", CPENC (2,1,2,14,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacatr8", CPENC (2,1,2,0,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacatr9", CPENC (2,1,2,2,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacvr0", CPENC (2,1,2,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacvr1", CPENC (2,1,2,2,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacvr10", CPENC (2,1,2,4,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacvr11", CPENC (2,1,2,6,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacvr12", CPENC (2,1,2,8,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacvr13", CPENC (2,1,2,10,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacvr14", CPENC (2,1,2,12,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacvr15", CPENC (2,1,2,14,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacvr2", CPENC (2,1,2,4,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacvr3", CPENC (2,1,2,6,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacvr4", CPENC (2,1,2,8,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacvr5", CPENC (2,1,2,10,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacvr6", CPENC (2,1,2,12,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacvr7", CPENC (2,1,2,14,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacvr8", CPENC (2,1,2,0,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcacvr9", CPENC (2,1,2,2,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcauthstatus", CPENC (2,1,7,14,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcauxctlr", CPENC (2,1,0,6,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcbbctlr", CPENC (2,1,0,15,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcccctlr", CPENC (2,1,0,14,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trccidcctlr0", CPENC (2,1,3,0,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trccidcctlr1", CPENC (2,1,3,1,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trccidcvr0", CPENC (2,1,3,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trccidcvr1", CPENC (2,1,3,2,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trccidcvr2", CPENC (2,1,3,4,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trccidcvr3", CPENC (2,1,3,6,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trccidcvr4", CPENC (2,1,3,8,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trccidcvr5", CPENC (2,1,3,10,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trccidcvr6", CPENC (2,1,3,12,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trccidcvr7", CPENC (2,1,3,14,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trccidr0", CPENC (2,1,7,12,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trccidr1", CPENC (2,1,7,13,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trccidr2", CPENC (2,1,7,14,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trccidr3", CPENC (2,1,7,15,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcclaimclr", CPENC (2,1,7,9,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcclaimset", CPENC (2,1,7,8,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trccntctlr0", CPENC (2,1,0,4,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trccntctlr1", CPENC (2,1,0,5,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trccntctlr2", CPENC (2,1,0,6,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trccntctlr3", CPENC (2,1,0,7,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trccntrldvr0", CPENC (2,1,0,0,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trccntrldvr1", CPENC (2,1,0,1,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trccntrldvr2", CPENC (2,1,0,2,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trccntrldvr3", CPENC (2,1,0,3,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trccntvr0", CPENC (2,1,0,8,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trccntvr1", CPENC (2,1,0,9,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trccntvr2", CPENC (2,1,0,10,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trccntvr3", CPENC (2,1,0,11,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcconfigr", CPENC (2,1,0,4,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcdevaff0", CPENC (2,1,7,10,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcdevaff1", CPENC (2,1,7,11,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcdevarch", CPENC (2,1,7,15,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcdevid", CPENC (2,1,7,2,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcdevtype", CPENC (2,1,7,3,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcdvcmr0", CPENC (2,1,2,0,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcdvcmr1", CPENC (2,1,2,4,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcdvcmr2", CPENC (2,1,2,8,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcdvcmr3", CPENC (2,1,2,12,6), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcdvcmr4", CPENC (2,1,2,0,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcdvcmr5", CPENC (2,1,2,4,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcdvcmr6", CPENC (2,1,2,8,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcdvcmr7", CPENC (2,1,2,12,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcdvcvr0", CPENC (2,1,2,0,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcdvcvr1", CPENC (2,1,2,4,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcdvcvr2", CPENC (2,1,2,8,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcdvcvr3", CPENC (2,1,2,12,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcdvcvr4", CPENC (2,1,2,0,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcdvcvr5", CPENC (2,1,2,4,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcdvcvr6", CPENC (2,1,2,8,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcdvcvr7", CPENC (2,1,2,12,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trceventctl0r", CPENC (2,1,0,8,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trceventctl1r", CPENC (2,1,0,9,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcextinselr", CPENC (2,1,0,8,4), F_REG_ALIAS, AARCH64_NO_FEATURES)
+ SYSREG ("trcextinselr0", CPENC (2,1,0,8,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcextinselr1", CPENC (2,1,0,9,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcextinselr2", CPENC (2,1,0,10,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcextinselr3", CPENC (2,1,0,11,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcidr0", CPENC (2,1,0,8,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcidr1", CPENC (2,1,0,9,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcidr10", CPENC (2,1,0,2,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcidr11", CPENC (2,1,0,3,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcidr12", CPENC (2,1,0,4,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcidr13", CPENC (2,1,0,5,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcidr2", CPENC (2,1,0,10,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcidr3", CPENC (2,1,0,11,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcidr4", CPENC (2,1,0,12,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcidr5", CPENC (2,1,0,13,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcidr6", CPENC (2,1,0,14,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcidr7", CPENC (2,1,0,15,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcidr8", CPENC (2,1,0,0,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcidr9", CPENC (2,1,0,1,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcimspec0", CPENC (2,1,0,0,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcimspec1", CPENC (2,1,0,1,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcimspec2", CPENC (2,1,0,2,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcimspec3", CPENC (2,1,0,3,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcimspec4", CPENC (2,1,0,4,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcimspec5", CPENC (2,1,0,5,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcimspec6", CPENC (2,1,0,6,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcimspec7", CPENC (2,1,0,7,7), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcitctrl", CPENC (2,1,7,0,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trclar", CPENC (2,1,7,12,6), F_REG_WRITE, AARCH64_NO_FEATURES)
+ SYSREG ("trclsr", CPENC (2,1,7,13,6), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcoslar", CPENC (2,1,1,0,4), F_REG_WRITE, AARCH64_NO_FEATURES)
+ SYSREG ("trcoslsr", CPENC (2,1,1,1,4), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcpdcr", CPENC (2,1,1,4,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcpdsr", CPENC (2,1,1,5,4), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcpidr0", CPENC (2,1,7,8,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcpidr1", CPENC (2,1,7,9,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcpidr2", CPENC (2,1,7,10,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcpidr3", CPENC (2,1,7,11,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcpidr4", CPENC (2,1,7,4,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcpidr5", CPENC (2,1,7,5,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcpidr6", CPENC (2,1,7,6,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcpidr7", CPENC (2,1,7,7,7), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcprgctlr", CPENC (2,1,0,1,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcprocselr", CPENC (2,1,0,2,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcqctlr", CPENC (2,1,0,1,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr10", CPENC (2,1,1,10,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr11", CPENC (2,1,1,11,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr12", CPENC (2,1,1,12,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr13", CPENC (2,1,1,13,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr14", CPENC (2,1,1,14,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr15", CPENC (2,1,1,15,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr16", CPENC (2,1,1,0,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr17", CPENC (2,1,1,1,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr18", CPENC (2,1,1,2,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr19", CPENC (2,1,1,3,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr2", CPENC (2,1,1,2,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr20", CPENC (2,1,1,4,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr21", CPENC (2,1,1,5,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr22", CPENC (2,1,1,6,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr23", CPENC (2,1,1,7,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr24", CPENC (2,1,1,8,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr25", CPENC (2,1,1,9,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr26", CPENC (2,1,1,10,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr27", CPENC (2,1,1,11,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr28", CPENC (2,1,1,12,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr29", CPENC (2,1,1,13,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr3", CPENC (2,1,1,3,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr30", CPENC (2,1,1,14,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr31", CPENC (2,1,1,15,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr4", CPENC (2,1,1,4,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr5", CPENC (2,1,1,5,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr6", CPENC (2,1,1,6,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr7", CPENC (2,1,1,7,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr8", CPENC (2,1,1,8,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsctlr9", CPENC (2,1,1,9,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcrsr", CPENC (2,1,0,10,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcseqevr0", CPENC (2,1,0,0,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcseqevr1", CPENC (2,1,0,1,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcseqevr2", CPENC (2,1,0,2,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcseqrstevr", CPENC (2,1,0,6,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcseqstr", CPENC (2,1,0,7,4), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcssccr0", CPENC (2,1,1,0,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcssccr1", CPENC (2,1,1,1,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcssccr2", CPENC (2,1,1,2,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcssccr3", CPENC (2,1,1,3,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcssccr4", CPENC (2,1,1,4,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcssccr5", CPENC (2,1,1,5,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcssccr6", CPENC (2,1,1,6,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcssccr7", CPENC (2,1,1,7,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcsscsr0", CPENC (2,1,1,8,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcsscsr1", CPENC (2,1,1,9,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcsscsr2", CPENC (2,1,1,10,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcsscsr3", CPENC (2,1,1,11,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcsscsr4", CPENC (2,1,1,12,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcsscsr5", CPENC (2,1,1,13,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcsscsr6", CPENC (2,1,1,14,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcsscsr7", CPENC (2,1,1,15,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcsspcicr0", CPENC (2,1,1,0,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcsspcicr1", CPENC (2,1,1,1,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcsspcicr2", CPENC (2,1,1,2,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcsspcicr3", CPENC (2,1,1,3,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcsspcicr4", CPENC (2,1,1,4,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcsspcicr5", CPENC (2,1,1,5,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcsspcicr6", CPENC (2,1,1,6,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcsspcicr7", CPENC (2,1,1,7,3), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcstallctlr", CPENC (2,1,0,11,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcstatr", CPENC (2,1,0,3,0), F_REG_READ, AARCH64_NO_FEATURES)
+ SYSREG ("trcsyncpr", CPENC (2,1,0,13,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trctraceidr", CPENC (2,1,0,0,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trctsctlr", CPENC (2,1,0,12,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcvdarcctlr", CPENC (2,1,0,10,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcvdctlr", CPENC (2,1,0,8,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcvdsacctlr", CPENC (2,1,0,9,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcvictlr", CPENC (2,1,0,0,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcviiectlr", CPENC (2,1,0,1,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcvipcssctlr", CPENC (2,1,0,3,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcvissctlr", CPENC (2,1,0,2,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcvmidcctlr0", CPENC (2,1,3,2,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcvmidcctlr1", CPENC (2,1,3,3,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcvmidcvr0", CPENC (2,1,3,0,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcvmidcvr1", CPENC (2,1,3,2,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcvmidcvr2", CPENC (2,1,3,4,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcvmidcvr3", CPENC (2,1,3,6,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcvmidcvr4", CPENC (2,1,3,8,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcvmidcvr5", CPENC (2,1,3,10,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcvmidcvr6", CPENC (2,1,3,12,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trcvmidcvr7", CPENC (2,1,3,14,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("trfcr_el1", CPENC (3,0,1,2,1), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("trfcr_el12", CPENC (3,5,1,2,1), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("trfcr_el2", CPENC (3,4,1,2,1), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("ttbr0_el1", CPENC (3,0,2,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ttbr0_el12", CPENC (3,5,2,0,0), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("ttbr0_el2", CPENC (3,4,2,0,0), F_ARCHEXT, AARCH64_FEATURE (V8A))
+ SYSREG ("ttbr0_el3", CPENC (3,6,2,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ttbr1_el1", CPENC (3,0,2,0,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("ttbr1_el12", CPENC (3,5,2,0,1), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("ttbr1_el2", CPENC (3,4,2,0,1), F_ARCHEXT, AARCH64_FEATURES (2, V8A, V8_1A))
+ SYSREG ("uao", CPENC (3,0,4,2,4), F_ARCHEXT, AARCH64_FEATURE (V8_2A))
+ SYSREG ("vbar_el1", CPENC (3,0,12,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("vbar_el12", CPENC (3,5,12,0,0), F_ARCHEXT, AARCH64_FEATURE (V8_1A))
+ SYSREG ("vbar_el2", CPENC (3,4,12,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("vbar_el3", CPENC (3,6,12,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("vdisr_el2", CPENC (3,4,12,1,1), F_ARCHEXT, AARCH64_FEATURE (RAS))
+ SYSREG ("vmecid_a_el2", CPENC (3,4,10,9,1), 0, AARCH64_NO_FEATURES)
+ SYSREG ("vmecid_p_el2", CPENC (3,4,10,9,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("vmpidr_el2", CPENC (3,4,0,0,5), 0, AARCH64_NO_FEATURES)
+ SYSREG ("vncr_el2", CPENC (3,4,2,2,0), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("vpidr_el2", CPENC (3,4,0,0,0), 0, AARCH64_NO_FEATURES)
+ SYSREG ("vsctlr_el2", CPENC (3,4,2,0,0), F_ARCHEXT, AARCH64_FEATURE (V8R))
+ SYSREG ("vsesr_el2", CPENC (3,4,5,2,3), F_ARCHEXT, AARCH64_FEATURE (RAS))
+ SYSREG ("vstcr_el2", CPENC (3,4,2,6,2), F_ARCHEXT, AARCH64_FEATURE (V8_4A))
+ SYSREG ("vsttbr_el2", CPENC (3,4,2,6,0), F_ARCHEXT, AARCH64_FEATURES (2, V8A, V8_4A))
+ SYSREG ("vtcr_el2", CPENC (3,4,2,1,2), 0, AARCH64_NO_FEATURES)
+ SYSREG ("vttbr_el2", CPENC (3,4,2,1,0), F_ARCHEXT, AARCH64_FEATURE (V8A))
+ SYSREG ("zcr_el1", CPENC (3,0,1,2,0), F_ARCHEXT, AARCH64_FEATURE (SVE))
+ SYSREG ("zcr_el12", CPENC (3,5,1,2,0), F_ARCHEXT, AARCH64_FEATURE (SVE))
+ SYSREG ("zcr_el2", CPENC (3,4,1,2,0), F_ARCHEXT, AARCH64_FEATURE (SVE))
+ SYSREG ("zcr_el3", CPENC (3,6,1,2,0), F_ARCHEXT, AARCH64_FEATURE (SVE)) \ No newline at end of file
diff --git a/gcc/config/aarch64/aarch64-tune.md b/gcc/config/aarch64/aarch64-tune.md
index 61bb852..98e6882 100644
--- a/gcc/config/aarch64/aarch64-tune.md
+++ b/gcc/config/aarch64/aarch64-tune.md
@@ -1,5 +1,5 @@
;; -*- buffer-read-only: t -*-
;; Generated automatically by gentune.sh from aarch64-cores.def
(define_attr "tune"
- "cortexa34,cortexa35,cortexa53,cortexa57,cortexa72,cortexa73,thunderx,thunderxt88p1,thunderxt88,octeontx,octeontxt81,octeontxt83,thunderxt81,thunderxt83,ampere1,ampere1a,emag,xgene1,falkor,qdf24xx,exynosm1,phecda,thunderx2t99p1,vulcan,thunderx2t99,cortexa55,cortexa75,cortexa76,cortexa76ae,cortexa77,cortexa78,cortexa78ae,cortexa78c,cortexa65,cortexa65ae,cortexx1,cortexx1c,neoversen1,ares,neoversee1,octeontx2,octeontx2t98,octeontx2t96,octeontx2t93,octeontx2f95,octeontx2f95n,octeontx2f95mm,a64fx,tsv110,thunderx3t110,neoversev1,zeus,neoverse512tvb,saphira,cortexa57cortexa53,cortexa72cortexa53,cortexa73cortexa35,cortexa73cortexa53,cortexa75cortexa55,cortexa76cortexa55,cortexr82,cortexa510,cortexa520,cortexa710,cortexa715,cortexa720,cortexx2,cortexx3,cortexx4,neoversen2,neoversev2,demeter,generic,generic_armv8_a,generic_armv9_a"
+ "cortexa34,cortexa35,cortexa53,cortexa57,cortexa72,cortexa73,thunderx,thunderxt88p1,thunderxt88,octeontx,octeontxt81,octeontxt83,thunderxt81,thunderxt83,ampere1,ampere1a,ampere1b,emag,xgene1,falkor,qdf24xx,exynosm1,phecda,thunderx2t99p1,vulcan,thunderx2t99,cortexa55,cortexa75,cortexa76,cortexa76ae,cortexa77,cortexa78,cortexa78ae,cortexa78c,cortexa65,cortexa65ae,cortexx1,cortexx1c,neoversen1,ares,neoversee1,octeontx2,octeontx2t98,octeontx2t96,octeontx2t93,octeontx2f95,octeontx2f95n,octeontx2f95mm,a64fx,tsv110,thunderx3t110,neoversev1,zeus,neoverse512tvb,saphira,cortexa57cortexa53,cortexa72cortexa53,cortexa73cortexa35,cortexa73cortexa53,cortexa75cortexa55,cortexa76cortexa55,cortexr82,cortexa510,cortexa520,cortexa710,cortexa715,cortexa720,cortexx2,cortexx3,cortexx4,neoversen2,neoversev2,demeter,generic,generic_armv8_a,generic_armv9_a"
(const (symbol_ref "((enum attr_tune) aarch64_tune)")))
diff --git a/gcc/config/aarch64/aarch64-tuning-flags.def b/gcc/config/aarch64/aarch64-tuning-flags.def
index 774568e..f28a738 100644
--- a/gcc/config/aarch64/aarch64-tuning-flags.def
+++ b/gcc/config/aarch64/aarch64-tuning-flags.def
@@ -47,4 +47,6 @@ AARCH64_EXTRA_TUNING_OPTION ("use_new_vector_costs", USE_NEW_VECTOR_COSTS)
AARCH64_EXTRA_TUNING_OPTION ("matched_vector_throughput", MATCHED_VECTOR_THROUGHPUT)
+AARCH64_EXTRA_TUNING_OPTION ("avoid_cross_loop_fma", AVOID_CROSS_LOOP_FMA)
+
#undef AARCH64_EXTRA_TUNING_OPTION
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index f6f6f94..fd44e59 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -22,6 +22,7 @@
#define INCLUDE_STRING
#define INCLUDE_ALGORITHM
+#define INCLUDE_VECTOR
#include "config.h"
#include "system.h"
#include "coretypes.h"
@@ -85,6 +86,13 @@
#include "config/arm/aarch-common.h"
#include "config/arm/aarch-common-protos.h"
#include "ssa.h"
+#include "except.h"
+#include "tree-pass.h"
+#include "cfgbuild.h"
+#include "symbol-summary.h"
+#include "ipa-prop.h"
+#include "ipa-fnsummary.h"
+#include "hash-map.h"
/* This file should be included last. */
#include "target-def.h"
@@ -92,6 +100,26 @@
/* Defined for convenience. */
#define POINTER_BYTES (POINTER_SIZE / BITS_PER_UNIT)
+/* Flags that describe how a function shares certain architectural state
+ with its callers.
+
+ - AARCH64_STATE_SHARED indicates that the function does share the state
+ with callers.
+
+ - AARCH64_STATE_IN indicates that the function reads (or might read) the
+ incoming state. The converse is that the function ignores the incoming
+ state.
+
+ - AARCH64_STATE_OUT indicates that the function returns new state.
+ The converse is that the state on return is the same as it was on entry.
+
+ A function that partially modifies the state treats it as both IN
+ and OUT (because the value on return depends to some extent on the
+ value on input). */
+constexpr auto AARCH64_STATE_SHARED = 1U << 0;
+constexpr auto AARCH64_STATE_IN = 1U << 1;
+constexpr auto AARCH64_STATE_OUT = 1U << 2;
+
/* Information about a legitimate vector immediate operand. */
struct simd_immediate_info
{
@@ -375,6 +403,7 @@ static const struct aarch64_flag_desc aarch64_tuning_flags[] =
#include "tuning_models/neoversen1.h"
#include "tuning_models/ampere1.h"
#include "tuning_models/ampere1a.h"
+#include "tuning_models/ampere1b.h"
#include "tuning_models/neoversev1.h"
#include "tuning_models/neoverse512tvb.h"
#include "tuning_models/neoversen2.h"
@@ -431,10 +460,253 @@ static const struct processor all_cores[] =
#include "aarch64-cores.def"
{NULL, aarch64_none, aarch64_none, aarch64_no_arch, 0, NULL}
};
+/* Internal representation of system registers. */
+typedef struct {
+ const char *name;
+ /* Stringified sysreg encoding values, represented as
+ s<sn>_<op1>_c<cn>_c<cm>_<op2>. */
+ const char *encoding;
+ /* Flags affecting sysreg usage, such as read/write-only. */
+ unsigned properties;
+ /* Architectural features implied by sysreg. */
+ aarch64_feature_flags arch_reqs;
+} sysreg_t;
+
+/* An aarch64_feature_set initializer for a single feature,
+ AARCH64_FEATURE_<FEAT>. */
+#define AARCH64_FEATURE(FEAT) AARCH64_FL_##FEAT
+
+/* Used by AARCH64_FEATURES. */
+#define AARCH64_OR_FEATURES_1(X, F1) \
+ AARCH64_FEATURE (F1)
+#define AARCH64_OR_FEATURES_2(X, F1, F2) \
+ (AARCH64_FEATURE (F1) | AARCH64_OR_FEATURES_1 (X, F2))
+#define AARCH64_OR_FEATURES_3(X, F1, ...) \
+ (AARCH64_FEATURE (F1) | AARCH64_OR_FEATURES_2 (X, __VA_ARGS__))
+
+/* An aarch64_feature_set initializer for the N features listed in "...". */
+#define AARCH64_FEATURES(N, ...) \
+ AARCH64_OR_FEATURES_##N (0, __VA_ARGS__)
+
+#define AARCH64_NO_FEATURES 0
+
+/* Flags associated with the properties of system registers. It mainly serves
+ to mark particular registers as read or write only. */
+#define F_DEPRECATED (1 << 1)
+#define F_REG_READ (1 << 2)
+#define F_REG_WRITE (1 << 3)
+#define F_ARCHEXT (1 << 4)
+/* Flag indicating register name is alias for another system register. */
+#define F_REG_ALIAS (1 << 5)
+
+/* Database of system registers, their encodings and architectural
+ requirements. */
+const sysreg_t aarch64_sysregs[] =
+{
+#define CPENC(SN, OP1, CN, CM, OP2) "s"#SN"_"#OP1"_c"#CN"_c"#CM"_"#OP2
+#define SYSREG(NAME, ENC, FLAGS, ARCH) \
+ { NAME, ENC, FLAGS, ARCH },
+#include "aarch64-sys-regs.def"
+#undef CPENC
+};
+
+#undef AARCH64_NO_FEATURES
+
+using sysreg_map_t = hash_map<nofree_string_hash, const sysreg_t *>;
+static sysreg_map_t *sysreg_map = nullptr;
+
+/* Map system register names to their hardware metadata: encoding,
+ feature flags and architectural feature requirements, all of which
+ are encoded in a sysreg_t struct. */
+void
+aarch64_register_sysreg (const char *name, const sysreg_t *metadata)
+{
+ bool dup = sysreg_map->put (name, metadata);
+ gcc_checking_assert (!dup);
+}
+
+/* Lazily initialize hash table for system register validation,
+ checking the validity of supplied register name and returning
+ register's associated metadata. */
+static void
+aarch64_init_sysregs (void)
+{
+ gcc_assert (!sysreg_map);
+ sysreg_map = new sysreg_map_t;
+
+
+ for (unsigned i = 0; i < ARRAY_SIZE (aarch64_sysregs); i++)
+ {
+ const sysreg_t *reg = aarch64_sysregs + i;
+ aarch64_register_sysreg (reg->name, reg);
+ }
+}
+
+/* No direct access to the sysreg hash-map should be made. Doing so
+ risks trying to acess an unitialized hash-map and dereferencing the
+ returned double pointer without due care risks dereferencing a
+ null-pointer. */
+const sysreg_t *
+aarch64_lookup_sysreg_map (const char *regname)
+{
+ if (!sysreg_map)
+ aarch64_init_sysregs ();
+
+ const sysreg_t **sysreg_entry = sysreg_map->get (regname);
+ if (sysreg_entry != NULL)
+ return *sysreg_entry;
+ return NULL;
+}
/* The current tuning set. */
struct tune_params aarch64_tune_params = generic_tunings;
+/* If NAME is the name of an arm:: attribute that describes shared state,
+ return its associated AARCH64_STATE_* flags, otherwise return 0. */
+static unsigned int
+aarch64_attribute_shared_state_flags (const char *name)
+{
+ if (strcmp (name, "in") == 0)
+ return AARCH64_STATE_SHARED | AARCH64_STATE_IN;
+ if (strcmp (name, "inout") == 0)
+ return AARCH64_STATE_SHARED | AARCH64_STATE_IN | AARCH64_STATE_OUT;
+ if (strcmp (name, "out") == 0)
+ return AARCH64_STATE_SHARED | AARCH64_STATE_OUT;
+ if (strcmp (name, "preserves") == 0)
+ return AARCH64_STATE_SHARED;
+ return 0;
+}
+
+/* See whether attribute list ATTRS has any sharing information
+ for state STATE_NAME. Return the associated state flags if so,
+ otherwise return 0. */
+static unsigned int
+aarch64_lookup_shared_state_flags (tree attrs, const char *state_name)
+{
+ for (tree attr = attrs; attr; attr = TREE_CHAIN (attr))
+ {
+ if (!cxx11_attribute_p (attr))
+ continue;
+
+ auto ns = IDENTIFIER_POINTER (TREE_PURPOSE (TREE_PURPOSE (attr)));
+ if (strcmp (ns, "arm") != 0)
+ continue;
+
+ auto attr_name = IDENTIFIER_POINTER (TREE_VALUE (TREE_PURPOSE (attr)));
+ auto flags = aarch64_attribute_shared_state_flags (attr_name);
+ if (!flags)
+ continue;
+
+ for (tree arg = TREE_VALUE (attr); arg; arg = TREE_CHAIN (arg))
+ {
+ tree value = TREE_VALUE (arg);
+ if (TREE_CODE (value) == STRING_CST
+ && strcmp (TREE_STRING_POINTER (value), state_name) == 0)
+ return flags;
+ }
+ }
+ return 0;
+}
+
+/* Return true if DECL creates a new scope for state STATE_STRING. */
+static bool
+aarch64_fndecl_has_new_state (const_tree decl, const char *state_name)
+{
+ if (tree attr = lookup_attribute ("arm", "new", DECL_ATTRIBUTES (decl)))
+ for (tree arg = TREE_VALUE (attr); arg; arg = TREE_CHAIN (arg))
+ {
+ tree value = TREE_VALUE (arg);
+ if (TREE_CODE (value) == STRING_CST
+ && strcmp (TREE_STRING_POINTER (value), state_name) == 0)
+ return true;
+ }
+ return false;
+}
+
+/* Return true if attribute argument VALUE is a recognized state string,
+ otherwise report an error. NAME is the name of the attribute to which
+ VALUE is being passed. */
+static bool
+aarch64_check_state_string (tree name, tree value)
+{
+ if (TREE_CODE (value) != STRING_CST)
+ {
+ error ("the arguments to %qE must be constant strings", name);
+ return false;
+ }
+
+ const char *state_name = TREE_STRING_POINTER (value);
+ if (strcmp (state_name, "za") != 0
+ && strcmp (state_name, "zt0") != 0)
+ {
+ error ("unrecognized state string %qs", state_name);
+ return false;
+ }
+
+ return true;
+}
+
+/* qsort callback to compare two STRING_CSTs. */
+static int
+cmp_string_csts (const void *a, const void *b)
+{
+ return strcmp (TREE_STRING_POINTER (*(const_tree const *) a),
+ TREE_STRING_POINTER (*(const_tree const *) b));
+}
+
+/* Canonicalize a list of state strings. ARGS contains the arguments to
+ a new attribute while OLD_ATTR, if nonnull, contains a previous attribute
+ of the same type. If CAN_MERGE_IN_PLACE, it is safe to adjust OLD_ATTR's
+ arguments and drop the new attribute. Otherwise, the new attribute must
+ be kept and ARGS must include the information in OLD_ATTR.
+
+ In both cases, the new arguments must be a sorted list of state strings
+ with duplicates removed.
+
+ Return true if new attribute should be kept, false if it should be
+ dropped. */
+static bool
+aarch64_merge_string_arguments (tree args, tree old_attr,
+ bool can_merge_in_place)
+{
+ /* Get a sorted list of all state strings (including duplicates). */
+ auto add_args = [](vec<tree> &strings, const_tree args)
+ {
+ for (const_tree arg = args; arg; arg = TREE_CHAIN (arg))
+ if (TREE_CODE (TREE_VALUE (arg)) == STRING_CST)
+ strings.safe_push (TREE_VALUE (arg));
+ };
+ auto_vec<tree, 16> strings;
+ add_args (strings, args);
+ if (old_attr)
+ add_args (strings, TREE_VALUE (old_attr));
+ strings.qsort (cmp_string_csts);
+
+ /* The list can be empty if there was no previous attribute and if all
+ the new arguments are erroneous. Drop the attribute in that case. */
+ if (strings.is_empty ())
+ return false;
+
+ /* Destructively modify one of the argument lists, removing duplicates
+ on the fly. */
+ bool use_old_attr = old_attr && can_merge_in_place;
+ tree *end = use_old_attr ? &TREE_VALUE (old_attr) : &args;
+ tree prev = NULL_TREE;
+ for (tree arg : strings)
+ {
+ if (prev && simple_cst_equal (arg, prev))
+ continue;
+ prev = arg;
+ if (!*end)
+ *end = tree_cons (NULL_TREE, arg, NULL_TREE);
+ else
+ TREE_VALUE (*end) = arg;
+ end = &TREE_CHAIN (*end);
+ }
+ *end = NULL_TREE;
+ return !use_old_attr;
+}
+
/* Check whether an 'aarch64_vector_pcs' attribute is valid. */
static tree
@@ -463,8 +735,113 @@ handle_aarch64_vector_pcs_attribute (tree *node, tree name, tree,
gcc_unreachable ();
}
+/* Return true if arm::new(ARGS) is compatible with the type of decl DECL,
+ otherwise report an error. */
+static bool
+aarch64_check_arm_new_against_type (tree args, tree decl)
+{
+ tree type_attrs = TYPE_ATTRIBUTES (TREE_TYPE (decl));
+ for (tree arg = args; arg; arg = TREE_CHAIN (arg))
+ {
+ tree value = TREE_VALUE (arg);
+ if (TREE_CODE (value) == STRING_CST)
+ {
+ const char *state_name = TREE_STRING_POINTER (value);
+ if (aarch64_lookup_shared_state_flags (type_attrs, state_name))
+ {
+ error_at (DECL_SOURCE_LOCATION (decl),
+ "cannot create a new %qs scope since %qs is shared"
+ " with callers", state_name, state_name);
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+/* Callback for arm::new attributes. */
+static tree
+handle_arm_new (tree *node, tree name, tree args, int, bool *no_add_attrs)
+{
+ tree decl = *node;
+ if (TREE_CODE (decl) != FUNCTION_DECL)
+ {
+ error ("%qE attribute applies only to function definitions", name);
+ *no_add_attrs = true;
+ return NULL_TREE;
+ }
+ if (TREE_TYPE (decl) == error_mark_node)
+ {
+ *no_add_attrs = true;
+ return NULL_TREE;
+ }
+
+ for (tree arg = args; arg; arg = TREE_CHAIN (arg))
+ aarch64_check_state_string (name, TREE_VALUE (arg));
+
+ if (!aarch64_check_arm_new_against_type (args, decl))
+ {
+ *no_add_attrs = true;
+ return NULL_TREE;
+ }
+
+ /* If there is an old attribute, we should try to update it in-place,
+ so that there is only one (definitive) arm::new attribute on the decl. */
+ tree old_attr = lookup_attribute ("arm", "new", DECL_ATTRIBUTES (decl));
+ if (!aarch64_merge_string_arguments (args, old_attr, true))
+ *no_add_attrs = true;
+
+ return NULL_TREE;
+}
+
+/* Callback for arm::{in,out,inout,preserves} attributes. */
+static tree
+handle_arm_shared (tree *node, tree name, tree args,
+ int, bool *no_add_attrs)
+{
+ tree type = *node;
+ tree old_attrs = TYPE_ATTRIBUTES (type);
+ auto flags = aarch64_attribute_shared_state_flags (IDENTIFIER_POINTER (name));
+ for (tree arg = args; arg; arg = TREE_CHAIN (arg))
+ {
+ tree value = TREE_VALUE (arg);
+ if (aarch64_check_state_string (name, value))
+ {
+ const char *state_name = TREE_STRING_POINTER (value);
+ auto old_flags = aarch64_lookup_shared_state_flags (old_attrs,
+ state_name);
+ if (old_flags && old_flags != flags)
+ {
+ error ("inconsistent attributes for state %qs", state_name);
+ *no_add_attrs = true;
+ return NULL_TREE;
+ }
+ }
+ }
+
+ /* We can't update an old attribute in-place, since types are shared.
+ Instead make sure that this new attribute contains all the
+ information, so that the old attribute becomes redundant. */
+ tree old_attr = lookup_attribute ("arm", IDENTIFIER_POINTER (name),
+ old_attrs);
+ if (!aarch64_merge_string_arguments (args, old_attr, false))
+ *no_add_attrs = true;
+
+ return NULL_TREE;
+}
+
+/* Mutually-exclusive function type attributes for controlling PSTATE.SM. */
+static const struct attribute_spec::exclusions attr_streaming_exclusions[] =
+{
+ /* Attribute name exclusion applies to:
+ function, type, variable */
+ { "streaming", false, true, false },
+ { "streaming_compatible", false, true, false },
+ { NULL, false, false, false }
+};
+
/* Table of machine attributes. */
-static const struct attribute_spec aarch64_attribute_table[] =
+static const attribute_spec aarch64_gnu_attributes[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
@@ -475,8 +852,42 @@ static const struct attribute_spec aarch64_attribute_table[] =
NULL },
{ "Advanced SIMD type", 1, 1, false, true, false, true, NULL, NULL },
{ "SVE type", 3, 3, false, true, false, true, NULL, NULL },
- { "SVE sizeless type", 0, 0, false, true, false, true, NULL, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
+ { "SVE sizeless type", 0, 0, false, true, false, true, NULL, NULL }
+};
+
+static const scoped_attribute_specs aarch64_gnu_attribute_table =
+{
+ "gnu", { aarch64_gnu_attributes }
+};
+
+static const attribute_spec aarch64_arm_attributes[] =
+{
+ { "streaming", 0, 0, false, true, true, true,
+ NULL, attr_streaming_exclusions },
+ { "streaming_compatible", 0, 0, false, true, true, true,
+ NULL, attr_streaming_exclusions },
+ { "locally_streaming", 0, 0, true, false, false, false, NULL, NULL },
+ { "new", 1, -1, true, false, false, false,
+ handle_arm_new, NULL },
+ { "preserves", 1, -1, false, true, true, true,
+ handle_arm_shared, NULL },
+ { "in", 1, -1, false, true, true, true,
+ handle_arm_shared, NULL },
+ { "out", 1, -1, false, true, true, true,
+ handle_arm_shared, NULL },
+ { "inout", 1, -1, false, true, true, true,
+ handle_arm_shared, NULL }
+};
+
+static const scoped_attribute_specs aarch64_arm_attribute_table =
+{
+ "arm", { aarch64_arm_attributes }
+};
+
+static const scoped_attribute_specs *const aarch64_attribute_table[] =
+{
+ &aarch64_gnu_attribute_table,
+ &aarch64_arm_attribute_table
};
typedef enum aarch64_cond_code
@@ -535,7 +946,7 @@ pure_scalable_type_info::piece::get_rtx (unsigned int first_zr,
if (num_zr > 0 && num_pr == 0)
return gen_rtx_REG (mode, first_zr);
- if (num_zr == 0 && num_pr == 1)
+ if (num_zr == 0 && num_pr <= 2)
return gen_rtx_REG (mode, first_pr);
gcc_unreachable ();
@@ -758,6 +1169,7 @@ pure_scalable_type_info::add_piece (const piece &p)
gcc_assert (VECTOR_MODE_P (p.mode) && VECTOR_MODE_P (prev.mode));
unsigned int nelems1, nelems2;
if (prev.orig_mode == p.orig_mode
+ && GET_MODE_CLASS (p.orig_mode) != MODE_VECTOR_BOOL
&& known_eq (prev.offset + GET_MODE_SIZE (prev.mode), p.offset)
&& constant_multiple_p (GET_MODE_NUNITS (prev.mode),
GET_MODE_NUNITS (p.orig_mode), &nelems1)
@@ -1059,8 +1471,7 @@ aarch64_sve_pred_mode_p (machine_mode mode)
const unsigned int VEC_ADVSIMD = 1;
const unsigned int VEC_SVE_DATA = 2;
const unsigned int VEC_SVE_PRED = 4;
-/* Can be used in combination with VEC_ADVSIMD or VEC_SVE_DATA to indicate
- a structure of 2, 3 or 4 vectors. */
+/* Indicates a structure of 2, 3 or 4 vectors or predicates. */
const unsigned int VEC_STRUCT = 8;
/* Can be used in combination with VEC_SVE_DATA to indicate that the
vector has fewer significant bytes than a full SVE vector. */
@@ -1223,6 +1634,9 @@ aarch64_classify_vector_mode (machine_mode mode, bool any_target_p = false)
case E_V2DFmode:
return (TARGET_FLOAT || any_target_p) ? VEC_ADVSIMD : 0;
+ case E_VNx32BImode:
+ return TARGET_SVE ? VEC_SVE_PRED | VEC_STRUCT : 0;
+
default:
return 0;
}
@@ -1350,12 +1764,24 @@ aarch64_sve_data_mode (scalar_mode inner_mode, poly_uint64 nunits)
static opt_machine_mode
aarch64_array_mode (machine_mode mode, unsigned HOST_WIDE_INT nelems)
{
- if (aarch64_classify_vector_mode (mode) == VEC_SVE_DATA
- && IN_RANGE (nelems, 2, 4))
+ if (TARGET_SVE && GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL)
+ {
+ /* Use VNx32BI for pairs of predicates, but explicitly reject giving
+ a mode to other array sizes. Using integer modes requires a round
+ trip through memory and generates terrible code. */
+ if (nelems == 1)
+ return mode;
+ if (mode == VNx16BImode && nelems == 2)
+ return VNx32BImode;
+ return BLKmode;
+ }
+
+ auto flags = aarch64_classify_vector_mode (mode);
+ if (flags == VEC_SVE_DATA && IN_RANGE (nelems, 2, 4))
return aarch64_sve_data_mode (GET_MODE_INNER (mode),
GET_MODE_NUNITS (mode) * nelems);
- if (aarch64_classify_vector_mode (mode) == VEC_ADVSIMD
- && IN_RANGE (nelems, 2, 4))
+
+ if (flags == VEC_ADVSIMD && IN_RANGE (nelems, 2, 4))
return aarch64_advsimd_vector_array_mode (mode, nelems);
return opt_machine_mode ();
@@ -1366,7 +1792,7 @@ static bool
aarch64_array_mode_supported_p (machine_mode mode,
unsigned HOST_WIDE_INT nelems)
{
- if (TARGET_SIMD
+ if (TARGET_BASE_SIMD
&& (AARCH64_VALID_SIMD_QREG_MODE (mode)
|| AARCH64_VALID_SIMD_DREG_MODE (mode))
&& (nelems >= 2 && nelems <= 4))
@@ -1575,12 +2001,17 @@ aarch64_hard_regno_nregs (unsigned regno, machine_mode mode)
return GET_MODE_SIZE (mode).to_constant () / 8;
return CEIL (lowest_size, UNITS_PER_VREG);
}
+
case PR_REGS:
case PR_LO_REGS:
case PR_HI_REGS:
+ return mode == VNx32BImode ? 2 : 1;
+
case FFR_REGS:
case PR_AND_FFR_REGS:
+ case FAKE_REGS:
return 1;
+
default:
return CEIL (lowest_size, UNITS_PER_WORD);
}
@@ -1604,12 +2035,19 @@ aarch64_hard_regno_mode_ok (unsigned regno, machine_mode mode)
return mode == DImode;
unsigned int vec_flags = aarch64_classify_vector_mode (mode);
- if (vec_flags & VEC_SVE_PRED)
+ if (vec_flags == VEC_SVE_PRED)
return pr_or_ffr_regnum_p (regno);
+ if (vec_flags == (VEC_SVE_PRED | VEC_STRUCT))
+ return PR_REGNUM_P (regno);
+
if (pr_or_ffr_regnum_p (regno))
return false;
+ /* These registers are abstract; their modes don't matter. */
+ if (FAKE_REGNUM_P (regno))
+ return true;
+
if (regno == SP_REGNUM)
/* The purpose of comparing with ptr_mode is to support the
global register variable associated with the stack pointer
@@ -1715,6 +2153,177 @@ aarch64_fntype_abi (const_tree fntype)
return default_function_abi;
}
+/* Return the state of PSTATE.SM on entry to functions of type FNTYPE. */
+
+static aarch64_feature_flags
+aarch64_fntype_pstate_sm (const_tree fntype)
+{
+ if (lookup_attribute ("arm", "streaming", TYPE_ATTRIBUTES (fntype)))
+ return AARCH64_FL_SM_ON;
+
+ if (lookup_attribute ("arm", "streaming_compatible",
+ TYPE_ATTRIBUTES (fntype)))
+ return 0;
+
+ return AARCH64_FL_SM_OFF;
+}
+
+/* Return state flags that describe whether and how functions of type
+ FNTYPE share state STATE_NAME with their callers. */
+
+static unsigned int
+aarch64_fntype_shared_flags (const_tree fntype, const char *state_name)
+{
+ return aarch64_lookup_shared_state_flags (TYPE_ATTRIBUTES (fntype),
+ state_name);
+}
+
+/* Return the state of PSTATE.ZA on entry to functions of type FNTYPE. */
+
+static aarch64_feature_flags
+aarch64_fntype_pstate_za (const_tree fntype)
+{
+ if (aarch64_fntype_shared_flags (fntype, "za")
+ || aarch64_fntype_shared_flags (fntype, "zt0"))
+ return AARCH64_FL_ZA_ON;
+
+ return 0;
+}
+
+/* Return the ISA mode on entry to functions of type FNTYPE. */
+
+static aarch64_feature_flags
+aarch64_fntype_isa_mode (const_tree fntype)
+{
+ return (aarch64_fntype_pstate_sm (fntype)
+ | aarch64_fntype_pstate_za (fntype));
+}
+
+/* Return true if FNDECL uses streaming mode internally, as an
+ implementation choice. */
+
+static bool
+aarch64_fndecl_is_locally_streaming (const_tree fndecl)
+{
+ return lookup_attribute ("arm", "locally_streaming",
+ DECL_ATTRIBUTES (fndecl));
+}
+
+/* Return the state of PSTATE.SM when compiling the body of
+ function FNDECL. This might be different from the state of
+ PSTATE.SM on entry. */
+
+static aarch64_feature_flags
+aarch64_fndecl_pstate_sm (const_tree fndecl)
+{
+ if (aarch64_fndecl_is_locally_streaming (fndecl))
+ return AARCH64_FL_SM_ON;
+
+ return aarch64_fntype_pstate_sm (TREE_TYPE (fndecl));
+}
+
+/* Return true if function FNDECL has state STATE_NAME, either by creating
+ new state itself or by sharing state with callers. */
+
+static bool
+aarch64_fndecl_has_state (tree fndecl, const char *state_name)
+{
+ return (aarch64_fndecl_has_new_state (fndecl, state_name)
+ || aarch64_fntype_shared_flags (TREE_TYPE (fndecl),
+ state_name) != 0);
+}
+
+/* Return the state of PSTATE.ZA when compiling the body of function FNDECL.
+ This might be different from the state of PSTATE.ZA on entry. */
+
+static aarch64_feature_flags
+aarch64_fndecl_pstate_za (const_tree fndecl)
+{
+ if (aarch64_fndecl_has_new_state (fndecl, "za")
+ || aarch64_fndecl_has_new_state (fndecl, "zt0"))
+ return AARCH64_FL_ZA_ON;
+
+ return aarch64_fntype_pstate_za (TREE_TYPE (fndecl));
+}
+
+/* Return the ISA mode that should be used to compile the body of
+ function FNDECL. */
+
+static aarch64_feature_flags
+aarch64_fndecl_isa_mode (const_tree fndecl)
+{
+ return (aarch64_fndecl_pstate_sm (fndecl)
+ | aarch64_fndecl_pstate_za (fndecl));
+}
+
+/* Return the state of PSTATE.SM on entry to the current function.
+ This might be different from the state of PSTATE.SM in the function
+ body. */
+
+static aarch64_feature_flags
+aarch64_cfun_incoming_pstate_sm ()
+{
+ return aarch64_fntype_pstate_sm (TREE_TYPE (cfun->decl));
+}
+
+/* Return the state of PSTATE.ZA on entry to the current function.
+ This might be different from the state of PSTATE.ZA in the function
+ body. */
+
+static aarch64_feature_flags
+aarch64_cfun_incoming_pstate_za ()
+{
+ return aarch64_fntype_pstate_za (TREE_TYPE (cfun->decl));
+}
+
+/* Return state flags that describe whether and how the current function shares
+ state STATE_NAME with callers. */
+
+static unsigned int
+aarch64_cfun_shared_flags (const char *state_name)
+{
+ return aarch64_fntype_shared_flags (TREE_TYPE (cfun->decl), state_name);
+}
+
+/* Return true if the current function creates new state of type STATE_NAME
+ (as opposed to sharing the state with its callers or ignoring the state
+ altogether). */
+
+static bool
+aarch64_cfun_has_new_state (const char *state_name)
+{
+ return aarch64_fndecl_has_new_state (cfun->decl, state_name);
+}
+
+/* Return true if PSTATE.SM is 1 in the body of the current function,
+ but is not guaranteed to be 1 on entry. */
+
+static bool
+aarch64_cfun_enables_pstate_sm ()
+{
+ return (aarch64_fndecl_is_locally_streaming (cfun->decl)
+ && aarch64_cfun_incoming_pstate_sm () != AARCH64_FL_SM_ON);
+}
+
+/* Return true if the current function has state STATE_NAME, either by
+ creating new state itself or by sharing state with callers. */
+
+static bool
+aarch64_cfun_has_state (const char *state_name)
+{
+ return aarch64_fndecl_has_state (cfun->decl, state_name);
+}
+
+/* Return true if a call from the current function to a function with
+ ISA mode CALLEE_MODE would involve a change to PSTATE.SM around
+ the BL instruction. */
+
+static bool
+aarch64_call_switches_pstate_sm (aarch64_feature_flags callee_mode)
+{
+ return (callee_mode & ~AARCH64_ISA_MODE & AARCH64_FL_SM_STATE) != 0;
+}
+
/* Implement TARGET_COMPATIBLE_VECTOR_TYPES_P. */
static bool
@@ -1738,7 +2347,7 @@ aarch64_emit_cfi_for_reg_p (unsigned int regno)
static machine_mode
aarch64_reg_save_mode (unsigned int regno)
{
- if (GP_REGNUM_P (regno))
+ if (GP_REGNUM_P (regno) || regno == VG_REGNUM)
return DImode;
if (FP_REGNUM_P (regno))
@@ -1777,17 +2386,65 @@ aarch64_reg_save_mode (unsigned int regno)
gcc_unreachable ();
}
-/* Implement TARGET_INSN_CALLEE_ABI. */
+/* Given the ISA mode on entry to a callee and the ABI of the callee,
+ return the CONST_INT that should be placed in an UNSPEC_CALLEE_ABI rtx. */
-const predefined_function_abi &
-aarch64_insn_callee_abi (const rtx_insn *insn)
+rtx
+aarch64_gen_callee_cookie (aarch64_feature_flags isa_mode, arm_pcs pcs_variant)
+{
+ return gen_int_mode ((unsigned int) isa_mode
+ | (unsigned int) pcs_variant << AARCH64_NUM_ISA_MODES,
+ DImode);
+}
+
+/* COOKIE is a CONST_INT from an UNSPEC_CALLEE_ABI rtx. Return the
+ callee's ABI. */
+
+static const predefined_function_abi &
+aarch64_callee_abi (rtx cookie)
+{
+ return function_abis[UINTVAL (cookie) >> AARCH64_NUM_ISA_MODES];
+}
+
+/* COOKIE is a CONST_INT from an UNSPEC_CALLEE_ABI rtx. Return the
+ required ISA mode on entry to the callee, which is also the ISA
+ mode on return from the callee. */
+
+static aarch64_feature_flags
+aarch64_callee_isa_mode (rtx cookie)
+{
+ return UINTVAL (cookie) & AARCH64_FL_ISA_MODES;
+}
+
+/* INSN is a call instruction. Return the CONST_INT stored in its
+ UNSPEC_CALLEE_ABI rtx. */
+
+static rtx
+aarch64_insn_callee_cookie (const rtx_insn *insn)
{
rtx pat = PATTERN (insn);
gcc_assert (GET_CODE (pat) == PARALLEL);
rtx unspec = XVECEXP (pat, 0, 1);
gcc_assert (GET_CODE (unspec) == UNSPEC
&& XINT (unspec, 1) == UNSPEC_CALLEE_ABI);
- return function_abis[INTVAL (XVECEXP (unspec, 0, 0))];
+ return XVECEXP (unspec, 0, 0);
+}
+
+/* Implement TARGET_INSN_CALLEE_ABI. */
+
+const predefined_function_abi &
+aarch64_insn_callee_abi (const rtx_insn *insn)
+{
+ return aarch64_callee_abi (aarch64_insn_callee_cookie (insn));
+}
+
+/* INSN is a call instruction. Return the required ISA mode on entry to
+ the callee, which is also the ISA mode on return from the callee. */
+
+static aarch64_feature_flags
+aarch64_insn_callee_isa_mode (const rtx_insn *insn)
+{
+ return aarch64_callee_isa_mode (aarch64_insn_callee_cookie (insn));
}
/* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. The callee only saves
@@ -2467,6 +3124,33 @@ aarch64_emit_binop (rtx dest, optab binoptab, rtx op0, rtx op1)
emit_move_insn (dest, tmp);
}
+/* Split a move from SRC to DST into two moves of mode SINGLE_MODE. */
+
+void
+aarch64_split_double_move (rtx dst, rtx src, machine_mode single_mode)
+{
+ machine_mode mode = GET_MODE (dst);
+
+ rtx dst0 = simplify_gen_subreg (single_mode, dst, mode, 0);
+ rtx dst1 = simplify_gen_subreg (single_mode, dst, mode,
+ GET_MODE_SIZE (single_mode));
+ rtx src0 = simplify_gen_subreg (single_mode, src, mode, 0);
+ rtx src1 = simplify_gen_subreg (single_mode, src, mode,
+ GET_MODE_SIZE (single_mode));
+
+ /* At most one pairing may overlap. */
+ if (reg_overlap_mentioned_p (dst0, src1))
+ {
+ aarch64_emit_move (dst1, src1);
+ aarch64_emit_move (dst0, src0);
+ }
+ else
+ {
+ aarch64_emit_move (dst0, src0);
+ aarch64_emit_move (dst1, src1);
+ }
+}
+
/* Split a 128-bit move operation into two 64-bit move operations,
taking care to handle partial overlap of register to register
copies. Special cases are needed when moving between GP regs and
@@ -2476,9 +3160,6 @@ aarch64_emit_binop (rtx dest, optab binoptab, rtx op0, rtx op1)
void
aarch64_split_128bit_move (rtx dst, rtx src)
{
- rtx dst_lo, dst_hi;
- rtx src_lo, src_hi;
-
machine_mode mode = GET_MODE (dst);
gcc_assert (mode == TImode || mode == TFmode || mode == TDmode);
@@ -2493,8 +3174,8 @@ aarch64_split_128bit_move (rtx dst, rtx src)
/* Handle FP <-> GP regs. */
if (FP_REGNUM_P (dst_regno) && GP_REGNUM_P (src_regno))
{
- src_lo = gen_lowpart (word_mode, src);
- src_hi = gen_highpart (word_mode, src);
+ rtx src_lo = gen_lowpart (word_mode, src);
+ rtx src_hi = gen_highpart (word_mode, src);
emit_insn (gen_aarch64_movlow_di (mode, dst, src_lo));
emit_insn (gen_aarch64_movhigh_di (mode, dst, src_hi));
@@ -2502,8 +3183,8 @@ aarch64_split_128bit_move (rtx dst, rtx src)
}
else if (GP_REGNUM_P (dst_regno) && FP_REGNUM_P (src_regno))
{
- dst_lo = gen_lowpart (word_mode, dst);
- dst_hi = gen_highpart (word_mode, dst);
+ rtx dst_lo = gen_lowpart (word_mode, dst);
+ rtx dst_hi = gen_highpart (word_mode, dst);
emit_insn (gen_aarch64_movdi_low (mode, dst_lo, src));
emit_insn (gen_aarch64_movdi_high (mode, dst_hi, src));
@@ -2511,22 +3192,7 @@ aarch64_split_128bit_move (rtx dst, rtx src)
}
}
- dst_lo = gen_lowpart (word_mode, dst);
- dst_hi = gen_highpart (word_mode, dst);
- src_lo = gen_lowpart (word_mode, src);
- src_hi = gen_highpart_mode (word_mode, mode, src);
-
- /* At most one pairing may overlap. */
- if (reg_overlap_mentioned_p (dst_lo, src_hi))
- {
- aarch64_emit_move (dst_hi, src_hi);
- aarch64_emit_move (dst_lo, src_lo);
- }
- else
- {
- aarch64_emit_move (dst_lo, src_lo);
- aarch64_emit_move (dst_hi, src_hi);
- }
+ aarch64_split_double_move (dst, src, word_mode);
}
/* Return true if we should split a move from 128-bit value SRC
@@ -2792,7 +3458,7 @@ aarch64_ptrue_all (unsigned int elt_size)
rtx
aarch64_ptrue_reg (machine_mode mode)
{
- gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
+ gcc_assert (aarch64_sve_pred_mode_p (mode));
rtx reg = force_reg (VNx16BImode, CONSTM1_RTX (VNx16BImode));
return gen_lowpart (mode, reg);
}
@@ -2802,7 +3468,7 @@ aarch64_ptrue_reg (machine_mode mode)
rtx
aarch64_pfalse_reg (machine_mode mode)
{
- gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
+ gcc_assert (aarch64_sve_pred_mode_p (mode));
rtx reg = force_reg (VNx16BImode, CONST0_RTX (VNx16BImode));
return gen_lowpart (mode, reg);
}
@@ -2818,7 +3484,7 @@ bool
aarch64_sve_same_pred_for_ptest_p (rtx *pred1, rtx *pred2)
{
machine_mode mode = GET_MODE (pred1[0]);
- gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL
+ gcc_assert (aarch64_sve_pred_mode_p (mode)
&& mode == GET_MODE (pred2[0])
&& aarch64_sve_ptrue_flag (pred1[1], SImode)
&& aarch64_sve_ptrue_flag (pred2[1], SImode));
@@ -2933,6 +3599,18 @@ aarch64_fold_sve_cnt_pat (aarch64_svpattern pattern, unsigned int nelts_per_vq)
return -1;
}
+/* Return true if a single CNT[BHWD] instruction can multiply FACTOR
+ by the number of 128-bit quadwords in an SVE vector. */
+
+static bool
+aarch64_sve_cnt_factor_p (HOST_WIDE_INT factor)
+{
+ /* The coefficient must be [1, 16] * {2, 4, 8, 16}. */
+ return (IN_RANGE (factor, 2, 16 * 16)
+ && (factor & 1) == 0
+ && factor <= 16 * (factor & -factor));
+}
+
/* Return true if we can move VALUE into a register using a single
CNT[BHWD] instruction. */
@@ -2940,11 +3618,7 @@ static bool
aarch64_sve_cnt_immediate_p (poly_int64 value)
{
HOST_WIDE_INT factor = value.coeffs[0];
- /* The coefficient must be [1, 16] * {2, 4, 8, 16}. */
- return (value.coeffs[1] == factor
- && IN_RANGE (factor, 2, 16 * 16)
- && (factor & 1) == 0
- && factor <= 16 * (factor & -factor));
+ return value.coeffs[1] == factor && aarch64_sve_cnt_factor_p (factor);
}
/* Likewise for rtx X. */
@@ -3060,6 +3734,61 @@ aarch64_output_sve_scalar_inc_dec (rtx offset)
-offset_value.coeffs[1], 0);
}
+/* Return true if a single RDVL instruction can multiply FACTOR by the
+ number of 128-bit quadwords in an SVE vector. This is also the
+ range of ADDVL. */
+
+static bool
+aarch64_sve_rdvl_addvl_factor_p (HOST_WIDE_INT factor)
+{
+ return (multiple_p (factor, 16)
+ && IN_RANGE (factor, -32 * 16, 31 * 16));
+}
+
+/* Return true if ADDPL can be used to add FACTOR multiplied by the number
+ of quadwords in an SVE vector. */
+
+static bool
+aarch64_sve_addpl_factor_p (HOST_WIDE_INT factor)
+{
+ return (multiple_p (factor, 2)
+ && IN_RANGE (factor, -32 * 2, 31 * 2));
+}
+
+/* Return true if we can move VALUE into a register using a single
+ RDVL instruction. */
+
+static bool
+aarch64_sve_rdvl_immediate_p (poly_int64 value)
+{
+ HOST_WIDE_INT factor = value.coeffs[0];
+ return value.coeffs[1] == factor && aarch64_sve_rdvl_addvl_factor_p (factor);
+}
+
+/* Likewise for rtx X. */
+
+bool
+aarch64_sve_rdvl_immediate_p (rtx x)
+{
+ poly_int64 value;
+ return poly_int_rtx_p (x, &value) && aarch64_sve_rdvl_immediate_p (value);
+}
+
+/* Return the asm string for moving RDVL immediate OFFSET into register
+ operand 0. */
+
+char *
+aarch64_output_sve_rdvl (rtx offset)
+{
+ static char buffer[sizeof ("rdvl\t%x0, #-") + 3 * sizeof (int)];
+ poly_int64 offset_value = rtx_to_poly_int64 (offset);
+ gcc_assert (aarch64_sve_rdvl_immediate_p (offset_value));
+
+ int factor = offset_value.coeffs[1];
+ snprintf (buffer, sizeof (buffer), "rdvl\t%%x0, #%d", factor / 16);
+ return buffer;
+}
+
/* Return true if we can add VALUE to a register using a single ADDVL
or ADDPL instruction. */
@@ -3069,10 +3798,8 @@ aarch64_sve_addvl_addpl_immediate_p (poly_int64 value)
HOST_WIDE_INT factor = value.coeffs[0];
if (factor == 0 || value.coeffs[1] != factor)
return false;
- /* FACTOR counts VG / 2, so a value of 2 is one predicate width
- and a value of 16 is one vector width. */
- return (((factor & 15) == 0 && IN_RANGE (factor, -32 * 16, 31 * 16))
- || ((factor & 1) == 0 && IN_RANGE (factor, -32 * 2, 31 * 2)));
+ return (aarch64_sve_rdvl_addvl_factor_p (factor)
+ || aarch64_sve_addpl_factor_p (factor));
}
/* Likewise for rtx X. */
@@ -3168,6 +3895,106 @@ aarch64_output_sve_vector_inc_dec (const char *operands, rtx x)
factor, nelts_per_vq);
}
+/* Return a constant that represents FACTOR multiplied by the
+ number of 128-bit quadwords in an SME vector. ISA_MODE is the
+ ISA mode in which the calculation is being performed. */
+
+rtx
+aarch64_sme_vq_immediate (machine_mode mode, HOST_WIDE_INT factor,
+ aarch64_feature_flags isa_mode)
+{
+ gcc_assert (aarch64_sve_rdvl_addvl_factor_p (factor));
+ if (isa_mode & AARCH64_FL_SM_ON)
+ /* We're in streaming mode, so we can use normal poly-int values. */
+ return gen_int_mode ({ factor, factor }, mode);
+
+ rtvec vec = gen_rtvec (1, gen_int_mode (factor, SImode));
+ rtx unspec = gen_rtx_UNSPEC (mode, vec, UNSPEC_SME_VQ);
+ return gen_rtx_CONST (mode, unspec);
+}
+
+/* Return true if X is a constant that represents some number Y
+ multiplied by the number of quadwords in an SME vector. Store this Y
+ in *FACTOR if so. */
+
+static bool
+aarch64_sme_vq_unspec_p (const_rtx x, HOST_WIDE_INT *factor)
+{
+ if (!TARGET_SME || GET_CODE (x) != CONST)
+ return false;
+
+ x = XEXP (x, 0);
+ if (GET_CODE (x) != UNSPEC
+ || XINT (x, 1) != UNSPEC_SME_VQ
+ || XVECLEN (x, 0) != 1)
+ return false;
+
+ x = XVECEXP (x, 0, 0);
+ if (!CONST_INT_P (x))
+ return false;
+
+ *factor = INTVAL (x);
+ return true;
+}
+
+/* Return true if X is a constant that represents some number Y
+ multiplied by the number of quadwords in an SME vector, and if
+ that Y is in the range of RDSVL. */
+
+bool
+aarch64_rdsvl_immediate_p (const_rtx x)
+{
+ HOST_WIDE_INT factor;
+ return (aarch64_sme_vq_unspec_p (x, &factor)
+ && aarch64_sve_rdvl_addvl_factor_p (factor));
+}
+
+/* Return the asm string for an RDSVL instruction that calculates X,
+ which is a constant that satisfies aarch64_rdsvl_immediate_p. */
+
+char *
+aarch64_output_rdsvl (const_rtx x)
+{
+ gcc_assert (aarch64_rdsvl_immediate_p (x));
+ static char buffer[sizeof ("rdsvl\t%x0, #-") + 3 * sizeof (int)];
+ x = XVECEXP (XEXP (x, 0), 0, 0);
+ snprintf (buffer, sizeof (buffer), "rdsvl\t%%x0, #%d",
+ (int) INTVAL (x) / 16);
+ return buffer;
+}
+
+/* Return true if X is a constant that can be added using ADDSVL or ADDSPL. */
+
+bool
+aarch64_addsvl_addspl_immediate_p (const_rtx x)
+{
+ HOST_WIDE_INT factor;
+ return (aarch64_sme_vq_unspec_p (x, &factor)
+ && (aarch64_sve_rdvl_addvl_factor_p (factor)
+ || aarch64_sve_addpl_factor_p (factor)));
+}
+
+/* X is a constant that satisfies aarch64_addsvl_addspl_immediate_p.
+ Return the asm string for the associated instruction. */
+
+char *
+aarch64_output_addsvl_addspl (rtx x)
+{
+ static char buffer[sizeof ("addspl\t%x0, %x1, #-") + 3 * sizeof (int)];
+ HOST_WIDE_INT factor;
+ if (!aarch64_sme_vq_unspec_p (x, &factor))
+ gcc_unreachable ();
+ if (aarch64_sve_rdvl_addvl_factor_p (factor))
+ snprintf (buffer, sizeof (buffer), "addsvl\t%%x0, %%x1, #%d",
+ (int) factor / 16);
+ else if (aarch64_sve_addpl_factor_p (factor))
+ snprintf (buffer, sizeof (buffer), "addspl\t%%x0, %%x1, #%d",
+ (int) factor / 2);
+ else
+ gcc_unreachable ();
+ return buffer;
+}
+
/* Multipliers for repeating bitmasks of width 32, 16, 8, 4, and 2. */
static const unsigned HOST_WIDE_INT bitmask_imm_mul[] =
@@ -3689,13 +4516,13 @@ aarch64_offset_temporaries (bool add_p, poly_int64 offset)
count += 1;
else if (factor != 0)
{
- factor = abs (factor);
- if (factor > 16 * (factor & -factor))
- /* Need one register for the CNT result and one for the multiplication
- factor. If necessary, the second temporary can be reused for the
- constant part of the offset. */
+ factor /= (HOST_WIDE_INT) least_bit_hwi (factor);
+ if (!IN_RANGE (factor, -32, 31))
+ /* Need one register for the CNT or RDVL result and one for the
+ multiplication factor. If necessary, the second temporary
+ can be reused for the constant part of the offset. */
return 2;
- /* Need one register for the CNT result (which might then
+ /* Need one register for the CNT or RDVL result (which might then
be shifted). */
count += 1;
}
@@ -3728,6 +4555,10 @@ aarch64_add_offset_temporaries (rtx x)
TEMP2, if nonnull, is a second temporary register that doesn't
overlap either DEST or REG.
+ FORCE_ISA_MODE is AARCH64_FL_SM_ON if any variable component of OFFSET
+ is measured relative to the SME vector length instead of the current
+ prevailing vector length. It is 0 otherwise.
+
Since this function may be used to adjust the stack pointer, we must
ensure that it cannot cause transient stack deallocation (for example
by first incrementing SP and then decrementing when adjusting by a
@@ -3736,6 +4567,7 @@ aarch64_add_offset_temporaries (rtx x)
static void
aarch64_add_offset (scalar_int_mode mode, rtx dest, rtx src,
poly_int64 offset, rtx temp1, rtx temp2,
+ aarch64_feature_flags force_isa_mode,
bool frame_related_p, bool emit_move_imm = true)
{
gcc_assert (emit_move_imm || temp1 != NULL_RTX);
@@ -3748,9 +4580,18 @@ aarch64_add_offset (scalar_int_mode mode, rtx dest, rtx src,
/* Try using ADDVL or ADDPL to add the whole value. */
if (src != const0_rtx && aarch64_sve_addvl_addpl_immediate_p (offset))
{
- rtx offset_rtx = gen_int_mode (offset, mode);
+ gcc_assert (offset.coeffs[0] == offset.coeffs[1]);
+ rtx offset_rtx;
+ if (force_isa_mode == 0)
+ offset_rtx = gen_int_mode (offset, mode);
+ else
+ offset_rtx = aarch64_sme_vq_immediate (mode, offset.coeffs[0], 0);
rtx_insn *insn = emit_insn (gen_add3_insn (dest, src, offset_rtx));
RTX_FRAME_RELATED_P (insn) = frame_related_p;
+ if (frame_related_p && (force_isa_mode & AARCH64_FL_SM_ON))
+ add_reg_note (insn, REG_CFA_ADJUST_CFA,
+ gen_rtx_SET (dest, plus_constant (Pmode, src,
+ offset)));
return;
}
@@ -3766,11 +4607,19 @@ aarch64_add_offset (scalar_int_mode mode, rtx dest, rtx src,
if (src != const0_rtx
&& aarch64_sve_addvl_addpl_immediate_p (poly_offset))
{
- rtx offset_rtx = gen_int_mode (poly_offset, mode);
+ rtx offset_rtx;
+ if (force_isa_mode == 0)
+ offset_rtx = gen_int_mode (poly_offset, mode);
+ else
+ offset_rtx = aarch64_sme_vq_immediate (mode, factor, 0);
if (frame_related_p)
{
rtx_insn *insn = emit_insn (gen_add3_insn (dest, src, offset_rtx));
RTX_FRAME_RELATED_P (insn) = true;
+ if (force_isa_mode & AARCH64_FL_SM_ON)
+ add_reg_note (insn, REG_CFA_ADJUST_CFA,
+ gen_rtx_SET (dest, plus_constant (Pmode, src,
+ poly_offset)));
src = dest;
}
else
@@ -3784,85 +4633,117 @@ aarch64_add_offset (scalar_int_mode mode, rtx dest, rtx src,
/* Otherwise use a CNT-based sequence. */
else if (factor != 0)
{
- /* Use a subtraction if we have a negative factor. */
- rtx_code code = PLUS;
- if (factor < 0)
- {
- factor = -factor;
- code = MINUS;
- }
+ /* Calculate CNTB * FACTOR / 16 as CNTB * REL_FACTOR * 2**SHIFT,
+ with negative shifts indicating a shift right. */
+ HOST_WIDE_INT low_bit = least_bit_hwi (factor);
+ HOST_WIDE_INT rel_factor = factor / low_bit;
+ int shift = exact_log2 (low_bit) - 4;
+ gcc_assert (shift >= -4 && (rel_factor & 1) != 0);
- /* Calculate CNTD * FACTOR / 2. First try to fold the division
- into the multiplication. */
+ /* Set CODE, VAL and SHIFT so that [+-] VAL * 2**SHIFT is
+ equal to CNTB * FACTOR / 16, with CODE being the [+-].
+
+ We can avoid a multiplication if REL_FACTOR is in the range
+ of RDVL, although there are then various optimizations that
+ we can try on top. */
+ rtx_code code = PLUS;
rtx val;
- int shift = 0;
- if (factor & 1)
- /* Use a right shift by 1. */
- shift = -1;
- else
- factor /= 2;
- HOST_WIDE_INT low_bit = factor & -factor;
- if (factor <= 16 * low_bit)
+ if (IN_RANGE (rel_factor, -32, 31))
{
- if (factor > 16 * 8)
+ if (force_isa_mode & AARCH64_FL_SM_ON)
+ {
+ /* Try to use an unshifted RDSVL, otherwise fall back on
+ a shifted RDSVL #1. */
+ if (aarch64_sve_rdvl_addvl_factor_p (factor))
+ shift = 0;
+ else
+ factor = rel_factor * 16;
+ val = aarch64_sme_vq_immediate (mode, factor, 0);
+ }
+ /* Try to use an unshifted CNT[BHWD] or RDVL. */
+ else if (aarch64_sve_cnt_factor_p (factor)
+ || aarch64_sve_rdvl_addvl_factor_p (factor))
+ {
+ val = gen_int_mode (poly_int64 (factor, factor), mode);
+ shift = 0;
+ }
+ /* Try to subtract an unshifted CNT[BHWD]. */
+ else if (aarch64_sve_cnt_factor_p (-factor))
+ {
+ code = MINUS;
+ val = gen_int_mode (poly_int64 (-factor, -factor), mode);
+ shift = 0;
+ }
+ /* If subtraction is free, prefer to load a positive constant.
+ In the best case this will fit a shifted CNTB. */
+ else if (src != const0_rtx && rel_factor < 0)
{
- /* "CNTB Xn, ALL, MUL #FACTOR" is out of range, so calculate
- the value with the minimum multiplier and shift it into
- position. */
- int extra_shift = exact_log2 (low_bit);
- shift += extra_shift;
- factor >>= extra_shift;
+ code = MINUS;
+ val = gen_int_mode (-rel_factor * BYTES_PER_SVE_VECTOR, mode);
}
- val = gen_int_mode (poly_int64 (factor * 2, factor * 2), mode);
+ /* Otherwise use a shifted RDVL or CNT[BHWD]. */
+ else
+ val = gen_int_mode (rel_factor * BYTES_PER_SVE_VECTOR, mode);
}
else
{
- /* Base the factor on LOW_BIT if we can calculate LOW_BIT
- directly, since that should increase the chances of being
- able to use a shift and add sequence. If LOW_BIT itself
- is out of range, just use CNTD. */
- if (low_bit <= 16 * 8)
- factor /= low_bit;
+ /* If we can calculate CNTB << SHIFT directly, prefer to do that,
+ since it should increase the chances of being able to use
+ a shift and add sequence for the multiplication.
+ If CNTB << SHIFT is out of range, stick with the current
+ shift factor. */
+ if (force_isa_mode == 0
+ && IN_RANGE (low_bit, 2, 16 * 16))
+ {
+ val = gen_int_mode (poly_int64 (low_bit, low_bit), mode);
+ shift = 0;
+ }
+ else if ((force_isa_mode & AARCH64_FL_SM_ON)
+ && aarch64_sve_rdvl_addvl_factor_p (low_bit))
+ {
+ val = aarch64_sme_vq_immediate (mode, low_bit, 0);
+ shift = 0;
+ }
else
- low_bit = 1;
+ val = gen_int_mode (BYTES_PER_SVE_VECTOR, mode);
- val = gen_int_mode (poly_int64 (low_bit * 2, low_bit * 2), mode);
val = aarch64_force_temporary (mode, temp1, val);
+ /* Prefer to multiply by a positive factor and subtract rather
+ than multiply by a negative factor and add, since positive
+ values are usually easier to move. */
+ if (rel_factor < 0 && src != const0_rtx)
+ {
+ rel_factor = -rel_factor;
+ code = MINUS;
+ }
+
if (can_create_pseudo_p ())
{
- rtx coeff1 = gen_int_mode (factor, mode);
+ rtx coeff1 = gen_int_mode (rel_factor, mode);
val = expand_mult (mode, val, coeff1, NULL_RTX, true, true);
}
else
{
- /* Go back to using a negative multiplication factor if we have
- no register from which to subtract. */
- if (code == MINUS && src == const0_rtx)
- {
- factor = -factor;
- code = PLUS;
- }
- rtx coeff1 = gen_int_mode (factor, mode);
+ rtx coeff1 = gen_int_mode (rel_factor, mode);
coeff1 = aarch64_force_temporary (mode, temp2, coeff1);
val = gen_rtx_MULT (mode, val, coeff1);
}
}
+ /* Multiply by 2 ** SHIFT. */
if (shift > 0)
{
- /* Multiply by 1 << SHIFT. */
val = aarch64_force_temporary (mode, temp1, val);
val = gen_rtx_ASHIFT (mode, val, GEN_INT (shift));
}
- else if (shift == -1)
+ else if (shift < 0)
{
- /* Divide by 2. */
val = aarch64_force_temporary (mode, temp1, val);
- val = gen_rtx_ASHIFTRT (mode, val, const1_rtx);
+ val = gen_rtx_ASHIFTRT (mode, val, GEN_INT (-shift));
}
- /* Calculate SRC +/- CNTD * FACTOR / 2. */
+ /* Add the result to SRC or subtract the result from SRC. */
if (src != const0_rtx)
{
val = aarch64_force_temporary (mode, temp1, val);
@@ -3910,30 +4791,509 @@ aarch64_split_add_offset (scalar_int_mode mode, rtx dest, rtx src,
rtx offset_rtx, rtx temp1, rtx temp2)
{
aarch64_add_offset (mode, dest, src, rtx_to_poly_int64 (offset_rtx),
- temp1, temp2, false);
+ temp1, temp2, 0, false);
}
/* Add DELTA to the stack pointer, marking the instructions frame-related.
- TEMP1 is available as a temporary if nonnull. EMIT_MOVE_IMM is false
- if TEMP1 already contains abs (DELTA). */
+ TEMP1 is available as a temporary if nonnull. FORCE_ISA_MODE is as
+ for aarch64_add_offset. EMIT_MOVE_IMM is false if TEMP1 already
+ contains abs (DELTA). */
static inline void
-aarch64_add_sp (rtx temp1, rtx temp2, poly_int64 delta, bool emit_move_imm)
+aarch64_add_sp (rtx temp1, rtx temp2, poly_int64 delta,
+ aarch64_feature_flags force_isa_mode, bool emit_move_imm)
{
aarch64_add_offset (Pmode, stack_pointer_rtx, stack_pointer_rtx, delta,
- temp1, temp2, true, emit_move_imm);
+ temp1, temp2, force_isa_mode, true, emit_move_imm);
}
/* Subtract DELTA from the stack pointer, marking the instructions
- frame-related if FRAME_RELATED_P. TEMP1 is available as a temporary
- if nonnull. */
+ frame-related if FRAME_RELATED_P. FORCE_ISA_MODE is as for
+ aarch64_add_offset. TEMP1 is available as a temporary if nonnull. */
static inline void
-aarch64_sub_sp (rtx temp1, rtx temp2, poly_int64 delta, bool frame_related_p,
- bool emit_move_imm = true)
+aarch64_sub_sp (rtx temp1, rtx temp2, poly_int64 delta,
+ aarch64_feature_flags force_isa_mode,
+ bool frame_related_p, bool emit_move_imm = true)
{
aarch64_add_offset (Pmode, stack_pointer_rtx, stack_pointer_rtx, -delta,
- temp1, temp2, frame_related_p, emit_move_imm);
+ temp1, temp2, force_isa_mode, frame_related_p,
+ emit_move_imm);
+}
+
+/* A streaming-compatible function needs to switch temporarily to the known
+ PSTATE.SM mode described by LOCAL_MODE. The low bit of OLD_SVCR contains
+ the runtime state of PSTATE.SM in the streaming-compatible code, before
+ the start of the switch to LOCAL_MODE.
+
+ Emit instructions to branch around the mode switch if PSTATE.SM already
+ matches LOCAL_MODE. Return the label that the branch jumps to. */
+
+static rtx_insn *
+aarch64_guard_switch_pstate_sm (rtx old_svcr, aarch64_feature_flags local_mode)
+{
+ local_mode &= AARCH64_FL_SM_STATE;
+ gcc_assert (local_mode != 0);
+ auto already_ok_cond = (local_mode & AARCH64_FL_SM_ON ? NE : EQ);
+ auto *label = gen_label_rtx ();
+ auto *jump = emit_jump_insn (gen_aarch64_tb (already_ok_cond, DImode, DImode,
+ old_svcr, const0_rtx, label));
+ JUMP_LABEL (jump) = label;
+ return label;
+}
+
+/* Emit code to switch from the PSTATE.SM state in OLD_MODE to the PSTATE.SM
+ state in NEW_MODE. This is known to involve either an SMSTART SM or
+ an SMSTOP SM. */
+
+static void
+aarch64_switch_pstate_sm (aarch64_feature_flags old_mode,
+ aarch64_feature_flags new_mode)
+{
+ old_mode &= AARCH64_FL_SM_STATE;
+ new_mode &= AARCH64_FL_SM_STATE;
+ gcc_assert (old_mode != new_mode);
+
+ if ((new_mode & AARCH64_FL_SM_ON)
+ || (new_mode == 0 && (old_mode & AARCH64_FL_SM_OFF)))
+ emit_insn (gen_aarch64_smstart_sm ());
+ else
+ emit_insn (gen_aarch64_smstop_sm ());
+}
+
+/* As a side-effect, SMSTART SM and SMSTOP SM clobber the contents of all
+ FP and predicate registers. This class emits code to preserve any
+ necessary registers around the mode switch.
+
+ The class uses four approaches to saving and restoring contents, enumerated
+ by group_type:
+
+ - GPR: save and restore the contents of FP registers using GPRs.
+ This is used if the FP register contains no more than 64 significant
+ bits. The registers used are FIRST_GPR onwards.
+
+ - MEM_128: save and restore 128-bit SIMD registers using memory.
+
+ - MEM_SVE_PRED: save and restore full SVE predicate registers using memory.
+
+ - MEM_SVE_DATA: save and restore full SVE vector registers using memory.
+
+ The save slots within each memory group are consecutive, with the
+ MEM_SVE_PRED slots occupying a region below the MEM_SVE_DATA slots.
+
+ There will only be two mode switches for each use of SME, so they should
+ not be particularly performance-sensitive. It's also rare for SIMD, SVE
+ or predicate registers to be live across mode switches. We therefore
+ don't preallocate the save slots but instead allocate them locally on
+ demand. This makes the code emitted by the class self-contained. */
+
+class aarch64_sme_mode_switch_regs
+{
+public:
+ static const unsigned int FIRST_GPR = R10_REGNUM;
+
+ void add_reg (machine_mode, unsigned int);
+ void add_call_args (rtx_call_insn *);
+ void add_call_result (rtx_call_insn *);
+ void add_call_preserved_reg (unsigned int);
+ void add_call_preserved_regs (bitmap);
+
+ void emit_prologue ();
+ void emit_epilogue ();
+
+ /* The number of GPRs needed to save FP registers, starting from
+ FIRST_GPR. */
+ unsigned int num_gprs () { return m_group_count[GPR]; }
+
+private:
+ enum sequence { PROLOGUE, EPILOGUE };
+ enum group_type { GPR, MEM_128, MEM_SVE_PRED, MEM_SVE_DATA, NUM_GROUPS };
+
+ /* Information about the save location for one FP, SIMD, SVE data, or
+ SVE predicate register. */
+ struct save_location {
+ /* The register to be saved. */
+ rtx reg;
+
+ /* Which group the save location belongs to. */
+ group_type group;
+
+ /* A zero-based index of the register within the group. */
+ unsigned int index;
+ };
+
+ unsigned int sve_data_headroom ();
+ rtx get_slot_mem (machine_mode, poly_int64);
+ void emit_stack_adjust (sequence, poly_int64);
+ void emit_mem_move (sequence, const save_location &, poly_int64);
+
+ void emit_gpr_moves (sequence);
+ void emit_mem_128_moves (sequence);
+ void emit_sve_sp_adjust (sequence);
+ void emit_sve_pred_moves (sequence);
+ void emit_sve_data_moves (sequence);
+
+ /* All save locations, in no particular order. */
+ auto_vec<save_location, 12> m_save_locations;
+
+ /* The number of registers in each group. */
+ unsigned int m_group_count[NUM_GROUPS] = {};
+};
+
+/* Record that (reg:MODE REGNO) needs to be preserved around the mode
+ switch. */
+
+void
+aarch64_sme_mode_switch_regs::add_reg (machine_mode mode, unsigned int regno)
+{
+ if (!FP_REGNUM_P (regno) && !PR_REGNUM_P (regno))
+ return;
+
+ unsigned int end_regno = end_hard_regno (mode, regno);
+ unsigned int vec_flags = aarch64_classify_vector_mode (mode);
+ gcc_assert ((vec_flags & VEC_STRUCT) || end_regno == regno + 1);
+ for (; regno < end_regno; regno++)
+ {
+ machine_mode submode = mode;
+ if (vec_flags & VEC_STRUCT)
+ {
+ if (vec_flags & VEC_SVE_PRED)
+ submode = VNx16BImode;
+ else if (vec_flags & VEC_SVE_DATA)
+ submode = SVE_BYTE_MODE;
+ else if (vec_flags & VEC_PARTIAL)
+ submode = V8QImode;
+ else
+ submode = V16QImode;
+ }
+ save_location loc;
+ loc.reg = gen_rtx_REG (submode, regno);
+ if (vec_flags & VEC_SVE_PRED)
+ {
+ gcc_assert (PR_REGNUM_P (regno));
+ loc.group = MEM_SVE_PRED;
+ }
+ else
+ {
+ gcc_assert (FP_REGNUM_P (regno));
+ if (known_le (GET_MODE_SIZE (submode), 8))
+ loc.group = GPR;
+ else if (known_eq (GET_MODE_SIZE (submode), 16))
+ loc.group = MEM_128;
+ else
+ loc.group = MEM_SVE_DATA;
+ }
+ loc.index = m_group_count[loc.group]++;
+ m_save_locations.quick_push (loc);
+ }
+}
+
+/* Record that the arguments to CALL_INSN need to be preserved around
+ the mode switch. */
+
+void
+aarch64_sme_mode_switch_regs::add_call_args (rtx_call_insn *call_insn)
+{
+ for (rtx node = CALL_INSN_FUNCTION_USAGE (call_insn);
+ node; node = XEXP (node, 1))
+ {
+ rtx item = XEXP (node, 0);
+ if (GET_CODE (item) != USE)
+ continue;
+ item = XEXP (item, 0);
+ if (!REG_P (item))
+ continue;
+ add_reg (GET_MODE (item), REGNO (item));
+ }
+}
+
+/* Record that the return value from CALL_INSN (if any) needs to be
+ preserved around the mode switch. */
+
+void
+aarch64_sme_mode_switch_regs::add_call_result (rtx_call_insn *call_insn)
+{
+ rtx pat = PATTERN (call_insn);
+ gcc_assert (GET_CODE (pat) == PARALLEL);
+ pat = XVECEXP (pat, 0, 0);
+ if (GET_CODE (pat) == CALL)
+ return;
+ rtx dest = SET_DEST (pat);
+ if (GET_CODE (dest) == PARALLEL)
+ for (int i = 0; i < XVECLEN (dest, 0); ++i)
+ {
+ rtx x = XVECEXP (dest, 0, i);
+ gcc_assert (GET_CODE (x) == EXPR_LIST);
+ rtx reg = XEXP (x, 0);
+ add_reg (GET_MODE (reg), REGNO (reg));
+ }
+ else
+ add_reg (GET_MODE (dest), REGNO (dest));
+}
+
+/* REGNO is a register that is call-preserved under the current function's ABI.
+ Record that it must be preserved around the mode switch. */
+
+void
+aarch64_sme_mode_switch_regs::add_call_preserved_reg (unsigned int regno)
+{
+ if (FP_REGNUM_P (regno))
+ switch (crtl->abi->id ())
+ {
+ case ARM_PCS_SVE:
+ add_reg (VNx16QImode, regno);
+ break;
+ case ARM_PCS_SIMD:
+ add_reg (V16QImode, regno);
+ break;
+ case ARM_PCS_AAPCS64:
+ add_reg (DImode, regno);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ else if (PR_REGNUM_P (regno))
+ add_reg (VNx16BImode, regno);
+}
+
+/* The hard registers in REGS are call-preserved under the current function's
+ ABI. Record that they must be preserved around the mode switch. */
+
+void
+aarch64_sme_mode_switch_regs::add_call_preserved_regs (bitmap regs)
+{
+ bitmap_iterator bi;
+ unsigned int regno;
+ EXECUTE_IF_SET_IN_BITMAP (regs, 0, regno, bi)
+ if (HARD_REGISTER_NUM_P (regno))
+ add_call_preserved_reg (regno);
+ else
+ break;
+}
+
+/* Emit code to save registers before the mode switch. */
+
+void
+aarch64_sme_mode_switch_regs::emit_prologue ()
+{
+ emit_sve_sp_adjust (PROLOGUE);
+ emit_sve_pred_moves (PROLOGUE);
+ emit_sve_data_moves (PROLOGUE);
+ emit_mem_128_moves (PROLOGUE);
+ emit_gpr_moves (PROLOGUE);
+}
+
+/* Emit code to restore registers after the mode switch. */
+
+void
+aarch64_sme_mode_switch_regs::emit_epilogue ()
+{
+ emit_gpr_moves (EPILOGUE);
+ emit_mem_128_moves (EPILOGUE);
+ emit_sve_pred_moves (EPILOGUE);
+ emit_sve_data_moves (EPILOGUE);
+ emit_sve_sp_adjust (EPILOGUE);
+}
+
+/* The SVE predicate registers are stored below the SVE data registers,
+ with the predicate save area being padded to a data-register-sized
+ boundary. Return the size of this padded area as a whole number
+ of data register slots. */
+
+unsigned int
+aarch64_sme_mode_switch_regs::sve_data_headroom ()
+{
+ return CEIL (m_group_count[MEM_SVE_PRED], 8);
+}
+
+/* Return a memory reference of mode MODE to OFFSET bytes from the
+ stack pointer. */
+
+rtx
+aarch64_sme_mode_switch_regs::get_slot_mem (machine_mode mode,
+ poly_int64 offset)
+{
+ rtx addr = plus_constant (Pmode, stack_pointer_rtx, offset);
+ return gen_rtx_MEM (mode, addr);
+}
+
+/* Allocate or deallocate SIZE bytes of stack space: SEQ decides which. */
+
+void
+aarch64_sme_mode_switch_regs::emit_stack_adjust (sequence seq,
+ poly_int64 size)
+{
+ if (seq == PROLOGUE)
+ size = -size;
+ emit_insn (gen_rtx_SET (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx, size)));
+}
+
+/* Save or restore the register in LOC, whose slot is OFFSET bytes from
+ the stack pointer. SEQ chooses between saving and restoring. */
+
+void
+aarch64_sme_mode_switch_regs::emit_mem_move (sequence seq,
+ const save_location &loc,
+ poly_int64 offset)
+{
+ rtx mem = get_slot_mem (GET_MODE (loc.reg), offset);
+ if (seq == PROLOGUE)
+ emit_move_insn (mem, loc.reg);
+ else
+ emit_move_insn (loc.reg, mem);
+}
+
+/* Emit instructions to save or restore the GPR group. SEQ chooses between
+ saving and restoring. */
+
+void
+aarch64_sme_mode_switch_regs::emit_gpr_moves (sequence seq)
+{
+ for (auto &loc : m_save_locations)
+ if (loc.group == GPR)
+ {
+ gcc_assert (loc.index < 8);
+ rtx gpr = gen_rtx_REG (GET_MODE (loc.reg), FIRST_GPR + loc.index);
+ if (seq == PROLOGUE)
+ emit_move_insn (gpr, loc.reg);
+ else
+ emit_move_insn (loc.reg, gpr);
+ }
+}
+
+/* Emit instructions to save or restore the MEM_128 group. SEQ chooses
+ between saving and restoring. */
+
+void
+aarch64_sme_mode_switch_regs::emit_mem_128_moves (sequence seq)
+{
+ HOST_WIDE_INT count = m_group_count[MEM_128];
+ if (count == 0)
+ return;
+
+ auto sp = stack_pointer_rtx;
+ auto sp_adjust = (seq == PROLOGUE ? -count : count) * 16;
+
+ /* Pick a common mode that supports LDR & STR with pre/post-modification
+ and LDP & STP with pre/post-modification. */
+ auto mode = TFmode;
+
+ /* An instruction pattern that should be emitted at the end. */
+ rtx last_pat = NULL_RTX;
+
+ /* A previous MEM_128 location that hasn't been handled yet. */
+ save_location *prev_loc = nullptr;
+
+ /* Look for LDP/STPs and record any leftover LDR/STR in PREV_LOC. */
+ for (auto &loc : m_save_locations)
+ if (loc.group == MEM_128)
+ {
+ if (!prev_loc)
+ {
+ prev_loc = &loc;
+ continue;
+ }
+ gcc_assert (loc.index == prev_loc->index + 1);
+
+ /* The offset of the base of the save area from the current
+ stack pointer. */
+ HOST_WIDE_INT bias = 0;
+ if (prev_loc->index == 0 && seq == PROLOGUE)
+ bias = sp_adjust;
+
+ /* Get the two sets in the LDP/STP. */
+ rtx ops[] = {
+ gen_rtx_REG (mode, REGNO (prev_loc->reg)),
+ get_slot_mem (mode, prev_loc->index * 16 + bias),
+ gen_rtx_REG (mode, REGNO (loc.reg)),
+ get_slot_mem (mode, loc.index * 16 + bias)
+ };
+ unsigned int lhs = (seq == PROLOGUE);
+ rtx set1 = gen_rtx_SET (ops[lhs], ops[1 - lhs]);
+ rtx set2 = gen_rtx_SET (ops[lhs + 2], ops[3 - lhs]);
+
+ /* Combine the sets with any stack allocation/deallocation. */
+ rtvec vec;
+ if (prev_loc->index == 0)
+ {
+ rtx plus_sp = plus_constant (Pmode, sp, sp_adjust);
+ vec = gen_rtvec (3, gen_rtx_SET (sp, plus_sp), set1, set2);
+ }
+ else
+ vec = gen_rtvec (2, set1, set2);
+ rtx pat = gen_rtx_PARALLEL (VOIDmode, vec);
+
+ /* Queue a deallocation to the end, otherwise emit the
+ instruction now. */
+ if (seq == EPILOGUE && prev_loc->index == 0)
+ last_pat = pat;
+ else
+ emit_insn (pat);
+ prev_loc = nullptr;
+ }
+
+ /* Handle any leftover LDR/STR. */
+ if (prev_loc)
+ {
+ rtx reg = gen_rtx_REG (mode, REGNO (prev_loc->reg));
+ rtx addr;
+ if (prev_loc->index != 0)
+ addr = plus_constant (Pmode, sp, prev_loc->index * 16);
+ else if (seq == PROLOGUE)
+ {
+ rtx allocate = plus_constant (Pmode, sp, -count * 16);
+ addr = gen_rtx_PRE_MODIFY (Pmode, sp, allocate);
+ }
+ else
+ {
+ rtx deallocate = plus_constant (Pmode, sp, count * 16);
+ addr = gen_rtx_POST_MODIFY (Pmode, sp, deallocate);
+ }
+ rtx mem = gen_rtx_MEM (mode, addr);
+ if (seq == PROLOGUE)
+ emit_move_insn (mem, reg);
+ else
+ emit_move_insn (reg, mem);
+ }
+
+ if (last_pat)
+ emit_insn (last_pat);
+}
+
+/* Allocate or deallocate the stack space needed by the SVE groups.
+ SEQ chooses between allocating and deallocating. */
+
+void
+aarch64_sme_mode_switch_regs::emit_sve_sp_adjust (sequence seq)
+{
+ if (unsigned int count = m_group_count[MEM_SVE_DATA] + sve_data_headroom ())
+ emit_stack_adjust (seq, count * BYTES_PER_SVE_VECTOR);
+}
+
+/* Save or restore the MEM_SVE_DATA group. SEQ chooses between saving
+ and restoring. */
+
+void
+aarch64_sme_mode_switch_regs::emit_sve_data_moves (sequence seq)
+{
+ for (auto &loc : m_save_locations)
+ if (loc.group == MEM_SVE_DATA)
+ {
+ auto index = loc.index + sve_data_headroom ();
+ emit_mem_move (seq, loc, index * BYTES_PER_SVE_VECTOR);
+ }
+}
+
+/* Save or restore the MEM_SVE_PRED group. SEQ chooses between saving
+ and restoring. */
+
+void
+aarch64_sme_mode_switch_regs::emit_sve_pred_moves (sequence seq)
+{
+ for (auto &loc : m_save_locations)
+ if (loc.group == MEM_SVE_PRED)
+ emit_mem_move (seq, loc, loc.index * BYTES_PER_SVE_PRED);
}
/* Set DEST to (vec_series BASE STEP). */
@@ -4508,7 +5868,9 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
aarch64_report_sve_required ();
return;
}
- if (base == const0_rtx && aarch64_sve_cnt_immediate_p (offset))
+ if (base == const0_rtx
+ && (aarch64_sve_cnt_immediate_p (offset)
+ || aarch64_sve_rdvl_immediate_p (offset)))
emit_insn (gen_rtx_SET (dest, imm));
else
{
@@ -4526,15 +5888,24 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
{
base = aarch64_force_temporary (int_mode, dest, base);
aarch64_add_offset (int_mode, dest, base, offset,
- NULL_RTX, NULL_RTX, false);
+ NULL_RTX, NULL_RTX, 0, false);
}
else
aarch64_add_offset (int_mode, dest, base, offset,
- dest, NULL_RTX, false);
+ dest, NULL_RTX, 0, false);
}
return;
}
+ if (aarch64_rdsvl_immediate_p (base))
+ {
+ /* We could handle non-constant offsets if they are ever
+ generated. */
+ gcc_assert (const_offset == 0);
+ emit_insn (gen_rtx_SET (dest, imm));
+ return;
+ }
+
sty = aarch64_classify_symbol (base, const_offset);
switch (sty)
{
@@ -4548,7 +5919,7 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
gcc_assert (can_create_pseudo_p ());
base = aarch64_force_temporary (int_mode, dest, base);
aarch64_add_offset (int_mode, dest, base, const_offset,
- NULL_RTX, NULL_RTX, false);
+ NULL_RTX, NULL_RTX, 0, false);
return;
}
@@ -4588,7 +5959,7 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
gcc_assert(can_create_pseudo_p ());
base = aarch64_force_temporary (int_mode, dest, base);
aarch64_add_offset (int_mode, dest, base, const_offset,
- NULL_RTX, NULL_RTX, false);
+ NULL_RTX, NULL_RTX, 0, false);
return;
}
/* FALLTHRU */
@@ -4609,7 +5980,7 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
if (!CONST_INT_P (imm))
{
- if (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL)
+ if (aarch64_sve_pred_mode_p (mode))
{
/* Only the low bit of each .H, .S and .D element is defined,
so we can set the upper bits to whatever we like. If the
@@ -4888,6 +6259,11 @@ aarch64_function_ok_for_sibcall (tree, tree exp)
if (crtl->abi->id () != expr_callee_abi (exp).id ())
return false;
+ tree fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
+ if (aarch64_fntype_pstate_sm (fntype) & ~aarch64_cfun_incoming_pstate_sm ())
+ return false;
+ if (aarch64_fntype_pstate_za (fntype) != aarch64_cfun_incoming_pstate_za ())
+ return false;
return true;
}
@@ -5632,6 +7008,40 @@ on_stack:
return;
}
+/* Add the current argument register to the set of those that need
+ to be saved and restored around a change to PSTATE.SM. */
+
+static void
+aarch64_record_sme_mode_switch_args (CUMULATIVE_ARGS *pcum)
+{
+ subrtx_var_iterator::array_type array;
+ FOR_EACH_SUBRTX_VAR (iter, array, pcum->aapcs_reg, NONCONST)
+ {
+ rtx x = *iter;
+ if (REG_P (x) && (FP_REGNUM_P (REGNO (x)) || PR_REGNUM_P (REGNO (x))))
+ {
+ unsigned int i = pcum->num_sme_mode_switch_args++;
+ gcc_assert (i < ARRAY_SIZE (pcum->sme_mode_switch_args));
+ pcum->sme_mode_switch_args[i] = x;
+ }
+ }
+}
+
+/* Return a parallel that contains all the registers that need to be
+ saved around a change to PSTATE.SM. Return const0_rtx if there is
+ no such mode switch, or if no registers need to be saved. */
+
+static rtx
+aarch64_finish_sme_mode_switch_args (CUMULATIVE_ARGS *pcum)
+{
+ if (!pcum->num_sme_mode_switch_args)
+ return const0_rtx;
+
+ auto argvec = gen_rtvec_v (pcum->num_sme_mode_switch_args,
+ pcum->sme_mode_switch_args);
+ return gen_rtx_PARALLEL (VOIDmode, argvec);
+}
+
/* Implement TARGET_FUNCTION_ARG. */
static rtx
@@ -5643,7 +7053,17 @@ aarch64_function_arg (cumulative_args_t pcum_v, const function_arg_info &arg)
|| pcum->pcs_variant == ARM_PCS_SVE);
if (arg.end_marker_p ())
- return gen_int_mode (pcum->pcs_variant, DImode);
+ {
+ rtx abi_cookie = aarch64_gen_callee_cookie (pcum->isa_mode,
+ pcum->pcs_variant);
+ rtx sme_mode_switch_args = aarch64_finish_sme_mode_switch_args (pcum);
+ rtx shared_za_flags = gen_int_mode (pcum->shared_za_flags, SImode);
+ rtx shared_zt0_flags = gen_int_mode (pcum->shared_zt0_flags, SImode);
+ return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, abi_cookie,
+ sme_mode_switch_args,
+ shared_za_flags,
+ shared_zt0_flags));
+ }
aarch64_layout_arg (pcum_v, arg);
return pcum->aapcs_reg;
@@ -5653,7 +7073,7 @@ void
aarch64_init_cumulative_args (CUMULATIVE_ARGS *pcum,
const_tree fntype,
rtx libname ATTRIBUTE_UNUSED,
- const_tree fndecl ATTRIBUTE_UNUSED,
+ const_tree fndecl,
unsigned n_named ATTRIBUTE_UNUSED,
bool silent_p)
{
@@ -5664,14 +7084,25 @@ aarch64_init_cumulative_args (CUMULATIVE_ARGS *pcum,
pcum->aapcs_nextnvrn = 0;
pcum->aapcs_nextnprn = 0;
if (fntype)
- pcum->pcs_variant = (arm_pcs) fntype_abi (fntype).id ();
+ {
+ pcum->pcs_variant = (arm_pcs) fntype_abi (fntype).id ();
+ pcum->isa_mode = aarch64_fntype_isa_mode (fntype);
+ }
else
- pcum->pcs_variant = ARM_PCS_AAPCS64;
+ {
+ pcum->pcs_variant = ARM_PCS_AAPCS64;
+ pcum->isa_mode = AARCH64_FL_DEFAULT_ISA_MODE;
+ }
pcum->aapcs_reg = NULL_RTX;
pcum->aapcs_arg_processed = false;
pcum->aapcs_stack_words = 0;
pcum->aapcs_stack_size = 0;
pcum->silent_p = silent_p;
+ pcum->shared_za_flags
+ = (fntype ? aarch64_fntype_shared_flags (fntype, "za") : 0U);
+ pcum->shared_zt0_flags
+ = (fntype ? aarch64_fntype_shared_flags (fntype, "zt0") : 0U);
+ pcum->num_sme_mode_switch_args = 0;
if (!silent_p
&& !TARGET_FLOAT
@@ -5712,6 +7143,10 @@ aarch64_function_arg_advance (cumulative_args_t pcum_v,
aarch64_layout_arg (pcum_v, arg);
gcc_assert ((pcum->aapcs_reg != NULL_RTX)
!= (pcum->aapcs_stack_words != 0));
+ if (pcum->aapcs_reg
+ && aarch64_call_switches_pstate_sm (pcum->isa_mode))
+ aarch64_record_sme_mode_switch_args (pcum);
+
pcum->aapcs_arg_processed = false;
pcum->aapcs_ncrn = pcum->aapcs_nextncrn;
pcum->aapcs_nvrn = pcum->aapcs_nextnvrn;
@@ -6137,8 +7572,7 @@ aarch64_output_probe_sve_stack_clash (rtx base, rtx adjustment,
static bool
aarch64_needs_frame_chain (void)
{
- /* Force a frame chain for EH returns so the return address is at FP+8. */
- if (frame_pointer_needed || crtl->calls_eh_return)
+ if (frame_pointer_needed)
return true;
/* A leaf function cannot have calls or write LR. */
@@ -6166,6 +7600,50 @@ aarch64_save_regs_above_locals_p ()
return crtl->stack_protect_guard;
}
+/* Return true if the current function needs to record the incoming
+ value of PSTATE.SM. */
+static bool
+aarch64_need_old_pstate_sm ()
+{
+ /* Exit early if the incoming value of PSTATE.SM is known at
+ compile time. */
+ if (aarch64_cfun_incoming_pstate_sm () != 0)
+ return false;
+
+ if (aarch64_cfun_enables_pstate_sm ())
+ return true;
+
+ /* Non-local goto receivers are entered with PSTATE.SM equal to 0,
+ but the function needs to return with PSTATE.SM unchanged. */
+ if (nonlocal_goto_handler_labels)
+ return true;
+
+ /* Likewise for exception handlers. */
+ eh_landing_pad lp;
+ for (unsigned int i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
+ if (lp && lp->post_landing_pad)
+ return true;
+
+ /* Non-local gotos need to set PSTATE.SM to zero. It's possible to call
+ streaming-compatible functions without SME being available, so PSTATE.SM
+ should only be changed if it is currently set to one. */
+ if (crtl->has_nonlocal_goto)
+ return true;
+
+ if (cfun->machine->call_switches_pstate_sm)
+ for (auto insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if (auto *call = dyn_cast<rtx_call_insn *> (insn))
+ if (!SIBLING_CALL_P (call))
+ {
+ /* Return true if there is a call to a non-streaming-compatible
+ function. */
+ auto callee_isa_mode = aarch64_insn_callee_isa_mode (call);
+ if (aarch64_call_switches_pstate_sm (callee_isa_mode))
+ return true;
+ }
+ return false;
+}
+
/* Mark the registers that need to be saved by the callee and calculate
the size of the callee-saved registers area and frame record (both FP
and LR may be omitted). */
@@ -6178,6 +7656,7 @@ aarch64_layout_frame (void)
bool frame_related_fp_reg_p = false;
aarch64_frame &frame = cfun->machine->frame;
poly_int64 top_of_locals = -1;
+ bool enables_pstate_sm = aarch64_cfun_enables_pstate_sm ();
vec_safe_truncate (frame.saved_gprs, 0);
vec_safe_truncate (frame.saved_fprs, 0);
@@ -6199,6 +7678,7 @@ aarch64_layout_frame (void)
/* First mark all the registers that really need to be saved... */
for (regno = 0; regno <= LAST_SAVED_REGNUM; regno++)
frame.reg_offset[regno] = SLOT_NOT_REQUIRED;
+ frame.old_svcr_offset = SLOT_NOT_REQUIRED;
/* ... that includes the eh data registers (if needed)... */
if (crtl->calls_eh_return)
@@ -6214,7 +7694,7 @@ aarch64_layout_frame (void)
frame.reg_offset[regno] = SLOT_REQUIRED;
for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
- if (df_regs_ever_live_p (regno)
+ if ((enables_pstate_sm || df_regs_ever_live_p (regno))
&& !fixed_regs[regno]
&& !crtl->abi->clobbers_full_reg_p (regno))
{
@@ -6243,7 +7723,7 @@ aarch64_layout_frame (void)
}
for (regno = P0_REGNUM; regno <= P15_REGNUM; regno++)
- if (df_regs_ever_live_p (regno)
+ if ((enables_pstate_sm || df_regs_ever_live_p (regno))
&& !fixed_regs[regno]
&& !crtl->abi->clobbers_full_reg_p (regno))
frame.reg_offset[regno] = SLOT_REQUIRED;
@@ -6351,6 +7831,22 @@ aarch64_layout_frame (void)
if (known_eq (frame.reg_offset[regno], SLOT_REQUIRED))
allocate_gpr_slot (regno);
+ if (aarch64_need_old_pstate_sm ())
+ {
+ frame.old_svcr_offset = offset;
+ offset += UNITS_PER_WORD;
+ }
+
+ /* If the current function changes the SVE vector length, ensure that the
+ old value of the DWARF VG register is saved and available in the CFI,
+ so that outer frames with VL-sized offsets can be processed correctly. */
+ if (cfun->machine->call_switches_pstate_sm
+ || aarch64_cfun_enables_pstate_sm ())
+ {
+ frame.reg_offset[VG_REGNUM] = offset;
+ offset += UNITS_PER_WORD;
+ }
+
poly_int64 max_int_offset = offset;
offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
bool has_align_gap = maybe_ne (offset, max_int_offset);
@@ -6388,8 +7884,6 @@ aarch64_layout_frame (void)
if (push_regs.size () > 1)
frame.wb_push_candidate2 = push_regs[1];
}
- else
- gcc_assert (known_eq (saved_regs_size, below_hard_fp_saved_regs_size));
/* With stack-clash, a register must be saved in non-leaf functions.
The saving of the bottommost register counts as an implicit probe,
@@ -6497,7 +7991,8 @@ aarch64_layout_frame (void)
frame.initial_adjust = frame.frame_size - frame.bytes_below_saved_regs;
frame.final_adjust = frame.bytes_below_saved_regs;
}
- else if (frame.bytes_above_hard_fp.is_constant (&const_above_fp)
+ else if (frame.wb_push_candidate1 != INVALID_REGNUM
+ && frame.bytes_above_hard_fp.is_constant (&const_above_fp)
&& const_above_fp < max_push_offset)
{
/* Frame with large area below the saved registers, or with SVE saves,
@@ -6758,17 +8253,6 @@ aarch64_return_address_signing_enabled (void)
/* This function should only be called after frame laid out. */
gcc_assert (cfun->machine->frame.laid_out);
- /* Turn return address signing off in any function that uses
- __builtin_eh_return. The address passed to __builtin_eh_return
- is not signed so either it has to be signed (with original sp)
- or the code path that uses it has to avoid authenticating it.
- Currently eh return introduces a return to anywhere gadget, no
- matter what we do here since it uses ret with user provided
- address. An ideal fix for that is to use indirect branch which
- can be protected with BTI j (to some extent). */
- if (crtl->calls_eh_return)
- return false;
-
/* If signing scope is AARCH_FUNCTION_NON_LEAF, we only sign a leaf function
if its LR is pushed onto stack. */
return (aarch_ra_sign_scope == AARCH_FUNCTION_ALL
@@ -6932,7 +8416,13 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
machine_mode mode = aarch64_reg_save_mode (regno);
rtx reg = gen_rtx_REG (mode, regno);
+ rtx move_src = reg;
offset = frame.reg_offset[regno] - bytes_below_sp;
+ if (regno == VG_REGNUM)
+ {
+ move_src = gen_rtx_REG (DImode, IP0_REGNUM);
+ emit_move_insn (move_src, gen_int_mode (aarch64_sve_vg, DImode));
+ }
rtx base_rtx = stack_pointer_rtx;
poly_int64 sp_offset = offset;
@@ -6940,7 +8430,7 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
if (mode == VNx2DImode && BYTES_BIG_ENDIAN)
aarch64_adjust_sve_callee_save_base (mode, base_rtx, anchor_reg,
offset, ptrue);
- else if (GP_REGNUM_P (regno)
+ else if (GP_REGNUM_P (REGNO (reg))
&& (!offset.is_constant (&const_offset) || const_offset >= 512))
{
poly_int64 fp_offset = frame.bytes_below_hard_fp - bytes_below_sp;
@@ -6963,6 +8453,7 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
unsigned int regno2;
if (!aarch64_sve_mode_p (mode)
+ && reg == move_src
&& i + 1 < regs.size ()
&& (regno2 = regs[i + 1], !skip_save_p (regno2))
&& known_eq (GET_MODE_SIZE (mode),
@@ -6994,17 +8485,24 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
}
else if (mode == VNx2DImode && BYTES_BIG_ENDIAN)
{
- insn = emit_insn (gen_aarch64_pred_mov (mode, mem, ptrue, reg));
+ insn = emit_insn (gen_aarch64_pred_mov (mode, mem, ptrue, move_src));
need_cfa_note_p = true;
}
else if (aarch64_sve_mode_p (mode))
- insn = emit_insn (gen_rtx_SET (mem, reg));
+ insn = emit_insn (gen_rtx_SET (mem, move_src));
else
- insn = emit_move_insn (mem, reg);
+ insn = emit_move_insn (mem, move_src);
RTX_FRAME_RELATED_P (insn) = frame_related_p;
if (frame_related_p && need_cfa_note_p)
aarch64_add_cfa_expression (insn, reg, stack_pointer_rtx, sp_offset);
+ else if (frame_related_p && move_src != reg)
+ add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_SET (mem, reg));
+
+ /* Emit a fake instruction to indicate that the VG save slot has
+ been initialized. */
+ if (regno == VG_REGNUM)
+ emit_insn (gen_aarch64_old_vg_saved (move_src, mem));
}
}
@@ -7171,9 +8669,16 @@ aarch64_get_separate_components (void)
bitmap_clear (components);
/* The registers we need saved to the frame. */
+ bool enables_pstate_sm = aarch64_cfun_enables_pstate_sm ();
for (unsigned regno = 0; regno <= LAST_SAVED_REGNUM; regno++)
if (aarch64_register_saved_on_entry (regno))
{
+ /* Disallow shrink wrapping for registers that will be clobbered
+ by an SMSTART SM in the prologue. */
+ if (enables_pstate_sm
+ && (FP_REGNUM_P (regno) || PR_REGNUM_P (regno)))
+ continue;
+
/* Punt on saves and restores that use ST1D and LD1D. We could
try to be smarter, but it would involve making sure that the
spare predicate register itself is safe to use at the save
@@ -7227,6 +8732,10 @@ aarch64_get_separate_components (void)
bitmap_clear_bit (components, frame.hard_fp_save_and_probe);
}
+ /* The VG save sequence needs a temporary GPR. Punt for now on trying
+ to find one. */
+ bitmap_clear_bit (components, VG_REGNUM);
+
return components;
}
@@ -7488,11 +8997,16 @@ aarch64_emit_stack_tie (rtx reg)
events, e.g. if we were to allow the stack to be dropped by more than a page
and then have multiple probes up and we take a signal somewhere in between
then the signal handler doesn't know the state of the stack and can make no
- assumptions about which pages have been probed. */
+ assumptions about which pages have been probed.
+
+ FORCE_ISA_MODE is AARCH64_FL_SM_ON if any variable component of POLY_SIZE
+ is measured relative to the SME vector length instead of the current
+ prevailing vector length. It is 0 otherwise. */
static void
aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
poly_int64 poly_size,
+ aarch64_feature_flags force_isa_mode,
bool frame_related_p,
bool final_adjustment_p)
{
@@ -7534,7 +9048,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
if (known_lt (poly_size, min_probe_threshold)
|| !flag_stack_clash_protection)
{
- aarch64_sub_sp (temp1, temp2, poly_size, frame_related_p);
+ aarch64_sub_sp (temp1, temp2, poly_size, force_isa_mode,
+ frame_related_p);
return;
}
@@ -7551,7 +9066,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
/* First calculate the amount of bytes we're actually spilling. */
aarch64_add_offset (Pmode, temp1, CONST0_RTX (Pmode),
- poly_size, temp1, temp2, false, true);
+ poly_size, temp1, temp2, force_isa_mode,
+ false, true);
rtx_insn *insn = get_last_insn ();
@@ -7609,7 +9125,7 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
{
for (HOST_WIDE_INT i = 0; i < rounded_size; i += guard_size)
{
- aarch64_sub_sp (NULL, temp2, guard_size, true);
+ aarch64_sub_sp (NULL, temp2, guard_size, force_isa_mode, true);
emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
guard_used_by_caller));
emit_insn (gen_blockage ());
@@ -7620,7 +9136,7 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
{
/* Compute the ending address. */
aarch64_add_offset (Pmode, temp1, stack_pointer_rtx, -rounded_size,
- temp1, NULL, false, true);
+ temp1, NULL, force_isa_mode, false, true);
rtx_insn *insn = get_last_insn ();
/* For the initial allocation, we don't have a frame pointer
@@ -7686,7 +9202,7 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
if (final_adjustment_p && rounded_size != 0)
min_probe_threshold = 0;
- aarch64_sub_sp (temp1, temp2, residual, frame_related_p);
+ aarch64_sub_sp (temp1, temp2, residual, force_isa_mode, frame_related_p);
if (residual >= min_probe_threshold)
{
if (dump_file)
@@ -7702,14 +9218,38 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
}
}
+/* Implement TARGET_EXTRA_LIVE_ON_ENTRY. */
+
+void
+aarch64_extra_live_on_entry (bitmap regs)
+{
+ if (TARGET_ZA)
+ {
+ bitmap_set_bit (regs, LOWERING_REGNUM);
+ bitmap_set_bit (regs, SME_STATE_REGNUM);
+ bitmap_set_bit (regs, TPIDR2_SETUP_REGNUM);
+ bitmap_set_bit (regs, ZA_FREE_REGNUM);
+ bitmap_set_bit (regs, ZA_SAVED_REGNUM);
+
+ /* The only time ZA can't have live contents on entry is when
+ the function explicitly treats it as a pure output. */
+ auto za_flags = aarch64_cfun_shared_flags ("za");
+ if (za_flags != (AARCH64_STATE_SHARED | AARCH64_STATE_OUT))
+ bitmap_set_bit (regs, ZA_REGNUM);
+
+ /* Since ZT0 is call-clobbered, it is only live on input if
+ it is explicitly shared, and is not a pure output. */
+ auto zt0_flags = aarch64_cfun_shared_flags ("zt0");
+ if (zt0_flags != 0
+ && zt0_flags != (AARCH64_STATE_SHARED | AARCH64_STATE_OUT))
+ bitmap_set_bit (regs, ZT0_REGNUM);
+ }
+}
+
/* Return 1 if the register is used by the epilogue. We need to say the
return register is used, but only after epilogue generation is complete.
Note that in the case of sibcalls, the values "used by the epilogue" are
- considered live at the start of the called function.
-
- For SIMD functions we need to return 1 for FP registers that are saved and
- restored by a function but are not zero in call_used_regs. If we do not do
- this optimizations may remove the restore of the register. */
+ considered live at the start of the called function. */
int
aarch64_epilogue_uses (int regno)
@@ -7719,9 +9259,72 @@ aarch64_epilogue_uses (int regno)
if (regno == LR_REGNUM)
return 1;
}
+ if (regno == LOWERING_REGNUM && TARGET_ZA)
+ return 1;
+ if (regno == SME_STATE_REGNUM && TARGET_ZA)
+ return 1;
+ if (regno == TPIDR2_SETUP_REGNUM && TARGET_ZA)
+ return 1;
+ /* If the function shares SME state with its caller, ensure that that
+ data is not in the lazy save buffer on exit. */
+ if (regno == ZA_SAVED_REGNUM && aarch64_cfun_incoming_pstate_za () != 0)
+ return 1;
+ if (regno == ZA_REGNUM && aarch64_cfun_shared_flags ("za") != 0)
+ return 1;
+ if (regno == ZT0_REGNUM && aarch64_cfun_shared_flags ("zt0") != 0)
+ return 1;
return 0;
}
+/* Implement TARGET_USE_LATE_PROLOGUE_EPILOGUE. */
+
+static bool
+aarch64_use_late_prologue_epilogue ()
+{
+ return aarch64_cfun_enables_pstate_sm ();
+}
+
+/* The current function's frame has a save slot for the incoming state
+ of SVCR. Return a legitimate memory for the slot, based on the hard
+ frame pointer. */
+
+static rtx
+aarch64_old_svcr_mem ()
+{
+ gcc_assert (frame_pointer_needed
+ && known_ge (cfun->machine->frame.old_svcr_offset, 0));
+ rtx base = hard_frame_pointer_rtx;
+ poly_int64 offset = (0
+ /* hard fp -> bottom of frame. */
+ - cfun->machine->frame.bytes_below_hard_fp
+ /* bottom of frame -> save slot. */
+ + cfun->machine->frame.old_svcr_offset);
+ return gen_frame_mem (DImode, plus_constant (Pmode, base, offset));
+}
+
+/* The current function's frame has a save slot for the incoming state
+ of SVCR. Load the slot into register REGNO and return the register. */
+
+static rtx
+aarch64_read_old_svcr (unsigned int regno)
+{
+ rtx svcr = gen_rtx_REG (DImode, regno);
+ emit_move_insn (svcr, aarch64_old_svcr_mem ());
+ return svcr;
+}
+
+/* Like the rtx version of aarch64_guard_switch_pstate_sm, but first
+ load the incoming value of SVCR from its save slot into temporary
+ register REGNO. */
+
+static rtx_insn *
+aarch64_guard_switch_pstate_sm (unsigned int regno,
+ aarch64_feature_flags local_mode)
+{
+ rtx old_svcr = aarch64_read_old_svcr (regno);
+ return aarch64_guard_switch_pstate_sm (old_svcr, local_mode);
+}
+
/* AArch64 stack frames generated by this compiler look like:
+-------------------------------+
@@ -7817,6 +9420,9 @@ aarch64_expand_prologue (void)
unsigned reg2 = frame.wb_push_candidate2;
bool emit_frame_chain = frame.emit_frame_chain;
rtx_insn *insn;
+ aarch64_feature_flags force_isa_mode = 0;
+ if (aarch64_cfun_enables_pstate_sm ())
+ force_isa_mode = AARCH64_FL_SM_ON;
if (flag_stack_clash_protection && known_eq (callee_adjust, 0))
{
@@ -7878,7 +9484,7 @@ aarch64_expand_prologue (void)
less the amount of the guard reserved for use by the caller's
outgoing args. */
aarch64_allocate_and_probe_stack_space (tmp0_rtx, tmp1_rtx, initial_adjust,
- true, false);
+ force_isa_mode, true, false);
if (callee_adjust != 0)
aarch64_push_regs (reg1, reg2, callee_adjust);
@@ -7901,7 +9507,8 @@ aarch64_expand_prologue (void)
gcc_assert (known_eq (chain_offset, 0));
aarch64_add_offset (Pmode, hard_frame_pointer_rtx,
stack_pointer_rtx, chain_offset,
- tmp1_rtx, tmp0_rtx, frame_pointer_needed);
+ tmp1_rtx, tmp0_rtx, force_isa_mode,
+ frame_pointer_needed);
if (frame_pointer_needed && !frame_size.is_constant ())
{
/* Variable-sized frames need to describe the save slot
@@ -7936,12 +9543,19 @@ aarch64_expand_prologue (void)
aarch64_save_callee_saves (bytes_below_sp, frame.saved_gprs, true,
emit_frame_chain);
+ if (maybe_ge (frame.reg_offset[VG_REGNUM], 0))
+ {
+ unsigned int saved_regs[] = { VG_REGNUM };
+ aarch64_save_callee_saves (bytes_below_sp, saved_regs, true,
+ emit_frame_chain);
+ }
if (maybe_ne (sve_callee_adjust, 0))
{
gcc_assert (!flag_stack_clash_protection
|| known_eq (initial_adjust, 0));
aarch64_allocate_and_probe_stack_space (tmp1_rtx, tmp0_rtx,
sve_callee_adjust,
+ force_isa_mode,
!frame_pointer_needed, false);
bytes_below_sp -= sve_callee_adjust;
}
@@ -7954,9 +9568,74 @@ aarch64_expand_prologue (void)
that is assumed by the called. */
gcc_assert (known_eq (bytes_below_sp, final_adjust));
aarch64_allocate_and_probe_stack_space (tmp1_rtx, tmp0_rtx, final_adjust,
+ force_isa_mode,
!frame_pointer_needed, true);
if (emit_frame_chain && maybe_ne (final_adjust, 0))
aarch64_emit_stack_tie (hard_frame_pointer_rtx);
+
+ /* Save the incoming value of PSTATE.SM, if required. Code further
+ down does this for locally-streaming functions. */
+ if (known_ge (frame.old_svcr_offset, 0)
+ && !aarch64_cfun_enables_pstate_sm ())
+ {
+ rtx mem = aarch64_old_svcr_mem ();
+ MEM_VOLATILE_P (mem) = 1;
+ if (TARGET_SME)
+ {
+ rtx reg = gen_rtx_REG (DImode, IP0_REGNUM);
+ emit_insn (gen_aarch64_read_svcr (reg));
+ emit_move_insn (mem, reg);
+ }
+ else
+ {
+ rtx old_r0 = NULL_RTX, old_r1 = NULL_RTX;
+ auto &args = crtl->args.info;
+ if (args.aapcs_ncrn > 0)
+ {
+ old_r0 = gen_rtx_REG (DImode, PROBE_STACK_FIRST_REGNUM);
+ emit_move_insn (old_r0, gen_rtx_REG (DImode, R0_REGNUM));
+ }
+ if (args.aapcs_ncrn > 1)
+ {
+ old_r1 = gen_rtx_REG (DImode, PROBE_STACK_SECOND_REGNUM);
+ emit_move_insn (old_r1, gen_rtx_REG (DImode, R1_REGNUM));
+ }
+ emit_insn (gen_aarch64_get_sme_state ());
+ emit_move_insn (mem, gen_rtx_REG (DImode, R0_REGNUM));
+ if (old_r0)
+ emit_move_insn (gen_rtx_REG (DImode, R0_REGNUM), old_r0);
+ if (old_r1)
+ emit_move_insn (gen_rtx_REG (DImode, R1_REGNUM), old_r1);
+ }
+ }
+
+ /* Enable PSTATE.SM, if required. */
+ if (aarch64_cfun_enables_pstate_sm ())
+ {
+ rtx_insn *guard_label = nullptr;
+ if (known_ge (cfun->machine->frame.old_svcr_offset, 0))
+ {
+ /* The current function is streaming-compatible. Save the
+ original state of PSTATE.SM. */
+ rtx svcr = gen_rtx_REG (DImode, IP0_REGNUM);
+ emit_insn (gen_aarch64_read_svcr (svcr));
+ emit_move_insn (aarch64_old_svcr_mem (), svcr);
+ guard_label = aarch64_guard_switch_pstate_sm (svcr,
+ aarch64_isa_flags);
+ }
+ aarch64_sme_mode_switch_regs args_switch;
+ auto &args = crtl->args.info;
+ for (unsigned int i = 0; i < args.num_sme_mode_switch_args; ++i)
+ {
+ rtx x = args.sme_mode_switch_args[i];
+ args_switch.add_reg (GET_MODE (x), REGNO (x));
+ }
+ args_switch.emit_prologue ();
+ emit_insn (gen_aarch64_smstart_sm ());
+ args_switch.emit_epilogue ();
+ if (guard_label)
+ emit_label (guard_label);
+ }
}
/* Return TRUE if we can use a simple_return insn.
@@ -7983,7 +9662,7 @@ aarch64_use_return_insn_p (void)
from a deallocated stack, and we optimize the unwind records by
emitting them all together if possible. */
void
-aarch64_expand_epilogue (bool for_sibcall)
+aarch64_expand_epilogue (rtx_call_insn *sibcall)
{
aarch64_frame &frame = cfun->machine->frame;
poly_int64 initial_adjust = frame.initial_adjust;
@@ -8003,6 +9682,9 @@ aarch64_expand_epilogue (bool for_sibcall)
HOST_WIDE_INT guard_size
= 1 << param_stack_clash_protection_guard_size;
HOST_WIDE_INT guard_used_by_caller = STACK_CLASH_CALLER_GUARD;
+ aarch64_feature_flags force_isa_mode = 0;
+ if (aarch64_cfun_enables_pstate_sm ())
+ force_isa_mode = AARCH64_FL_SM_ON;
/* We can re-use the registers when:
@@ -8027,6 +9709,26 @@ aarch64_expand_epilogue (bool for_sibcall)
= maybe_ne (get_frame_size ()
+ frame.saved_varargs_size, 0);
+ /* Reset PSTATE.SM, if required. */
+ if (aarch64_cfun_enables_pstate_sm ())
+ {
+ rtx_insn *guard_label = nullptr;
+ if (known_ge (cfun->machine->frame.old_svcr_offset, 0))
+ guard_label = aarch64_guard_switch_pstate_sm (IP0_REGNUM,
+ aarch64_isa_flags);
+ aarch64_sme_mode_switch_regs return_switch;
+ if (sibcall)
+ return_switch.add_call_args (sibcall);
+ else if (crtl->return_rtx && REG_P (crtl->return_rtx))
+ return_switch.add_reg (GET_MODE (crtl->return_rtx),
+ REGNO (crtl->return_rtx));
+ return_switch.emit_prologue ();
+ emit_insn (gen_aarch64_smstop_sm ());
+ return_switch.emit_epilogue ();
+ if (guard_label)
+ emit_label (guard_label);
+ }
+
/* Emit a barrier to prevent loads from a deallocated stack. */
if (maybe_gt (final_adjust, crtl->outgoing_args_size)
|| cfun->calls_alloca
@@ -8047,19 +9749,21 @@ aarch64_expand_epilogue (bool for_sibcall)
aarch64_add_offset (Pmode, stack_pointer_rtx,
hard_frame_pointer_rtx,
-bytes_below_hard_fp + final_adjust,
- tmp1_rtx, tmp0_rtx, callee_adjust == 0);
+ tmp1_rtx, tmp0_rtx, force_isa_mode,
+ callee_adjust == 0);
else
/* The case where we need to re-use the register here is very rare, so
avoid the complicated condition and just always emit a move if the
immediate doesn't fit. */
- aarch64_add_sp (tmp1_rtx, tmp0_rtx, final_adjust, true);
+ aarch64_add_sp (tmp1_rtx, tmp0_rtx, final_adjust, force_isa_mode, true);
/* Restore the vector registers before the predicate registers,
so that we can use P4 as a temporary for big-endian SVE frames. */
aarch64_restore_callee_saves (final_adjust, frame.saved_fprs, &cfi_ops);
aarch64_restore_callee_saves (final_adjust, frame.saved_prs, &cfi_ops);
if (maybe_ne (sve_callee_adjust, 0))
- aarch64_add_sp (NULL_RTX, NULL_RTX, sve_callee_adjust, true);
+ aarch64_add_sp (NULL_RTX, NULL_RTX, sve_callee_adjust,
+ force_isa_mode, true);
/* When shadow call stack is enabled, the scs_pop in the epilogue will
restore x30, we don't need to restore x30 again in the traditional
@@ -8089,7 +9793,7 @@ aarch64_expand_epilogue (bool for_sibcall)
/* Liveness of EP0_REGNUM can not be trusted across function calls either, so
add restriction on emit_move optimization to leaf functions. */
- aarch64_add_sp (tmp0_rtx, tmp1_rtx, initial_adjust,
+ aarch64_add_sp (tmp0_rtx, tmp1_rtx, initial_adjust, force_isa_mode,
(!can_inherit_p || !crtl->is_leaf
|| df_regs_ever_live_p (EP0_REGNUM)));
@@ -8113,6 +9817,30 @@ aarch64_expand_epilogue (bool for_sibcall)
RTX_FRAME_RELATED_P (insn) = 1;
}
+ /* Stack adjustment for exception handler. */
+ if (crtl->calls_eh_return && !sibcall)
+ {
+ /* If the EH_RETURN_TAKEN_RTX flag is set then we need
+ to unwind the stack and jump to the handler, otherwise
+ skip this eh_return logic and continue with normal
+ return after the label. We have already reset the CFA
+ to be SP; letting the CFA move during this adjustment
+ is just as correct as retaining the CFA from the body
+ of the function. Therefore, do nothing special. */
+ rtx label = gen_label_rtx ();
+ rtx x = gen_rtx_EQ (VOIDmode, EH_RETURN_TAKEN_RTX, const0_rtx);
+ x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
+ gen_rtx_LABEL_REF (Pmode, label), pc_rtx);
+ rtx jump = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
+ JUMP_LABEL (jump) = label;
+ LABEL_NUSES (label)++;
+ emit_insn (gen_add2_insn (stack_pointer_rtx,
+ EH_RETURN_STACKADJ_RTX));
+ emit_jump_insn (gen_indirect_jump (EH_RETURN_HANDLER_RTX));
+ emit_barrier ();
+ emit_label (label);
+ }
+
/* We prefer to emit the combined return/authenticate instruction RETAA,
however there are three cases in which we must instead emit an explicit
authentication instruction.
@@ -8125,7 +9853,7 @@ aarch64_expand_epilogue (bool for_sibcall)
explicitly authenticate.
*/
if (aarch64_return_address_signing_enabled ()
- && (for_sibcall || !TARGET_ARMV8_3))
+ && (sibcall || !TARGET_ARMV8_3))
{
switch (aarch_ra_sign_key)
{
@@ -8142,58 +9870,11 @@ aarch64_expand_epilogue (bool for_sibcall)
RTX_FRAME_RELATED_P (insn) = 1;
}
- /* Stack adjustment for exception handler. */
- if (crtl->calls_eh_return && !for_sibcall)
- {
- /* We need to unwind the stack by the offset computed by
- EH_RETURN_STACKADJ_RTX. We have already reset the CFA
- to be SP; letting the CFA move during this adjustment
- is just as correct as retaining the CFA from the body
- of the function. Therefore, do nothing special. */
- emit_insn (gen_add2_insn (stack_pointer_rtx, EH_RETURN_STACKADJ_RTX));
- }
-
emit_use (gen_rtx_REG (DImode, LR_REGNUM));
- if (!for_sibcall)
+ if (!sibcall)
emit_jump_insn (ret_rtx);
}
-/* Implement EH_RETURN_HANDLER_RTX. EH returns need to either return
- normally or return to a previous frame after unwinding.
-
- An EH return uses a single shared return sequence. The epilogue is
- exactly like a normal epilogue except that it has an extra input
- register (EH_RETURN_STACKADJ_RTX) which contains the stack adjustment
- that must be applied after the frame has been destroyed. An extra label
- is inserted before the epilogue which initializes this register to zero,
- and this is the entry point for a normal return.
-
- An actual EH return updates the return address, initializes the stack
- adjustment and jumps directly into the epilogue (bypassing the zeroing
- of the adjustment). Since the return address is typically saved on the
- stack when a function makes a call, the saved LR must be updated outside
- the epilogue.
-
- This poses problems as the store is generated well before the epilogue,
- so the offset of LR is not known yet. Also optimizations will remove the
- store as it appears dead, even after the epilogue is generated (as the
- base or offset for loading LR is different in many cases).
-
- To avoid these problems this implementation forces the frame pointer
- in eh_return functions so that the location of LR is fixed and known early.
- It also marks the store volatile, so no optimization is permitted to
- remove the store. */
-rtx
-aarch64_eh_return_handler_rtx (void)
-{
- rtx tmp = gen_frame_mem (Pmode,
- plus_constant (Pmode, hard_frame_pointer_rtx, UNITS_PER_WORD));
-
- /* Mark the store volatile, so no optimization is permitted to remove it. */
- MEM_VOLATILE_P (tmp) = true;
- return tmp;
-}
-
/* Output code to add DELTA to the first argument, and then jump
to FUNCTION. Used for C++ multiple inheritance. */
static void
@@ -8222,7 +9903,8 @@ aarch64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
temp1 = gen_rtx_REG (Pmode, EP1_REGNUM);
if (vcall_offset == 0)
- aarch64_add_offset (Pmode, this_rtx, this_rtx, delta, temp1, temp0, false);
+ aarch64_add_offset (Pmode, this_rtx, this_rtx, delta, temp1, temp0,
+ 0, false);
else
{
gcc_assert ((vcall_offset & (POINTER_BYTES - 1)) == 0);
@@ -8235,7 +9917,7 @@ aarch64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
plus_constant (Pmode, this_rtx, delta));
else
aarch64_add_offset (Pmode, this_rtx, this_rtx, delta,
- temp1, temp0, false);
+ temp1, temp0, 0, false);
}
if (Pmode == ptr_mode)
@@ -8272,7 +9954,9 @@ aarch64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
}
funexp = XEXP (DECL_RTL (function), 0);
funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
- rtx callee_abi = gen_int_mode (fndecl_abi (function).id (), DImode);
+ auto isa_mode = aarch64_fntype_isa_mode (TREE_TYPE (function));
+ auto pcs_variant = arm_pcs (fndecl_abi (function).id ());
+ rtx callee_abi = aarch64_gen_callee_cookie (isa_mode, pcs_variant);
insn = emit_call_insn (gen_sibcall (funexp, const0_rtx, callee_abi));
SIBLING_CALL_P (insn) = 1;
@@ -8317,8 +10001,10 @@ aarch64_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
/* There's no way to calculate VL-based values using relocations. */
subrtx_iterator::array_type array;
+ HOST_WIDE_INT factor;
FOR_EACH_SUBRTX (iter, array, x, ALL)
- if (GET_CODE (*iter) == CONST_POLY_INT)
+ if (GET_CODE (*iter) == CONST_POLY_INT
+ || aarch64_sme_vq_unspec_p (x, &factor))
return true;
poly_int64 offset;
@@ -8530,7 +10216,7 @@ aarch64_classify_index (struct aarch64_address_info *info, rtx x,
&& contains_reg_of_mode[GENERAL_REGS][GET_MODE (SUBREG_REG (index))])
index = SUBREG_REG (index);
- if (aarch64_sve_data_mode_p (mode))
+ if (aarch64_sve_data_mode_p (mode) || mode == VNx1TImode)
{
if (type != ADDRESS_REG_REG
|| (1 << shift) != GET_MODE_UNIT_SIZE (mode))
@@ -8633,7 +10319,8 @@ aarch64_classify_address (struct aarch64_address_info *info,
&& ((vec_flags == 0
&& known_lt (GET_MODE_SIZE (mode), 16))
|| vec_flags == VEC_ADVSIMD
- || vec_flags & VEC_SVE_DATA));
+ || vec_flags & VEC_SVE_DATA
+ || mode == VNx1TImode));
/* For SVE, only accept [Rn], [Rn, #offset, MUL VL] and [Rn, Rm, LSL #shift].
The latter is not valid for SVE predicates, and that's rejected through
@@ -8752,7 +10439,7 @@ aarch64_classify_address (struct aarch64_address_info *info,
/* Make "m" use the LD1 offset range for SVE data modes, so
that pre-RTL optimizers like ivopts will work to that
instead of the wider LDR/STR range. */
- if (vec_flags == VEC_SVE_DATA)
+ if (vec_flags == VEC_SVE_DATA || mode == VNx1TImode)
return (type == ADDR_QUERY_M
? offset_4bit_signed_scaled_p (mode, offset)
: offset_9bit_signed_scaled_p (mode, offset));
@@ -8772,6 +10459,15 @@ aarch64_classify_address (struct aarch64_address_info *info,
if (vec_flags == VEC_SVE_PRED)
return offset_9bit_signed_scaled_p (mode, offset);
+ if (vec_flags == (VEC_SVE_PRED | VEC_STRUCT))
+ {
+ poly_int64 end_offset = (offset
+ + GET_MODE_SIZE (mode)
+ - BYTES_PER_SVE_PRED);
+ return (offset_9bit_signed_scaled_p (VNx16BImode, end_offset)
+ && offset_9bit_signed_scaled_p (VNx16BImode, offset));
+ }
+
if (load_store_pair_p)
return ((known_eq (GET_MODE_SIZE (mode), 4)
|| known_eq (GET_MODE_SIZE (mode), 8)
@@ -9181,21 +10877,195 @@ aarch64_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
return true;
}
+/* Return a fresh memory reference to the current function's TPIDR2 block,
+ creating a block if necessary. */
+
+static rtx
+aarch64_get_tpidr2_block ()
+{
+ if (!cfun->machine->tpidr2_block)
+ /* The TPIDR2 block is 16 bytes in size and must be aligned to a 128-bit
+ boundary. */
+ cfun->machine->tpidr2_block = assign_stack_local (V16QImode, 16, 128);
+ return copy_rtx (cfun->machine->tpidr2_block);
+}
+
+/* Return a fresh register that points to the current function's
+ TPIDR2 block, creating a block if necessary. */
+
+static rtx
+aarch64_get_tpidr2_ptr ()
+{
+ rtx block = aarch64_get_tpidr2_block ();
+ return force_reg (Pmode, XEXP (block, 0));
+}
+
+/* Emit instructions to allocate a ZA lazy save buffer and initialize the
+ current function's TPIDR2 block. */
+
+static void
+aarch64_init_tpidr2_block ()
+{
+ rtx block = aarch64_get_tpidr2_block ();
+
+ /* The ZA save buffer is SVL.B*SVL.B bytes in size. */
+ rtx svl_bytes = aarch64_sme_vq_immediate (Pmode, 16, AARCH64_ISA_MODE);
+ rtx svl_bytes_reg = force_reg (DImode, svl_bytes);
+ rtx za_size = expand_simple_binop (Pmode, MULT, svl_bytes_reg,
+ svl_bytes_reg, NULL, 0, OPTAB_LIB_WIDEN);
+ rtx za_save_buffer = allocate_dynamic_stack_space (za_size, 128,
+ BITS_PER_UNIT, -1, true);
+ za_save_buffer = force_reg (Pmode, za_save_buffer);
+ cfun->machine->za_save_buffer = za_save_buffer;
+
+ /* The first word of the block points to the save buffer and the second
+ word is the number of ZA slices to save. */
+ rtx block_0 = adjust_address (block, DImode, 0);
+ rtx block_8 = adjust_address (block, DImode, 8);
+ emit_insn (gen_store_pair_dw_didi (block_0, za_save_buffer,
+ block_8, svl_bytes_reg));
+
+ if (!memory_operand (block, V16QImode))
+ block = replace_equiv_address (block, force_reg (Pmode, XEXP (block, 0)));
+ emit_insn (gen_aarch64_setup_local_tpidr2 (block));
+}
+
+/* Restore the contents of ZA from the lazy save buffer, given that
+ register TPIDR2_BLOCK points to the current function's TPIDR2 block.
+ PSTATE.ZA is known to be 0 and TPIDR2_EL0 is known to be null. */
+
+void
+aarch64_restore_za (rtx tpidr2_block)
+{
+ emit_insn (gen_aarch64_smstart_za ());
+ if (REGNO (tpidr2_block) != R0_REGNUM)
+ emit_move_insn (gen_rtx_REG (Pmode, R0_REGNUM), tpidr2_block);
+ emit_insn (gen_aarch64_tpidr2_restore ());
+}
+
+/* Return the ZT0 save buffer, creating one if necessary. */
+
+static rtx
+aarch64_get_zt0_save_buffer ()
+{
+ if (!cfun->machine->zt0_save_buffer)
+ cfun->machine->zt0_save_buffer = assign_stack_local (V8DImode, 64, 128);
+ return cfun->machine->zt0_save_buffer;
+}
+
+/* Save ZT0 to the current function's save buffer. */
+
+static void
+aarch64_save_zt0 ()
+{
+ rtx mem = aarch64_get_zt0_save_buffer ();
+ mem = replace_equiv_address (mem, force_reg (Pmode, XEXP (mem, 0)));
+ emit_insn (gen_aarch64_sme_str_zt0 (mem));
+}
+
+/* Restore ZT0 from the current function's save buffer. FROM_LAZY_SAVE_P
+ is true if the load is happening after a call to a private-ZA function,
+ false if it can be treated as a normal load. */
+
+static void
+aarch64_restore_zt0 (bool from_lazy_save_p)
+{
+ rtx mem = aarch64_get_zt0_save_buffer ();
+ mem = replace_equiv_address (mem, force_reg (Pmode, XEXP (mem, 0)));
+ emit_insn (from_lazy_save_p
+ ? gen_aarch64_restore_zt0 (mem)
+ : gen_aarch64_sme_ldr_zt0 (mem));
+}
+
+/* Implement TARGET_START_CALL_ARGS. */
+
+static void
+aarch64_start_call_args (cumulative_args_t ca_v)
+{
+ CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
+
+ if (!TARGET_SME && (ca->isa_mode & AARCH64_FL_SM_ON))
+ {
+ error ("calling a streaming function requires the ISA extension %qs",
+ "sme");
+ inform (input_location, "you can enable %qs using the command-line"
+ " option %<-march%>, or by using the %<target%>"
+ " attribute or pragma", "sme");
+ }
+
+ if ((ca->shared_za_flags & (AARCH64_STATE_IN | AARCH64_STATE_OUT))
+ && !aarch64_cfun_has_state ("za"))
+ error ("call to a function that shares %qs state from a function"
+ " that has no %qs state", "za", "za");
+ else if ((ca->shared_zt0_flags & (AARCH64_STATE_IN | AARCH64_STATE_OUT))
+ && !aarch64_cfun_has_state ("zt0"))
+ error ("call to a function that shares %qs state from a function"
+ " that has no %qs state", "zt0", "zt0");
+ else if (!TARGET_ZA && (ca->isa_mode & AARCH64_FL_ZA_ON))
+ error ("call to a function that shares SME state from a function"
+ " that has no SME state");
+
+ /* If this is a call to a private ZA function, emit a marker to
+ indicate where any necessary set-up code could be inserted.
+ The code itself is inserted by the mode-switching pass. */
+ if (TARGET_ZA && !(ca->isa_mode & AARCH64_FL_ZA_ON))
+ emit_insn (gen_aarch64_start_private_za_call ());
+
+ /* If this is a call to a shared-ZA function that doesn't share ZT0,
+ save and restore ZT0 around the call. */
+ if (aarch64_cfun_has_state ("zt0")
+ && (ca->isa_mode & AARCH64_FL_ZA_ON)
+ && ca->shared_zt0_flags == 0)
+ aarch64_save_zt0 ();
+}
+
/* This function is used by the call expanders of the machine description.
RESULT is the register in which the result is returned. It's NULL for
"call" and "sibcall".
MEM is the location of the function call.
- CALLEE_ABI is a const_int that gives the arm_pcs of the callee.
+ COOKIE is either:
+ - a const_int that gives the argument to the call's UNSPEC_CALLEE_ABI.
+ - a PARALLEL that contains such a const_int as its first element.
+ The second element is a PARALLEL that lists all the argument
+ registers that need to be saved and restored around a change
+ in PSTATE.SM, or const0_rtx if no such switch is needed.
+ The third and fourth elements are const_ints that contain the
+ sharing flags for ZA and ZT0 respectively.
SIBCALL indicates whether this function call is normal call or sibling call.
It will generate different pattern accordingly. */
void
-aarch64_expand_call (rtx result, rtx mem, rtx callee_abi, bool sibcall)
+aarch64_expand_call (rtx result, rtx mem, rtx cookie, bool sibcall)
{
rtx call, callee, tmp;
rtvec vec;
machine_mode mode;
+ rtx callee_abi = cookie;
+ rtx sme_mode_switch_args = const0_rtx;
+ unsigned int shared_za_flags = 0;
+ unsigned int shared_zt0_flags = 0;
+ if (GET_CODE (cookie) == PARALLEL)
+ {
+ callee_abi = XVECEXP (cookie, 0, 0);
+ sme_mode_switch_args = XVECEXP (cookie, 0, 1);
+ shared_za_flags = INTVAL (XVECEXP (cookie, 0, 2));
+ shared_zt0_flags = INTVAL (XVECEXP (cookie, 0, 3));
+ }
+
+ gcc_assert (CONST_INT_P (callee_abi));
+ auto callee_isa_mode = aarch64_callee_isa_mode (callee_abi);
+
+ if (aarch64_cfun_has_state ("za")
+ && (callee_isa_mode & AARCH64_FL_ZA_ON)
+ && !shared_za_flags)
+ {
+ sorry ("call to a function that shares state other than %qs"
+ " from a function that has %qs state", "za", "za");
+ inform (input_location, "use %<__arm_preserves(\"za\")%> if the"
+ " callee preserves ZA");
+ }
+
gcc_assert (MEM_P (mem));
callee = XEXP (mem, 0);
mode = GET_MODE (callee);
@@ -9210,6 +11080,43 @@ aarch64_expand_call (rtx result, rtx mem, rtx callee_abi, bool sibcall)
: !REG_P (callee))
XEXP (mem, 0) = force_reg (mode, callee);
+ /* Accumulate the return values, including state that is shared via
+ attributes. */
+ auto_vec<rtx, 8> return_values;
+ if (result)
+ {
+ if (GET_CODE (result) == PARALLEL)
+ for (int i = 0; i < XVECLEN (result, 0); ++i)
+ return_values.safe_push (XVECEXP (result, 0, i));
+ else
+ return_values.safe_push (result);
+ }
+ unsigned int orig_num_return_values = return_values.length ();
+ if (shared_za_flags & AARCH64_STATE_OUT)
+ return_values.safe_push (gen_rtx_REG (VNx16BImode, ZA_REGNUM));
+ /* When calling private-ZA functions from functions with ZA state,
+ we want to know whether the call committed a lazy save. */
+ if (TARGET_ZA && !shared_za_flags)
+ return_values.safe_push (gen_rtx_REG (VNx16BImode, ZA_SAVED_REGNUM));
+ if (shared_zt0_flags & AARCH64_STATE_OUT)
+ return_values.safe_push (gen_rtx_REG (V8DImode, ZT0_REGNUM));
+
+ /* Create the new return value, if necessary. */
+ if (orig_num_return_values != return_values.length ())
+ {
+ if (return_values.length () == 1)
+ result = return_values[0];
+ else
+ {
+ for (rtx &x : return_values)
+ if (GET_CODE (x) != EXPR_LIST)
+ x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
+ rtvec v = gen_rtvec_v (return_values.length (),
+ return_values.address ());
+ result = gen_rtx_PARALLEL (VOIDmode, v);
+ }
+ }
+
call = gen_rtx_CALL (VOIDmode, mem, const0_rtx);
if (result != NULL_RTX)
@@ -9220,26 +11127,132 @@ aarch64_expand_call (rtx result, rtx mem, rtx callee_abi, bool sibcall)
else
tmp = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNUM));
- gcc_assert (CONST_INT_P (callee_abi));
callee_abi = gen_rtx_UNSPEC (DImode, gen_rtvec (1, callee_abi),
UNSPEC_CALLEE_ABI);
vec = gen_rtvec (3, call, callee_abi, tmp);
call = gen_rtx_PARALLEL (VOIDmode, vec);
- aarch64_emit_call_insn (call);
+ auto call_insn = aarch64_emit_call_insn (call);
+
+ /* Check whether the call requires a change to PSTATE.SM. We can't
+ emit the instructions to change PSTATE.SM yet, since they involve
+ a change in vector length and a change in instruction set, which
+ cannot be represented in RTL.
+
+ For now, just record which registers will be clobbered and used
+ by the changes to PSTATE.SM. */
+ if (!sibcall && aarch64_call_switches_pstate_sm (callee_isa_mode))
+ {
+ aarch64_sme_mode_switch_regs args_switch;
+ if (sme_mode_switch_args != const0_rtx)
+ {
+ unsigned int num_args = XVECLEN (sme_mode_switch_args, 0);
+ for (unsigned int i = 0; i < num_args; ++i)
+ {
+ rtx x = XVECEXP (sme_mode_switch_args, 0, i);
+ args_switch.add_reg (GET_MODE (x), REGNO (x));
+ }
+ }
+
+ aarch64_sme_mode_switch_regs result_switch;
+ if (result)
+ result_switch.add_call_result (call_insn);
+
+ unsigned int num_gprs = MAX (args_switch.num_gprs (),
+ result_switch.num_gprs ());
+ for (unsigned int i = 0; i < num_gprs; ++i)
+ clobber_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
+ gen_rtx_REG (DImode, args_switch.FIRST_GPR + i));
+
+ for (int regno = V0_REGNUM; regno < V0_REGNUM + 32; regno += 4)
+ clobber_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
+ gen_rtx_REG (V4x16QImode, regno));
+
+ for (int regno = P0_REGNUM; regno < P0_REGNUM + 16; regno += 1)
+ clobber_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
+ gen_rtx_REG (VNx16BImode, regno));
+
+ /* Ensure that the VG save slot has been initialized. Also emit
+ an instruction to model the effect of the temporary clobber
+ of VG, so that the prologue/epilogue pass sees the need to
+ save the old value. */
+ use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
+ gen_rtx_REG (DImode, VG_REGNUM));
+ emit_insn_before (gen_aarch64_update_vg (), call_insn);
+
+ cfun->machine->call_switches_pstate_sm = true;
+ }
+
+ /* Add any ZA-related information.
+
+ ZA_REGNUM represents the current function's ZA state, rather than
+ the contents of the ZA register itself. We ensure that the function's
+ ZA state is preserved by private-ZA call sequences, so the call itself
+ does not use or clobber ZA_REGNUM. The same thing applies to
+ ZT0_REGNUM. */
+ if (TARGET_ZA)
+ {
+ /* The callee requires ZA to be active if the callee is shared-ZA,
+ otherwise it requires ZA to be dormant or off. The state of ZA is
+ captured by a combination of SME_STATE_REGNUM, TPIDR2_SETUP_REGNUM,
+ and ZA_SAVED_REGNUM. */
+ use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
+ gen_rtx_REG (DImode, SME_STATE_REGNUM));
+ use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
+ gen_rtx_REG (DImode, TPIDR2_SETUP_REGNUM));
+ use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
+ gen_rtx_REG (VNx16BImode, ZA_SAVED_REGNUM));
+
+ /* Keep the aarch64_start/end_private_za_call markers live. */
+ if (!(callee_isa_mode & AARCH64_FL_ZA_ON))
+ use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
+ gen_rtx_REG (VNx16BImode, LOWERING_REGNUM));
+
+ /* If the callee is a shared-ZA function, record whether it uses the
+ current value of ZA and ZT0. */
+ if (shared_za_flags & AARCH64_STATE_IN)
+ use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
+ gen_rtx_REG (VNx16BImode, ZA_REGNUM));
+
+ if (shared_zt0_flags & AARCH64_STATE_IN)
+ use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
+ gen_rtx_REG (V8DImode, ZT0_REGNUM));
+ }
+}
+
+/* Implement TARGET_END_CALL_ARGS. */
+
+static void
+aarch64_end_call_args (cumulative_args_t ca_v)
+{
+ CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
+
+ /* If this is a call to a private ZA function, emit a marker to
+ indicate where any necessary restoration code could be inserted.
+ The code itself is inserted by the mode-switching pass. */
+ if (TARGET_ZA && !(ca->isa_mode & AARCH64_FL_ZA_ON))
+ emit_insn (gen_aarch64_end_private_za_call ());
+
+ /* If this is a call to a shared-ZA function that doesn't share ZT0,
+ save and restore ZT0 around the call. */
+ if (aarch64_cfun_has_state ("zt0")
+ && (ca->isa_mode & AARCH64_FL_ZA_ON)
+ && ca->shared_zt0_flags == 0)
+ aarch64_restore_zt0 (false);
}
/* Emit call insn with PAT and do aarch64-specific handling. */
-void
+rtx_call_insn *
aarch64_emit_call_insn (rtx pat)
{
- rtx insn = emit_call_insn (pat);
+ auto insn = emit_call_insn (pat);
rtx *fusage = &CALL_INSN_FUNCTION_USAGE (insn);
clobber_reg (fusage, gen_rtx_REG (word_mode, IP0_REGNUM));
clobber_reg (fusage, gen_rtx_REG (word_mode, IP1_REGNUM));
+ return as_a<rtx_call_insn *> (insn);
}
machine_mode
@@ -9775,6 +11788,7 @@ sizetochar (int size)
'0': Print a normal operand, if it's a general register,
then we assume DImode.
'k': Print NZCV for conditional compare instructions.
+ 'K': Print a predicate register as pn<N> rather than p<N>
'A': Output address constant representing the first
argument of X, specifying a relocation offset
if appropriate.
@@ -9951,14 +11965,17 @@ aarch64_print_operand (FILE *f, rtx x, int code)
case 'T':
case 'U':
case 'V':
- if (!REG_P (x) || !FP_REGNUM_P (REGNO (x)))
+ if (!REG_P (x) || (!FP_REGNUM_P (REGNO (x)) && !PR_REGNUM_P (REGNO (x))))
{
- output_operand_lossage ("incompatible floating point / vector register operand for '%%%c'", code);
+ output_operand_lossage ("incompatible operand for '%%%c'", code);
return;
}
- asm_fprintf (f, "%c%d",
- aarch64_sve_data_mode_p (GET_MODE (x)) ? 'z' : 'v',
- REGNO (x) - V0_REGNUM + (code - 'S'));
+ if (PR_REGNUM_P (REGNO (x)))
+ asm_fprintf (f, "p%d", REGNO (x) - P0_REGNUM + (code - 'S'));
+ else
+ asm_fprintf (f, "%c%d",
+ aarch64_sve_data_mode_p (GET_MODE (x)) ? 'z' : 'v',
+ REGNO (x) - V0_REGNUM + (code - 'S'));
break;
case 'R':
@@ -10239,6 +12256,15 @@ aarch64_print_operand (FILE *f, rtx x, int code)
}
break;
+ case 'K':
+ if (!REG_P (x) || !PR_REGNUM_P (REGNO (x)))
+ {
+ output_operand_lossage ("invalid operand for '%%%c'", code);
+ return;
+ }
+ asm_fprintf (f, "pn%d", REGNO (x) - P0_REGNUM);
+ break;
+
case 'y':
case 'z':
{
@@ -10441,6 +12467,12 @@ aarch64_label_mentioned_p (rtx x)
enum reg_class
aarch64_regno_regclass (unsigned regno)
{
+ if (W8_W11_REGNUM_P (regno))
+ return W8_W11_REGS;
+
+ if (W12_W15_REGNUM_P (regno))
+ return W12_W15_REGS;
+
if (STUB_REGNUM_P (regno))
return STUB_REGS;
@@ -10464,6 +12496,9 @@ aarch64_regno_regclass (unsigned regno)
if (regno == FFR_REGNUM || regno == FFRT_REGNUM)
return FFR_REGS;
+ if (FAKE_REGNUM_P (regno))
+ return FAKE_REGS;
+
return NO_REGS;
}
@@ -10598,8 +12633,8 @@ aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
return NO_REGS;
}
- /* Without the TARGET_SIMD instructions we cannot move a Q register
- to a Q register directly. We need a scratch. */
+ /* Without the TARGET_SIMD or TARGET_SVE instructions we cannot move a
+ Q register to a Q register directly. We need a scratch. */
if (REG_P (x)
&& (mode == TFmode
|| mode == TImode
@@ -10651,6 +12686,16 @@ aarch64_secondary_memory_needed (machine_mode mode, reg_class_t class1,
return false;
}
+/* Implement TARGET_FRAME_POINTER_REQUIRED. */
+
+static bool
+aarch64_frame_pointer_required ()
+{
+ /* If the function needs to record the incoming value of PSTATE.SM,
+ make sure that the slot is accessible from the frame pointer. */
+ return aarch64_need_old_pstate_sm ();
+}
+
static bool
aarch64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
@@ -10792,6 +12837,8 @@ aarch64_class_max_nregs (reg_class_t regclass, machine_mode mode)
unsigned int nregs, vec_flags;
switch (regclass)
{
+ case W8_W11_REGS:
+ case W12_W15_REGS:
case STUB_REGS:
case TAILCALL_ADDR_REGS:
case POINTER_REGS:
@@ -10809,12 +12856,16 @@ aarch64_class_max_nregs (reg_class_t regclass, machine_mode mode)
return (vec_flags & VEC_ADVSIMD
? CEIL (lowest_size, UNITS_PER_VREG)
: CEIL (lowest_size, UNITS_PER_WORD));
- case STACK_REG:
+
case PR_REGS:
case PR_LO_REGS:
case PR_HI_REGS:
+ return mode == VNx32BImode ? 2 : 1;
+
+ case STACK_REG:
case FFR_REGS:
case PR_AND_FFR_REGS:
+ case FAKE_REGS:
return 1;
case NO_REGS:
@@ -10959,6 +13010,51 @@ aarch64_output_casesi (rtx *operands)
return "";
}
+/* Return the asm string for an SME ZERO instruction whose 8-bit mask
+ operand is MASK. */
+const char *
+aarch64_output_sme_zero_za (rtx mask)
+{
+ auto mask_val = UINTVAL (mask);
+ if (mask_val == 0)
+ return "zero\t{}";
+
+ if (mask_val == 0xff)
+ return "zero\t{ za }";
+
+ static constexpr std::pair<unsigned int, char> tiles[] = {
+ { 0xff, 'b' },
+ { 0x55, 'h' },
+ { 0x11, 's' },
+ { 0x01, 'd' }
+ };
+ /* The last entry in the list has the form "za7.d }", but that's the
+ same length as "za7.d, ". */
+ static char buffer[sizeof("zero\t{ ") + sizeof ("za7.d, ") * 8 + 1];
+ unsigned int i = 0;
+ i += snprintf (buffer + i, sizeof (buffer) - i, "zero\t");
+ const char *prefix = "{ ";
+ for (auto &tile : tiles)
+ {
+ auto tile_mask = tile.first;
+ unsigned int tile_index = 0;
+ while (tile_mask < 0x100)
+ {
+ if ((mask_val & tile_mask) == tile_mask)
+ {
+ i += snprintf (buffer + i, sizeof (buffer) - i, "%sza%d.%c",
+ prefix, tile_index, tile.second);
+ prefix = ", ";
+ mask_val &= ~tile_mask;
+ }
+ tile_mask <<= 1;
+ tile_index += 1;
+ }
+ }
+ gcc_assert (mask_val == 0 && i + 3 <= sizeof (buffer));
+ snprintf (buffer + i, sizeof (buffer) - i, " }");
+ return buffer;
+}
/* Return size in bits of an arithmetic operand which is shifted/scaled and
masked such that it is suitable for a UXTB, UXTH, or UXTW extend
@@ -11642,6 +13738,18 @@ aarch64_if_then_else_costs (rtx op0, rtx op1, rtx op2, int *cost, bool speed)
/* CSINV/NEG with zero extend + const 0 (*csinv3_uxtw_insn3). */
op1 = XEXP (inner, 0);
}
+ else if (op1 == constm1_rtx || op1 == const1_rtx)
+ {
+ /* Use CSINV or CSINC. */
+ *cost += rtx_cost (op2, VOIDmode, IF_THEN_ELSE, 2, speed);
+ return true;
+ }
+ else if (op2 == constm1_rtx || op2 == const1_rtx)
+ {
+ /* Use CSINV or CSINC. */
+ *cost += rtx_cost (op1, VOIDmode, IF_THEN_ELSE, 1, speed);
+ return true;
+ }
*cost += rtx_cost (op1, VOIDmode, IF_THEN_ELSE, 1, speed);
*cost += rtx_cost (op2, VOIDmode, IF_THEN_ELSE, 2, speed);
@@ -13148,13 +15256,11 @@ aarch64_register_move_cost (machine_mode mode,
const struct cpu_regmove_cost *regmove_cost
= aarch64_tune_params.regmove_cost;
- /* Caller save and pointer regs are equivalent to GENERAL_REGS. */
- if (to == TAILCALL_ADDR_REGS || to == POINTER_REGS
- || to == STUB_REGS)
+ /* Trest any subset of POINTER_REGS as though it were GENERAL_REGS. */
+ if (reg_class_subset_p (to, POINTER_REGS))
to = GENERAL_REGS;
- if (from == TAILCALL_ADDR_REGS || from == POINTER_REGS
- || from == STUB_REGS)
+ if (reg_class_subset_p (from, POINTER_REGS))
from = GENERAL_REGS;
/* Make RDFFR very expensive. In particular, if we know that the FFR
@@ -13192,7 +15298,7 @@ aarch64_register_move_cost (machine_mode mode,
secondary reload. A general register is used as a scratch to move
the upper DI value and the lower DI value is moved directly,
hence the cost is the sum of three moves. */
- if (! TARGET_SIMD)
+ if (!TARGET_SIMD && !TARGET_SVE)
return regmove_cost->GP2FP + regmove_cost->FP2GP + regmove_cost->FP2FP;
return regmove_cost->FP2FP;
@@ -15988,6 +18094,23 @@ aarch64_override_options_internal (struct gcc_options *opts)
&& !fixed_regs[R18_REGNUM])
error ("%<-fsanitize=shadow-call-stack%> requires %<-ffixed-x18%>");
+ if ((opts->x_aarch64_isa_flags & (AARCH64_FL_SM_ON | AARCH64_FL_ZA_ON))
+ && !(opts->x_aarch64_isa_flags & AARCH64_FL_SME))
+ {
+ if (opts->x_aarch64_isa_flags & AARCH64_FL_SM_ON)
+ error ("streaming functions require the ISA extension %qs", "sme");
+ else
+ error ("functions with SME state require the ISA extension %qs",
+ "sme");
+ inform (input_location, "you can enable %qs using the command-line"
+ " option %<-march%>, or by using the %<target%>"
+ " attribute or pragma", "sme");
+ opts->x_target_flags &= ~MASK_GENERAL_REGS_ONLY;
+ auto new_flags = (opts->x_aarch64_asm_isa_flags
+ | feature_deps::SME ().enable);
+ aarch64_set_asm_isa_flags (opts, new_flags);
+ }
+
initialize_aarch64_code_model (opts);
initialize_aarch64_tls_size (opts);
aarch64_tpidr_register = opts->x_aarch64_tpidr_reg;
@@ -16105,6 +18228,12 @@ aarch64_override_options_internal (struct gcc_options *opts)
&& opts->x_optimize >= aarch64_tune_params.prefetch->default_opt_level)
opts->x_flag_prefetch_loop_arrays = 1;
+ /* Avoid loop-dependant FMA chains. */
+ if (aarch64_tune_params.extra_tuning_flags
+ & AARCH64_EXTRA_TUNE_AVOID_CROSS_LOOP_FMA)
+ SET_OPTION_IF_UNSET (opts, &global_options_set, param_avoid_fma_max_bits,
+ 512);
+
aarch64_override_options_after_change_1 (opts);
}
@@ -16433,6 +18562,7 @@ aarch64_override_options (void)
SUBTARGET_OVERRIDE_OPTIONS;
#endif
+ auto isa_mode = AARCH64_FL_DEFAULT_ISA_MODE;
if (cpu && arch)
{
/* If both -mcpu and -march are specified, warn if they are not
@@ -16455,25 +18585,25 @@ aarch64_override_options (void)
}
selected_arch = arch->arch;
- aarch64_set_asm_isa_flags (arch_isa);
+ aarch64_set_asm_isa_flags (arch_isa | isa_mode);
}
else if (cpu)
{
selected_arch = cpu->arch;
- aarch64_set_asm_isa_flags (cpu_isa);
+ aarch64_set_asm_isa_flags (cpu_isa | isa_mode);
}
else if (arch)
{
cpu = &all_cores[arch->ident];
selected_arch = arch->arch;
- aarch64_set_asm_isa_flags (arch_isa);
+ aarch64_set_asm_isa_flags (arch_isa | isa_mode);
}
else
{
/* No -mcpu or -march specified, so use the default CPU. */
cpu = &all_cores[TARGET_CPU_DEFAULT];
selected_arch = cpu->arch;
- aarch64_set_asm_isa_flags (cpu->flags);
+ aarch64_set_asm_isa_flags (cpu->flags | isa_mode);
}
selected_tune = tune ? tune->ident : cpu->ident;
@@ -16646,6 +18776,21 @@ aarch64_save_restore_target_globals (tree new_tree)
TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
}
+/* Return the target_option_node for FNDECL, or the current options
+ if FNDECL is null. */
+
+static tree
+aarch64_fndecl_options (tree fndecl)
+{
+ if (!fndecl)
+ return target_option_current_node;
+
+ if (tree options = DECL_FUNCTION_SPECIFIC_TARGET (fndecl))
+ return options;
+
+ return target_option_default_node;
+}
+
/* Implement TARGET_SET_CURRENT_FUNCTION. Unpack the codegen decisions
like tuning and ISA features from the DECL_FUNCTION_SPECIFIC_TARGET
of the function, if such exists. This function may be called multiple
@@ -16655,25 +18800,38 @@ aarch64_save_restore_target_globals (tree new_tree)
static void
aarch64_set_current_function (tree fndecl)
{
- if (!fndecl || fndecl == aarch64_previous_fndecl)
- return;
+ tree old_tree = aarch64_fndecl_options (aarch64_previous_fndecl);
+ tree new_tree = aarch64_fndecl_options (fndecl);
- tree old_tree = (aarch64_previous_fndecl
- ? DECL_FUNCTION_SPECIFIC_TARGET (aarch64_previous_fndecl)
- : NULL_TREE);
+ auto new_isa_mode = (fndecl
+ ? aarch64_fndecl_isa_mode (fndecl)
+ : AARCH64_FL_DEFAULT_ISA_MODE);
+ auto isa_flags = TREE_TARGET_OPTION (new_tree)->x_aarch64_isa_flags;
- tree new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
-
- /* If current function has no attributes but the previous one did,
- use the default node. */
- if (!new_tree && old_tree)
- new_tree = target_option_default_node;
+ static bool reported_zt0_p;
+ if (!reported_zt0_p
+ && !(isa_flags & AARCH64_FL_SME2)
+ && fndecl
+ && aarch64_fndecl_has_state (fndecl, "zt0"))
+ {
+ error ("functions with %qs state require the ISA extension %qs",
+ "zt0", "sme2");
+ inform (input_location, "you can enable %qs using the command-line"
+ " option %<-march%>, or by using the %<target%>"
+ " attribute or pragma", "sme2");
+ reported_zt0_p = true;
+ }
/* If nothing to do, return. #pragma GCC reset or #pragma GCC pop to
the default have been handled by aarch64_save_restore_target_globals from
aarch64_pragma_target_parse. */
- if (old_tree == new_tree)
- return;
+ if (old_tree == new_tree
+ && (!fndecl || aarch64_previous_fndecl)
+ && (isa_flags & AARCH64_FL_ISA_MODES) == new_isa_mode)
+ {
+ gcc_assert (AARCH64_ISA_MODE == new_isa_mode);
+ return;
+ }
aarch64_previous_fndecl = fndecl;
@@ -16681,7 +18839,28 @@ aarch64_set_current_function (tree fndecl)
cl_target_option_restore (&global_options, &global_options_set,
TREE_TARGET_OPTION (new_tree));
+ /* The ISA mode can vary based on function type attributes and
+ function declaration attributes. Make sure that the target
+ options correctly reflect these attributes. */
+ if ((isa_flags & AARCH64_FL_ISA_MODES) != new_isa_mode)
+ {
+ auto base_flags = (aarch64_asm_isa_flags & ~AARCH64_FL_ISA_MODES);
+ aarch64_set_asm_isa_flags (base_flags | new_isa_mode);
+
+ aarch64_override_options_internal (&global_options);
+ new_tree = build_target_option_node (&global_options,
+ &global_options_set);
+ DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_tree;
+
+ tree new_optimize = build_optimization_node (&global_options,
+ &global_options_set);
+ if (new_optimize != optimization_default_node)
+ DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
+ }
+
aarch64_save_restore_target_globals (new_tree);
+
+ gcc_assert (AARCH64_ISA_MODE == new_isa_mode);
}
/* Enum describing the various ways we can handle attributes.
@@ -16731,7 +18910,7 @@ aarch64_handle_attr_arch (const char *str)
{
gcc_assert (tmp_arch);
selected_arch = tmp_arch->arch;
- aarch64_set_asm_isa_flags (tmp_flags);
+ aarch64_set_asm_isa_flags (tmp_flags | AARCH64_ISA_MODE);
return true;
}
@@ -16772,7 +18951,7 @@ aarch64_handle_attr_cpu (const char *str)
gcc_assert (tmp_cpu);
selected_tune = tmp_cpu->ident;
selected_arch = tmp_cpu->arch;
- aarch64_set_asm_isa_flags (tmp_flags);
+ aarch64_set_asm_isa_flags (tmp_flags | AARCH64_ISA_MODE);
return true;
}
@@ -16872,7 +19051,7 @@ aarch64_handle_attr_isa_flags (char *str)
features if the user wants to handpick specific features. */
if (strncmp ("+nothing", str, 8) == 0)
{
- isa_flags = 0;
+ isa_flags = AARCH64_ISA_MODE;
str += 8;
}
@@ -17248,6 +19427,18 @@ aarch64_option_valid_attribute_p (tree fndecl, tree, tree args, int)
return ret;
}
+/* Implement TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P. Use an opt-out
+ rather than an opt-in list. */
+
+static bool
+aarch64_function_attribute_inlinable_p (const_tree fndecl)
+{
+ /* A function that has local SME state cannot be inlined into its caller,
+ since we only support managing PSTATE.ZA switches at function scope. */
+ return (!aarch64_fndecl_has_new_state (fndecl, "za")
+ && !aarch64_fndecl_has_new_state (fndecl, "zt0"));
+}
+
/* Helper for aarch64_can_inline_p. In the case where CALLER and CALLEE are
tri-bool options (yes, no, don't care) and the default value is
DEF, determine whether to reject inlining. */
@@ -17269,6 +19460,63 @@ aarch64_tribools_ok_for_inlining_p (int caller, int callee,
return (callee == caller || callee == def);
}
+/* Bit allocations for ipa_fn_summary::target_info. */
+
+/* Set if the function contains a stmt that relies on the function's
+ choice of PSTATE.SM setting (0 for non-streaming, 1 for streaming).
+ Not meaningful for streaming-compatible functions. */
+constexpr auto AARCH64_IPA_SM_FIXED = 1U << 0;
+
+/* Set if the function clobbers ZA and ZT0. Not meaningful for functions that
+ have ZA state. */
+constexpr auto AARCH64_IPA_CLOBBERS_ZA = 1U << 1;
+constexpr auto AARCH64_IPA_CLOBBERS_ZT0 = 1U << 2;
+
+/* Implement TARGET_NEED_IPA_FN_TARGET_INFO. */
+
+static bool
+aarch64_need_ipa_fn_target_info (const_tree, unsigned int &)
+{
+ /* We could in principle skip this for streaming-compatible functions
+ that have ZA state, but that's a rare combination. */
+ return true;
+}
+
+/* Implement TARGET_UPDATE_IPA_FN_TARGET_INFO. */
+
+static bool
+aarch64_update_ipa_fn_target_info (unsigned int &info, const gimple *stmt)
+{
+ if (auto *ga = dyn_cast<const gasm *> (stmt))
+ {
+ /* We don't know what the asm does, so conservatively assume that
+ it requires the function's current SM mode. */
+ info |= AARCH64_IPA_SM_FIXED;
+ for (unsigned int i = 0; i < gimple_asm_nclobbers (ga); ++i)
+ {
+ tree op = gimple_asm_clobber_op (ga, i);
+ const char *clobber = TREE_STRING_POINTER (TREE_VALUE (op));
+ if (strcmp (clobber, "za") == 0)
+ info |= AARCH64_IPA_CLOBBERS_ZA;
+ if (strcmp (clobber, "zt0") == 0)
+ info |= AARCH64_IPA_CLOBBERS_ZT0;
+ }
+ }
+ if (auto *call = dyn_cast<const gcall *> (stmt))
+ {
+ if (gimple_call_builtin_p (call, BUILT_IN_MD))
+ {
+ /* The attributes on AArch64 builtins are supposed to be accurate.
+ If the function isn't marked streaming-compatible then it
+ needs whichever SM mode it selects. */
+ tree decl = gimple_call_fndecl (call);
+ if (aarch64_fndecl_pstate_sm (decl) != 0)
+ info |= AARCH64_IPA_SM_FIXED;
+ }
+ }
+ return true;
+}
+
/* Implement TARGET_CAN_INLINE_P. Decide whether it is valid
to inline CALLEE into CALLER based on target-specific info.
Make sure that the caller and callee have compatible architectural
@@ -17291,12 +19539,60 @@ aarch64_can_inline_p (tree caller, tree callee)
: target_option_default_node);
/* Callee's ISA flags should be a subset of the caller's. */
- if ((caller_opts->x_aarch64_asm_isa_flags
- & callee_opts->x_aarch64_asm_isa_flags)
- != callee_opts->x_aarch64_asm_isa_flags)
+ auto caller_asm_isa = (caller_opts->x_aarch64_asm_isa_flags
+ & ~AARCH64_FL_ISA_MODES);
+ auto callee_asm_isa = (callee_opts->x_aarch64_asm_isa_flags
+ & ~AARCH64_FL_ISA_MODES);
+ if (callee_asm_isa & ~caller_asm_isa)
+ return false;
+
+ auto caller_isa = (caller_opts->x_aarch64_isa_flags
+ & ~AARCH64_FL_ISA_MODES);
+ auto callee_isa = (callee_opts->x_aarch64_isa_flags
+ & ~AARCH64_FL_ISA_MODES);
+ if (callee_isa & ~caller_isa)
+ return false;
+
+ /* Return true if the callee might have target_info property PROPERTY.
+ The answer must be true unless we have positive proof to the contrary. */
+ auto callee_has_property = [&](unsigned int property)
+ {
+ if (ipa_fn_summaries)
+ if (auto *summary = ipa_fn_summaries->get (cgraph_node::get (callee)))
+ if (!(summary->target_info & property))
+ return false;
+ return true;
+ };
+
+ /* Streaming-compatible code can be inlined into functions with any
+ PSTATE.SM mode. Otherwise the caller and callee must agree on
+ PSTATE.SM mode, unless we can prove that the callee is naturally
+ streaming-compatible. */
+ auto caller_sm = (caller_opts->x_aarch64_isa_flags & AARCH64_FL_SM_STATE);
+ auto callee_sm = (callee_opts->x_aarch64_isa_flags & AARCH64_FL_SM_STATE);
+ if (callee_sm
+ && caller_sm != callee_sm
+ && callee_has_property (AARCH64_IPA_SM_FIXED))
+ return false;
+
+ /* aarch64_function_attribute_inlinable_p prevents new-ZA and new-ZT0
+ functions from being inlined into others. We also need to prevent
+ inlining of shared-ZA functions into functions without ZA state,
+ since this is an error condition.
+
+ The only other problematic case for ZA is inlining a function that
+ directly clobbers ZA or ZT0 into a function that has ZA or ZT0 state. */
+ auto caller_za = (caller_opts->x_aarch64_isa_flags & AARCH64_FL_ZA_ON);
+ auto callee_za = (callee_opts->x_aarch64_isa_flags & AARCH64_FL_ZA_ON);
+ if (!caller_za && callee_za)
return false;
- if ((caller_opts->x_aarch64_isa_flags & callee_opts->x_aarch64_isa_flags)
- != callee_opts->x_aarch64_isa_flags)
+ if (!callee_za
+ && aarch64_fndecl_has_state (caller, "za")
+ && callee_has_property (AARCH64_IPA_CLOBBERS_ZA))
+ return false;
+ if (!callee_za
+ && aarch64_fndecl_has_state (caller, "zt0")
+ && callee_has_property (AARCH64_IPA_CLOBBERS_ZT0))
return false;
/* Allow non-strict aligned functions inlining into strict
@@ -17365,7 +19661,7 @@ aarch64_can_inline_p (tree caller, tree callee)
/* Return the ID of the TLDESC ABI, initializing the descriptor if hasn't
been already. */
-unsigned int
+arm_pcs
aarch64_tlsdesc_abi_id ()
{
predefined_function_abi &tlsdesc_abi = function_abis[ARM_PCS_TLSDESC];
@@ -17379,7 +19675,7 @@ aarch64_tlsdesc_abi_id ()
SET_HARD_REG_BIT (full_reg_clobbers, regno);
tlsdesc_abi.initialize (ARM_PCS_TLSDESC, full_reg_clobbers);
}
- return tlsdesc_abi.id ();
+ return ARM_PCS_TLSDESC;
}
/* Return true if SYMBOL_REF X binds locally. */
@@ -18196,9 +20492,12 @@ aarch64_conditional_register_usage (void)
call_used_regs[i] = 1;
}
- /* Only allow the FFR and FFRT to be accessed via special patterns. */
+ /* Only allow these registers to be accessed via special patterns. */
+ CLEAR_HARD_REG_BIT (operand_reg_set, VG_REGNUM);
CLEAR_HARD_REG_BIT (operand_reg_set, FFR_REGNUM);
CLEAR_HARD_REG_BIT (operand_reg_set, FFRT_REGNUM);
+ for (int i = FIRST_FAKE_REGNUM; i <= LAST_FAKE_REGNUM; ++i)
+ CLEAR_HARD_REG_BIT (operand_reg_set, i);
/* When tracking speculation, we need a couple of call-clobbered registers
to track the speculation state. It would be nice to just use
@@ -18223,11 +20522,11 @@ aarch64_member_type_forces_blk (const_tree field_or_array, machine_mode mode)
an ARRAY_TYPE. In both cases we're interested in the TREE_TYPE. */
const_tree type = TREE_TYPE (field_or_array);
- /* Assign BLKmode to anything that contains multiple SVE predicates.
+ /* Assign BLKmode to anything that contains more than 2 SVE predicates.
For structures, the "multiple" case is indicated by MODE being
VOIDmode. */
unsigned int num_zr, num_pr;
- if (aarch64_sve::builtin_type_p (type, &num_zr, &num_pr) && num_pr != 0)
+ if (aarch64_sve::builtin_type_p (type, &num_zr, &num_pr) && num_pr > 2)
{
if (TREE_CODE (field_or_array) == ARRAY_TYPE)
return !simple_cst_equal (TYPE_SIZE (field_or_array),
@@ -18765,7 +21064,7 @@ aarch64_simd_container_mode (scalar_mode mode, poly_int64 width)
return aarch64_full_sve_mode (mode).else_mode (word_mode);
gcc_assert (known_eq (width, 64) || known_eq (width, 128));
- if (TARGET_SIMD)
+ if (TARGET_BASE_SIMD)
{
if (known_eq (width, 128))
return aarch64_vq_mode (mode).else_mode (word_mode);
@@ -19467,6 +21766,9 @@ aarch64_simd_valid_immediate (rtx op, simd_immediate_info *info,
if ((vec_flags & VEC_ADVSIMD) && !TARGET_SIMD)
return false;
+ if (vec_flags == (VEC_SVE_PRED | VEC_STRUCT))
+ return op == CONST0_RTX (mode) || op == CONSTM1_RTX (mode);
+
if (vec_flags & VEC_SVE_PRED)
return aarch64_sve_pred_valid_immediate (op, info);
@@ -19640,7 +21942,8 @@ aarch64_mov_operand_p (rtx x, machine_mode mode)
force everything to have a canonical form. */
if (!lra_in_progress
&& !reload_completed
- && GET_MODE_CLASS (GET_MODE (x)) == MODE_VECTOR_BOOL
+ && aarch64_sve_pred_mode_p (GET_MODE (x))
+ && known_eq (GET_MODE_SIZE (GET_MODE (x)), BYTES_PER_SVE_PRED)
&& GET_MODE (x) != VNx16BImode)
return false;
@@ -19658,7 +21961,12 @@ aarch64_mov_operand_p (rtx x, machine_mode mode)
if (SYMBOL_REF_P (x) && mode == DImode && CONSTANT_ADDRESS_P (x))
return true;
- if (TARGET_SVE && aarch64_sve_cnt_immediate_p (x))
+ if (TARGET_SVE
+ && (aarch64_sve_cnt_immediate_p (x)
+ || aarch64_sve_rdvl_immediate_p (x)))
+ return true;
+
+ if (aarch64_rdsvl_immediate_p (x))
return true;
return aarch64_classify_symbolic_expression (x)
@@ -19972,6 +22280,31 @@ aarch64_sve_struct_memory_operand_p (rtx op)
&& offset_4bit_signed_scaled_p (SVE_BYTE_MODE, last));
}
+/* Return true if OFFSET is a constant integer and if VNUM is
+ OFFSET * the number of bytes in an SVE vector. This is the requirement
+ that exists in SME LDR and STR instructions, where the VL offset must
+ equal the ZA slice offset. */
+bool
+aarch64_sme_ldr_vnum_offset_p (rtx offset, rtx vnum)
+{
+ if (!CONST_INT_P (offset) || !IN_RANGE (INTVAL (offset), 0, 15))
+ return false;
+
+ if (TARGET_STREAMING)
+ {
+ poly_int64 const_vnum;
+ return (poly_int_rtx_p (vnum, &const_vnum)
+ && known_eq (const_vnum,
+ INTVAL (offset) * BYTES_PER_SVE_VECTOR));
+ }
+ else
+ {
+ HOST_WIDE_INT factor;
+ return (aarch64_sme_vq_unspec_p (vnum, &factor)
+ && factor == INTVAL (offset) * 16);
+ }
+}
+
/* Emit a register copy from operand to operand, taking care not to
early-clobber source registers in the process.
@@ -21294,11 +23627,11 @@ aarch64_split_compare_and_swap (rtx operands[])
mem = operands[1];
oldval = operands[2];
newval = operands[3];
- is_weak = (operands[4] != const0_rtx);
model_rtx = operands[5];
scratch = operands[7];
mode = GET_MODE (mem);
model = memmodel_from_int (INTVAL (model_rtx));
+ is_weak = operands[4] != const0_rtx && mode != TImode;
/* When OLDVAL is zero and we want the strong version we can emit a tighter
loop:
@@ -21359,6 +23692,33 @@ aarch64_split_compare_and_swap (rtx operands[])
else
aarch64_gen_compare_reg (NE, scratch, const0_rtx);
+ /* 128-bit LDAXP is not atomic unless STLXP succeeds. So for a mismatch,
+ store the returned value and loop if the STLXP fails. */
+ if (mode == TImode)
+ {
+ rtx_code_label *label3 = gen_label_rtx ();
+ emit_jump_insn (gen_rtx_SET (pc_rtx, gen_rtx_LABEL_REF (Pmode, label3)));
+ emit_barrier ();
+
+ emit_label (label2);
+ aarch64_emit_store_exclusive (mode, scratch, mem, rval, model_rtx);
+
+ if (aarch64_track_speculation)
+ {
+ /* Emit an explicit compare instruction, so that we can correctly
+ track the condition codes. */
+ rtx cc_reg = aarch64_gen_compare_reg (NE, scratch, const0_rtx);
+ x = gen_rtx_NE (GET_MODE (cc_reg), cc_reg, const0_rtx);
+ }
+ else
+ x = gen_rtx_NE (VOIDmode, scratch, const0_rtx);
+ x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
+ gen_rtx_LABEL_REF (Pmode, label1), pc_rtx);
+ aarch64_emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
+
+ label2 = label3;
+ }
+
emit_label (label2);
/* If we used a CBNZ in the exchange loop emit an explicit compare with RVAL
@@ -22186,7 +24546,7 @@ aarch64_evpc_ext (struct expand_vec_perm_d *d)
/* The first element always refers to the first vector.
Check if the extracted indices are increasing by one. */
- if (d->vec_flags == VEC_SVE_PRED
+ if ((d->vec_flags & VEC_SVE_PRED)
|| !d->perm[0].is_constant (&location)
|| !d->perm.series_p (0, 1, location, 1))
return false;
@@ -22230,7 +24590,7 @@ aarch64_evpc_rev_local (struct expand_vec_perm_d *d)
unsigned int i, size, unspec;
machine_mode pred_mode;
- if (d->vec_flags == VEC_SVE_PRED
+ if ((d->vec_flags & VEC_SVE_PRED)
|| !d->one_vector_p
|| !d->perm[0].is_constant (&diff)
|| !diff)
@@ -22311,7 +24671,7 @@ aarch64_evpc_dup (struct expand_vec_perm_d *d)
machine_mode vmode = d->vmode;
rtx lane;
- if (d->vec_flags == VEC_SVE_PRED
+ if ((d->vec_flags & VEC_SVE_PRED)
|| d->perm.encoding ().encoded_nelts () != 1
|| !d->perm[0].is_constant (&elt))
return false;
@@ -23127,33 +25487,33 @@ aarch64_expand_cpymem (rtx *operands)
int mode_bits;
rtx dst = operands[0];
rtx src = operands[1];
+ unsigned align = UINTVAL (operands[3]);
rtx base;
machine_mode cur_mode = BLKmode;
+ bool size_p = optimize_function_for_size_p (cfun);
- /* Variable-sized memcpy can go through the MOPS expansion if available. */
- if (!CONST_INT_P (operands[2]))
+ /* Variable-sized or strict-align copies may use the MOPS expansion. */
+ if (!CONST_INT_P (operands[2]) || (STRICT_ALIGNMENT && align < 16))
return aarch64_expand_cpymem_mops (operands);
- unsigned HOST_WIDE_INT size = INTVAL (operands[2]);
+ unsigned HOST_WIDE_INT size = UINTVAL (operands[2]);
- /* Try to inline up to 256 bytes or use the MOPS threshold if available. */
- unsigned HOST_WIDE_INT max_copy_size
- = TARGET_MOPS ? aarch64_mops_memcpy_size_threshold : 256;
+ /* Try to inline up to 256 bytes. */
+ unsigned max_copy_size = 256;
+ unsigned mops_threshold = aarch64_mops_memcpy_size_threshold;
- bool size_p = optimize_function_for_size_p (cfun);
-
- /* Large constant-sized cpymem should go through MOPS when possible.
- It should be a win even for size optimization in the general case.
- For speed optimization the choice between MOPS and the SIMD sequence
- depends on the size of the copy, rather than number of instructions,
- alignment etc. */
- if (size > max_copy_size)
+ /* Large copies use MOPS when available or a library call. */
+ if (size > max_copy_size || (TARGET_MOPS && size > mops_threshold))
return aarch64_expand_cpymem_mops (operands);
int copy_bits = 256;
/* Default to 256-bit LDP/STP on large copies, however small copies, no SIMD
- support or slow 256-bit LDP/STP fall back to 128-bit chunks. */
+ support or slow 256-bit LDP/STP fall back to 128-bit chunks.
+
+ ??? Although it would be possible to use LDP/STP Qn in streaming mode
+ (so using TARGET_BASE_SIMD instead of TARGET_SIMD), it isn't clear
+ whether that would improve performance. */
if (size <= 24
|| !TARGET_SIMD
|| (aarch64_tune_params.extra_tuning_flags
@@ -23311,12 +25671,13 @@ aarch64_expand_setmem (rtx *operands)
unsigned HOST_WIDE_INT len;
rtx dst = operands[0];
rtx val = operands[2], src;
+ unsigned align = UINTVAL (operands[3]);
rtx base;
machine_mode cur_mode = BLKmode, next_mode;
- /* If we don't have SIMD registers or the size is variable use the MOPS
- inlined sequence if possible. */
- if (!CONST_INT_P (operands[1]) || !TARGET_SIMD)
+ /* Variable-sized or strict-align memset may use the MOPS expansion. */
+ if (!CONST_INT_P (operands[1]) || !TARGET_SIMD
+ || (STRICT_ALIGNMENT && align < 16))
return aarch64_expand_setmem_mops (operands);
bool size_p = optimize_function_for_size_p (cfun);
@@ -23324,10 +25685,13 @@ aarch64_expand_setmem (rtx *operands)
/* Default the maximum to 256-bytes when considering only libcall vs
SIMD broadcast sequence. */
unsigned max_set_size = 256;
+ unsigned mops_threshold = aarch64_mops_memset_size_threshold;
- len = INTVAL (operands[1]);
- if (len > max_set_size && !TARGET_MOPS)
- return false;
+ len = UINTVAL (operands[1]);
+
+ /* Large memset uses MOPS when available or a library call. */
+ if (len > max_set_size || (TARGET_MOPS && len > mops_threshold))
+ return aarch64_expand_setmem_mops (operands);
int cst_val = !!(CONST_INT_P (val) && (INTVAL (val) != 0));
/* The MOPS sequence takes:
@@ -23340,12 +25704,6 @@ aarch64_expand_setmem (rtx *operands)
the arguments + 1 for the call. */
unsigned libcall_cost = 4;
- /* Upper bound check. For large constant-sized setmem use the MOPS sequence
- when available. */
- if (TARGET_MOPS
- && len >= (unsigned HOST_WIDE_INT) aarch64_mops_memset_size_threshold)
- return aarch64_expand_setmem_mops (operands);
-
/* Attempt a sequence with a vector broadcast followed by stores.
Count the number of operations involved to see if it's worth it
against the alternatives. A simple counter simd_ops on the
@@ -23387,10 +25745,8 @@ aarch64_expand_setmem (rtx *operands)
simd_ops++;
n -= mode_bits;
- /* Do certain trailing copies as overlapping if it's going to be
- cheaper. i.e. less instructions to do so. For instance doing a 15
- byte copy it's more efficient to do two overlapping 8 byte copies than
- 8 + 4 + 2 + 1. Only do this when -mstrict-align is not supplied. */
+ /* Emit trailing writes using overlapping unaligned accesses
+ (when !STRICT_ALIGNMENT) - this is smaller and faster. */
if (n > 0 && n < copy_limit / 2 && !STRICT_ALIGNMENT)
{
next_mode = smallest_mode_for_size (n, MODE_INT);
@@ -25313,26 +27669,69 @@ aarch64_simd_clone_usable (struct cgraph_node *node)
static int
aarch64_comp_type_attributes (const_tree type1, const_tree type2)
{
- auto check_attr = [&](const char *name) {
- tree attr1 = lookup_attribute (name, TYPE_ATTRIBUTES (type1));
- tree attr2 = lookup_attribute (name, TYPE_ATTRIBUTES (type2));
+ auto check_attr = [&](const char *ns, const char *name) {
+ tree attr1 = lookup_attribute (ns, name, TYPE_ATTRIBUTES (type1));
+ tree attr2 = lookup_attribute (ns, name, TYPE_ATTRIBUTES (type2));
if (!attr1 && !attr2)
return true;
return attr1 && attr2 && attribute_value_equal (attr1, attr2);
};
- if (!check_attr ("aarch64_vector_pcs"))
+ if (!check_attr ("gnu", "aarch64_vector_pcs"))
+ return 0;
+ if (!check_attr ("gnu", "Advanced SIMD type"))
+ return 0;
+ if (!check_attr ("gnu", "SVE type"))
return 0;
- if (!check_attr ("Advanced SIMD type"))
+ if (!check_attr ("gnu", "SVE sizeless type"))
return 0;
- if (!check_attr ("SVE type"))
+ if (!check_attr ("arm", "streaming"))
return 0;
- if (!check_attr ("SVE sizeless type"))
+ if (!check_attr ("arm", "streaming_compatible"))
+ return 0;
+ if (aarch64_lookup_shared_state_flags (TYPE_ATTRIBUTES (type1), "za")
+ != aarch64_lookup_shared_state_flags (TYPE_ATTRIBUTES (type2), "za"))
+ return 0;
+ if (aarch64_lookup_shared_state_flags (TYPE_ATTRIBUTES (type1), "zt0")
+ != aarch64_lookup_shared_state_flags (TYPE_ATTRIBUTES (type2), "zt0"))
return 0;
return 1;
}
+/* Implement TARGET_MERGE_DECL_ATTRIBUTES. */
+
+static tree
+aarch64_merge_decl_attributes (tree olddecl, tree newdecl)
+{
+ tree old_attrs = DECL_ATTRIBUTES (olddecl);
+ tree old_new = lookup_attribute ("arm", "new", old_attrs);
+
+ tree new_attrs = DECL_ATTRIBUTES (newdecl);
+ tree new_new = lookup_attribute ("arm", "new", new_attrs);
+
+ if (DECL_INITIAL (olddecl) && new_new)
+ {
+ error ("cannot apply attribute %qs to %q+D after the function"
+ " has been defined", "new", newdecl);
+ inform (DECL_SOURCE_LOCATION (olddecl), "%q+D defined here",
+ newdecl);
+ }
+ else
+ {
+ if (old_new && new_new)
+ {
+ old_attrs = remove_attribute ("arm", "new", old_attrs);
+ TREE_VALUE (new_new) = chainon (TREE_VALUE (new_new),
+ TREE_VALUE (old_new));
+ }
+ if (new_new)
+ aarch64_check_arm_new_against_type (TREE_VALUE (new_new), newdecl);
+ }
+
+ return merge_attributes (old_attrs, new_attrs);
+}
+
/* Implement TARGET_GET_MULTILIB_ABI_NAME */
static const char *
@@ -25757,6 +28156,953 @@ aarch64_pars_overlap_p (rtx par1, rtx par2)
return false;
}
+/* Implement OPTIMIZE_MODE_SWITCHING. */
+
+bool
+aarch64_optimize_mode_switching (aarch64_mode_entity entity)
+{
+ bool have_sme_state = (aarch64_cfun_incoming_pstate_za () != 0
+ || (aarch64_cfun_has_new_state ("za")
+ && df_regs_ever_live_p (ZA_REGNUM))
+ || (aarch64_cfun_has_new_state ("zt0")
+ && df_regs_ever_live_p (ZT0_REGNUM)));
+
+ if (have_sme_state && nonlocal_goto_handler_labels)
+ {
+ static bool reported;
+ if (!reported)
+ {
+ sorry ("non-local gotos in functions with SME state");
+ reported = true;
+ }
+ }
+
+ switch (entity)
+ {
+ case aarch64_mode_entity::HAVE_ZA_SAVE_BUFFER:
+ case aarch64_mode_entity::LOCAL_SME_STATE:
+ return have_sme_state && !nonlocal_goto_handler_labels;
+ }
+ gcc_unreachable ();
+}
+
+/* Implement TARGET_MODE_EMIT for ZA_SAVE_BUFFER. */
+
+static void
+aarch64_mode_emit_za_save_buffer (aarch64_tristate_mode mode,
+ aarch64_tristate_mode prev_mode)
+{
+ if (mode == aarch64_tristate_mode::YES)
+ {
+ gcc_assert (prev_mode == aarch64_tristate_mode::NO);
+ aarch64_init_tpidr2_block ();
+ }
+ else
+ gcc_unreachable ();
+}
+
+/* Implement TARGET_MODE_EMIT for LOCAL_SME_STATE. */
+
+static void
+aarch64_mode_emit_local_sme_state (aarch64_local_sme_state mode,
+ aarch64_local_sme_state prev_mode)
+{
+ /* Back-propagation should ensure that we're always starting from
+ a known mode. */
+ gcc_assert (prev_mode != aarch64_local_sme_state::ANY);
+
+ if (prev_mode == aarch64_local_sme_state::INACTIVE_CALLER)
+ {
+ /* Commit any uncommitted lazy save. This leaves ZA either active
+ and zero (lazy save case) or off (normal case).
+
+ The sequence is:
+
+ mrs <temp>, tpidr2_el0
+ cbz <temp>, no_save
+ bl __arm_tpidr2_save
+ msr tpidr2_el0, xzr
+ zero { za } // Only if ZA is live
+ no_save: */
+ bool is_active = (mode == aarch64_local_sme_state::ACTIVE_LIVE
+ || mode == aarch64_local_sme_state::ACTIVE_DEAD);
+ auto tmp_reg = gen_reg_rtx (DImode);
+ auto active_flag = gen_int_mode (is_active, DImode);
+ emit_insn (gen_aarch64_read_tpidr2 (tmp_reg));
+ emit_insn (gen_aarch64_commit_lazy_save (tmp_reg, active_flag));
+ }
+
+ if (mode == aarch64_local_sme_state::ACTIVE_LIVE
+ || mode == aarch64_local_sme_state::ACTIVE_DEAD)
+ {
+ if (prev_mode == aarch64_local_sme_state::INACTIVE_LOCAL)
+ {
+ /* Make ZA active after being inactive.
+
+ First handle the case in which the lazy save we set up was
+ committed by a callee. If the function's source-level ZA state
+ is live then we must conditionally restore it from the lazy
+ save buffer. Otherwise we can just force PSTATE.ZA to 1. */
+ if (mode == aarch64_local_sme_state::ACTIVE_LIVE)
+ emit_insn (gen_aarch64_restore_za (aarch64_get_tpidr2_ptr ()));
+ else
+ emit_insn (gen_aarch64_smstart_za ());
+
+ /* Now handle the case in which the lazy save was not committed.
+ In that case, ZA still contains the current function's ZA state,
+ and we just need to cancel the lazy save. */
+ emit_insn (gen_aarch64_clear_tpidr2 ());
+
+ /* Restore the ZT0 state, if we have some. */
+ if (aarch64_cfun_has_state ("zt0"))
+ aarch64_restore_zt0 (true);
+
+ return;
+ }
+
+ if (prev_mode == aarch64_local_sme_state::SAVED_LOCAL)
+ {
+ /* Retrieve the current function's ZA state from the lazy save
+ buffer. */
+ aarch64_restore_za (aarch64_get_tpidr2_ptr ());
+
+ /* Restore the ZT0 state, if we have some. */
+ if (aarch64_cfun_has_state ("zt0"))
+ aarch64_restore_zt0 (true);
+ return;
+ }
+
+ if (prev_mode == aarch64_local_sme_state::INACTIVE_CALLER
+ || prev_mode == aarch64_local_sme_state::OFF)
+ {
+ /* INACTIVE_CALLER means that we are enabling ZA for the first
+ time in this function. The code above means that ZA is either
+ active and zero (if we committed a lazy save) or off. Handle
+ the latter case by forcing ZA on.
+
+ OFF means that PSTATE.ZA is guaranteed to be 0. We just need
+ to force it to 1.
+
+ Both cases leave ZA zeroed. */
+ emit_insn (gen_aarch64_smstart_za ());
+
+ /* Restore the ZT0 state, if we have some. */
+ if (prev_mode == aarch64_local_sme_state::OFF
+ && aarch64_cfun_has_state ("zt0"))
+ aarch64_restore_zt0 (true);
+ return;
+ }
+
+ if (prev_mode == aarch64_local_sme_state::ACTIVE_DEAD
+ || prev_mode == aarch64_local_sme_state::ACTIVE_LIVE)
+ /* A simple change in liveness, such as in a CFG structure where
+ ZA is only conditionally defined. No code is needed. */
+ return;
+
+ gcc_unreachable ();
+ }
+
+ if (mode == aarch64_local_sme_state::INACTIVE_LOCAL)
+ {
+ if (prev_mode == aarch64_local_sme_state::ACTIVE_LIVE
+ || prev_mode == aarch64_local_sme_state::ACTIVE_DEAD
+ || prev_mode == aarch64_local_sme_state::INACTIVE_CALLER)
+ {
+ /* Save the ZT0 state, if we have some. */
+ if (aarch64_cfun_has_state ("zt0"))
+ aarch64_save_zt0 ();
+
+ /* A transition from ACTIVE_LIVE to INACTIVE_LOCAL is the usual
+ case of setting up a lazy save buffer before a call.
+ A transition from INACTIVE_CALLER is similar, except that
+ the contents of ZA are known to be zero.
+
+ A transition from ACTIVE_DEAD means that ZA is live at the
+ point of the transition, but is dead on at least one incoming
+ edge. (That is, ZA is only conditionally initialized.)
+ For efficiency, we want to set up a lazy save even for
+ dead contents, since forcing ZA off would make later code
+ restore ZA from the lazy save buffer. */
+ emit_insn (gen_aarch64_write_tpidr2 (aarch64_get_tpidr2_ptr ()));
+ return;
+ }
+
+ if (prev_mode == aarch64_local_sme_state::SAVED_LOCAL
+ || prev_mode == aarch64_local_sme_state::OFF)
+ /* We're simply discarding the information about which inactive
+ state applies. */
+ return;
+
+ gcc_unreachable ();
+ }
+
+ if (mode == aarch64_local_sme_state::INACTIVE_CALLER
+ || mode == aarch64_local_sme_state::OFF)
+ {
+ /* Save the ZT0 state, if we have some. */
+ if ((prev_mode == aarch64_local_sme_state::ACTIVE_LIVE
+ || prev_mode == aarch64_local_sme_state::ACTIVE_DEAD)
+ && mode == aarch64_local_sme_state::OFF
+ && aarch64_cfun_has_state ("zt0"))
+ aarch64_save_zt0 ();
+
+ /* The transition to INACTIVE_CALLER is used before returning from
+ new("za") functions. Any state in ZA belongs to the current
+ function rather than a caller, but that state is no longer
+ needed. Clear any pending lazy save and turn ZA off.
+
+ The transition to OFF is used before calling a private-ZA function.
+ We committed any incoming lazy save above, so at this point any
+ contents in ZA belong to the current function. */
+ if (prev_mode == aarch64_local_sme_state::INACTIVE_LOCAL)
+ emit_insn (gen_aarch64_clear_tpidr2 ());
+
+ if (prev_mode != aarch64_local_sme_state::OFF
+ && prev_mode != aarch64_local_sme_state::SAVED_LOCAL)
+ emit_insn (gen_aarch64_smstop_za ());
+
+ return;
+ }
+
+ if (mode == aarch64_local_sme_state::SAVED_LOCAL)
+ {
+ /* This is a transition to an exception handler. */
+ gcc_assert (prev_mode == aarch64_local_sme_state::OFF
+ || prev_mode == aarch64_local_sme_state::INACTIVE_LOCAL);
+ return;
+ }
+
+ gcc_unreachable ();
+}
+
+/* Implement TARGET_MODE_EMIT. */
+
+static void
+aarch64_mode_emit (int entity, int mode, int prev_mode, HARD_REG_SET live)
+{
+ if (mode == prev_mode)
+ return;
+
+ start_sequence ();
+ switch (aarch64_mode_entity (entity))
+ {
+ case aarch64_mode_entity::HAVE_ZA_SAVE_BUFFER:
+ aarch64_mode_emit_za_save_buffer (aarch64_tristate_mode (mode),
+ aarch64_tristate_mode (prev_mode));
+ break;
+
+ case aarch64_mode_entity::LOCAL_SME_STATE:
+ aarch64_mode_emit_local_sme_state (aarch64_local_sme_state (mode),
+ aarch64_local_sme_state (prev_mode));
+ break;
+ }
+ rtx_insn *seq = get_insns ();
+ end_sequence ();
+
+ /* Get the set of clobbered registers that are currently live. */
+ HARD_REG_SET clobbers = {};
+ for (rtx_insn *insn = seq; insn; insn = NEXT_INSN (insn))
+ {
+ vec_rtx_properties properties;
+ properties.add_insn (insn, false);
+ for (rtx_obj_reference ref : properties.refs ())
+ if (ref.is_write () && HARD_REGISTER_NUM_P (ref.regno))
+ SET_HARD_REG_BIT (clobbers, ref.regno);
+ }
+ clobbers &= live;
+
+ /* Emit instructions to save clobbered registers to pseudos. Queue
+ instructions to restore the registers afterwards.
+
+ This should only needed in rare situations. */
+ auto_vec<rtx, 33> after;
+ for (unsigned int regno = R0_REGNUM; regno < R30_REGNUM; ++regno)
+ if (TEST_HARD_REG_BIT (clobbers, regno))
+ {
+ rtx hard_reg = gen_rtx_REG (DImode, regno);
+ rtx pseudo_reg = gen_reg_rtx (DImode);
+ emit_move_insn (pseudo_reg, hard_reg);
+ after.quick_push (gen_move_insn (hard_reg, pseudo_reg));
+ }
+ if (TEST_HARD_REG_BIT (clobbers, CC_REGNUM))
+ {
+ rtx pseudo_reg = gen_reg_rtx (DImode);
+ emit_insn (gen_aarch64_save_nzcv (pseudo_reg));
+ after.quick_push (gen_aarch64_restore_nzcv (pseudo_reg));
+ }
+
+ /* Emit the transition instructions themselves. */
+ emit_insn (seq);
+
+ /* Restore the clobbered registers. */
+ for (auto *insn : after)
+ emit_insn (insn);
+}
+
+/* Return true if INSN references the SME state represented by hard register
+ REGNO. */
+
+static bool
+aarch64_insn_references_sme_state_p (rtx_insn *insn, unsigned int regno)
+{
+ df_ref ref;
+ FOR_EACH_INSN_DEF (ref, insn)
+ if (!DF_REF_FLAGS_IS_SET (ref, DF_REF_MUST_CLOBBER)
+ && DF_REF_REGNO (ref) == regno)
+ return true;
+ FOR_EACH_INSN_USE (ref, insn)
+ if (DF_REF_REGNO (ref) == regno)
+ return true;
+ return false;
+}
+
+/* Implement TARGET_MODE_NEEDED for LOCAL_SME_STATE. */
+
+static aarch64_local_sme_state
+aarch64_mode_needed_local_sme_state (rtx_insn *insn, HARD_REG_SET live)
+{
+ if (!CALL_P (insn)
+ && find_reg_note (insn, REG_EH_REGION, NULL_RTX))
+ {
+ static bool reported;
+ if (!reported)
+ {
+ sorry ("catching non-call exceptions in functions with SME state");
+ reported = true;
+ }
+ /* Aim for graceful error recovery by picking the value that is
+ least likely to generate an ICE. */
+ return aarch64_local_sme_state::INACTIVE_LOCAL;
+ }
+
+ /* A non-local goto is equivalent to a return. We disallow non-local
+ receivers in functions with SME state, so we know that the target
+ expects ZA to be dormant or off. */
+ if (JUMP_P (insn)
+ && find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
+ return aarch64_local_sme_state::INACTIVE_CALLER;
+
+ /* start_private_za_call and end_private_za_call bracket a sequence
+ that calls a private-ZA function. Force ZA to be turned off if the
+ function doesn't have any live ZA state, otherwise require ZA to be
+ inactive. */
+ auto icode = recog_memoized (insn);
+ if (icode == CODE_FOR_aarch64_start_private_za_call
+ || icode == CODE_FOR_aarch64_end_private_za_call)
+ return (TEST_HARD_REG_BIT (live, ZA_REGNUM)
+ ? aarch64_local_sme_state::INACTIVE_LOCAL
+ : aarch64_local_sme_state::OFF);
+
+ /* Force ZA to contain the current function's ZA state if INSN wants
+ to access it. Do the same for accesses to ZT0, since ZA and ZT0
+ are both controlled by PSTATE.ZA. */
+ if (aarch64_insn_references_sme_state_p (insn, ZA_REGNUM)
+ || aarch64_insn_references_sme_state_p (insn, ZT0_REGNUM))
+ return (TEST_HARD_REG_BIT (live, ZA_REGNUM)
+ ? aarch64_local_sme_state::ACTIVE_LIVE
+ : aarch64_local_sme_state::ACTIVE_DEAD);
+
+ return aarch64_local_sme_state::ANY;
+}
+
+/* Implement TARGET_MODE_NEEDED for ZA_SAVE_BUFFER. */
+
+static aarch64_tristate_mode
+aarch64_mode_needed_za_save_buffer (rtx_insn *insn, HARD_REG_SET live)
+{
+ /* We need to set up a lazy save buffer no later than the first
+ transition to INACTIVE_LOCAL (which involves setting up a lazy save). */
+ if (aarch64_mode_needed_local_sme_state (insn, live)
+ == aarch64_local_sme_state::INACTIVE_LOCAL)
+ return aarch64_tristate_mode::YES;
+
+ /* Also make sure that the lazy save buffer is set up before the first
+ insn that throws internally. The exception handler will sometimes
+ load from it. */
+ if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
+ return aarch64_tristate_mode::YES;
+
+ return aarch64_tristate_mode::MAYBE;
+}
+
+/* Implement TARGET_MODE_NEEDED. */
+
+static int
+aarch64_mode_needed (int entity, rtx_insn *insn, HARD_REG_SET live)
+{
+ switch (aarch64_mode_entity (entity))
+ {
+ case aarch64_mode_entity::HAVE_ZA_SAVE_BUFFER:
+ return int (aarch64_mode_needed_za_save_buffer (insn, live));
+
+ case aarch64_mode_entity::LOCAL_SME_STATE:
+ return int (aarch64_mode_needed_local_sme_state (insn, live));
+ }
+ gcc_unreachable ();
+}
+
+/* Implement TARGET_MODE_AFTER for LOCAL_SME_STATE. */
+
+static aarch64_local_sme_state
+aarch64_mode_after_local_sme_state (aarch64_local_sme_state mode,
+ HARD_REG_SET live)
+{
+ /* Note places where ZA dies, so that we can try to avoid saving and
+ restoring state that isn't needed. */
+ if (mode == aarch64_local_sme_state::ACTIVE_LIVE
+ && !TEST_HARD_REG_BIT (live, ZA_REGNUM))
+ return aarch64_local_sme_state::ACTIVE_DEAD;
+
+ /* Note where ZA is born, e.g. when moving past an __arm_out("za")
+ function. */
+ if (mode == aarch64_local_sme_state::ACTIVE_DEAD
+ && TEST_HARD_REG_BIT (live, ZA_REGNUM))
+ return aarch64_local_sme_state::ACTIVE_LIVE;
+
+ return mode;
+}
+
+/* Implement TARGET_MODE_AFTER. */
+
+static int
+aarch64_mode_after (int entity, int mode, rtx_insn *, HARD_REG_SET live)
+{
+ switch (aarch64_mode_entity (entity))
+ {
+ case aarch64_mode_entity::HAVE_ZA_SAVE_BUFFER:
+ return mode;
+
+ case aarch64_mode_entity::LOCAL_SME_STATE:
+ return int (aarch64_mode_after_local_sme_state
+ (aarch64_local_sme_state (mode), live));
+ }
+ gcc_unreachable ();
+}
+
+/* Implement TARGET_MODE_CONFLUENCE for LOCAL_SME_STATE. */
+
+static aarch64_local_sme_state
+aarch64_local_sme_confluence (aarch64_local_sme_state mode1,
+ aarch64_local_sme_state mode2)
+{
+ /* Perform a symmetrical check for two values. */
+ auto is_pair = [&](aarch64_local_sme_state val1,
+ aarch64_local_sme_state val2)
+ {
+ return ((mode1 == val1 && mode2 == val2)
+ || (mode1 == val2 && mode2 == val1));
+ };
+
+ /* INACTIVE_CALLER means ZA is off or it has dormant contents belonging
+ to a caller. OFF is one of the options. */
+ if (is_pair (aarch64_local_sme_state::INACTIVE_CALLER,
+ aarch64_local_sme_state::OFF))
+ return aarch64_local_sme_state::INACTIVE_CALLER;
+
+ /* Similarly for dormant contents belonging to the current function. */
+ if (is_pair (aarch64_local_sme_state::INACTIVE_LOCAL,
+ aarch64_local_sme_state::OFF))
+ return aarch64_local_sme_state::INACTIVE_LOCAL;
+
+ /* Treat a conditionally-initialized value as a fully-initialized value. */
+ if (is_pair (aarch64_local_sme_state::ACTIVE_LIVE,
+ aarch64_local_sme_state::ACTIVE_DEAD))
+ return aarch64_local_sme_state::ACTIVE_LIVE;
+
+ return aarch64_local_sme_state::ANY;
+}
+
+/* Implement TARGET_MODE_CONFLUENCE. */
+
+static int
+aarch64_mode_confluence (int entity, int mode1, int mode2)
+{
+ gcc_assert (mode1 != mode2);
+ switch (aarch64_mode_entity (entity))
+ {
+ case aarch64_mode_entity::HAVE_ZA_SAVE_BUFFER:
+ return int (aarch64_tristate_mode::MAYBE);
+
+ case aarch64_mode_entity::LOCAL_SME_STATE:
+ return int (aarch64_local_sme_confluence
+ (aarch64_local_sme_state (mode1),
+ aarch64_local_sme_state (mode2)));
+ }
+ gcc_unreachable ();
+}
+
+/* Implement TARGET_MODE_BACKPROP for an entity that either stays
+ NO throughput, or makes one transition from NO to YES. */
+
+static aarch64_tristate_mode
+aarch64_one_shot_backprop (aarch64_tristate_mode mode1,
+ aarch64_tristate_mode mode2)
+{
+ /* Keep bringing the transition forward until it starts from NO. */
+ if (mode1 == aarch64_tristate_mode::MAYBE
+ && mode2 == aarch64_tristate_mode::YES)
+ return mode2;
+
+ return aarch64_tristate_mode::MAYBE;
+}
+
+/* Implement TARGET_MODE_BACKPROP for LOCAL_SME_STATE. */
+
+static aarch64_local_sme_state
+aarch64_local_sme_backprop (aarch64_local_sme_state mode1,
+ aarch64_local_sme_state mode2)
+{
+ /* We always need to know what the current state is when transitioning
+ to a new state. Force any location with indeterminate starting state
+ to be active. */
+ if (mode1 == aarch64_local_sme_state::ANY)
+ switch (mode2)
+ {
+ case aarch64_local_sme_state::INACTIVE_CALLER:
+ case aarch64_local_sme_state::OFF:
+ case aarch64_local_sme_state::ACTIVE_DEAD:
+ /* The current function's ZA state is not live. */
+ return aarch64_local_sme_state::ACTIVE_DEAD;
+
+ case aarch64_local_sme_state::INACTIVE_LOCAL:
+ case aarch64_local_sme_state::ACTIVE_LIVE:
+ /* The current function's ZA state is live. */
+ return aarch64_local_sme_state::ACTIVE_LIVE;
+
+ case aarch64_local_sme_state::SAVED_LOCAL:
+ /* This is a transition to an exception handler. Since we don't
+ support non-call exceptions for SME functions, the source of
+ the transition must be known. We'll assert later if that's
+ not the case. */
+ return aarch64_local_sme_state::ANY;
+
+ case aarch64_local_sme_state::ANY:
+ return aarch64_local_sme_state::ANY;
+ }
+
+ return aarch64_local_sme_state::ANY;
+}
+
+/* Implement TARGET_MODE_BACKPROP. */
+
+static int
+aarch64_mode_backprop (int entity, int mode1, int mode2)
+{
+ switch (aarch64_mode_entity (entity))
+ {
+ case aarch64_mode_entity::HAVE_ZA_SAVE_BUFFER:
+ return int (aarch64_one_shot_backprop (aarch64_tristate_mode (mode1),
+ aarch64_tristate_mode (mode2)));
+
+ case aarch64_mode_entity::LOCAL_SME_STATE:
+ return int (aarch64_local_sme_backprop
+ (aarch64_local_sme_state (mode1),
+ aarch64_local_sme_state (mode2)));
+ }
+ gcc_unreachable ();
+}
+
+/* Implement TARGET_MODE_ENTRY. */
+
+static int
+aarch64_mode_entry (int entity)
+{
+ switch (aarch64_mode_entity (entity))
+ {
+ case aarch64_mode_entity::HAVE_ZA_SAVE_BUFFER:
+ return int (aarch64_tristate_mode::NO);
+
+ case aarch64_mode_entity::LOCAL_SME_STATE:
+ return int (aarch64_cfun_shared_flags ("za") != 0
+ ? aarch64_local_sme_state::ACTIVE_LIVE
+ : aarch64_cfun_incoming_pstate_za () != 0
+ ? aarch64_local_sme_state::ACTIVE_DEAD
+ : aarch64_local_sme_state::INACTIVE_CALLER);
+ }
+ gcc_unreachable ();
+}
+
+/* Implement TARGET_MODE_EXIT. */
+
+static int
+aarch64_mode_exit (int entity)
+{
+ switch (aarch64_mode_entity (entity))
+ {
+ case aarch64_mode_entity::HAVE_ZA_SAVE_BUFFER:
+ return int (aarch64_tristate_mode::MAYBE);
+
+ case aarch64_mode_entity::LOCAL_SME_STATE:
+ return int (aarch64_cfun_shared_flags ("za") != 0
+ ? aarch64_local_sme_state::ACTIVE_LIVE
+ : aarch64_cfun_incoming_pstate_za () != 0
+ ? aarch64_local_sme_state::ACTIVE_DEAD
+ : aarch64_local_sme_state::INACTIVE_CALLER);
+ }
+ gcc_unreachable ();
+}
+
+/* Implement TARGET_MODE_EH_HANDLER. */
+
+static int
+aarch64_mode_eh_handler (int entity)
+{
+ switch (aarch64_mode_entity (entity))
+ {
+ case aarch64_mode_entity::HAVE_ZA_SAVE_BUFFER:
+ /* Require a lazy save buffer to be allocated before the first
+ insn that can throw. */
+ return int (aarch64_tristate_mode::YES);
+
+ case aarch64_mode_entity::LOCAL_SME_STATE:
+ return int (aarch64_local_sme_state::SAVED_LOCAL);
+ }
+ gcc_unreachable ();
+}
+
+/* Implement TARGET_MODE_PRIORITY. */
+
+static int
+aarch64_mode_priority (int, int n)
+{
+ return n;
+}
+
+/* Implement TARGET_MD_ASM_ADJUST. */
+
+static rtx_insn *
+aarch64_md_asm_adjust (vec<rtx> &outputs, vec<rtx> &inputs,
+ vec<machine_mode> &input_modes,
+ vec<const char *> &constraints,
+ vec<rtx> &uses, vec<rtx> &clobbers,
+ HARD_REG_SET &clobbered_regs, location_t loc)
+{
+ rtx_insn *seq = arm_md_asm_adjust (outputs, inputs, input_modes, constraints,
+ uses, clobbers, clobbered_regs, loc);
+
+ /* "za" in the clobber list of a function with ZA state is defined to
+ mean that the asm can read from and write to ZA. We can model the
+ read using a USE, but unfortunately, it's not possible to model the
+ write directly. Use a separate insn to model the effect.
+
+ We must ensure that ZA is active on entry, which is enforced by using
+ SME_STATE_REGNUM. The asm must ensure that ZA is active on return.
+
+ The same thing applies to ZT0. */
+ if (TARGET_ZA)
+ for (unsigned int i = clobbers.length (); i-- > 0; )
+ {
+ rtx x = clobbers[i];
+ if (REG_P (x)
+ && (REGNO (x) == ZA_REGNUM || REGNO (x) == ZT0_REGNUM))
+ {
+ auto id = cfun->machine->next_asm_update_za_id++;
+
+ start_sequence ();
+ if (seq)
+ emit_insn (seq);
+ rtx id_rtx = gen_int_mode (id, SImode);
+ emit_insn (REGNO (x) == ZA_REGNUM
+ ? gen_aarch64_asm_update_za (id_rtx)
+ : gen_aarch64_asm_update_zt0 (id_rtx));
+ seq = get_insns ();
+ end_sequence ();
+
+ auto mode = REGNO (x) == ZA_REGNUM ? VNx16QImode : V8DImode;
+ uses.safe_push (gen_rtx_REG (mode, REGNO (x)));
+ uses.safe_push (gen_rtx_REG (DImode, SME_STATE_REGNUM));
+
+ clobbers.ordered_remove (i);
+ CLEAR_HARD_REG_BIT (clobbered_regs, REGNO (x));
+ }
+ }
+ return seq;
+}
+
+/* BB is the target of an exception or nonlocal goto edge, which means
+ that PSTATE.SM is known to be 0 on entry. Put it into the state that
+ the current function requires. */
+
+static bool
+aarch64_switch_pstate_sm_for_landing_pad (basic_block bb)
+{
+ if (TARGET_NON_STREAMING)
+ return false;
+
+ start_sequence ();
+ rtx_insn *guard_label = nullptr;
+ if (TARGET_STREAMING_COMPATIBLE)
+ guard_label = aarch64_guard_switch_pstate_sm (IP0_REGNUM,
+ AARCH64_FL_SM_OFF);
+ aarch64_sme_mode_switch_regs args_switch;
+ args_switch.add_call_preserved_regs (df_get_live_in (bb));
+ args_switch.emit_prologue ();
+ aarch64_switch_pstate_sm (AARCH64_FL_SM_OFF, AARCH64_FL_SM_ON);
+ args_switch.emit_epilogue ();
+ if (guard_label)
+ emit_label (guard_label);
+ auto seq = get_insns ();
+ end_sequence ();
+
+ emit_insn_after (seq, bb_note (bb));
+ return true;
+}
+
+/* JUMP is a nonlocal goto. Its target requires PSTATE.SM to be 0 on entry,
+ so arrange to make it so. */
+
+static bool
+aarch64_switch_pstate_sm_for_jump (rtx_insn *jump)
+{
+ if (TARGET_NON_STREAMING)
+ return false;
+
+ start_sequence ();
+ rtx_insn *guard_label = nullptr;
+ if (TARGET_STREAMING_COMPATIBLE)
+ guard_label = aarch64_guard_switch_pstate_sm (IP0_REGNUM,
+ AARCH64_FL_SM_OFF);
+ aarch64_switch_pstate_sm (AARCH64_FL_SM_ON, AARCH64_FL_SM_OFF);
+ if (guard_label)
+ emit_label (guard_label);
+ auto seq = get_insns ();
+ end_sequence ();
+
+ emit_insn_before (seq, jump);
+ return true;
+}
+
+/* If CALL involves a change in PSTATE.SM, emit the instructions needed
+ to switch to the new mode and the instructions needed to restore the
+ original mode. Return true if something changed. */
+static bool
+aarch64_switch_pstate_sm_for_call (rtx_call_insn *call)
+{
+ /* Mode switches for sibling calls are handled via the epilogue. */
+ if (SIBLING_CALL_P (call))
+ return false;
+
+ auto callee_isa_mode = aarch64_insn_callee_isa_mode (call);
+ if (!aarch64_call_switches_pstate_sm (callee_isa_mode))
+ return false;
+
+ /* Switch mode before the call, preserving any argument registers
+ across the switch. */
+ start_sequence ();
+ rtx_insn *args_guard_label = nullptr;
+ if (TARGET_STREAMING_COMPATIBLE)
+ args_guard_label = aarch64_guard_switch_pstate_sm (IP0_REGNUM,
+ callee_isa_mode);
+ aarch64_sme_mode_switch_regs args_switch;
+ args_switch.add_call_args (call);
+ args_switch.emit_prologue ();
+ aarch64_switch_pstate_sm (AARCH64_ISA_MODE, callee_isa_mode);
+ args_switch.emit_epilogue ();
+ if (args_guard_label)
+ emit_label (args_guard_label);
+ auto args_seq = get_insns ();
+ end_sequence ();
+ emit_insn_before (args_seq, call);
+
+ if (find_reg_note (call, REG_NORETURN, NULL_RTX))
+ return true;
+
+ /* Switch mode after the call, preserving any return registers across
+ the switch. */
+ start_sequence ();
+ rtx_insn *return_guard_label = nullptr;
+ if (TARGET_STREAMING_COMPATIBLE)
+ return_guard_label = aarch64_guard_switch_pstate_sm (IP0_REGNUM,
+ callee_isa_mode);
+ aarch64_sme_mode_switch_regs return_switch;
+ return_switch.add_call_result (call);
+ return_switch.emit_prologue ();
+ aarch64_switch_pstate_sm (callee_isa_mode, AARCH64_ISA_MODE);
+ return_switch.emit_epilogue ();
+ if (return_guard_label)
+ emit_label (return_guard_label);
+ auto result_seq = get_insns ();
+ end_sequence ();
+ emit_insn_after (result_seq, call);
+ return true;
+}
+
+namespace {
+
+const pass_data pass_data_switch_pstate_sm =
+{
+ RTL_PASS, // type
+ "smstarts", // name
+ OPTGROUP_NONE, // optinfo_flags
+ TV_NONE, // tv_id
+ 0, // properties_required
+ 0, // properties_provided
+ 0, // properties_destroyed
+ 0, // todo_flags_start
+ TODO_df_finish, // todo_flags_finish
+};
+
+class pass_switch_pstate_sm : public rtl_opt_pass
+{
+public:
+ pass_switch_pstate_sm (gcc::context *ctxt)
+ : rtl_opt_pass (pass_data_switch_pstate_sm, ctxt)
+ {}
+
+ // opt_pass methods:
+ bool gate (function *) override final;
+ unsigned int execute (function *) override final;
+};
+
+bool
+pass_switch_pstate_sm::gate (function *fn)
+{
+ return (aarch64_fndecl_pstate_sm (fn->decl) != AARCH64_FL_SM_OFF
+ || cfun->machine->call_switches_pstate_sm);
+}
+
+/* Emit any instructions needed to switch PSTATE.SM. */
+unsigned int
+pass_switch_pstate_sm::execute (function *fn)
+{
+ basic_block bb;
+
+ auto_sbitmap blocks (last_basic_block_for_fn (cfun));
+ bitmap_clear (blocks);
+ FOR_EACH_BB_FN (bb, fn)
+ {
+ if (has_abnormal_call_or_eh_pred_edge_p (bb)
+ && aarch64_switch_pstate_sm_for_landing_pad (bb))
+ bitmap_set_bit (blocks, bb->index);
+
+ if (cfun->machine->call_switches_pstate_sm)
+ {
+ rtx_insn *insn;
+ FOR_BB_INSNS (bb, insn)
+ if (auto *call = dyn_cast<rtx_call_insn *> (insn))
+ if (aarch64_switch_pstate_sm_for_call (call))
+ bitmap_set_bit (blocks, bb->index);
+ }
+
+ auto end = BB_END (bb);
+ if (JUMP_P (end)
+ && find_reg_note (end, REG_NON_LOCAL_GOTO, NULL_RTX)
+ && aarch64_switch_pstate_sm_for_jump (end))
+ bitmap_set_bit (blocks, bb->index);
+ }
+ find_many_sub_basic_blocks (blocks);
+ clear_aux_for_blocks ();
+ return 0;
+}
+
+}
+
+rtl_opt_pass *
+make_pass_switch_pstate_sm (gcc::context *ctxt)
+{
+ return new pass_switch_pstate_sm (ctxt);
+}
+
+/* Parse an implementation-defined system register name of
+ the form S[0-3]_[0-7]_C[0-15]_C[0-15]_[0-7].
+ Return true if name matched against above pattern, false
+ otherwise. */
+bool
+aarch64_is_implem_def_reg (const char *regname)
+{
+ unsigned pos = 0;
+ unsigned name_len = strlen (regname);
+ if (name_len < 12 || name_len > 14)
+ return false;
+
+ auto cterm_valid_p = [&]()
+ {
+ bool leading_zero_p = false;
+ unsigned i = 0;
+ char n[3] = {0};
+
+ if (regname[pos] != 'c')
+ return false;
+ pos++;
+ while (regname[pos] != '_')
+ {
+ if (leading_zero_p)
+ return false;
+ if (i == 0 && regname[pos] == '0')
+ leading_zero_p = true;
+ if (i > 2)
+ return false;
+ if (!ISDIGIT (regname[pos]))
+ return false;
+ n[i++] = regname[pos++];
+ }
+ if (atoi (n) > 15)
+ return false;
+ return true;
+ };
+
+ if (regname[pos] != 's')
+ return false;
+ pos++;
+ if (regname[pos] < '0' || regname[pos] > '3')
+ return false;
+ pos++;
+ if (regname[pos++] != '_')
+ return false;
+ if (regname[pos] < '0' || regname[pos] > '7')
+ return false;
+ pos++;
+ if (regname[pos++] != '_')
+ return false;
+ if (!cterm_valid_p ())
+ return false;
+ if (regname[pos++] != '_')
+ return false;
+ if (!cterm_valid_p ())
+ return false;
+ if (regname[pos++] != '_')
+ return false;
+ if (regname[pos] < '0' || regname[pos] > '7')
+ return false;
+ return true;
+}
+
+/* Return true if REGNAME matches either a known permitted system
+ register name, or a generic sysreg specification. For use in
+ back-end predicate `aarch64_sysreg_string'. */
+bool
+aarch64_valid_sysreg_name_p (const char *regname)
+{
+ const sysreg_t *sysreg = aarch64_lookup_sysreg_map (regname);
+ if (sysreg == NULL)
+ return aarch64_is_implem_def_reg (regname);
+ if (sysreg->arch_reqs)
+ return (aarch64_isa_flags & sysreg->arch_reqs);
+ return true;
+}
+
+/* Return the generic sysreg specification for a valid system register
+ name, otherwise NULL. WRITE_P is true iff the register is being
+ written to. */
+const char *
+aarch64_retrieve_sysreg (const char *regname, bool write_p)
+{
+ const sysreg_t *sysreg = aarch64_lookup_sysreg_map (regname);
+ if (sysreg == NULL)
+ {
+ if (aarch64_is_implem_def_reg (regname))
+ return regname;
+ else
+ return NULL;
+ }
+ if ((write_p && (sysreg->properties & F_REG_READ))
+ || (!write_p && (sysreg->properties & F_REG_WRITE)))
+ return NULL;
+ if ((~aarch64_isa_flags & sysreg->arch_reqs) != 0)
+ return NULL;
+ return sysreg->encoding;
+}
+
/* Target-specific selftests. */
#if CHECKING_P
@@ -25884,6 +29230,48 @@ aarch64_test_fractional_cost ()
ASSERT_EQ (cf (1, 2).as_double (), 0.5);
}
+/* Calculate whether our system register data, as imported from
+ `aarch64-sys-reg.def' has any duplicate entries. */
+static void
+aarch64_test_sysreg_encoding_clashes (void)
+{
+ using dup_instances_t = hash_map<nofree_string_hash,
+ std::vector<const sysreg_t*>>;
+
+ dup_instances_t duplicate_instances;
+
+ /* Every time an encoding is established to come up more than once
+ we add it to a "clash-analysis queue", which is then used to extract
+ necessary information from our hash map when establishing whether
+ repeated encodings are valid. */
+
+ /* 1) Collect recurrence information. */
+ for (unsigned i = 0; i < ARRAY_SIZE (aarch64_sysregs); i++)
+ {
+ const sysreg_t *reg = aarch64_sysregs + i;
+
+ std::vector<const sysreg_t*> *tmp
+ = &duplicate_instances.get_or_insert (reg->encoding);
+
+ tmp->push_back (reg);
+ }
+
+ /* 2) Carry out analysis on collected data. */
+ for (auto instance : duplicate_instances)
+ {
+ unsigned nrep = instance.second.size ();
+ if (nrep > 1)
+ for (unsigned i = 0; i < nrep; i++)
+ for (unsigned j = i + 1; j < nrep; j++)
+ {
+ const sysreg_t *a = instance.second[i];
+ const sysreg_t *b = instance.second[j];
+ ASSERT_TRUE ((a->properties != b->properties)
+ || (a->arch_reqs != b->arch_reqs));
+ }
+ }
+}
+
/* Run all target-specific selftests. */
static void
@@ -25891,6 +29279,7 @@ aarch64_run_selftests (void)
{
aarch64_test_loading_full_dump ();
aarch64_test_fractional_cost ();
+ aarch64_test_sysreg_encoding_clashes ();
}
} // namespace selftest
@@ -25944,9 +29333,22 @@ aarch64_run_selftests (void)
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_arg_info_false
+#undef TARGET_FRAME_POINTER_REQUIRED
+#define TARGET_FRAME_POINTER_REQUIRED aarch64_frame_pointer_required
+
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE aarch64_can_eliminate
+#undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
+#define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P \
+ aarch64_function_attribute_inlinable_p
+
+#undef TARGET_NEED_IPA_FN_TARGET_INFO
+#define TARGET_NEED_IPA_FN_TARGET_INFO aarch64_need_ipa_fn_target_info
+
+#undef TARGET_UPDATE_IPA_FN_TARGET_INFO
+#define TARGET_UPDATE_IPA_FN_TARGET_INFO aarch64_update_ipa_fn_target_info
+
#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P aarch64_can_inline_p
@@ -26022,6 +29424,12 @@ aarch64_run_selftests (void)
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P aarch64_function_value_regno_p
+#undef TARGET_START_CALL_ARGS
+#define TARGET_START_CALL_ARGS aarch64_start_call_args
+
+#undef TARGET_END_CALL_ARGS
+#define TARGET_END_CALL_ARGS aarch64_end_call_args
+
#undef TARGET_GIMPLE_FOLD_BUILTIN
#define TARGET_GIMPLE_FOLD_BUILTIN aarch64_gimple_fold_builtin
@@ -26390,6 +29798,9 @@ aarch64_libgcc_floating_mode_supported_p
#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES aarch64_comp_type_attributes
+#undef TARGET_MERGE_DECL_ATTRIBUTES
+#define TARGET_MERGE_DECL_ATTRIBUTES aarch64_merge_decl_attributes
+
#undef TARGET_GET_MULTILIB_ABI_NAME
#define TARGET_GET_MULTILIB_ABI_NAME aarch64_get_multilib_abi_name
@@ -26410,8 +29821,35 @@ aarch64_libgcc_floating_mode_supported_p
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
+#undef TARGET_MODE_EMIT
+#define TARGET_MODE_EMIT aarch64_mode_emit
+
+#undef TARGET_MODE_NEEDED
+#define TARGET_MODE_NEEDED aarch64_mode_needed
+
+#undef TARGET_MODE_AFTER
+#define TARGET_MODE_AFTER aarch64_mode_after
+
+#undef TARGET_MODE_CONFLUENCE
+#define TARGET_MODE_CONFLUENCE aarch64_mode_confluence
+
+#undef TARGET_MODE_BACKPROP
+#define TARGET_MODE_BACKPROP aarch64_mode_backprop
+
+#undef TARGET_MODE_ENTRY
+#define TARGET_MODE_ENTRY aarch64_mode_entry
+
+#undef TARGET_MODE_EXIT
+#define TARGET_MODE_EXIT aarch64_mode_exit
+
+#undef TARGET_MODE_EH_HANDLER
+#define TARGET_MODE_EH_HANDLER aarch64_mode_eh_handler
+
+#undef TARGET_MODE_PRIORITY
+#define TARGET_MODE_PRIORITY aarch64_mode_priority
+
#undef TARGET_MD_ASM_ADJUST
-#define TARGET_MD_ASM_ADJUST arm_md_asm_adjust
+#define TARGET_MD_ASM_ADJUST aarch64_md_asm_adjust
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aarch64_asm_file_end
@@ -26425,6 +29863,15 @@ aarch64_libgcc_floating_mode_supported_p
#undef TARGET_CONST_ANCHOR
#define TARGET_CONST_ANCHOR 0x1000000
+#undef TARGET_EXTRA_LIVE_ON_ENTRY
+#define TARGET_EXTRA_LIVE_ON_ENTRY aarch64_extra_live_on_entry
+
+#undef TARGET_USE_LATE_PROLOGUE_EPILOGUE
+#define TARGET_USE_LATE_PROLOGUE_EPILOGUE aarch64_use_late_prologue_epilogue
+
+#undef TARGET_EMIT_EPILOGUE_FOR_SIBCALL
+#define TARGET_EMIT_EPILOGUE_FOR_SIBCALL aarch64_expand_epilogue
+
struct gcc_target targetm = TARGET_INITIALIZER;
#include "gt-aarch64.h"
diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
index 1ac2989..f277e78 100644
--- a/gcc/config/aarch64/aarch64.h
+++ b/gcc/config/aarch64/aarch64.h
@@ -61,8 +61,15 @@
#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN)
/* AdvSIMD is supported in the default configuration, unless disabled by
- -mgeneral-regs-only or by the +nosimd extension. */
-#define TARGET_SIMD (AARCH64_ISA_SIMD)
+ -mgeneral-regs-only or by the +nosimd extension. The set of available
+ instructions is then subdivided into:
+
+ - the "base" set, available both in SME streaming mode and in
+ non-streaming mode
+
+ - the full set, available only in non-streaming mode. */
+#define TARGET_BASE_SIMD (AARCH64_ISA_SIMD)
+#define TARGET_SIMD (AARCH64_ISA_SIMD && AARCH64_ISA_SM_OFF)
#define TARGET_FLOAT (AARCH64_ISA_FP)
#define UNITS_PER_WORD 8
@@ -157,10 +164,13 @@
#ifndef USED_FOR_TARGET
-/* Define an enum of all features (architectures and extensions). */
+/* Define an enum of all features (ISA modes, architectures and extensions).
+ The ISA modes must come first. */
enum class aarch64_feature : unsigned char {
+#define DEF_AARCH64_ISA_MODE(IDENT) IDENT,
#define AARCH64_OPT_EXTENSION(A, IDENT, C, D, E, F) IDENT,
#define AARCH64_ARCH(A, B, IDENT, D, E) IDENT,
+#include "aarch64-isa-modes.def"
#include "aarch64-option-extensions.def"
#include "aarch64-arches.def"
};
@@ -169,16 +179,39 @@ enum class aarch64_feature : unsigned char {
#define HANDLE(IDENT) \
constexpr auto AARCH64_FL_##IDENT \
= aarch64_feature_flags (1) << int (aarch64_feature::IDENT);
+#define DEF_AARCH64_ISA_MODE(IDENT) HANDLE (IDENT)
#define AARCH64_OPT_EXTENSION(A, IDENT, C, D, E, F) HANDLE (IDENT)
#define AARCH64_ARCH(A, B, IDENT, D, E) HANDLE (IDENT)
+#include "aarch64-isa-modes.def"
#include "aarch64-option-extensions.def"
#include "aarch64-arches.def"
#undef HANDLE
+constexpr auto AARCH64_FL_SM_STATE = AARCH64_FL_SM_ON | AARCH64_FL_SM_OFF;
+
+constexpr unsigned int AARCH64_NUM_ISA_MODES = (0
+#define DEF_AARCH64_ISA_MODE(IDENT) + 1
+#include "aarch64-isa-modes.def"
+);
+
+/* The mask of all ISA modes. */
+constexpr auto AARCH64_FL_ISA_MODES
+ = (aarch64_feature_flags (1) << AARCH64_NUM_ISA_MODES) - 1;
+
+/* The default ISA mode, for functions with no attributes that specify
+ something to the contrary. */
+constexpr auto AARCH64_FL_DEFAULT_ISA_MODE = AARCH64_FL_SM_OFF;
+
#endif
/* Macros to test ISA flags. */
+#define AARCH64_ISA_SM_OFF (aarch64_isa_flags & AARCH64_FL_SM_OFF)
+#define AARCH64_ISA_SM_ON (aarch64_isa_flags & AARCH64_FL_SM_ON)
+#define AARCH64_ISA_ZA_ON (aarch64_isa_flags & AARCH64_FL_ZA_ON)
+#define AARCH64_ISA_MODE (aarch64_isa_flags & AARCH64_FL_ISA_MODES)
+#define AARCH64_ISA_V8A (aarch64_isa_flags & AARCH64_FL_V8A)
+#define AARCH64_ISA_V8_1A (aarch64_isa_flags & AARCH64_FL_V8_1A)
#define AARCH64_ISA_CRC (aarch64_isa_flags & AARCH64_FL_CRC)
#define AARCH64_ISA_CRYPTO (aarch64_isa_flags & AARCH64_FL_CRYPTO)
#define AARCH64_ISA_FP (aarch64_isa_flags & AARCH64_FL_FP)
@@ -193,6 +226,10 @@ enum class aarch64_feature : unsigned char {
#define AARCH64_ISA_SVE2_BITPERM (aarch64_isa_flags & AARCH64_FL_SVE2_BITPERM)
#define AARCH64_ISA_SVE2_SHA3 (aarch64_isa_flags & AARCH64_FL_SVE2_SHA3)
#define AARCH64_ISA_SVE2_SM4 (aarch64_isa_flags & AARCH64_FL_SVE2_SM4)
+#define AARCH64_ISA_SME (aarch64_isa_flags & AARCH64_FL_SME)
+#define AARCH64_ISA_SME_I16I64 (aarch64_isa_flags & AARCH64_FL_SME_I16I64)
+#define AARCH64_ISA_SME_F64F64 (aarch64_isa_flags & AARCH64_FL_SME_F64F64)
+#define AARCH64_ISA_SME2 (aarch64_isa_flags & AARCH64_FL_SME2)
#define AARCH64_ISA_V8_3A (aarch64_isa_flags & AARCH64_FL_V8_3A)
#define AARCH64_ISA_DOTPROD (aarch64_isa_flags & AARCH64_FL_DOTPROD)
#define AARCH64_ISA_AES (aarch64_isa_flags & AARCH64_FL_AES)
@@ -215,6 +252,8 @@ enum class aarch64_feature : unsigned char {
#define AARCH64_ISA_SB (aarch64_isa_flags & AARCH64_FL_SB)
#define AARCH64_ISA_V8R (aarch64_isa_flags & AARCH64_FL_V8R)
#define AARCH64_ISA_PAUTH (aarch64_isa_flags & AARCH64_FL_PAUTH)
+#define AARCH64_ISA_V8_7A (aarch64_isa_flags & AARCH64_FL_V8_7A)
+#define AARCH64_ISA_V8_8A (aarch64_isa_flags & AARCH64_FL_V8_8A)
#define AARCH64_ISA_V9A (aarch64_isa_flags & AARCH64_FL_V9A)
#define AARCH64_ISA_V9_1A (aarch64_isa_flags & AARCH64_FL_V9_1A)
#define AARCH64_ISA_V9_2A (aarch64_isa_flags & AARCH64_FL_V9_2A)
@@ -223,6 +262,32 @@ enum class aarch64_feature : unsigned char {
#define AARCH64_ISA_LS64 (aarch64_isa_flags & AARCH64_FL_LS64)
#define AARCH64_ISA_CSSC (aarch64_isa_flags & AARCH64_FL_CSSC)
+/* The current function is a normal non-streaming function. */
+#define TARGET_NON_STREAMING (AARCH64_ISA_SM_OFF)
+
+/* The current function has a streaming body. */
+#define TARGET_STREAMING (AARCH64_ISA_SM_ON)
+
+/* The current function has a streaming-compatible body. */
+#define TARGET_STREAMING_COMPATIBLE \
+ ((aarch64_isa_flags & AARCH64_FL_SM_STATE) == 0)
+
+/* PSTATE.ZA is enabled in the current function body. */
+#define TARGET_ZA (AARCH64_ISA_ZA_ON)
+/* AARCH64_FL options necessary for system register implementation. */
+
+/* Define AARCH64_FL aliases for architectural features which are protected
+ by -march flags in binutils but which receive no special treatment by GCC.
+
+ Such flags are inherited from the Binutils definition of system registers
+ and are mapped to the architecture in which the feature is implemented. */
+#define AARCH64_FL_RAS AARCH64_FL_V8A
+#define AARCH64_FL_LOR AARCH64_FL_V8_1A
+#define AARCH64_FL_PAN AARCH64_FL_V8_1A
+#define AARCH64_FL_AMU AARCH64_FL_V8_4A
+#define AARCH64_FL_SCXTNUM AARCH64_FL_V8_5A
+#define AARCH64_FL_ID_PFR2 AARCH64_FL_V8_5A
+
/* Crypto is an optional extension to AdvSIMD. */
#define TARGET_CRYPTO (AARCH64_ISA_CRYPTO)
@@ -261,16 +326,35 @@ enum class aarch64_feature : unsigned char {
#define TARGET_SVE2 (AARCH64_ISA_SVE2)
/* SVE2 AES instructions, enabled through +sve2-aes. */
-#define TARGET_SVE2_AES (AARCH64_ISA_SVE2_AES)
+#define TARGET_SVE2_AES (AARCH64_ISA_SVE2_AES && TARGET_NON_STREAMING)
/* SVE2 BITPERM instructions, enabled through +sve2-bitperm. */
-#define TARGET_SVE2_BITPERM (AARCH64_ISA_SVE2_BITPERM)
+#define TARGET_SVE2_BITPERM (AARCH64_ISA_SVE2_BITPERM && TARGET_NON_STREAMING)
/* SVE2 SHA3 instructions, enabled through +sve2-sha3. */
-#define TARGET_SVE2_SHA3 (AARCH64_ISA_SVE2_SHA3)
+#define TARGET_SVE2_SHA3 (AARCH64_ISA_SVE2_SHA3 && TARGET_NON_STREAMING)
/* SVE2 SM4 instructions, enabled through +sve2-sm4. */
-#define TARGET_SVE2_SM4 (AARCH64_ISA_SVE2_SM4)
+#define TARGET_SVE2_SM4 (AARCH64_ISA_SVE2_SM4 && TARGET_NON_STREAMING)
+
+/* SME instructions, enabled through +sme. Note that this does not
+ imply anything about the state of PSTATE.SM. */
+#define TARGET_SME (AARCH64_ISA_SME)
+
+/* Same with streaming mode enabled. */
+#define TARGET_STREAMING_SME (TARGET_STREAMING && TARGET_SME)
+
+/* The FEAT_SME_I16I64 extension to SME, enabled through +sme-i16i64. */
+#define TARGET_SME_I16I64 (AARCH64_ISA_SME_I16I64)
+
+/* The FEAT_SME_F64F64 extension to SME, enabled through +sme-f64f64. */
+#define TARGET_SME_F64F64 (AARCH64_ISA_SME_F64F64)
+
+/* SME2 instructions, enabled through +sme2. */
+#define TARGET_SME2 (AARCH64_ISA_SME2)
+
+/* Same with streaming mode enabled. */
+#define TARGET_STREAMING_SME2 (TARGET_STREAMING && TARGET_SME2)
/* ARMv8.3-A features. */
#define TARGET_ARMV8_3 (AARCH64_ISA_V8_3A)
@@ -420,7 +504,8 @@ enum class aarch64_feature : unsigned char {
1, 1, 1, 1, /* SFP, AP, CC, VG */ \
0, 0, 0, 0, 0, 0, 0, 0, /* P0 - P7 */ \
0, 0, 0, 0, 0, 0, 0, 0, /* P8 - P15 */ \
- 1, 1 /* FFR and FFRT */ \
+ 1, 1, /* FFR and FFRT */ \
+ 1, 1, 1, 1, 1, 1, 1, 1 /* Fake registers */ \
}
/* X30 is marked as caller-saved which is in line with regular function call
@@ -430,7 +515,7 @@ enum class aarch64_feature : unsigned char {
true but not until function epilogues have been generated. This ensures
that X30 is available for use in leaf functions if needed. */
-#define CALL_USED_REGISTERS \
+#define CALL_REALLY_USED_REGISTERS \
{ \
1, 1, 1, 1, 1, 1, 1, 1, /* R0 - R7 */ \
1, 1, 1, 1, 1, 1, 1, 1, /* R8 - R15 */ \
@@ -440,10 +525,11 @@ enum class aarch64_feature : unsigned char {
0, 0, 0, 0, 0, 0, 0, 0, /* V8 - V15 */ \
1, 1, 1, 1, 1, 1, 1, 1, /* V16 - V23 */ \
1, 1, 1, 1, 1, 1, 1, 1, /* V24 - V31 */ \
- 1, 1, 1, 1, /* SFP, AP, CC, VG */ \
+ 1, 1, 1, 0, /* SFP, AP, CC, VG */ \
1, 1, 1, 1, 1, 1, 1, 1, /* P0 - P7 */ \
1, 1, 1, 1, 1, 1, 1, 1, /* P8 - P15 */ \
- 1, 1 /* FFR and FFRT */ \
+ 1, 1, /* FFR and FFRT */ \
+ 0, 0, 0, 0, 0, 0, 0, 0 /* Fake registers */ \
}
#define REGISTER_NAMES \
@@ -459,7 +545,9 @@ enum class aarch64_feature : unsigned char {
"sfp", "ap", "cc", "vg", \
"p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", \
"p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", \
- "ffr", "ffrt" \
+ "ffr", "ffrt", \
+ "lowering", "tpidr2_block", "sme_state", "tpidr2_setup", \
+ "za_free", "za_saved", "za", "zt0" \
}
/* Generate the register aliases for core register N */
@@ -473,6 +561,8 @@ enum class aarch64_feature : unsigned char {
{"b" # N, V0_REGNUM + (N)}, \
{"z" # N, V0_REGNUM + (N)}
+#define P_ALIASES(N) {"pn" # N, P0_REGNUM + (N)}
+
/* Provide aliases for all of the ISA defined register name forms.
These aliases are convenient for use in the clobber lists of inline
asm statements. */
@@ -493,7 +583,11 @@ enum class aarch64_feature : unsigned char {
V_ALIASES(16), V_ALIASES(17), V_ALIASES(18), V_ALIASES(19), \
V_ALIASES(20), V_ALIASES(21), V_ALIASES(22), V_ALIASES(23), \
V_ALIASES(24), V_ALIASES(25), V_ALIASES(26), V_ALIASES(27), \
- V_ALIASES(28), V_ALIASES(29), V_ALIASES(30), V_ALIASES(31) \
+ V_ALIASES(28), V_ALIASES(29), V_ALIASES(30), V_ALIASES(31), \
+ P_ALIASES(0), P_ALIASES(1), P_ALIASES(2), P_ALIASES(3), \
+ P_ALIASES(4), P_ALIASES(5), P_ALIASES(6), P_ALIASES(7), \
+ P_ALIASES(8), P_ALIASES(9), P_ALIASES(10), P_ALIASES(11), \
+ P_ALIASES(12), P_ALIASES(13), P_ALIASES(14), P_ALIASES(15) \
}
#define EPILOGUE_USES(REGNO) (aarch64_epilogue_uses (REGNO))
@@ -508,7 +602,7 @@ enum class aarch64_feature : unsigned char {
#define FRAME_POINTER_REGNUM SFP_REGNUM
#define STACK_POINTER_REGNUM SP_REGNUM
#define ARG_POINTER_REGNUM AP_REGNUM
-#define FIRST_PSEUDO_REGISTER (FFRT_REGNUM + 1)
+#define FIRST_PSEUDO_REGISTER (LAST_FAKE_REGNUM + 1)
/* The number of argument registers available for each class. */
#define NUM_ARG_REGS 8
@@ -583,9 +677,12 @@ enum class aarch64_feature : unsigned char {
/* Output assembly strings after .cfi_startproc is emitted. */
#define ASM_POST_CFI_STARTPROC aarch64_post_cfi_startproc
-/* For EH returns X4 contains the stack adjustment. */
-#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, R4_REGNUM)
-#define EH_RETURN_HANDLER_RTX aarch64_eh_return_handler_rtx ()
+/* For EH returns X4 is a flag that is set in the EH return
+ code paths and then X5 and X6 contain the stack adjustment
+ and return address respectively. */
+#define EH_RETURN_TAKEN_RTX gen_rtx_REG (Pmode, R4_REGNUM)
+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, R5_REGNUM)
+#define EH_RETURN_HANDLER_RTX gen_rtx_REG (Pmode, R6_REGNUM)
#undef TARGET_COMPUTE_FRAME_LAYOUT
#define TARGET_COMPUTE_FRAME_LAYOUT aarch64_layout_frame
@@ -611,6 +708,12 @@ enum class aarch64_feature : unsigned char {
&& (REGNO) != R17_REGNUM \
&& (REGNO) != R30_REGNUM) \
+#define W8_W11_REGNUM_P(REGNO) \
+ IN_RANGE (REGNO, R8_REGNUM, R11_REGNUM)
+
+#define W12_W15_REGNUM_P(REGNO) \
+ IN_RANGE (REGNO, R12_REGNUM, R15_REGNUM)
+
#define FP_REGNUM_P(REGNO) \
(((unsigned) (REGNO - V0_REGNUM)) <= (V31_REGNUM - V0_REGNUM))
@@ -628,12 +731,17 @@ enum class aarch64_feature : unsigned char {
#define FP_SIMD_SAVED_REGNUM_P(REGNO) \
(((unsigned) (REGNO - V8_REGNUM)) <= (V23_REGNUM - V8_REGNUM))
+
+#define FAKE_REGNUM_P(REGNO) \
+ IN_RANGE (REGNO, FIRST_FAKE_REGNUM, LAST_FAKE_REGNUM)
/* Register and constant classes. */
enum reg_class
{
NO_REGS,
+ W8_W11_REGS,
+ W12_W15_REGS,
TAILCALL_ADDR_REGS,
STUB_REGS,
GENERAL_REGS,
@@ -648,6 +756,7 @@ enum reg_class
PR_REGS,
FFR_REGS,
PR_AND_FFR_REGS,
+ FAKE_REGS,
ALL_REGS,
LIM_REG_CLASSES /* Last */
};
@@ -657,6 +766,8 @@ enum reg_class
#define REG_CLASS_NAMES \
{ \
"NO_REGS", \
+ "W8_W11_REGS", \
+ "W12_W15_REGS", \
"TAILCALL_ADDR_REGS", \
"STUB_REGS", \
"GENERAL_REGS", \
@@ -671,12 +782,15 @@ enum reg_class
"PR_REGS", \
"FFR_REGS", \
"PR_AND_FFR_REGS", \
+ "FAKE_REGS", \
"ALL_REGS" \
}
#define REG_CLASS_CONTENTS \
{ \
{ 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
+ { 0x00000f00, 0x00000000, 0x00000000 }, /* W8_W11_REGS */ \
+ { 0x0000f000, 0x00000000, 0x00000000 }, /* W12_W15_REGS */ \
{ 0x00030000, 0x00000000, 0x00000000 }, /* TAILCALL_ADDR_REGS */\
{ 0x3ffcffff, 0x00000000, 0x00000000 }, /* STUB_REGS */ \
{ 0x7fffffff, 0x00000000, 0x00000003 }, /* GENERAL_REGS */ \
@@ -691,6 +805,7 @@ enum reg_class
{ 0x00000000, 0x00000000, 0x000ffff0 }, /* PR_REGS */ \
{ 0x00000000, 0x00000000, 0x00300000 }, /* FFR_REGS */ \
{ 0x00000000, 0x00000000, 0x003ffff0 }, /* PR_AND_FFR_REGS */ \
+ { 0x00000000, 0x00000000, 0x3fc00000 }, /* FAKE_REGS */ \
{ 0xffffffff, 0xffffffff, 0x000fffff } /* ALL_REGS */ \
}
@@ -776,6 +891,13 @@ struct GTY (()) aarch64_frame
vec<unsigned, va_gc_atomic> *saved_fprs;
vec<unsigned, va_gc_atomic> *saved_prs;
+ /* The offset from the base of the frame of a 64-bit slot whose low
+ bit contains the incoming value of PSTATE.SM. This slot must be
+ within reach of the hard frame pointer.
+
+ The offset is -1 if such a slot isn't needed. */
+ poly_int64 old_svcr_offset;
+
/* The number of extra stack bytes taken up by register varargs.
This area is allocated by the callee at the very top of the
frame. This value is rounded up to a multiple of
@@ -883,7 +1005,29 @@ typedef struct GTY (()) machine_function
bool reg_is_wrapped_separately[LAST_SAVED_REGNUM];
/* One entry for each general purpose register. */
rtx call_via[SP_REGNUM];
+
+ /* A pseudo register that points to the function's TPIDR2 block, or null
+ if the function doesn't have a TPIDR2 block. */
+ rtx tpidr2_block;
+
+ /* A pseudo register that points to the function's ZA save buffer,
+ or null if none. */
+ rtx za_save_buffer;
+
+ /* A stack slot that stores the contents of the function's ZT0 state. */
+ rtx zt0_save_buffer;
+
bool label_is_assembled;
+
+ /* True if we've expanded at least one call to a function that changes
+ PSTATE.SM. This should only be used for saving compile time: false
+ guarantees that no such mode switch exists. */
+ bool call_switches_pstate_sm;
+
+ /* Used to generated unique identifiers for each update to ZA by an
+ asm statement. */
+ unsigned int next_asm_update_za_id;
+
/* A set of all decls that have been passed to a vld1 intrinsic in the
current function. This is used to help guide the vector cost model. */
hash_set<tree> *vector_load_decls;
@@ -932,6 +1076,7 @@ enum arm_pcs
typedef struct
{
enum arm_pcs pcs_variant;
+ aarch64_feature_flags isa_mode;
int aapcs_arg_processed; /* No need to lay out this argument again. */
int aapcs_ncrn; /* Next Core register number. */
int aapcs_nextncrn; /* Next next core register number. */
@@ -951,6 +1096,17 @@ typedef struct
stack arg area so far. */
bool silent_p; /* True if we should act silently, rather than
raise an error for invalid calls. */
+
+ /* AARCH64_STATE_* flags that describe whether the function shares ZA
+ and ZT0 with its callers. */
+ unsigned int shared_za_flags;
+ unsigned int shared_zt0_flags;
+
+ /* A list of registers that need to be saved and restored around a
+ change to PSTATE.SM. An auto_vec would be more convenient, but those
+ can't be copied. */
+ unsigned int num_sme_mode_switch_args;
+ rtx sme_mode_switch_args[12];
} CUMULATIVE_ARGS;
#endif
@@ -1317,4 +1473,61 @@ extern poly_uint16 aarch64_sve_vg;
|| ((T) == US_TRUNCATE && (S) == LSHIFTRT) \
|| ((T) == SS_TRUNCATE && (S) == ASHIFTRT))
+#ifndef USED_FOR_TARGET
+
+/* Enumerates the mode-switching "entities" for AArch64. */
+enum class aarch64_mode_entity : int
+{
+ /* An aarch64_tristate_mode that says whether we have created a local
+ save buffer for the current function's ZA state. The only transition
+ is from NO to YES. */
+ HAVE_ZA_SAVE_BUFFER,
+
+ /* An aarch64_local_sme_state that reflects the state of all data
+ controlled by PSTATE.ZA. */
+ LOCAL_SME_STATE
+};
+
+/* Describes the state of all data controlled by PSTATE.ZA */
+enum class aarch64_local_sme_state : int
+{
+ /* ZA is in the off or dormant state. If it is dormant, the contents
+ of ZA belong to a caller. */
+ INACTIVE_CALLER,
+
+ /* ZA is in the off state: PSTATE.ZA is 0 and TPIDR2_EL0 is null. */
+ OFF,
+
+ /* ZA is in the off or dormant state. If it is dormant, the contents
+ of ZA belong to the current function. */
+ INACTIVE_LOCAL,
+
+ /* ZA is in the off state and the current function's ZA contents are
+ stored in the lazy save buffer. This is the state on entry to
+ exception handlers. */
+ SAVED_LOCAL,
+
+ /* ZA is in the active state: PSTATE.ZA is 1 and TPIDR2_EL0 is null.
+ The contents of ZA are live. */
+ ACTIVE_LIVE,
+
+ /* ZA is in the active state: PSTATE.ZA is 1 and TPIDR2_EL0 is null.
+ The contents of ZA are dead. */
+ ACTIVE_DEAD,
+
+ /* ZA could be in multiple states. */
+ ANY
+};
+
+enum class aarch64_tristate_mode : int { NO, YES, MAYBE };
+
+#define OPTIMIZE_MODE_SWITCHING(ENTITY) \
+ aarch64_optimize_mode_switching (aarch64_mode_entity (ENTITY))
+
+#define NUM_MODES_FOR_MODE_SWITCHING \
+ { int (aarch64_tristate_mode::MAYBE), \
+ int (aarch64_local_sme_state::ANY) }
+
+#endif
+
#endif /* GCC_AARCH64_H */
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 7be1de3..a6d5e8c 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -111,6 +111,59 @@
;; "FFR token": a fake register used for representing the scheduling
;; restrictions on FFR-related operations.
(FFRT_REGNUM 85)
+
+ ;; ----------------------------------------------------------------
+ ;; Fake registers
+ ;; ----------------------------------------------------------------
+ ;; These registers represent abstract things, rather than real
+ ;; architected registers.
+
+ ;; Sometimes we use placeholder instructions to mark where later
+ ;; ABI-related lowering is needed. These placeholders read and
+ ;; write this register. Instructions that depend on the lowering
+ ;; read the register.
+ (LOWERING_REGNUM 86)
+
+ ;; Represents the contents of the current function's TPIDR2 block,
+ ;; in abstract form.
+ (TPIDR2_BLOCK_REGNUM 87)
+
+ ;; Holds the value that the current function wants PSTATE.ZA to be.
+ ;; The actual value can sometimes vary, because it does not track
+ ;; changes to PSTATE.ZA that happen during a lazy save and restore.
+ ;; Those effects are instead tracked by ZA_SAVED_REGNUM.
+ (SME_STATE_REGNUM 88)
+
+ ;; Instructions write to this register if they set TPIDR2_EL0 to a
+ ;; well-defined value. Instructions read from the register if they
+ ;; depend on the result of such writes.
+ ;;
+ ;; The register does not model the architected TPIDR2_ELO, just the
+ ;; current function's management of it.
+ (TPIDR2_SETUP_REGNUM 89)
+
+ ;; Represents the property "has an incoming lazy save been committed?".
+ (ZA_FREE_REGNUM 90)
+
+ ;; Represents the property "are the current function's ZA contents
+ ;; stored in the lazy save buffer, rather than in ZA itself?".
+ (ZA_SAVED_REGNUM 91)
+
+ ;; Represents the contents of the current function's ZA state in
+ ;; abstract form. At various times in the function, these contents
+ ;; might be stored in ZA itself, or in the function's lazy save buffer.
+ ;;
+ ;; The contents persist even when the architected ZA is off. Private-ZA
+ ;; functions have no effect on its contents.
+ (ZA_REGNUM 92)
+
+ ;; Similarly represents the contents of the current function's ZT0 state.
+ (ZT0_REGNUM 93)
+
+ (FIRST_FAKE_REGNUM LOWERING_REGNUM)
+ (LAST_FAKE_REGNUM ZT0_REGNUM)
+ ;; ----------------------------------------------------------------
+
;; The pair of scratch registers used for stack probing with -fstack-check.
;; Leave R9 alone as a possible choice for the static chain.
;; Note that the use of these registers is mutually exclusive with the use
@@ -237,9 +290,13 @@
UNSPEC_NZCV
UNSPEC_XPACLRI
UNSPEC_LD1_SVE
+ UNSPEC_LD1_SVE_COUNT
UNSPEC_ST1_SVE
+ UNSPEC_ST1_SVE_COUNT
UNSPEC_LDNT1_SVE
+ UNSPEC_LDNT1_SVE_COUNT
UNSPEC_STNT1_SVE
+ UNSPEC_STNT1_SVE_COUNT
UNSPEC_LD1RQ
UNSPEC_LD1_GATHER
UNSPEC_LDFF1_GATHER
@@ -281,6 +338,8 @@
UNSPEC_UPDATE_FFRT
UNSPEC_RDFFR
UNSPEC_WRFFR
+ UNSPEC_SYSREG_RDI
+ UNSPEC_SYSREG_WDI
;; Represents an SVE-style lane index, in which the indexing applies
;; within the containing 128-bit block.
UNSPEC_SVE_LANE_SELECT
@@ -294,7 +353,12 @@
UNSPEC_TAG_SPACE ; Translate address to MTE tag address space.
UNSPEC_LD1RO
UNSPEC_SALT_ADDR
+ UNSPEC_SAVE_NZCV
+ UNSPEC_RESTORE_NZCV
UNSPECV_PATCHABLE_AREA
+ ;; Wraps a constant integer that should be multiplied by the number
+ ;; of quadwords in an SME vector.
+ UNSPEC_SME_VQ
])
(define_c_enum "unspecv" [
@@ -366,7 +430,8 @@
;; As a convenience, "fp_q" means "fp" + the ability to move between
;; Q registers and is equivalent to "simd".
-(define_enum "arches" [ any rcpc8_4 fp fp_q simd nosimd sve fp16])
+(define_enum "arches" [any rcpc8_4 fp fp_q base_simd nobase_simd
+ simd nosimd sve fp16 sme])
(define_enum_attr "arch" "arches" (const_string "any"))
@@ -394,6 +459,12 @@
(and (eq_attr "arch" "fp")
(match_test "TARGET_FLOAT"))
+ (and (eq_attr "arch" "base_simd")
+ (match_test "TARGET_BASE_SIMD"))
+
+ (and (eq_attr "arch" "nobase_simd")
+ (match_test "!TARGET_BASE_SIMD"))
+
(and (eq_attr "arch" "fp_q, simd")
(match_test "TARGET_SIMD"))
@@ -404,7 +475,10 @@
(match_test "TARGET_FP_F16INST"))
(and (eq_attr "arch" "sve")
- (match_test "TARGET_SVE")))
+ (match_test "TARGET_SVE"))
+
+ (and (eq_attr "arch" "sme")
+ (match_test "TARGET_SME")))
(const_string "yes")
(const_string "no")))
@@ -476,6 +550,22 @@
;; Jumps and other miscellaneous insns
;; -------------------------------------------------------------------
+(define_insn "aarch64_read_sysregdi"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec_volatile:DI [(match_operand 1 "aarch64_sysreg_string" "")]
+ UNSPEC_SYSREG_RDI))]
+ ""
+ "mrs\t%x0, %1"
+)
+
+(define_insn "aarch64_write_sysregdi"
+ [(unspec_volatile:DI [(match_operand 0 "aarch64_sysreg_string" "")
+ (match_operand:DI 1 "register_operand" "rZ")]
+ UNSPEC_SYSREG_WDI)]
+ ""
+ "msr\t%0, %x1"
+)
+
(define_insn "indirect_jump"
[(set (pc) (match_operand:DI 0 "register_operand" "r"))]
""
@@ -863,16 +953,7 @@
[(clobber (const_int 0))]
""
"
- aarch64_expand_epilogue (false);
- DONE;
- "
-)
-
-(define_expand "sibcall_epilogue"
- [(clobber (const_int 0))]
- ""
- "
- aarch64_expand_epilogue (true);
+ aarch64_expand_epilogue (nullptr);
DONE;
"
)
@@ -916,7 +997,7 @@
(set_attr "sls_length" "retbr")]
)
-(define_insn "*cb<optab><mode>1"
+(define_insn "aarch64_cb<optab><mode>1"
[(set (pc) (if_then_else (EQL (match_operand:GPI 0 "register_operand" "r")
(const_int 0))
(label_ref (match_operand 1 "" ""))
@@ -958,7 +1039,7 @@
operands[1]);
})
-(define_insn "*tb<optab><ALLI:mode><GPI:mode>1"
+(define_insn "@aarch64_tb<optab><ALLI:mode><GPI:mode>"
[(set (pc) (if_then_else
(EQL (zero_extract:GPI (match_operand:ALLI 0 "register_operand" "r")
(const_int 1)
@@ -1045,7 +1126,7 @@
[(parallel
[(call (match_operand 0 "memory_operand")
(match_operand 1 "general_operand"))
- (unspec:DI [(match_operand 2 "const_int_operand")] UNSPEC_CALLEE_ABI)
+ (unspec:DI [(match_operand 2)] UNSPEC_CALLEE_ABI)
(clobber (reg:DI LR_REGNUM))])]
""
"
@@ -1072,7 +1153,7 @@
[(set (match_operand 0 "")
(call (match_operand 1 "memory_operand")
(match_operand 2 "general_operand")))
- (unspec:DI [(match_operand 3 "const_int_operand")] UNSPEC_CALLEE_ABI)
+ (unspec:DI [(match_operand 3)] UNSPEC_CALLEE_ABI)
(clobber (reg:DI LR_REGNUM))])]
""
"
@@ -1099,7 +1180,7 @@
[(parallel
[(call (match_operand 0 "memory_operand")
(match_operand 1 "general_operand"))
- (unspec:DI [(match_operand 2 "const_int_operand")] UNSPEC_CALLEE_ABI)
+ (unspec:DI [(match_operand 2)] UNSPEC_CALLEE_ABI)
(return)])]
""
{
@@ -1113,7 +1194,7 @@
[(set (match_operand 0 "")
(call (match_operand 1 "memory_operand")
(match_operand 2 "general_operand")))
- (unspec:DI [(match_operand 3 "const_int_operand")] UNSPEC_CALLEE_ABI)
+ (unspec:DI [(match_operand 3)] UNSPEC_CALLEE_ABI)
(return)])]
""
{
@@ -1233,22 +1314,23 @@
"(register_operand (operands[0], <MODE>mode)
|| aarch64_reg_or_zero (operands[1], <MODE>mode))"
{@ [cons: =0, 1; attrs: type, arch]
- [w, Z ; neon_move , simd ] movi\t%0.<Vbtype>, #0
- [r, r ; mov_reg , * ] mov\t%w0, %w1
- [r, M ; mov_imm , * ] mov\t%w0, %1
- [w, D<hq>; neon_move , simd ] << aarch64_output_scalar_simd_mov_immediate (operands[1], <MODE>mode);
+ [w, Z ; neon_move , simd ] movi\t%0.<Vbtype>, #0
+ [r, r ; mov_reg , * ] mov\t%w0, %w1
+ [r, M ; mov_imm , * ] mov\t%w0, %1
+ [w, D<hq>; neon_move , simd ] << aarch64_output_scalar_simd_mov_immediate (operands[1], <MODE>mode);
/* The "mov_imm" type for CNT is just a placeholder. */
- [r, Usv ; mov_imm , sve ] << aarch64_output_sve_cnt_immediate ("cnt", "%x0", operands[1]);
- [r, m ; load_4 , * ] ldr<size>\t%w0, %1
- [w, m ; load_4 , * ] ldr\t%<size>0, %1
- [m, r Z ; store_4 , * ] str<size>\\t%w1, %0
- [m, w ; store_4 , * ] str\t%<size>1, %0
- [r, w ; neon_to_gp<q> , simd ] umov\t%w0, %1.<v>[0]
- [r, w ; neon_to_gp<q> , nosimd] fmov\t%w0, %s1
- [w, r Z ; neon_from_gp<q>, simd ] dup\t%0.<Vallxd>, %w1
- [w, r Z ; neon_from_gp<q>, nosimd] fmov\t%s0, %w1
- [w, w ; neon_dup , simd ] dup\t%<Vetype>0, %1.<v>[0]
- [w, w ; neon_dup , nosimd] fmov\t%s0, %s1
+ [r, Usv ; mov_imm , sve ] << aarch64_output_sve_cnt_immediate ("cnt", "%x0", operands[1]);
+ [r, Usr ; mov_imm , sve ] << aarch64_output_sve_rdvl (operands[1]);
+ [r, m ; load_4 , * ] ldr<size>\t%w0, %1
+ [w, m ; load_4 , * ] ldr\t%<size>0, %1
+ [m, r Z ; store_4 , * ] str<size>\\t%w1, %0
+ [m, w ; store_4 , * ] str\t%<size>1, %0
+ [r, w ; neon_to_gp<q> , base_simd ] umov\t%w0, %1.<v>[0]
+ [r, w ; neon_to_gp<q> , nobase_simd] fmov\t%w0, %s1
+ [w, r Z ; neon_from_gp<q>, simd ] dup\t%0.<Vallxd>, %w1
+ [w, r Z ; neon_from_gp<q>, nosimd ] fmov\t%s0, %w1
+ [w, w ; neon_dup , simd ] dup\t%<Vetype>0, %1.<v>[0]
+ [w, w ; neon_dup , nosimd ] fmov\t%s0, %s1
}
)
@@ -1298,6 +1380,8 @@
[r , n ; mov_imm , * ,16] #
/* The "mov_imm" type for CNT is just a placeholder. */
[r , Usv; mov_imm , sve , 4] << aarch64_output_sve_cnt_immediate ("cnt", "%x0", operands[1]);
+ [r , Usr; mov_imm , sve, 4] << aarch64_output_sve_rdvl (operands[1]);
+ [r , UsR; mov_imm , sme, 4] << aarch64_output_rdsvl (operands[1]);
[r , m ; load_4 , * , 4] ldr\t%w0, %1
[w , m ; load_4 , fp , 4] ldr\t%s0, %1
[m , r Z; store_4 , * , 4] str\t%w1, %0
@@ -1333,6 +1417,8 @@
[r, n ; mov_imm , * ,16] #
/* The "mov_imm" type for CNT is just a placeholder. */
[r, Usv; mov_imm , sve , 4] << aarch64_output_sve_cnt_immediate ("cnt", "%x0", operands[1]);
+ [r, Usr; mov_imm , sve, 4] << aarch64_output_sve_rdvl (operands[1]);
+ [r, UsR; mov_imm , sme, 4] << aarch64_output_rdsvl (operands[1]);
[r, m ; load_8 , * , 4] ldr\t%x0, %1
[w, m ; load_8 , fp , 4] ldr\t%d0, %1
[m, r Z; store_8 , * , 4] str\t%x1, %0
@@ -1411,9 +1497,9 @@
(define_insn "*movti_aarch64"
[(set (match_operand:TI 0
- "nonimmediate_operand" "= r,w,w,w, r,w,r,m,m,w,m")
+ "nonimmediate_operand" "= r,w,w,w, r,w,w,r,m,m,w,m")
(match_operand:TI 1
- "aarch64_movti_operand" " rUti,Z,Z,r, w,w,m,r,Z,m,w"))]
+ "aarch64_movti_operand" " rUti,Z,Z,r, w,w,w,m,r,Z,m,w"))]
"(register_operand (operands[0], TImode)
|| aarch64_reg_or_zero (operands[1], TImode))"
"@
@@ -1423,16 +1509,17 @@
#
#
mov\\t%0.16b, %1.16b
+ mov\\t%Z0.d, %Z1.d
ldp\\t%0, %H0, %1
stp\\t%1, %H1, %0
stp\\txzr, xzr, %0
ldr\\t%q0, %1
str\\t%q1, %0"
- [(set_attr "type" "multiple,neon_move,f_mcr,f_mcr,f_mrc,neon_logic_q, \
+ [(set_attr "type" "multiple,neon_move,f_mcr,f_mcr,f_mrc,neon_logic_q,*,\
load_16,store_16,store_16,\
load_16,store_16")
- (set_attr "length" "8,4,4,8,8,4,4,4,4,4,4")
- (set_attr "arch" "*,simd,*,*,*,simd,*,*,*,fp,fp")]
+ (set_attr "length" "8,4,4,8,8,4,4,4,4,4,4,4")
+ (set_attr "arch" "*,simd,*,*,*,simd,sve,*,*,*,fp,fp")]
)
;; Split a TImode register-register or register-immediate move into
@@ -1559,13 +1646,14 @@
(define_insn "*mov<mode>_aarch64"
[(set (match_operand:TFD 0
- "nonimmediate_operand" "=w,?r ,w ,?r,w,?w,w,m,?r,m ,m")
+ "nonimmediate_operand" "=w,w,?r ,w ,?r,w,?w,w,m,?r,m ,m")
(match_operand:TFD 1
- "general_operand" " w,?rY,?r,w ,Y,Y ,m,w,m ,?r,Y"))]
+ "general_operand" " w,w,?rY,?r,w ,Y,Y ,m,w,m ,?r,Y"))]
"TARGET_FLOAT && (register_operand (operands[0], <MODE>mode)
|| aarch64_reg_or_fp_zero (operands[1], <MODE>mode))"
"@
mov\\t%0.16b, %1.16b
+ mov\\t%Z0.d, %Z1.d
#
#
#
@@ -1576,10 +1664,10 @@
ldp\\t%0, %H0, %1
stp\\t%1, %H1, %0
stp\\txzr, xzr, %0"
- [(set_attr "type" "logic_reg,multiple,f_mcr,f_mrc,neon_move_q,f_mcr,\
+ [(set_attr "type" "logic_reg,*,multiple,f_mcr,f_mrc,neon_move_q,f_mcr,\
f_loadd,f_stored,load_16,store_16,store_16")
- (set_attr "length" "4,8,8,8,4,4,4,4,4,4,4")
- (set_attr "arch" "simd,*,*,*,simd,*,*,*,*,*,*")]
+ (set_attr "length" "4,4,8,8,8,4,4,4,4,4,4,4")
+ (set_attr "arch" "simd,sve,*,*,*,simd,*,*,*,*,*,*")]
)
(define_split
@@ -1630,7 +1718,7 @@
(match_operand:BLK 1 "memory_operand")
(match_operand:DI 2 "general_operand")
(match_operand:DI 3 "immediate_operand")]
- "!STRICT_ALIGNMENT || TARGET_MOPS"
+ ""
{
if (aarch64_expand_cpymem (operands))
DONE;
@@ -1727,7 +1815,7 @@
(match_operand:QI 2 "nonmemory_operand")) ;; Value
(use (match_operand:DI 1 "general_operand")) ;; Length
(match_operand 3 "immediate_operand")] ;; Align
- "TARGET_SIMD || TARGET_MOPS"
+ ""
{
if (aarch64_expand_setmem (operands))
DONE;
@@ -1773,7 +1861,7 @@
(match_operand:TX 1 "aarch64_mem_pair_operand" "Ump"))
(set (match_operand:TX2 2 "register_operand" "=w")
(match_operand:TX2 3 "memory_operand" "m"))]
- "TARGET_SIMD
+ "TARGET_BASE_SIMD
&& rtx_equal_p (XEXP (operands[3], 0),
plus_constant (Pmode,
XEXP (operands[1], 0),
@@ -1821,11 +1909,11 @@
(match_operand:TX 1 "register_operand" "w"))
(set (match_operand:TX2 2 "memory_operand" "=m")
(match_operand:TX2 3 "register_operand" "w"))]
- "TARGET_SIMD &&
- rtx_equal_p (XEXP (operands[2], 0),
- plus_constant (Pmode,
- XEXP (operands[0], 0),
- GET_MODE_SIZE (TFmode)))"
+ "TARGET_BASE_SIMD
+ && rtx_equal_p (XEXP (operands[2], 0),
+ plus_constant (Pmode,
+ XEXP (operands[0], 0),
+ GET_MODE_SIZE (TFmode)))"
"stp\\t%q1, %q3, %z0"
[(set_attr "type" "neon_stp_q")
(set_attr "fp" "yes")]
@@ -1873,7 +1961,7 @@
(set (match_operand:TX 3 "register_operand" "=w")
(mem:TX (plus:P (match_dup 1)
(match_operand:P 5 "const_int_operand" "n"))))])]
- "TARGET_SIMD && INTVAL (operands[5]) == GET_MODE_SIZE (<TX:MODE>mode)"
+ "TARGET_BASE_SIMD && INTVAL (operands[5]) == GET_MODE_SIZE (<TX:MODE>mode)"
"ldp\\t%q2, %q3, [%1], %4"
[(set_attr "type" "neon_ldp_q")]
)
@@ -1923,7 +2011,7 @@
(set (mem:TX (plus:P (match_dup 0)
(match_operand:P 5 "const_int_operand" "n")))
(match_operand:TX 3 "register_operand" "w"))])]
- "TARGET_SIMD
+ "TARGET_BASE_SIMD
&& INTVAL (operands[5])
== INTVAL (operands[4]) + GET_MODE_SIZE (<TX:MODE>mode)"
"stp\\t%q2, %q3, [%0, %4]!"
@@ -2103,6 +2191,7 @@
[ r , rk , Uaa ; multiple , * ] #
[ r , 0 , Uai ; alu_imm , sve ] << aarch64_output_sve_scalar_inc_dec (operands[2]);
[ rk , rk , Uav ; alu_imm , sve ] << aarch64_output_sve_addvl_addpl (operands[2]);
+ [ rk , rk , UaV ; alu_imm , sme ] << aarch64_output_addsvl_addspl (operands[2]);
}
;; The "alu_imm" types for INC/DEC and ADDVL/ADDPL are just placeholders.
)
@@ -7341,7 +7430,8 @@
{
if (TARGET_SVE)
{
- rtx abi = gen_int_mode (aarch64_tlsdesc_abi_id (), DImode);
+ rtx abi = aarch64_gen_callee_cookie (AARCH64_ISA_MODE,
+ aarch64_tlsdesc_abi_id ());
rtx_insn *call
= emit_call_insn (gen_tlsdesc_small_sve_<mode> (operands[0], abi));
RTL_CONST_CALL_P (call) = 1;
@@ -8030,6 +8120,21 @@
[(set (attr "length") (symbol_ref "INTVAL (operands[0])"))]
)
+(define_insn "aarch64_save_nzcv"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(reg:CC CC_REGNUM)] UNSPEC_SAVE_NZCV))]
+ ""
+ "mrs\t%0, nzcv"
+)
+
+(define_insn "aarch64_restore_nzcv"
+ [(set (reg:CC CC_REGNUM)
+ (unspec:CC [(match_operand:DI 0 "register_operand" "r")]
+ UNSPEC_RESTORE_NZCV))]
+ ""
+ "msr\tnzcv, %0"
+)
+
;; AdvSIMD Stuff
(include "aarch64-simd.md")
@@ -8044,3 +8149,6 @@
;; SVE2.
(include "aarch64-sve2.md")
+
+;; SME and extensions
+(include "aarch64-sme.md")
diff --git a/gcc/config/aarch64/arm_acle.h b/gcc/config/aarch64/arm_acle.h
index 7599a32..71ada87 100644
--- a/gcc/config/aarch64/arm_acle.h
+++ b/gcc/config/aarch64/arm_acle.h
@@ -314,6 +314,36 @@ __rndrrs (uint64_t *__res)
#pragma GCC pop_options
+#define __arm_rsr(__regname) \
+ __builtin_aarch64_rsr (__regname)
+
+#define __arm_rsrp(__regname) \
+ __builtin_aarch64_rsrp (__regname)
+
+#define __arm_rsr64(__regname) \
+ __builtin_aarch64_rsr64 (__regname)
+
+#define __arm_rsrf(__regname) \
+ __builtin_aarch64_rsrf (__regname)
+
+#define __arm_rsrf64(__regname) \
+ __builtin_aarch64_rsrf64 (__regname)
+
+#define __arm_wsr(__regname, __value) \
+ __builtin_aarch64_wsr (__regname, __value)
+
+#define __arm_wsrp(__regname, __value) \
+ __builtin_aarch64_wsrp (__regname, __value)
+
+#define __arm_wsr64(__regname, __value) \
+ __builtin_aarch64_wsr64 (__regname, __value)
+
+#define __arm_wsrf(__regname, __value) \
+ __builtin_aarch64_wsrf (__regname, __value)
+
+#define __arm_wsrf64(__regname, __value) \
+ __builtin_aarch64_wsrf64 (__regname, __value)
+
#ifdef __cplusplus
}
#endif
diff --git a/gcc/config/aarch64/arm_sme.h b/gcc/config/aarch64/arm_sme.h
new file mode 100644
index 0000000..5ddd49f
--- /dev/null
+++ b/gcc/config/aarch64/arm_sme.h
@@ -0,0 +1,45 @@
+/* AArch64 SME intrinsics include file.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _ARM_SME_H_
+#define _ARM_SME_H_
+
+#include <arm_sve.h>
+#pragma GCC aarch64 "arm_sme.h"
+
+void __arm_za_disable(void) __arm_streaming_compatible;
+
+void *__arm_sc_memcpy(void *, const void *, __SIZE_TYPE__)
+ __arm_streaming_compatible;
+
+void *__arm_sc_memmove(void *, const void *, __SIZE_TYPE__)
+ __arm_streaming_compatible;
+
+void *__arm_sc_memset(void *, int, __SIZE_TYPE__)
+ __arm_streaming_compatible;
+
+void *__arm_sc_memchr(void *, int, __SIZE_TYPE__)
+ __arm_streaming_compatible;
+
+#endif
diff --git a/gcc/config/aarch64/atomics.md b/gcc/config/aarch64/atomics.md
index 055a873..3ca7f23 100644
--- a/gcc/config/aarch64/atomics.md
+++ b/gcc/config/aarch64/atomics.md
@@ -39,7 +39,7 @@
(define_mode_attr cas_short_expected_pred
[(QI "aarch64_reg_or_imm") (HI "aarch64_plushi_operand")])
(define_mode_attr cas_short_expected_imm
- [(QI "n") (HI "Uph")])
+ [(QI "n") (HI "Uih")])
(define_insn_and_split "@aarch64_compare_and_swap<mode>"
[(set (reg:CC CC_REGNUM) ;; bool out
diff --git a/gcc/config/aarch64/constraints.md b/gcc/config/aarch64/constraints.md
index b3922bc..8b65cab 100644
--- a/gcc/config/aarch64/constraints.md
+++ b/gcc/config/aarch64/constraints.md
@@ -21,6 +21,12 @@
(define_register_constraint "k" "STACK_REG"
"@internal The stack register.")
+(define_register_constraint "Uci" "W8_W11_REGS"
+ "@internal r8-r11, which can be used to index ZA.")
+
+(define_register_constraint "Ucj" "W12_W15_REGS"
+ "@internal r12-r15, which can be used to index ZA.")
+
(define_register_constraint "Ucs" "TAILCALL_ADDR_REGS"
"@internal Registers suitable for an indirect tail call")
@@ -36,17 +42,32 @@
(define_register_constraint "w" "FP_REGS"
"Floating point and SIMD vector registers.")
+(define_register_constraint "x" "FP_LO_REGS"
+ "Floating point and SIMD vector registers V0 - V15.")
+
+(define_register_constraint "y" "FP_LO8_REGS"
+ "Floating point and SIMD vector registers V0 - V7.")
+
+(define_register_constraint "Uw2" "FP_REGS"
+ "Even floating point and SIMD vector registers."
+ "regno % 2 == 0")
+
+(define_register_constraint "Uw4" "FP_REGS"
+ "4-tuple-aligned floating point and SIMD vector registers."
+ "regno % 4 == 0")
+
(define_register_constraint "Upa" "PR_REGS"
"SVE predicate registers p0 - p15.")
+(define_register_constraint "Up2" "PR_REGS"
+ "An even SVE predicate register, p0 - p14."
+ "regno % 2 == 0")
+
(define_register_constraint "Upl" "PR_LO_REGS"
"SVE predicate registers p0 - p7.")
-(define_register_constraint "x" "FP_LO_REGS"
- "Floating point and SIMD vector registers V0 - V15.")
-
-(define_register_constraint "y" "FP_LO8_REGS"
- "Floating point and SIMD vector registers V0 - V7.")
+(define_register_constraint "Uph" "PR_HI_REGS"
+ "SVE predicate registers p8 - p15.")
(define_constraint "c"
"@internal The condition code register."
@@ -74,6 +95,12 @@
a single ADDVL or ADDPL."
(match_operand 0 "aarch64_sve_addvl_addpl_immediate"))
+(define_constraint "UaV"
+ "@internal
+ A constraint that matches a VG-based constant that can be added by
+ a single ADDSVL or ADDSPL."
+ (match_operand 0 "aarch64_addsvl_addspl_immediate"))
+
(define_constraint "Uat"
"@internal
A constraint that matches a VG-based constant that can be added by
@@ -219,6 +246,18 @@
(and (match_code "const_int")
(match_test "aarch64_high_bits_all_ones_p (ival)")))
+(define_constraint "Usr"
+ "@internal
+ A constraint that matches a value produced by RDVL."
+ (and (match_code "const_poly_int")
+ (match_test "aarch64_sve_rdvl_immediate_p (op)")))
+
+(define_constraint "UsR"
+ "@internal
+ A constraint that matches a value produced by RDSVL."
+ (and (match_code "const")
+ (match_test "aarch64_rdsvl_immediate_p (op)")))
+
(define_constraint "Usv"
"@internal
A constraint that matches a VG-based constant that can be loaded by
@@ -260,7 +299,7 @@
(and (match_code "const_int")
(match_test "(unsigned) exact_log2 (ival) <= 4")))
-(define_constraint "Uph"
+(define_constraint "Uih"
"@internal
A constraint that matches HImode integers zero extendable to
SImode plus_operand."
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index a920de9..f204850 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -423,20 +423,16 @@
;; Iterators for single modes, for "@" patterns.
(define_mode_iterator VNx16QI_ONLY [VNx16QI])
+(define_mode_iterator VNx16SI_ONLY [VNx16SI])
(define_mode_iterator VNx8HI_ONLY [VNx8HI])
(define_mode_iterator VNx8BF_ONLY [VNx8BF])
+(define_mode_iterator VNx8SI_ONLY [VNx8SI])
+(define_mode_iterator VNx8DI_ONLY [VNx8DI])
(define_mode_iterator VNx4SI_ONLY [VNx4SI])
(define_mode_iterator VNx4SF_ONLY [VNx4SF])
(define_mode_iterator VNx2DI_ONLY [VNx2DI])
(define_mode_iterator VNx2DF_ONLY [VNx2DF])
-
-;; All SVE vector structure modes.
-(define_mode_iterator SVE_STRUCT [VNx32QI VNx16HI VNx8SI VNx4DI
- VNx16BF VNx16HF VNx8SF VNx4DF
- VNx48QI VNx24HI VNx12SI VNx6DI
- VNx24BF VNx24HF VNx12SF VNx6DF
- VNx64QI VNx32HI VNx16SI VNx8DI
- VNx32BF VNx32HF VNx16SF VNx8DF])
+(define_mode_iterator VNx1TI_ONLY [VNx1TI])
;; All fully-packed SVE vector modes.
(define_mode_iterator SVE_FULL [VNx16QI VNx8HI VNx4SI VNx2DI
@@ -455,6 +451,12 @@
;; elements.
(define_mode_iterator SVE_FULL_BHSI [VNx16QI VNx8HI VNx4SI])
+;; Pairs of the above.
+(define_mode_iterator SVE_FULL_BHSIx2 [VNx32QI VNx16HI VNx8SI])
+
+;; Fully-packed SVE vector modes that have 16-bit float elements.
+(define_mode_iterator SVE_FULL_HF [VNx8BF VNx8HF])
+
;; Fully-packed SVE vector modes that have 16-bit, 32-bit or 64-bit elements.
(define_mode_iterator SVE_FULL_HSD [VNx8HI VNx4SI VNx2DI
VNx8BF VNx8HF VNx4SF VNx2DF])
@@ -480,6 +482,9 @@
;; Fully-packed SVE integer vector modes that have 32-bit or 64-bit elements.
(define_mode_iterator SVE_FULL_SDI [VNx4SI VNx2DI])
+;; 2x and 4x tuples of the above, excluding 2x DI.
+(define_mode_iterator SVE_FULL_SIx2_SDIx4 [VNx8SI VNx16SI VNx8DI])
+
;; Fully-packed SVE floating-point vector modes that have 32-bit or 64-bit
;; elements.
(define_mode_iterator SVE_FULL_SDF [VNx4SF VNx2DF])
@@ -488,6 +493,10 @@
(define_mode_iterator SVE_MATMULF [(VNx4SF "TARGET_SVE_F32MM")
(VNx2DF "TARGET_SVE_F64MM")])
+;; Fully-packed SVE vector modes that have 32-bit or smaller elements.
+(define_mode_iterator SVE_FULL_BHS [VNx16QI VNx8HI VNx4SI
+ VNx8BF VNx8HF VNx4SF])
+
;; Fully-packed SVE vector modes that have 32-bit elements.
(define_mode_iterator SVE_FULL_S [VNx4SI VNx4SF])
@@ -509,6 +518,26 @@
VNx2DI
VNx2DF])
+;; All SVE 2-vector modes.
+(define_mode_iterator SVE_FULLx2 [VNx32QI VNx16HI VNx8SI VNx4DI
+ VNx16BF VNx16HF VNx8SF VNx4DF])
+
+;; All SVE 3-vector modes.
+(define_mode_iterator SVE_FULLx3 [VNx48QI VNx24HI VNx12SI VNx6DI
+ VNx24BF VNx24HF VNx12SF VNx6DF])
+
+;; All SVE 4-vector modes.
+(define_mode_iterator SVE_FULLx4 [VNx64QI VNx32HI VNx16SI VNx8DI
+ VNx32BF VNx32HF VNx16SF VNx8DF])
+
+(define_mode_iterator SVE_FULLx24 [SVE_FULLx2 SVE_FULLx4])
+
+;; All SVE vector structure modes.
+(define_mode_iterator SVE_STRUCT [SVE_FULLx2 SVE_FULLx3 SVE_FULLx4])
+
+;; All SVE vector and structure modes.
+(define_mode_iterator SVE_ALL_STRUCT [SVE_ALL SVE_STRUCT])
+
;; All SVE integer vector modes.
(define_mode_iterator SVE_I [VNx16QI VNx8QI VNx4QI VNx2QI
VNx8HI VNx4HI VNx2HI
@@ -520,6 +549,8 @@
VNx4SI VNx2SI
VNx2DI])
+(define_mode_iterator SVE_DIx24 [VNx4DI VNx8DI])
+
;; SVE modes with 2 or 4 elements.
(define_mode_iterator SVE_24 [VNx2QI VNx2HI VNx2HF VNx2BF VNx2SI VNx2SF
VNx2DI VNx2DF
@@ -577,6 +608,52 @@
;; Bfloat16 modes to which V4SF can be converted
(define_mode_iterator V4SF_TO_BF [V4BF V8BF])
+(define_mode_iterator SVE_BHSx24 [VNx32QI VNx16HI VNx8SI
+ VNx16BF VNx16HF VNx8SF
+ VNx64QI VNx32HI VNx16SI
+ VNx32BF VNx32HF VNx16SF])
+
+(define_mode_iterator SVE_Ix24 [VNx32QI VNx16HI VNx8SI VNx4DI
+ VNx64QI VNx32HI VNx16SI VNx8DI])
+
+(define_mode_iterator SVE_Fx24 [VNx16HF VNx8SF VNx4DF
+ VNx32HF VNx16SF VNx8DF])
+
+(define_mode_iterator SVE_SFx24 [VNx8SF VNx16SF])
+
+;; The modes used to represent different ZA access sizes.
+(define_mode_iterator SME_ZA_I [VNx16QI VNx8HI VNx4SI VNx2DI VNx1TI])
+(define_mode_iterator SME_ZA_SDI [VNx4SI (VNx2DI "TARGET_SME_I16I64")])
+
+(define_mode_iterator SME_ZA_SDF_I [VNx4SI (VNx2DI "TARGET_SME_F64F64")])
+
+(define_mode_iterator SME_ZA_BIx24 [VNx32QI VNx64QI])
+
+(define_mode_iterator SME_ZA_BHIx124 [VNx16QI VNx32QI VNx64QI
+ VNx8HI VNx16HI VNx32HI])
+
+(define_mode_iterator SME_ZA_BHIx24 [VNx32QI VNx64QI VNx16HI VNx32HI])
+
+(define_mode_iterator SME_ZA_HFx124 [VNx8BF VNx16BF VNx32BF
+ VNx8HF VNx16HF VNx32HF])
+
+(define_mode_iterator SME_ZA_HFx24 [VNx16BF VNx32BF VNx16HF VNx32HF])
+
+(define_mode_iterator SME_ZA_HIx124 [VNx8HI VNx16HI VNx32HI])
+
+(define_mode_iterator SME_ZA_HIx24 [VNx16HI VNx32HI])
+
+(define_mode_iterator SME_ZA_SDIx24 [VNx8SI (VNx4DI "TARGET_SME_I16I64")
+ VNx16SI (VNx8DI "TARGET_SME_I16I64")])
+
+(define_mode_iterator SME_ZA_SDFx24 [VNx8SF (VNx4DF "TARGET_SME_F64F64")
+ VNx16SF (VNx8DF "TARGET_SME_F64F64")])
+
+;; The modes for which outer product instructions are supported.
+(define_mode_iterator SME_MOP_BHI [VNx16QI (VNx8HI "TARGET_SME_I16I64")])
+(define_mode_iterator SME_MOP_HSDF [VNx8BF VNx8HF VNx4SF
+ (VNx2DF "TARGET_SME_F64F64")])
+
;; ------------------------------------------------------------------
;; Unspec enumerations for Advance SIMD. These could well go into
;; aarch64.md but for their use in int_iterators here.
@@ -709,6 +786,7 @@
UNSPEC_IORF ; Used in aarch64-sve.md.
UNSPEC_XORF ; Used in aarch64-sve.md.
UNSPEC_REVB ; Used in aarch64-sve.md.
+ UNSPEC_REVD ; Used in aarch64-sve2.md.
UNSPEC_REVH ; Used in aarch64-sve.md.
UNSPEC_REVW ; Used in aarch64-sve.md.
UNSPEC_REVBHW ; Used in aarch64-sve.md.
@@ -823,6 +901,7 @@
UNSPEC_CMLA180_CONJ ; Used in aarch64-sve2.md.
UNSPEC_CMUL ; Used in aarch64-sve2.md.
UNSPEC_CMUL_CONJ ; Used in aarch64-sve2.md.
+ UNSPEC_CNTP_C ; Used in aarch64-sve2.md.
UNSPEC_COND_FCVTLT ; Used in aarch64-sve2.md.
UNSPEC_COND_FCVTNT ; Used in aarch64-sve2.md.
UNSPEC_COND_FCVTX ; Used in aarch64-sve2.md.
@@ -843,10 +922,14 @@
UNSPEC_HISTSEG ; Used in aarch64-sve2.md.
UNSPEC_MATCH ; Used in aarch64-sve2.md.
UNSPEC_NMATCH ; Used in aarch64-sve2.md.
+ UNSPEC_PEXT ; Used in aarch64-sve2.md.
+ UNSPEC_PEXTx2 ; Used in aarch64-sve2.md.
UNSPEC_PMULLB ; Used in aarch64-sve2.md.
UNSPEC_PMULLB_PAIR ; Used in aarch64-sve2.md.
UNSPEC_PMULLT ; Used in aarch64-sve2.md.
UNSPEC_PMULLT_PAIR ; Used in aarch64-sve2.md.
+ UNSPEC_PSEL ; Used in aarch64-sve2.md.
+ UNSPEC_PTRUE_C ; Used in aarch64-sve2.md.
UNSPEC_RADDHNB ; Used in aarch64-sve2.md.
UNSPEC_RADDHNT ; Used in aarch64-sve2.md.
UNSPEC_RSHRNB ; Used in aarch64-sve2.md.
@@ -880,8 +963,12 @@
UNSPEC_SQRDCMLAH180 ; Used in aarch64-sve2.md.
UNSPEC_SQRDCMLAH270 ; Used in aarch64-sve2.md.
UNSPEC_SQRDCMLAH90 ; Used in aarch64-sve2.md.
+ UNSPEC_SQRSHR ; Used in aarch64-sve2.md.
+ UNSPEC_SQRSHRN ; Used in aarch64-sve2.md.
UNSPEC_SQRSHRNB ; Used in aarch64-sve2.md.
UNSPEC_SQRSHRNT ; Used in aarch64-sve2.md.
+ UNSPEC_SQRSHRU ; Used in aarch64-sve2.md.
+ UNSPEC_SQRSHRUN ; Used in aarch64-sve2.md.
UNSPEC_SQRSHRUNB ; Used in aarch64-sve2.md.
UNSPEC_SQRSHRUNT ; Used in aarch64-sve2.md.
UNSPEC_SQSHRNB ; Used in aarch64-sve2.md.
@@ -916,6 +1003,8 @@
UNSPEC_UMULHS ; Used in aarch64-sve2.md.
UNSPEC_UMULLB ; Used in aarch64-sve2.md.
UNSPEC_UMULLT ; Used in aarch64-sve2.md.
+ UNSPEC_UQRSHR ; Used in aarch64-sve2.md.
+ UNSPEC_UQRSHRN ; Used in aarch64-sve2.md.
UNSPEC_UQRSHRNB ; Used in aarch64-sve2.md.
UNSPEC_UQRSHRNT ; Used in aarch64-sve2.md.
UNSPEC_UQSHRNB ; Used in aarch64-sve2.md.
@@ -929,15 +1018,79 @@
UNSPEC_USUBWB ; Used in aarch64-sve2.md.
UNSPEC_USUBWT ; Used in aarch64-sve2.md.
UNSPEC_USDOT ; Used in aarch64-simd.md.
+ UNSPEC_UZP ; Used in aarch64-sve2.md.
+ UNSPEC_UZPQ ; Used in aarch64-sve2.md.
+ UNSPEC_ZIP ; Used in aarch64-sve2.md.
+ UNSPEC_ZIPQ ; Used in aarch64-sve2.md.
UNSPEC_SUDOT ; Used in aarch64-simd.md.
UNSPEC_BFDOT ; Used in aarch64-simd.md.
UNSPEC_BFMLALB ; Used in aarch64-sve.md.
UNSPEC_BFMLALT ; Used in aarch64-sve.md.
+ UNSPEC_BFMLSLB ; Used in aarch64-sve.md.
+ UNSPEC_BFMLSLT ; Used in aarch64-sve.md.
UNSPEC_BFMMLA ; Used in aarch64-sve.md.
UNSPEC_BFCVTN ; Used in aarch64-simd.md.
UNSPEC_BFCVTN2 ; Used in aarch64-simd.md.
UNSPEC_BFCVT ; Used in aarch64-simd.md.
UNSPEC_FCVTXN ; Used in aarch64-simd.md.
+
+ ;; All used in aarch64-sve2.md
+ UNSPEC_FCVTN
+ UNSPEC_FDOT
+ UNSPEC_SQCVT
+ UNSPEC_SQCVTN
+ UNSPEC_SQCVTU
+ UNSPEC_SQCVTUN
+ UNSPEC_UQCVT
+ UNSPEC_UQCVTN
+
+ ;; All used in aarch64-sme.md
+ UNSPEC_SME_ADD
+ UNSPEC_SME_ADD_WRITE
+ UNSPEC_SME_ADDHA
+ UNSPEC_SME_ADDVA
+ UNSPEC_SME_BMOPA
+ UNSPEC_SME_BMOPS
+ UNSPEC_SME_FADD
+ UNSPEC_SME_FDOT
+ UNSPEC_SME_FVDOT
+ UNSPEC_SME_FMLA
+ UNSPEC_SME_FMLS
+ UNSPEC_SME_FMOPA
+ UNSPEC_SME_FMOPS
+ UNSPEC_SME_FSUB
+ UNSPEC_SME_LD1_HOR
+ UNSPEC_SME_LD1_VER
+ UNSPEC_SME_READ
+ UNSPEC_SME_READ_HOR
+ UNSPEC_SME_READ_VER
+ UNSPEC_SME_SDOT
+ UNSPEC_SME_SVDOT
+ UNSPEC_SME_SMLA
+ UNSPEC_SME_SMLS
+ UNSPEC_SME_SMOPA
+ UNSPEC_SME_SMOPS
+ UNSPEC_SME_ST1_HOR
+ UNSPEC_SME_ST1_VER
+ UNSPEC_SME_SUB
+ UNSPEC_SME_SUB_WRITE
+ UNSPEC_SME_SUDOT
+ UNSPEC_SME_SUVDOT
+ UNSPEC_SME_SUMOPA
+ UNSPEC_SME_SUMOPS
+ UNSPEC_SME_UDOT
+ UNSPEC_SME_UVDOT
+ UNSPEC_SME_UMLA
+ UNSPEC_SME_UMLS
+ UNSPEC_SME_UMOPA
+ UNSPEC_SME_UMOPS
+ UNSPEC_SME_USDOT
+ UNSPEC_SME_USVDOT
+ UNSPEC_SME_USMOPA
+ UNSPEC_SME_USMOPS
+ UNSPEC_SME_WRITE
+ UNSPEC_SME_WRITE_HOR
+ UNSPEC_SME_WRITE_VER
])
;; ------------------------------------------------------------------
@@ -1074,9 +1227,15 @@
;; element.
(define_mode_attr elem_bits [(VNx16BI "8") (VNx8BI "16")
(VNx4BI "32") (VNx2BI "64")
- (VNx16QI "8") (VNx8HI "16")
- (VNx4SI "32") (VNx2DI "64")
- (VNx8HF "16") (VNx4SF "32") (VNx2DF "64")])
+ (VNx16QI "8") (VNx32QI "8") (VNx64QI "8")
+ (VNx8HI "16") (VNx16HI "16") (VNx32HI "16")
+ (VNx8HF "16") (VNx16HF "16") (VNx32HF "16")
+ (VNx8BF "16") (VNx16BF "16") (VNx32BF "16")
+ (VNx4SI "32") (VNx8SI "32") (VNx16SI "32")
+ (VNx4SF "32") (VNx8SF "32") (VNx16SF "32")
+ (VNx2DI "64") (VNx4DI "64") (VNx8DI "64")
+ (VNx2DF "64") (VNx4DF "64") (VNx8DF "64")
+ (VNx1TI "128")])
;; The number of bits in a vector container.
(define_mode_attr container_bits [(VNx16QI "8")
@@ -1202,6 +1361,15 @@
(VNx4SF "s") (VNx2SF "s")
(VNx2DI "d")
(VNx2DF "d")
+ (VNx1TI "q")
+ (VNx32QI "b") (VNx64QI "b")
+ (VNx16HI "h") (VNx32HI "h")
+ (VNx16HF "h") (VNx32HF "h")
+ (VNx16BF "h") (VNx32BF "h")
+ (VNx8SI "s") (VNx16SI "s")
+ (VNx8SF "s") (VNx16SF "s")
+ (VNx4DI "d") (VNx8DI "d")
+ (VNx4DF "d") (VNx8DF "d")
(BF "h") (V4BF "h") (V8BF "h")
(HF "h")
(SF "s") (DF "d")
@@ -1220,6 +1388,7 @@
(VNx4SF "w") (VNx2SF "w")
(VNx2DI "d")
(VNx2DF "d")
+ (VNx1TI "q")
(VNx32QI "b") (VNx48QI "b") (VNx64QI "b")
(VNx16HI "h") (VNx24HI "h") (VNx32HI "h")
(VNx16HF "h") (VNx24HF "h") (VNx32HF "h")
@@ -1474,7 +1643,9 @@
;; Narrowed modes of vector modes.
(define_mode_attr VNARROW [(VNx8HI "VNx16QI")
(VNx4SI "VNx8HI") (VNx4SF "VNx8HF")
- (VNx2DI "VNx4SI") (VNx2DF "VNx4SF")])
+ (VNx2DI "VNx4SI") (VNx2DF "VNx4SF")
+ (VNx8SI "VNx8HI") (VNx16SI "VNx16QI")
+ (VNx8DI "VNx8HI")])
;; Register suffix narrowed modes for VQN.
(define_mode_attr Vntype [(V8HI "8b") (V4SI "4h")
@@ -1502,7 +1673,25 @@
(V16QI "V16HI") (V8HI "V8SI")
(V2SI "V2DI") (V4SI "V4DI")
(V2DI "V2TI") (DI "TI")
- (HI "SI") (SI "DI")])
+ (HI "SI") (SI "DI")
+ (VNx16QI "VNx16HI")
+ (VNx8HI "VNx8SI")
+ (VNx4SI "VNx4DI")
+ (VNx32QI "VNx32HI")
+ (VNx16HI "VNx16SI")
+ (VNx8SI "VNx8DI")])
+
+(define_mode_attr v2xwide [(V8QI "v8hi") (V4HI "v4si")
+ (V16QI "v16hi") (V8HI "v8si")
+ (V2SI "v2di") (V4SI "v4di")
+ (V2DI "v2ti") (DI "ti")
+ (HI "si") (SI "di")
+ (VNx16QI "vnx16hi")
+ (VNx8HI "vnx8si")
+ (VNx4SI "vnx4di")
+ (VNx32QI "vnx32hi")
+ (VNx16HI "vnx16si")
+ (VNx8SI "vnx8di")])
;; Predicate mode associated with VWIDE.
(define_mode_attr VWIDE_PRED [(VNx8HF "VNx4BI") (VNx4SF "VNx2BI")])
@@ -1546,7 +1735,9 @@
;; SVE vector after narrowing.
(define_mode_attr Ventype [(VNx8HI "b")
(VNx4SI "h") (VNx4SF "h")
- (VNx2DI "s") (VNx2DF "s")])
+ (VNx2DI "s") (VNx2DF "s")
+ (VNx8SI "h") (VNx16SI "b")
+ (VNx8DI "h")])
;; SVE vector after widening.
(define_mode_attr Vewtype [(VNx16QI "h")
@@ -1642,6 +1833,7 @@
(VNx8BF "VNx8HI")
(VNx4SI "VNx4SI") (VNx4SF "VNx4SI")
(VNx2DI "VNx2DI") (VNx2DF "VNx2DI")
+ (VNx8SF "VNx8SI") (VNx16SF "VNx16SI")
])
;; Lower case mode with floating-point values replaced by like-sized integers.
@@ -1659,6 +1851,7 @@
(VNx8BF "vnx8hi")
(VNx4SI "vnx4si") (VNx4SF "vnx4si")
(VNx2DI "vnx2di") (VNx2DF "vnx2di")
+ (VNx8SF "vnx8si") (VNx16SF "vnx16si")
])
;; Floating-point equivalent of selected modes.
@@ -1992,7 +2185,11 @@
(VNx32HF "16") (VNx16SF "16") (VNx8DF "16")])
;; The type of a subvector in an SVE_STRUCT.
-(define_mode_attr VSINGLE [(VNx32QI "VNx16QI")
+(define_mode_attr VSINGLE [(VNx16QI "VNx16QI")
+ (VNx8BF "VNx8BF")
+ (VNx8HF "VNx8HF")
+ (VNx8HI "VNx8HI")
+ (VNx32QI "VNx16QI")
(VNx16HI "VNx8HI") (VNx16HF "VNx8HF")
(VNx16BF "VNx8BF")
(VNx8SI "VNx4SI") (VNx8SF "VNx4SF")
@@ -2009,7 +2206,8 @@
(VNx8DI "VNx2DI") (VNx8DF "VNx2DF")])
;; ...and again in lower case.
-(define_mode_attr vsingle [(VNx32QI "vnx16qi")
+(define_mode_attr vsingle [(VNx8HI "vnx8hi")
+ (VNx32QI "vnx16qi")
(VNx16HI "vnx8hi") (VNx16HF "vnx8hf")
(VNx16BF "vnx8bf")
(VNx8SI "vnx4si") (VNx8SF "vnx4sf")
@@ -2036,6 +2234,7 @@
(VNx4SF "VNx4BI") (VNx2SF "VNx2BI")
(VNx2DI "VNx2BI")
(VNx2DF "VNx2BI")
+ (VNx1TI "VNx2BI")
(VNx32QI "VNx16BI")
(VNx16HI "VNx8BI") (VNx16HF "VNx8BI")
(VNx16BF "VNx8BI")
@@ -2091,6 +2290,47 @@
(V4HF "<Vetype>[%4]") (V8HF "<Vetype>[%4]")
])
+(define_mode_attr za32_offset_range [(VNx16QI "0_to_12_step_4")
+ (VNx8BF "0_to_14_step_2")
+ (VNx8HF "0_to_14_step_2")
+ (VNx8HI "0_to_14_step_2")
+ (VNx32QI "0_to_4_step_4")
+ (VNx16BF "0_to_6_step_2")
+ (VNx16HF "0_to_6_step_2")
+ (VNx16HI "0_to_6_step_2")
+ (VNx64QI "0_to_4_step_4")
+ (VNx32BF "0_to_6_step_2")
+ (VNx32HF "0_to_6_step_2")
+ (VNx32HI "0_to_6_step_2")])
+
+(define_mode_attr za64_offset_range [(VNx8HI "0_to_12_step_4")
+ (VNx16HI "0_to_4_step_4")
+ (VNx32HI "0_to_4_step_4")])
+
+(define_mode_attr za32_long [(VNx16QI "ll") (VNx32QI "ll") (VNx64QI "ll")
+ (VNx8HI "l") (VNx16HI "l") (VNx32HI "l")])
+
+(define_mode_attr za32_last_offset [(VNx16QI "3") (VNx32QI "3") (VNx64QI "3")
+ (VNx8HI "1") (VNx16HI "1") (VNx32HI "1")])
+
+(define_mode_attr vg_modifier [(VNx16QI "")
+ (VNx32QI ", vgx2")
+ (VNx64QI ", vgx4")
+ (VNx8BF "")
+ (VNx16BF ", vgx2")
+ (VNx32BF ", vgx4")
+ (VNx8HF "")
+ (VNx16HF ", vgx2")
+ (VNx32HF ", vgx4")
+ (VNx8HI "")
+ (VNx16HI ", vgx2")
+ (VNx32HI ", vgx4")])
+
+(define_mode_attr z_suffix [(VNx16QI ".b") (VNx32QI "") (VNx64QI "")
+ (VNx8BF ".h") (VNx16BF "") (VNx32BF "")
+ (VNx8HF ".h") (VNx16HF "") (VNx32HF "")
+ (VNx8HI ".h") (VNx16HI "") (VNx32HI "")])
+
;; The number of bytes controlled by a predicate
(define_mode_attr data_bytes [(VNx16BI "1") (VNx8BI "2")
(VNx4BI "4") (VNx2BI "8")])
@@ -2120,6 +2360,30 @@
(V8HI "vec") (V2SI "vec") (V4SI "vec")
(V2DI "vec") (DI "offset")])
+(define_mode_attr b [(VNx8BF "b") (VNx8HF "") (VNx4SF "") (VNx2DF "")
+ (VNx16BF "b") (VNx16HF "")
+ (VNx32BF "b") (VNx32HF "")])
+
+(define_mode_attr aligned_operand [(VNx16QI "register_operand")
+ (VNx8HI "register_operand")
+ (VNx8BF "register_operand")
+ (VNx8HF "register_operand")
+ (VNx32QI "aligned_register_operand")
+ (VNx16HI "aligned_register_operand")
+ (VNx16BF "aligned_register_operand")
+ (VNx16HF "aligned_register_operand")
+ (VNx64QI "aligned_register_operand")
+ (VNx32HI "aligned_register_operand")
+ (VNx32BF "aligned_register_operand")
+ (VNx32HF "aligned_register_operand")])
+
+(define_mode_attr aligned_fpr [(VNx16QI "w") (VNx8HI "w")
+ (VNx8BF "w") (VNx8HF "w")
+ (VNx32QI "Uw2") (VNx16HI "Uw2")
+ (VNx16BF "Uw2") (VNx16HF "Uw2")
+ (VNx64QI "Uw4") (VNx32HI "Uw4")
+ (VNx32BF "Uw4") (VNx32HF "Uw4")])
+
;; -------------------------------------------------------------------
;; Code Iterators
;; -------------------------------------------------------------------
@@ -2249,6 +2513,10 @@
;; SVE integer binary operations that have an immediate form.
(define_code_iterator SVE_INT_BINARY_IMM [mult smax smin umax umin])
+(define_code_iterator SVE_INT_BINARY_MULTI [smax smin umax umin])
+
+(define_code_iterator SVE_INT_BINARY_SINGLE [plus smax smin umax umin])
+
;; SVE floating-point operations with an unpredicated all-register form.
(define_code_iterator SVE_UNPRED_FP_BINARY [plus minus mult])
@@ -2697,25 +2965,37 @@
(define_int_iterator SVE_FP_UNARY [UNSPEC_FRECPE UNSPEC_RSQRTE])
-(define_int_iterator SVE_FP_UNARY_INT [UNSPEC_FEXPA])
+(define_int_iterator SVE_FP_UNARY_INT [(UNSPEC_FEXPA "TARGET_NON_STREAMING")])
(define_int_iterator SVE_INT_SHIFT_IMM [UNSPEC_ASRD
(UNSPEC_SQSHLU "TARGET_SVE2")
(UNSPEC_SRSHR "TARGET_SVE2")
(UNSPEC_URSHR "TARGET_SVE2")])
+(define_int_iterator SVE_INT_BINARY_MULTI [UNSPEC_SQDMULH
+ UNSPEC_SRSHL UNSPEC_URSHL])
+
(define_int_iterator SVE_FP_BINARY [UNSPEC_FRECPS UNSPEC_RSQRTS])
(define_int_iterator SVE_FP_BINARY_INT [UNSPEC_FTSMUL UNSPEC_FTSSEL])
-(define_int_iterator SVE_BFLOAT_TERNARY_LONG [UNSPEC_BFDOT
- UNSPEC_BFMLALB
- UNSPEC_BFMLALT
- UNSPEC_BFMMLA])
+(define_int_iterator SVE_FP_BINARY_MULTI [UNSPEC_FMAX UNSPEC_FMAXNM
+ UNSPEC_FMIN UNSPEC_FMINNM])
+
+(define_int_iterator SVE_BFLOAT_TERNARY_LONG
+ [UNSPEC_BFDOT
+ UNSPEC_BFMLALB
+ UNSPEC_BFMLALT
+ (UNSPEC_BFMLSLB "TARGET_SME2 && TARGET_STREAMING_SME")
+ (UNSPEC_BFMLSLT "TARGET_SME2 && TARGET_STREAMING_SME")
+ (UNSPEC_BFMMLA "TARGET_NON_STREAMING")])
-(define_int_iterator SVE_BFLOAT_TERNARY_LONG_LANE [UNSPEC_BFDOT
- UNSPEC_BFMLALB
- UNSPEC_BFMLALT])
+(define_int_iterator SVE_BFLOAT_TERNARY_LONG_LANE
+ [UNSPEC_BFDOT
+ UNSPEC_BFMLALB
+ UNSPEC_BFMLALT
+ (UNSPEC_BFMLSLB "TARGET_SME2 && TARGET_STREAMING_SME")
+ (UNSPEC_BFMLSLT "TARGET_SME2 && TARGET_STREAMING_SME")])
(define_int_iterator SVE_INT_REDUCTION [UNSPEC_ANDV
UNSPEC_IORV
@@ -2859,6 +3139,11 @@
(define_int_iterator SVE2_WHILE_PTR [UNSPEC_WHILERW UNSPEC_WHILEWR])
+(define_int_iterator SVE_WHILE_ORDER [UNSPEC_WHILEGE UNSPEC_WHILEGT
+ UNSPEC_WHILEHI UNSPEC_WHILEHS
+ UNSPEC_WHILELE UNSPEC_WHILELO
+ UNSPEC_WHILELS UNSPEC_WHILELT])
+
(define_int_iterator SVE_SHIFT_WIDE [UNSPEC_ASHIFT_WIDE
UNSPEC_ASHIFTRT_WIDE
UNSPEC_LSHIFTRT_WIDE])
@@ -2970,6 +3255,13 @@
UNSPEC_UQRSHRNT
UNSPEC_UQSHRNT])
+(define_int_iterator SVE2_INT_SHIFT_IMM_NARROWxN [UNSPEC_SQRSHR
+ UNSPEC_SQRSHRN
+ UNSPEC_SQRSHRU
+ UNSPEC_SQRSHRUN
+ UNSPEC_UQRSHR
+ UNSPEC_UQRSHRN])
+
(define_int_iterator SVE2_INT_SHIFT_INSERT [UNSPEC_SLI UNSPEC_SRI])
(define_int_iterator SVE2_INT_CADD [UNSPEC_CADD90
@@ -3113,6 +3405,16 @@
(define_int_iterator SVE2_PMULL_PAIR [UNSPEC_PMULLB_PAIR UNSPEC_PMULLT_PAIR])
+(define_int_iterator SVE_QCVTxN [UNSPEC_SQCVT UNSPEC_SQCVTN
+ UNSPEC_SQCVTU UNSPEC_SQCVTUN
+ UNSPEC_UQCVT UNSPEC_UQCVTN])
+
+(define_int_iterator SVE2_SFx24_UNARY [UNSPEC_FRINTA UNSPEC_FRINTM
+ UNSPEC_FRINTN UNSPEC_FRINTP])
+
+(define_int_iterator SVE2_x24_PERMUTE [UNSPEC_ZIP UNSPEC_UZP])
+(define_int_iterator SVE2_x24_PERMUTEQ [UNSPEC_ZIPQ UNSPEC_UZPQ])
+
(define_int_iterator FCADD [UNSPEC_FCADD90
UNSPEC_FCADD270])
@@ -3148,6 +3450,51 @@
(define_int_iterator FCMUL_OP [UNSPEC_FCMUL
UNSPEC_FCMUL_CONJ])
+(define_int_iterator UNSPEC_REVD_ONLY [UNSPEC_REVD])
+
+(define_int_iterator SME_LD1 [UNSPEC_SME_LD1_HOR UNSPEC_SME_LD1_VER])
+(define_int_iterator SME_READ [UNSPEC_SME_READ_HOR UNSPEC_SME_READ_VER])
+(define_int_iterator SME_ST1 [UNSPEC_SME_ST1_HOR UNSPEC_SME_ST1_VER])
+(define_int_iterator SME_WRITE [UNSPEC_SME_WRITE_HOR UNSPEC_SME_WRITE_VER])
+
+(define_int_iterator SME_BINARY_SDI [UNSPEC_SME_ADDHA UNSPEC_SME_ADDVA])
+
+(define_int_iterator SME_INT_MOP [UNSPEC_SME_SMOPA UNSPEC_SME_SMOPS
+ UNSPEC_SME_SUMOPA UNSPEC_SME_SUMOPS
+ UNSPEC_SME_UMOPA UNSPEC_SME_UMOPS
+ UNSPEC_SME_USMOPA UNSPEC_SME_USMOPS])
+
+(define_int_iterator SME2_INT_MOP [UNSPEC_SME_SMOPA UNSPEC_SME_SMOPS
+ UNSPEC_SME_UMOPA UNSPEC_SME_UMOPS])
+
+(define_int_iterator SME_FP_MOP [UNSPEC_SME_FMOPA UNSPEC_SME_FMOPS])
+
+(define_int_iterator SME2_BMOP [UNSPEC_SME_BMOPA UNSPEC_SME_BMOPS])
+
+(define_int_iterator SME_BINARY_SLICE_SDI [UNSPEC_SME_ADD UNSPEC_SME_SUB])
+
+(define_int_iterator SME_BINARY_SLICE_SDF [UNSPEC_SME_FADD UNSPEC_SME_FSUB])
+
+(define_int_iterator SME_BINARY_WRITE_SLICE_SDI [UNSPEC_SME_ADD_WRITE
+ UNSPEC_SME_SUB_WRITE])
+
+(define_int_iterator SME_INT_DOTPROD [UNSPEC_SME_SDOT UNSPEC_SME_UDOT
+ UNSPEC_SME_USDOT])
+
+(define_int_iterator SME_INT_DOTPROD_LANE [UNSPEC_SME_SDOT UNSPEC_SME_SVDOT
+ UNSPEC_SME_UDOT UNSPEC_SME_UVDOT
+ UNSPEC_SME_SUDOT UNSPEC_SME_SUVDOT
+ UNSPEC_SME_USDOT UNSPEC_SME_USVDOT])
+
+(define_int_iterator SME_FP_DOTPROD [UNSPEC_SME_FDOT])
+
+(define_int_iterator SME_FP_DOTPROD_LANE [UNSPEC_SME_FDOT UNSPEC_SME_FVDOT])
+
+(define_int_iterator SME_INT_TERNARY_SLICE [UNSPEC_SME_SMLA UNSPEC_SME_SMLS
+ UNSPEC_SME_UMLA UNSPEC_SME_UMLS])
+
+(define_int_iterator SME_FP_TERNARY_SLICE [UNSPEC_SME_FMLA UNSPEC_SME_FMLS])
+
;; Iterators for atomic operations.
(define_int_iterator ATOMIC_LDOP
@@ -3164,6 +3511,10 @@
(define_int_iterator SUBDI_BITS [8 16 32])
+(define_int_iterator BHSD_BITS [8 16 32 64])
+
+(define_int_iterator LUTI_BITS [2 4])
+
;; -------------------------------------------------------------------
;; Int Iterators Attributes.
;; -------------------------------------------------------------------
@@ -3185,6 +3536,7 @@
(UNSPEC_RSQRTS "frsqrts")
(UNSPEC_RBIT "rbit")
(UNSPEC_REVB "revb")
+ (UNSPEC_REVD "revd")
(UNSPEC_REVH "revh")
(UNSPEC_REVW "revw")
(UNSPEC_UMAXV "umax")
@@ -3222,8 +3574,60 @@
(UNSPEC_PMULLT "pmullt")
(UNSPEC_PMULLT_PAIR "pmullt_pair")
(UNSPEC_SMATMUL "smatmul")
+ (UNSPEC_UZP "uzp")
+ (UNSPEC_UZPQ "uzpq")
+ (UNSPEC_ZIP "zip")
+ (UNSPEC_ZIPQ "zipq")
+ (UNSPEC_SME_ADD "add")
+ (UNSPEC_SME_ADD_WRITE "add_write")
+ (UNSPEC_SME_ADDHA "addha")
+ (UNSPEC_SME_ADDVA "addva")
+ (UNSPEC_SME_BMOPA "bmopa")
+ (UNSPEC_SME_BMOPS "bmops")
+ (UNSPEC_SME_FADD "fadd")
+ (UNSPEC_SME_FDOT "fdot")
+ (UNSPEC_SME_FVDOT "fvdot")
+ (UNSPEC_SME_FMLA "fmla")
+ (UNSPEC_SME_FMLS "fmls")
+ (UNSPEC_SME_FMOPA "fmopa")
+ (UNSPEC_SME_FMOPS "fmops")
+ (UNSPEC_SME_FSUB "fsub")
+ (UNSPEC_SME_LD1_HOR "ld1_hor")
+ (UNSPEC_SME_LD1_VER "ld1_ver")
+ (UNSPEC_SME_READ_HOR "read_hor")
+ (UNSPEC_SME_READ_VER "read_ver")
+ (UNSPEC_SME_SDOT "sdot")
+ (UNSPEC_SME_SVDOT "svdot")
+ (UNSPEC_SME_SMLA "smla")
+ (UNSPEC_SME_SMLS "smls")
+ (UNSPEC_SME_SMOPA "smopa")
+ (UNSPEC_SME_SMOPS "smops")
+ (UNSPEC_SME_ST1_HOR "st1_hor")
+ (UNSPEC_SME_ST1_VER "st1_ver")
+ (UNSPEC_SME_SUB "sub")
+ (UNSPEC_SME_SUB_WRITE "sub_write")
+ (UNSPEC_SME_SUDOT "sudot")
+ (UNSPEC_SME_SUVDOT "suvdot")
+ (UNSPEC_SME_SUMOPA "sumopa")
+ (UNSPEC_SME_SUMOPS "sumops")
+ (UNSPEC_SME_UDOT "udot")
+ (UNSPEC_SME_UVDOT "uvdot")
+ (UNSPEC_SME_UMLA "umla")
+ (UNSPEC_SME_UMLS "umls")
+ (UNSPEC_SME_UMOPA "umopa")
+ (UNSPEC_SME_UMOPS "umops")
+ (UNSPEC_SME_USDOT "usdot")
+ (UNSPEC_SME_USVDOT "usvdot")
+ (UNSPEC_SME_USMOPA "usmopa")
+ (UNSPEC_SME_USMOPS "usmops")
+ (UNSPEC_SME_WRITE_HOR "write_hor")
+ (UNSPEC_SME_WRITE_VER "write_ver")
(UNSPEC_SQCADD90 "sqcadd90")
(UNSPEC_SQCADD270 "sqcadd270")
+ (UNSPEC_SQCVT "sqcvt")
+ (UNSPEC_SQCVTN "sqcvtn")
+ (UNSPEC_SQCVTU "sqcvtu")
+ (UNSPEC_SQCVTUN "sqcvtun")
(UNSPEC_SQRDCMLAH "sqrdcmlah")
(UNSPEC_SQRDCMLAH90 "sqrdcmlah90")
(UNSPEC_SQRDCMLAH180 "sqrdcmlah180")
@@ -3231,6 +3635,8 @@
(UNSPEC_TRN1Q "trn1q")
(UNSPEC_TRN2Q "trn2q")
(UNSPEC_UMATMUL "umatmul")
+ (UNSPEC_UQCVT "uqcvt")
+ (UNSPEC_UQCVTN "uqcvtn")
(UNSPEC_USMATMUL "usmatmul")
(UNSPEC_UZP1Q "uzp1q")
(UNSPEC_UZP2Q "uzp2q")
@@ -3460,7 +3866,9 @@
(UNSPEC_TRN1 "trn1") (UNSPEC_TRN2 "trn2")
(UNSPEC_TRN1Q "trn1") (UNSPEC_TRN2Q "trn2")
(UNSPEC_UZP1 "uzp1") (UNSPEC_UZP2 "uzp2")
- (UNSPEC_UZP1Q "uzp1") (UNSPEC_UZP2Q "uzp2")])
+ (UNSPEC_UZP1Q "uzp1") (UNSPEC_UZP2Q "uzp2")
+ (UNSPEC_UZP "uzp") (UNSPEC_UZPQ "uzp")
+ (UNSPEC_ZIP "zip") (UNSPEC_ZIPQ "zip")])
; op code for REV instructions (size within which elements are reversed).
(define_int_attr rev_op [(UNSPEC_REV64 "64") (UNSPEC_REV32 "32")
@@ -3638,8 +4046,12 @@
(UNSPEC_SQRDMLSH "sqrdmlsh")
(UNSPEC_SQRDMULH "sqrdmulh")
(UNSPEC_SQRSHL "sqrshl")
+ (UNSPEC_SQRSHR "sqrshr")
+ (UNSPEC_SQRSHRN "sqrshrn")
(UNSPEC_SQRSHRNB "sqrshrnb")
(UNSPEC_SQRSHRNT "sqrshrnt")
+ (UNSPEC_SQRSHRU "sqrshru")
+ (UNSPEC_SQRSHRUN "sqrshrun")
(UNSPEC_SQRSHRUNB "sqrshrunb")
(UNSPEC_SQRSHRUNT "sqrshrunt")
(UNSPEC_SQSHL "sqshl")
@@ -3684,6 +4096,8 @@
(UNSPEC_UMULLB "umullb")
(UNSPEC_UMULLT "umullt")
(UNSPEC_UQRSHL "uqrshl")
+ (UNSPEC_UQRSHR "uqrshr")
+ (UNSPEC_UQRSHRN "uqrshrn")
(UNSPEC_UQRSHRNB "uqrshrnb")
(UNSPEC_UQRSHRNT "uqrshrnt")
(UNSPEC_UQSHL "uqshl")
@@ -3740,6 +4154,8 @@
(define_int_attr sve_fp_op [(UNSPEC_BFDOT "bfdot")
(UNSPEC_BFMLALB "bfmlalb")
(UNSPEC_BFMLALT "bfmlalt")
+ (UNSPEC_BFMLSLB "bfmlslb")
+ (UNSPEC_BFMLSLT "bfmlslt")
(UNSPEC_BFMMLA "bfmmla")
(UNSPEC_FRECPE "frecpe")
(UNSPEC_FRECPS "frecps")
@@ -3800,6 +4216,9 @@
(UNSPEC_COND_FMULX "fmulx")
(UNSPEC_COND_FSUB "fsubr")])
+(define_int_attr sme_int_op [(UNSPEC_SME_ADD_WRITE "add")
+ (UNSPEC_SME_SUB_WRITE "sub")])
+
(define_int_attr rot [(UNSPEC_CADD90 "90")
(UNSPEC_CADD270 "270")
(UNSPEC_CDOT "0")
@@ -3967,6 +4386,24 @@
(define_int_attr unspec [(UNSPEC_WHILERW "UNSPEC_WHILERW")
(UNSPEC_WHILEWR "UNSPEC_WHILEWR")])
+(define_int_attr hv [(UNSPEC_SME_LD1_HOR "h")
+ (UNSPEC_SME_LD1_VER "v")
+ (UNSPEC_SME_READ_HOR "h")
+ (UNSPEC_SME_READ_VER "v")
+ (UNSPEC_SME_ST1_HOR "h")
+ (UNSPEC_SME_ST1_VER "v")
+ (UNSPEC_SME_WRITE_HOR "h")
+ (UNSPEC_SME_WRITE_VER "v")])
+
+(define_int_attr has_16bit_form [(UNSPEC_SME_SDOT "true")
+ (UNSPEC_SME_SVDOT "true")
+ (UNSPEC_SME_UDOT "true")
+ (UNSPEC_SME_UVDOT "true")
+ (UNSPEC_SME_SUDOT "false")
+ (UNSPEC_SME_SUVDOT "false")
+ (UNSPEC_SME_USDOT "false")
+ (UNSPEC_SME_USVDOT "false")])
+
;; Iterators and attributes for fpcr fpsr getter setters
(define_int_iterator GET_FPSCR
@@ -3981,4 +4418,4 @@
(UNSPECV_GET_FPCR "fpcr")
(UNSPECV_SET_FPCR "fpcr")])
-(define_int_attr bits_etype [(8 "b") (16 "h") (32 "s")])
+(define_int_attr bits_etype [(8 "b") (16 "h") (32 "s") (64 "d")])
diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md
index a73724a..9af2810 100644
--- a/gcc/config/aarch64/predicates.md
+++ b/gcc/config/aarch64/predicates.md
@@ -20,6 +20,10 @@
(include "../arm/common.md")
+(define_predicate "aarch64_sysreg_string"
+ (and (match_code "const_string")
+ (match_test "aarch64_valid_sysreg_name_p (XSTR (op, 0))")))
+
(define_special_predicate "cc_register"
(and (match_code "reg")
(and (match_test "REGNO (op) == CC_REGNUM")
@@ -42,6 +46,30 @@
(and (match_code "const_int")
(match_test "op == CONST0_RTX (mode)")))
+(define_predicate "const_0_to_7_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 7)")))
+
+(define_predicate "const_0_to_4_step_4_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 4)")
+ (match_test "(INTVAL (op) & 3) == 0")))
+
+(define_predicate "const_0_to_6_step_2_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 6)")
+ (match_test "(INTVAL (op) & 1) == 0")))
+
+(define_predicate "const_0_to_12_step_4_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 12)")
+ (match_test "(INTVAL (op) & 3) == 0")))
+
+(define_predicate "const_0_to_14_step_2_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 14)")
+ (match_test "(INTVAL (op) & 1) == 0")))
+
(define_predicate "const_1_to_3_operand"
(match_code "const_int,const_vector")
{
@@ -184,11 +212,17 @@
(and (match_code "const_poly_int")
(match_test "aarch64_add_offset_temporaries (op) == 1")))
+(define_predicate "aarch64_addsvl_addspl_immediate"
+ (and (match_code "const")
+ (match_test "aarch64_addsvl_addspl_immediate_p (op)")))
+
(define_predicate "aarch64_pluslong_operand"
(ior (match_operand 0 "register_operand")
(match_operand 0 "aarch64_pluslong_immediate")
(and (match_test "TARGET_SVE")
- (match_operand 0 "aarch64_sve_plus_immediate"))))
+ (match_operand 0 "aarch64_sve_plus_immediate"))
+ (and (match_test "TARGET_SME")
+ (match_operand 0 "aarch64_addsvl_addspl_immediate"))))
(define_predicate "aarch64_pluslong_or_poly_operand"
(ior (match_operand 0 "aarch64_pluslong_operand")
@@ -558,8 +592,7 @@
;; Shifts with a range 1-bit_size (aarch64_simd_shift_imm_offset)
;; Shifts with a range 0-bit_size (aarch64_simd_shift_imm_bitsize)
(define_predicate "aarch64_simd_shift_imm_qi"
- (and (match_code "const_int")
- (match_test "IN_RANGE (INTVAL (op), 0, 7)")))
+ (match_operand 0 "const_0_to_7_operand"))
(define_predicate "aarch64_simd_shift_imm_hi"
(and (match_code "const_int")
diff --git a/gcc/config/aarch64/t-aarch64 b/gcc/config/aarch64/t-aarch64
index a9a244a..0d96ae3 100644
--- a/gcc/config/aarch64/t-aarch64
+++ b/gcc/config/aarch64/t-aarch64
@@ -20,7 +20,10 @@
TM_H += $(srcdir)/config/aarch64/aarch64-fusion-pairs.def \
$(srcdir)/config/aarch64/aarch64-tuning-flags.def \
- $(srcdir)/config/aarch64/aarch64-option-extensions.def
+ $(srcdir)/config/aarch64/aarch64-option-extensions.def \
+ $(srcdir)/config/aarch64/aarch64-cores.def \
+ $(srcdir)/config/aarch64/aarch64-isa-modes.def \
+ $(srcdir)/config/aarch64/aarch64-arches.def
OPTIONS_H_EXTRA += $(srcdir)/config/aarch64/aarch64-cores.def \
$(srcdir)/config/aarch64/aarch64-arches.def
@@ -60,6 +63,7 @@ aarch64-sve-builtins.o: $(srcdir)/config/aarch64/aarch64-sve-builtins.cc \
$(srcdir)/config/aarch64/aarch64-sve-builtins.def \
$(srcdir)/config/aarch64/aarch64-sve-builtins-base.def \
$(srcdir)/config/aarch64/aarch64-sve-builtins-sve2.def \
+ $(srcdir)/config/aarch64/aarch64-sve-builtins-sme.def \
$(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TREE_H) $(RTL_H) \
$(TM_P_H) memmodel.h insn-codes.h $(OPTABS_H) $(RECOG_H) $(DIAGNOSTIC_H) \
$(EXPR_H) $(BASIC_BLOCK_H) $(FUNCTION_H) fold-const.h $(GIMPLE_H) \
@@ -69,7 +73,8 @@ aarch64-sve-builtins.o: $(srcdir)/config/aarch64/aarch64-sve-builtins.cc \
$(srcdir)/config/aarch64/aarch64-sve-builtins.h \
$(srcdir)/config/aarch64/aarch64-sve-builtins-shapes.h \
$(srcdir)/config/aarch64/aarch64-sve-builtins-base.h \
- $(srcdir)/config/aarch64/aarch64-sve-builtins-sve2.h
+ $(srcdir)/config/aarch64/aarch64-sve-builtins-sve2.h \
+ $(srcdir)/config/aarch64/aarch64-sve-builtins-sme.h
$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
$(srcdir)/config/aarch64/aarch64-sve-builtins.cc
@@ -110,6 +115,19 @@ aarch64-sve-builtins-sve2.o: \
$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
$(srcdir)/config/aarch64/aarch64-sve-builtins-sve2.cc
+aarch64-sve-builtins-sme.o: \
+ $(srcdir)/config/aarch64/aarch64-sve-builtins-sme.cc \
+ $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TREE_H) $(RTL_H) \
+ $(TM_P_H) memmodel.h insn-codes.h $(OPTABS_H) $(RECOG_H) \
+ $(EXPR_H) $(BASIC_BLOCK_H) $(FUNCTION_H) fold-const.h $(GIMPLE_H) \
+ gimple-iterator.h gimplify.h explow.h $(EMIT_RTL_H) \
+ $(srcdir)/config/aarch64/aarch64-sve-builtins.h \
+ $(srcdir)/config/aarch64/aarch64-sve-builtins-shapes.h \
+ $(srcdir)/config/aarch64/aarch64-sve-builtins-sme.h \
+ $(srcdir)/config/aarch64/aarch64-sve-builtins-functions.h
+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
+ $(srcdir)/config/aarch64/aarch64-sve-builtins-sme.cc
+
aarch64-builtin-iterators.h: $(srcdir)/config/aarch64/geniterators.sh \
$(srcdir)/config/aarch64/iterators.md
$(SHELL) $(srcdir)/config/aarch64/geniterators.sh \
@@ -183,9 +201,12 @@ MULTILIB_DIRNAMES = $(subst $(comma), ,$(TM_MULTILIB_CONFIG))
insn-conditions.md: s-check-sve-md
s-check-sve-md: $(srcdir)/config/aarch64/check-sve-md.awk \
$(srcdir)/config/aarch64/aarch64-sve.md \
- $(srcdir)/config/aarch64/aarch64-sve2.md
+ $(srcdir)/config/aarch64/aarch64-sve2.md \
+ $(srcdir)/config/aarch64/aarch64-sme.md
$(AWK) -f $(srcdir)/config/aarch64/check-sve-md.awk \
$(srcdir)/config/aarch64/aarch64-sve.md
$(AWK) -f $(srcdir)/config/aarch64/check-sve-md.awk \
$(srcdir)/config/aarch64/aarch64-sve2.md
+ $(AWK) -f $(srcdir)/config/aarch64/check-sve-md.awk \
+ $(srcdir)/config/aarch64/aarch64-sme.md
$(STAMP) s-check-sve-md
diff --git a/gcc/config/aarch64/tuning_models/ampere1.h b/gcc/config/aarch64/tuning_models/ampere1.h
index 8d2a1c6..a144e8f 100644
--- a/gcc/config/aarch64/tuning_models/ampere1.h
+++ b/gcc/config/aarch64/tuning_models/ampere1.h
@@ -104,7 +104,7 @@ static const struct tune_params ampere1_tunings =
2, /* min_div_recip_mul_df. */
0, /* max_case_values. */
tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model. */
- (AARCH64_EXTRA_TUNE_NONE), /* tune_flags. */
+ (AARCH64_EXTRA_TUNE_AVOID_CROSS_LOOP_FMA), /* tune_flags. */
&ampere1_prefetch_tune,
AARCH64_LDP_STP_POLICY_ALIGNED, /* ldp_policy_model. */
AARCH64_LDP_STP_POLICY_ALIGNED /* stp_policy_model. */
diff --git a/gcc/config/aarch64/tuning_models/ampere1a.h b/gcc/config/aarch64/tuning_models/ampere1a.h
index c419ffb..f688ed0 100644
--- a/gcc/config/aarch64/tuning_models/ampere1a.h
+++ b/gcc/config/aarch64/tuning_models/ampere1a.h
@@ -50,13 +50,13 @@ static const struct tune_params ampere1a_tunings =
"32:16", /* loop_align. */
2, /* int_reassoc_width. */
4, /* fp_reassoc_width. */
- 1, /* fma_reassoc_width. */
+ 4, /* fma_reassoc_width. */
2, /* vec_reassoc_width. */
2, /* min_div_recip_mul_sf. */
2, /* min_div_recip_mul_df. */
0, /* max_case_values. */
tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model. */
- (AARCH64_EXTRA_TUNE_NONE), /* tune_flags. */
+ (AARCH64_EXTRA_TUNE_AVOID_CROSS_LOOP_FMA), /* tune_flags. */
&ampere1_prefetch_tune,
AARCH64_LDP_STP_POLICY_ALIGNED, /* ldp_policy_model. */
AARCH64_LDP_STP_POLICY_ALIGNED /* stp_policy_model. */
diff --git a/gcc/config/aarch64/tuning_models/ampere1b.h b/gcc/config/aarch64/tuning_models/ampere1b.h
new file mode 100644
index 0000000..a98b6a9
--- /dev/null
+++ b/gcc/config/aarch64/tuning_models/ampere1b.h
@@ -0,0 +1,115 @@
+/* Tuning model description for the Ampere1B core.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_AARCH64_H_AMPERE1B
+#define GCC_AARCH64_H_AMPERE1B
+
+#include "generic.h"
+
+static const cpu_prefetch_tune ampere1b_prefetch_tune =
+{
+ 48, /* num_slots */
+ 64, /* l1_cache_size */
+ 64, /* l1_cache_line_size */
+ 2048, /* l2_cache_size */
+ true, /* prefetch_dynamic_strides */
+ -1, /* minimum_stride */
+ -1 /* default_opt_level */
+};
+
+static const advsimd_vec_cost ampere1b_advsimd_vector_cost =
+{
+ 1, /* int_stmt_cost */
+ 3, /* fp_stmt_cost */
+ 0, /* ld2_st2_permute_cost */
+ 0, /* ld3_st3_permute_cost */
+ 0, /* ld4_st4_permute_cost */
+ 2, /* permute_cost */
+ 8, /* reduc_i8_cost */
+ 6, /* reduc_i16_cost */
+ 4, /* reduc_i32_cost */
+ 2, /* reduc_i64_cost */
+ 9, /* reduc_f16_cost */
+ 6, /* reduc_f32_cost */
+ 3, /* reduc_f64_cost */
+ 5, /* store_elt_extra_cost */
+ 5, /* vec_to_scalar_cost */
+ 5, /* scalar_to_vec_cost */
+ 4, /* align_load_cost */
+ 4, /* unalign_load_cost */
+ 1, /* unalign_store_cost */
+ 1 /* store_cost */
+};
+
+/* Ampere-1B costs for vector insn classes. */
+static const struct cpu_vector_cost ampere1b_vector_cost =
+{
+ 1, /* scalar_int_stmt_cost */
+ 3, /* scalar_fp_stmt_cost */
+ 4, /* scalar_load_cost */
+ 1, /* scalar_store_cost */
+ 1, /* cond_taken_branch_cost */
+ 1, /* cond_not_taken_branch_cost */
+ &ampere1b_advsimd_vector_cost, /* advsimd */
+ nullptr, /* sve */
+ nullptr /* issue_info */
+};
+
+static const struct tune_params ampere1b_tunings =
+{
+ &ampere1b_extra_costs,
+ &generic_addrcost_table,
+ &generic_regmove_cost,
+ &ampere1b_vector_cost,
+ &generic_branch_cost,
+ &generic_approx_modes,
+ SVE_NOT_IMPLEMENTED, /* sve_width */
+ { 3, /* load_int. */
+ 1, /* store_int. */
+ 4, /* load_fp. */
+ 4, /* store_fp. */
+ 4, /* load_pred. */
+ 4 /* store_pred. */
+ }, /* memmov_cost. */
+ 4, /* issue_rate */
+ (AARCH64_FUSE_ADRP_ADD | AARCH64_FUSE_AES_AESMC |
+ AARCH64_FUSE_MOV_MOVK | AARCH64_FUSE_MOVK_MOVK |
+ AARCH64_FUSE_ALU_BRANCH /* adds, ands, bics, ccmp, ccmn */ |
+ AARCH64_FUSE_CMP_BRANCH | AARCH64_FUSE_ALU_CBZ |
+ AARCH64_FUSE_ADDSUB_2REG_CONST1),
+ /* fusible_ops */
+ "32", /* function_align. */
+ "4", /* jump_align. */
+ "32:16", /* loop_align. */
+ 2, /* int_reassoc_width. */
+ 4, /* fp_reassoc_width. */
+ 4, /* fma_reassoc_width. */
+ 2, /* vec_reassoc_width. */
+ 2, /* min_div_recip_mul_sf. */
+ 2, /* min_div_recip_mul_df. */
+ 0, /* max_case_values. */
+ tune_params::AUTOPREFETCHER_STRONG, /* autoprefetcher_model. */
+ (AARCH64_EXTRA_TUNE_CHEAP_SHIFT_EXTEND |
+ AARCH64_EXTRA_TUNE_AVOID_CROSS_LOOP_FMA), /* tune_flags. */
+ &ampere1b_prefetch_tune,
+ AARCH64_LDP_STP_POLICY_ALIGNED, /* ldp_policy_model. */
+ AARCH64_LDP_STP_POLICY_ALIGNED /* stp_policy_model. */
+};
+
+#endif /* GCC_AARCH64_H_AMPERE1B */
diff --git a/gcc/config/alpha/alpha.cc b/gcc/config/alpha/alpha.cc
index db6b34b..6aa9378 100644
--- a/gcc/config/alpha/alpha.cc
+++ b/gcc/config/alpha/alpha.cc
@@ -7482,14 +7482,13 @@ common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
return NULL_TREE;
}
-static const struct attribute_spec vms_attribute_table[] =
+TARGET_GNU_ATTRIBUTES (vms_attribute_table,
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
{ COMMON_OBJECT, 0, 1, true, false, false, false, common_object_handler,
- NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ NULL }
+});
void
vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
diff --git a/gcc/config/arc/arc.cc b/gcc/config/arc/arc.cc
index 70ee410..3f4eb5a 100644
--- a/gcc/config/arc/arc.cc
+++ b/gcc/config/arc/arc.cc
@@ -187,44 +187,6 @@ static tree arc_handle_secure_attribute (tree *, tree, tree, int, bool *);
static tree arc_handle_uncached_attribute (tree *, tree, tree, int, bool *);
static tree arc_handle_aux_attribute (tree *, tree, tree, int, bool *);
-/* Initialized arc_attribute_table to NULL since arc doesnot have any
- machine specific supported attributes. */
-const struct attribute_spec arc_attribute_table[] =
-{
- /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
- affects_type_identity, handler, exclude } */
- { "interrupt", 1, 1, true, false, false, true,
- arc_handle_interrupt_attribute, NULL },
- /* Function calls made to this symbol must be done indirectly, because
- it may lie outside of the 21/25 bit addressing range of a normal function
- call. */
- { "long_call", 0, 0, false, true, true, false, NULL, NULL },
- /* Whereas these functions are always known to reside within the 25 bit
- addressing range of unconditionalized bl. */
- { "medium_call", 0, 0, false, true, true, false, NULL, NULL },
- /* And these functions are always known to reside within the 21 bit
- addressing range of blcc. */
- { "short_call", 0, 0, false, true, true, false, NULL, NULL },
- /* Function which are not having the prologue and epilogue generated
- by the compiler. */
- { "naked", 0, 0, true, false, false, false, arc_handle_fndecl_attribute,
- NULL },
- /* Functions calls made using jli instruction. The pointer in JLI
- table is found latter. */
- { "jli_always", 0, 0, false, true, true, false, NULL, NULL },
- /* Functions calls made using jli instruction. The pointer in JLI
- table is given as input parameter. */
- { "jli_fixed", 1, 1, false, true, true, false, arc_handle_jli_attribute,
- NULL },
- /* Call a function using secure-mode. */
- { "secure_call", 1, 1, false, true, true, false, arc_handle_secure_attribute,
- NULL },
- /* Bypass caches using .di flag. */
- { "uncached", 0, 0, false, true, false, false, arc_handle_uncached_attribute,
- NULL },
- { "aux", 0, 1, true, false, false, false, arc_handle_aux_attribute, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
static int arc_comp_type_attributes (const_tree, const_tree);
static void arc_file_start (void);
static void arc_internal_label (FILE *, const char *, unsigned long);
@@ -773,6 +735,42 @@ static rtx arc_legitimize_address_0 (rtx, rtx, machine_mode mode);
#include "target-def.h"
+TARGET_GNU_ATTRIBUTES (arc_attribute_table,
+{
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
+ affects_type_identity, handler, exclude } */
+ { "interrupt", 1, 1, true, false, false, true,
+ arc_handle_interrupt_attribute, NULL },
+ /* Function calls made to this symbol must be done indirectly, because
+ it may lie outside of the 21/25 bit addressing range of a normal function
+ call. */
+ { "long_call", 0, 0, false, true, true, false, NULL, NULL },
+ /* Whereas these functions are always known to reside within the 25 bit
+ addressing range of unconditionalized bl. */
+ { "medium_call", 0, 0, false, true, true, false, NULL, NULL },
+ /* And these functions are always known to reside within the 21 bit
+ addressing range of blcc. */
+ { "short_call", 0, 0, false, true, true, false, NULL, NULL },
+ /* Function which are not having the prologue and epilogue generated
+ by the compiler. */
+ { "naked", 0, 0, true, false, false, false, arc_handle_fndecl_attribute,
+ NULL },
+ /* Functions calls made using jli instruction. The pointer in JLI
+ table is found latter. */
+ { "jli_always", 0, 0, false, true, true, false, NULL, NULL },
+ /* Functions calls made using jli instruction. The pointer in JLI
+ table is given as input parameter. */
+ { "jli_fixed", 1, 1, false, true, true, false, arc_handle_jli_attribute,
+ NULL },
+ /* Call a function using secure-mode. */
+ { "secure_call", 1, 1, false, true, true, false, arc_handle_secure_attribute,
+ NULL },
+ /* Bypass caches using .di flag. */
+ { "uncached", 0, 0, false, true, false, false, arc_handle_uncached_attribute,
+ NULL },
+ { "aux", 0, 1, true, false, false, false, arc_handle_aux_attribute, NULL }
+});
+
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
#undef TARGET_ASM_ALIGNED_SI_OP
diff --git a/gcc/config/arc/arc.md b/gcc/config/arc/arc.md
index 4ae3a67..bf9f88e 100644
--- a/gcc/config/arc/arc.md
+++ b/gcc/config/arc/arc.md
@@ -669,26 +669,26 @@ archs4x, archs4xd"
|| (satisfies_constraint_Cm3 (operands[1])
&& memory_operand (operands[0], QImode))"
"@
- mov%? %0,%1
- mov%? %0,%1
- mov%? %0,%1
- mov%? %0,%1
- mov%? %0,%1
- mov%? %0,%1
- mov%? %0,%1
- mov%? %0,%1
- mov%? %0,%1
- mov%? %0,%1
- ldb%? %0,%1
- stb%? %1,%0
- ldb%? %0,%1
- xldb%U1 %0,%1
- ldb%U1%V1 %0,%1
- xstb%U0 %1,%0
- stb%U0%V0 %1,%0
- stb%U0%V0 %1,%0
- stb%U0%V0 %1,%0
- stb%U0%V0 %1,%0"
+ mov%?\\t%0,%1
+ mov%?\\t%0,%1
+ mov%?\\t%0,%1
+ mov%?\\t%0,%1
+ mov%?\\t%0,%1
+ mov%?\\t%0,%1
+ mov%?\\t%0,%1
+ mov%?\\t%0,%1
+ mov%?\\t%0,%1
+ mov%?\\t%0,%1
+ ldb%?\\t%0,%1
+ stb%?\\t%1,%0
+ ldb%?\\t%0,%1
+ xldb%U1\\t%0,%1
+ ldb%U1%V1\\t%0,%1
+ xstb%U0\\t%1,%0
+ stb%U0%V0\\t%1,%0
+ stb%U0%V0\\t%1,%0
+ stb%U0%V0\\t%1,%0
+ stb%U0%V0\\t%1,%0"
[(set_attr "type" "move,move,move,move,move,move,move,move,move,move,load,store,load,load,load,store,store,store,store,store")
(set_attr "iscompact" "maybe,maybe,maybe,true,true,false,false,false,maybe_limm,false,true,true,true,false,false,false,false,false,false,false")
(set_attr "predicable" "yes,no,yes,no,no,yes,no,yes,yes,yes,no,no,no,no,no,no,no,no,no,no")
@@ -713,26 +713,26 @@ archs4x, archs4xd"
|| (satisfies_constraint_Cm3 (operands[1])
&& memory_operand (operands[0], HImode))"
"@
- mov%? %0,%1
- mov%? %0,%1
- mov%? %0,%1
- mov%? %0,%1
- mov%? %0,%1
- mov%? %0,%1
- mov%? %0,%1
- mov%? %0,%1
- mov%? %0,%1
- mov%? %0,%1
- mov%? %0,%1
- ld%_%? %0,%1
- st%_%? %1,%0
- xld%_%U1 %0,%1
- ld%_%U1%V1 %0,%1
- xst%_%U0 %1,%0
- st%_%U0%V0 %1,%0
- st%_%U0%V0 %1,%0
- st%_%U0%V0 %1,%0
- st%_%U0%V0 %1,%0"
+ mov%?\\t%0,%1
+ mov%?\\t%0,%1
+ mov%?\\t%0,%1
+ mov%?\\t%0,%1
+ mov%?\\t%0,%1
+ mov%?\\t%0,%1
+ mov%?\\t%0,%1
+ mov%?\\t%0,%1
+ mov%?\\t%0,%1
+ mov%?\\t%0,%1
+ mov%?\\t%0,%1
+ ld%_%?\\t%0,%1
+ st%_%?\\t%1,%0
+ xld%_%U1\\t%0,%1
+ ld%_%U1%V1\\t%0,%1
+ xst%_%U0\\t%1,%0
+ st%_%U0%V0\\t%1,%0
+ st%_%U0%V0\\t%1,%0
+ st%_%U0%V0\\t%1,%0
+ st%_%U0%V0\\t%1,%0"
[(set_attr "type" "move,move,move,move,move,move,move,move,move,move,move,load,store,load,load,store,store,store,store,store")
(set_attr "iscompact" "maybe,maybe,maybe,true,true,false,false,false,maybe_limm,maybe_limm,false,true,true,false,false,false,false,false,false,false")
(set_attr "predicable" "yes,no,yes,no,no,yes,no,yes,yes,yes,yes,no,no,no,no,no,no,no,no,no")
@@ -818,7 +818,7 @@ archs4x, archs4xd"
(plus:SI (reg:SI SP_REG)
(match_operand 1 "immediate_operand" "Cal")))))]
"reload_completed"
- "ld.a %0,[sp,%1]"
+ "ld.a\\t%0,[sp,%1]"
[(set_attr "type" "load")
(set_attr "length" "8")])
@@ -830,7 +830,7 @@ archs4x, archs4xd"
(unspec:SI [(match_operand:SI 1 "register_operand" "c")]
UNSPEC_ARC_DIRECT))]
""
- "st%U0 %1,%0\;st%U0.di %1,%0"
+ "st%U0\\t%1,%0\;st%U0.di\\t%1,%0"
[(set_attr "type" "store")])
;; Combiner patterns for compare with zero
@@ -944,7 +944,7 @@ archs4x, archs4xd"
(set (match_operand:SI 0 "register_operand" "=w")
(match_dup 3))]
""
- "%O3.f %0,%1"
+ "%O3.f\\t%0,%1"
[(set_attr "type" "compare")
(set_attr "cond" "set_zn")
(set_attr "length" "4")])
@@ -987,15 +987,15 @@ archs4x, archs4xd"
switch (which_alternative)
{
case 0: case 2: case 3: case 7:
- return \"tst%? %1,%2\";
+ return \"tst%?\\t%1,%2\";
case 1:
- return \"btst%? %1,%z2\";
+ return \"btst%?\\t%1,%z2\";
case 4:
- return \"bmsk%?.f 0,%1,%Z2\";
+ return \"bmsk%?.f\\t0,%1,%Z2\";
case 5:
- return \"bclr%?.f 0,%1,%M2\";
+ return \"bclr%?.f\\t0,%1,%M2\";
case 6:
- return \"asr.f 0,%1,%p2\";
+ return \"asr.f\\t0,%1,%p2\";
default:
gcc_unreachable ();
}
@@ -1026,7 +1026,7 @@ archs4x, archs4xd"
&& (INTVAL (operands[3]) + INTVAL (operands[2]) <= 11
|| (INTVAL (operands[3]) <= 11
&& INTVAL (operands[3]) + INTVAL (operands[2]) == 32))"
- "tst %1,((1<<%2)-1)<<%3"
+ "tst\\t%1,((1<<%2)-1)<<%3"
[(set_attr "type" "compare")
(set_attr "cond" "set_zn")
(set_attr "length" "4")])
@@ -1058,11 +1058,11 @@ archs4x, archs4xd"
(clobber (match_scratch:SI 4 "=X,X,X,Rrq,X"))]
""
"@
- btst%? %1,%3
- btst %1,%3
- bmsk.f 0,%1,%2-1
- movb.f.cl %4,%1,%3,%3,%2
- and.f 0,%1,((1<<%2)-1)<<%3"
+ btst%?\\t%1,%3
+ btst\\t%1,%3
+ bmsk.f\\t0,%1,%2-1
+ movb.f.cl\\t%4,%1,%3,%3,%2
+ and.f\\t0,%1,((1<<%2)-1)<<%3"
[(set_attr "iscompact" "maybe,false,false,false,false")
(set_attr "type" "compare,compare,compare,shift,compare")
(set_attr "cond" "set_zn")
@@ -1146,7 +1146,7 @@ archs4x, archs4xd"
(set (match_operand:SI 0 "register_operand" "=w,w,w")
(match_dup 4))]
""
- "%O4.f %0,%1,%2 ; non-mult commutative"
+ "%O4.f\\t%0,%1,%2 ; non-mult commutative"
[(set_attr "type" "compare,compare,compare")
(set_attr "cond" "set_zn,set_zn,set_zn")
(set_attr "length" "4,4,8")])
@@ -1164,7 +1164,7 @@ archs4x, archs4xd"
(set (match_operand:SI 0 "register_operand" "=W,W,W")
(match_dup 4))]
"!TARGET_ARC600_FAMILY"
- "%O4.f %0,%1,%2 ; mult commutative"
+ "%O4.f\\t%0,%1,%2 ; mult commutative"
[(set_attr "type" "compare,compare,compare")
(set_attr "cond" "set_zn,set_zn,set_zn")
(set_attr "length" "4,4,8")])
@@ -1248,7 +1248,7 @@ archs4x, archs4xd"
(set (match_operand:SI 0 "register_operand" "=w,w,w")
(and:SI (match_dup 1) (not:SI (match_dup 2))))]
""
- "bic.f %0,%1,%2"
+ "bic.f\\t%0,%1,%2"
[(set_attr "type" "compare,compare,compare")
(set_attr "cond" "set_zn,set_zn,set_zn")
(set_attr "length" "4,4,8")])
@@ -1640,13 +1640,13 @@ archs4x, archs4xd"
{
if (rtx_equal_p (operands[1], const0_rtx) && GET_CODE (operands[3]) == NE
&& IN_RANGE (REGNO (operands[0]) ^ 4, 4, 11))
- return "sub%?.ne %0,%0,%0";
+ return "sub%?.ne\\t%0,%0,%0";
/* ??? might be good for speed on ARC600 too, *if* properly scheduled. */
if ((optimize_size && (!TARGET_ARC600_FAMILY))
&& rtx_equal_p (operands[1], constm1_rtx)
&& GET_CODE (operands[3]) == LTU)
- return "sbc.cs %0,%0,%0";
- return "mov.%d3 %0,%1";
+ return "sbc.cs\\t%0,%0,%0";
+ return "mov.%d3\\t%0,%1";
}
[(set_attr "type" "cmove,cmove")
(set_attr "length" "4,8")])
@@ -1734,11 +1734,11 @@ archs4x, archs4xd"
the first register operand 0 is the same as the second register of
operand 1, we must copy in the opposite order. */
if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
- return \"mov.%d3 %R0,%R1\;mov.%d3 %0,%1\";
+ return \"mov.%d3\\t%R0,%R1\;mov.%d3\\t%0,%1\";
else
- return \"mov.%d3 %0,%1\;mov.%d3 %R0,%R1\";
+ return \"mov.%d3\\t%0,%1\;mov.%d3\\t%R0,%R1\";
case 1 :
- return \"mov.%d3 %L0,%L1\;mov.%d3 %H0,%H1\";
+ return \"mov.%d3\\t%L0,%L1\;mov.%d3\\t%H0,%H1\";
}
@@ -1755,8 +1755,8 @@ archs4x, archs4xd"
(match_operand:SF 2 "register_operand" "0,0")))]
""
"@
- mov.%d3 %0,%1
- mov.%d3 %0,%1 ; %A1"
+ mov.%d3\\t%0,%1
+ mov.%d3\\t%0,%1 ; %A1"
[(set_attr "type" "cmove,cmove")])
(define_insn "*movdfcc_insn"
@@ -1776,12 +1776,11 @@ archs4x, archs4xd"
the first register operand 0 is the same as the second register of
operand 1, we must copy in the opposite order. */
if (REGNO (operands[0]) == REGNO (operands[2]) + 1)
- return \"mov.%d1 %R0,%R2\;mov.%d1 %0,%2\";
+ return \"mov.%d1\\t%R0,%R2\;mov.%d1\\t%0,%2\";
else
- return \"mov.%d1 %0,%2\;mov.%d1 %R0,%R2\";
+ return \"mov.%d1\\t%0,%2\;mov.%d1\\t%R0,%R2\";
case 1 :
- return \"mov.%d1 %L0,%L2\;mov.%d1 %H0,%H2; %A2 \";
-
+ return \"mov.%d1\\t%L0,%L2\;mov.%d1\\t%H0,%H2; %A2\";
}
}"
[(set_attr "type" "cmove,cmove")
@@ -1944,7 +1943,7 @@ archs4x, archs4xd"
[(set (match_operand:SI 0 "dest_reg_operand" "=q,w,w")
(abs:SI (match_operand:SI 1 "nonmemory_operand" "q,cL,Cal")))]
""
- "abs%? %0,%1"
+ "abs%?\\t%0,%1"
[(set_attr "type" "two_cycle_core")
(set_attr "length" "*,4,8")
(set_attr "iscompact" "true,false,false")])
@@ -2031,7 +2030,7 @@ archs4x, archs4xd"
(mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "0,r,0, 0, r"))
(match_operand:HI 2 "short_const_int_operand" "L,L,I,C16,C16")))]
"TARGET_MPYW"
- "mpyw%? %0,%1,%2"
+ "mpyw%?\\t%0,%1,%2"
[(set_attr "length" "4,4,4,8,8")
(set_attr "iscompact" "false")
(set_attr "type" "mul16_em")
@@ -2044,7 +2043,7 @@ archs4x, archs4xd"
(mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "0,0,r"))
(sign_extend:SI (match_operand:HI 2 "nonmemory_operand" "q,r,r"))))]
"TARGET_MPYW"
- "mpyw%? %0,%1,%2"
+ "mpyw%?\\t%0,%1,%2"
[(set_attr "length" "*,4,4")
(set_attr "iscompact" "maybe,false,false")
(set_attr "type" "mul16_em")
@@ -2071,7 +2070,7 @@ archs4x, archs4xd"
(mult:SI (zero_extend:SI (match_operand:HI 1 "register_operand" "%0, r, 0, 0, r"))
(match_operand:HI 2 "short_unsigned_const_operand" " L, L,J12,J16,J16")))]
"TARGET_MPYW"
- "mpyuw%? %0,%1,%2"
+ "mpyuw%?\\t%0,%1,%2"
[(set_attr "length" "4,4,4,8,8")
(set_attr "iscompact" "false")
(set_attr "type" "mul16_em")
@@ -2084,7 +2083,7 @@ archs4x, archs4xd"
(mult:SI (zero_extend:SI (match_operand:HI 1 "register_operand" "%0, 0, r"))
(zero_extend:SI (match_operand:HI 2 "register_operand" "q, r, r"))))]
"TARGET_MPYW"
- "mpyuw%? %0,%1,%2"
+ "mpyuw%?\\t%0,%1,%2"
[(set_attr "length" "*,4,4")
(set_attr "iscompact" "maybe,false,false")
(set_attr "type" "mul16_em")
@@ -2155,7 +2154,7 @@ archs4x, archs4xd"
(const_int 0))))
(clobber (match_operand:SI 3 "acc1_operand" ""))]
"TARGET_MULMAC_32BY16_SET"
- "mululw 0, %0, %1"
+ "mululw\\t0,%0,%1"
[(set_attr "length" "4,4,8")
(set_attr "type" "mulmac_600")
(set_attr "predicable" "no")
@@ -2173,7 +2172,7 @@ archs4x, archs4xd"
(match_dup 2)))
(clobber (match_operand:SI 3 "acc1_operand" ""))]
"TARGET_MULMAC_32BY16_SET"
- "machlw%? 0, %0, %1"
+ "machlw%?\\t0,%0,%1"
[(set_attr "length" "4,4,8")
(set_attr "type" "mulmac_600, mulmac_600, mulmac_600")
(set_attr "predicable" "no, no, yes")
@@ -2242,7 +2241,7 @@ archs4x, archs4xd"
(sign_extend:DI (match_operand:SI 0 "register_operand" "%q, c,c, c"))
(sign_extend:DI (match_operand:SI 1 "nonmemory_operand" "q,cL,L,C32"))))]
"TARGET_MUL64_SET"
- "mul64%? \t0, %0, %1"
+ "mul64%?\\t0,%0,%1"
[(set_attr "length" "*,4,4,8")
(set_attr "iscompact" "maybe,false,false,false")
(set_attr "type" "multi,multi,multi,multi")
@@ -2277,7 +2276,7 @@ archs4x, archs4xd"
(zero_extend:DI (match_operand:SI 0 "register_operand" "%c,c,c"))
(zero_extend:DI (match_operand:SI 1 "nonmemory_operand" "cL,L,C32"))))]
"TARGET_MUL64_SET"
- "mulu64%? \t0, %0, %1"
+ "mulu64%?\\t0,%0,%1"
[(set_attr "length" "4,4,8")
(set_attr "iscompact" "false")
(set_attr "type" "umulti")
@@ -2401,7 +2400,7 @@ archs4x, archs4xd"
(const_int 0))))
]
"TARGET_MULMAC_32BY16_SET"
- "mullw%? 0, %0, %1"
+ "mullw%?\\t0,%0,%1"
[(set_attr "length" "4,4,8")
(set_attr "type" "mulmac_600")
(set_attr "predicable" "no,no,yes")
@@ -2429,7 +2428,7 @@ archs4x, archs4xd"
(reg:DI MUL32x16_REG))
(const_int 32) (const_int 32)))]
"TARGET_MULMAC_32BY16_SET"
- "machlw%? %0, %1, %2"
+ "machlw%?\\t%0,%1,%2"
[(set_attr "length" "4,4,8")
(set_attr "type" "mulmac_600")
(set_attr "predicable" "no,no,yes")
@@ -2616,7 +2615,7 @@ archs4x, archs4xd"
(const_int 0))))
]
"TARGET_MULMAC_32BY16_SET"
- "mululw 0, %0, %1"
+ "mululw\\t0,%0,%1"
[(set_attr "length" "4,4,8")
(set_attr "type" "mulmac_600")
(set_attr "predicable" "no")
@@ -2643,7 +2642,7 @@ archs4x, archs4xd"
(reg:DI MUL32x16_REG))
(const_int 32) (const_int 32)))]
"TARGET_MULMAC_32BY16_SET"
- "machulw%? %0, %1, %2"
+ "machulw%?\\t%0,%1,%2"
[(set_attr "length" "4,4,8")
(set_attr "type" "mulmac_600")
(set_attr "predicable" "no,no,yes")
@@ -2849,7 +2848,7 @@ archs4x, archs4xd"
; (plus:SI (ltu:SI (reg:CC_C CC_REG) (const_int 0))
; (match_operand:SI 1 "register_operand" "c")))]
; ""
-; "adc %0,%1,0"
+; "adc\\t%0,%1,0"
; [(set_attr "cond" "use")
; (set_attr "type" "cc_arith")
; (set_attr "length" "4")])
@@ -2955,7 +2954,7 @@ archs4x, archs4xd"
(ltu:SI (match_operand:CC_C 2 "cc_use_register")
(const_int 0))))]
""
- "sbc %0,%1,0"
+ "sbc\\t%0,%1,0"
[(set_attr "cond" "use")
(set_attr "type" "cc_arith")
(set_attr "length" "4")])
@@ -3202,38 +3201,38 @@ archs4x, archs4xd"
switch (which_alternative)
{
case 0: case 5: case 10: case 11: case 16: case 17: case 18:
- return "and%? %0,%1,%2";
+ return "and%?\\t%0,%1,%2";
case 1: case 6:
- return "and%? %0,%2,%1";
+ return "and%?\\t%0,%2,%1";
case 2:
- return "bmsk%? %0,%1,%Z2";
+ return "bmsk%?\\t%0,%1,%Z2";
case 7: case 12:
if (satisfies_constraint_C2p (operands[2]))
{
operands[2] = GEN_INT ((~INTVAL (operands[2])));
- return "bmskn%? %0,%1,%Z2";
+ return "bmskn%?\\t%0,%1,%Z2";
}
else
{
- return "bmsk%? %0,%1,%Z2";
+ return "bmsk%?\\t%0,%1,%Z2";
}
case 3: case 8: case 13:
- return "bclr%? %0,%1,%M2";
+ return "bclr%?\\t%0,%1,%M2";
case 4:
return (INTVAL (operands[2]) == 0xff
- ? "extb%? %0,%1" : "ext%_%? %0,%1");
- case 9: case 14: return \"bic%? %0,%1,%n2-1\";
+ ? "extb%?\\t%0,%1" : "ext%_%?\\t%0,%1");
+ case 9: case 14: return \"bic%?\\t%0,%1,%n2-1\";
case 15:
- return "movb.cl %0,%1,%p2,%p2,%x2";
+ return "movb.cl\\t%0,%1,%p2,%p2,%x2";
case 19:
const char *tmpl;
if (satisfies_constraint_Ucm (operands[1]))
tmpl = (INTVAL (operands[2]) == 0xff
- ? "xldb%U1 %0,%1" : "xld%_%U1 %0,%1");
+ ? "xldb%U1\\t%0,%1" : "xld%_%U1\\t%0,%1");
else
- tmpl = INTVAL (operands[2]) == 0xff ? "ldb %0,%1" : "ld%_ %0,%1";
+ tmpl = INTVAL (operands[2]) == 0xff ? "ldb\\t%0,%1" : "ld%_\\t%0,%1";
if (TARGET_BIG_ENDIAN)
{
@@ -3279,7 +3278,7 @@ archs4x, archs4xd"
(match_operand:SI 2 "nonmemory_operand" "0,0,0,0,r,r,Cal")))]
""
"@
- bic%?\\t%0, %2, %1 ;;constraint 0
+ bic%?\\t%0,%2,%1 ;;constraint 0
bic%?\\t%0,%2,%1 ;;constraint 1
bic\\t%0,%2,%1 ;;constraint 2, FIXME: will it ever get generated ???
bic%?\\t%0,%2,%1 ;;constraint 3, FIXME: will it ever get generated ???
@@ -3362,7 +3361,7 @@ archs4x, archs4xd"
[(set (match_operand:SI 0 "dest_reg_operand" "=q,w")
(not:SI (match_operand:SI 1 "register_operand" "q,c")))]
""
- "not%? %0,%1"
+ "not%?\\t%0,%1"
[(set_attr "type" "unary,unary")
(set_attr "iscompact" "true,false")])
@@ -3783,7 +3782,7 @@ archs4x, archs4xd"
(compare:CC (match_operand:SI 0 "register_operand" "q, q, h, c, c, q,c")
(match_operand:SI 1 "nonmemory_operand" "cO,hO,Cm1,cI,cL,Cal,Cal")))]
""
- "cmp%? %0,%B1"
+ "cmp%?\\t%0,%B1"
[(set_attr "type" "compare")
(set_attr "iscompact" "true,true,true,false,false,true_limm,false")
(set_attr "predicable" "no,no,no,no,yes,no,yes")
@@ -3796,7 +3795,7 @@ archs4x, archs4xd"
(compare:CC_ZN (match_operand:SI 0 "register_operand" "q,c")
(const_int 0)))]
""
- "tst%? %0,%0"
+ "tst%?\\t%0,%0"
[(set_attr "type" "compare,compare")
(set_attr "iscompact" "true,false")
(set_attr "predicable" "no,yes")
@@ -3812,7 +3811,7 @@ archs4x, archs4xd"
(match_operand:SI 1 "nonmemory_operand" "L,Lc"))
(const_int 0)))]
""
- "btst%? %0,%1"
+ "btst%?\\t%0,%1"
[(set_attr "iscompact" "true,false")
(set_attr "predicable" "no,yes")
(set_attr "cond" "set")
@@ -3825,8 +3824,8 @@ archs4x, archs4xd"
(match_operand:SI 1 "p2_immediate_operand" "O,n")))]
""
"@
- cmp%? %0,%1
- bxor.f 0,%0,%z1"
+ cmp%?\\t%0,%1
+ bxor.f\\t0,%0,%z1"
[(set_attr "type" "compare,compare")
(set_attr "iscompact" "true,false")
(set_attr "cond" "set,set_zn")
@@ -3837,7 +3836,7 @@ archs4x, archs4xd"
(compare:CC_C (match_operand:SI 0 "register_operand" "q, q, h, c, q, c")
(match_operand:SI 1 "nonmemory_operand" "cO,hO,Cm1,cI,Cal,Cal")))]
""
- "cmp%? %0,%1"
+ "cmp%?\\t%0,%1"
[(set_attr "type" "compare")
(set_attr "iscompact" "true,true,true,false,true_limm,false")
(set_attr "cond" "set")
@@ -3947,7 +3946,7 @@ archs4x, archs4xd"
(set (match_operand:SI 0 "dest_reg_operand" "=w,w")
(match_operand:SI 1 "nonmemory_operand" "LRac,?Cal")))]
""
- "mov.%d3 %0,%1"
+ "mov.%d3\\t%0,%1"
[(set_attr "type" "cmove")
(set_attr "length" "4,8")])
@@ -3983,9 +3982,9 @@ archs4x, archs4xd"
(match_operand:SI 2 "nonmemory_operand" "cL,0,0"))))]
""
"@
- sub.%d4 %0,%1,%2
- rsub.%d4 %0,%2,%1
- rsub.%d4 %0,%2,%1"
+ sub.%d4\\t%0,%1,%2
+ rsub.%d4\\t%0,%2,%1
+ rsub.%d4\\t%0,%2,%1"
[(set_attr "cond" "use")
(set_attr "type" "cmove")
(set_attr "length" "4,4,8")])
@@ -3999,7 +3998,7 @@ archs4x, archs4xd"
[(match_operand:SI 1 "register_operand" "0,0")
(match_operand:SI 2 "nonmemory_operand" "cL,Cal")])))]
""
- "%O3.%d5 %0,%1,%2"
+ "%O3.%d5\\t%0,%1,%2"
[(set_attr "cond" "use")
(set_attr "type" "cmove")
(set_attr "length" "4,8")])
@@ -4138,11 +4137,11 @@ archs4x, archs4xd"
[(set (pc) (match_operand:SI 0 "nonmemory_operand" "L,I,Cal,q,r"))]
""
"@
- j%!%* %0
- j%!%* %0
- j%!%* %0
- j%!%* [%0]
- j%!%* [%0]"
+ j%!%*\\t%0
+ j%!%*\\t%0
+ j%!%*\\t%0
+ j%!%*\\t[%0]
+ j%!%*\\t[%0]"
[(set_attr "type" "jump")
(set_attr "iscompact" "false,false,false,maybe,false")
(set_attr "cond" "canuse,canuse_limm,canuse,canuse,canuse")])
@@ -4306,15 +4305,15 @@ archs4x, archs4xd"
(clobber (reg:SI 31))]
""
"@
- jl%!%* [%0]
- jl%!%* [%0]
- jli_s %J0
- sjli %J0
- bl%!%* %P0
- bl%!%* %P0
- jl%!%* %0
- jl%* %0
- jl%! %0"
+ jl%!%*\\t[%0]
+ jl%!%*\\t[%0]
+ jli_s\\t%J0
+ sjli\\t%J0
+ bl%!%*\\t%P0
+ bl%!%*\\t%P0
+ jl%!%*\\t%0
+ jl%*\\t%0
+ jl%!\\t%0"
[(set_attr "type" "call,call,call_no_delay_slot,call_no_delay_slot,call,call,call,call,call_no_delay_slot")
(set_attr "iscompact" "maybe,*,true,*,*,*,*,*,*")
(set_attr "predicable" "no,yes,no,no,yes,no,yes,no,yes")
@@ -4350,15 +4349,15 @@ archs4x, archs4xd"
(clobber (reg:SI 31))]
""
"@
- jl%!%* [%1]
- jl%!%* [%1]
- jli_s %J1
- sjli %J1
- bl%!%* %P1;1
- bl%!%* %P1;1
- jl%!%* %1
- jl%* %1
- jl%! %1"
+ jl%!%*\\t[%1]
+ jl%!%*\\t[%1]
+ jli_s\\t%J1
+ sjli\\t%J1
+ bl%!%*\\t%P1;1
+ bl%!%*\\t%P1;1
+ jl%!%*\\t%1
+ jl%*\\t%1
+ jl%!\\t%1"
[(set_attr "type" "call,call,call_no_delay_slot,call_no_delay_slot,call,call,call,call,call_no_delay_slot")
(set_attr "iscompact" "maybe,*,true,false,*,*,*,*,*")
(set_attr "predicable" "no,yes,no,no,yes,no,yes,no,yes")
@@ -4681,9 +4680,9 @@ archs4x, archs4xd"
UNSPEC_ARC_DIVAW))]
"TARGET_ARC700 || TARGET_EA_SET"
"@
- divaw \t%0, %1, %2
- divaw \t%0, %1, %2
- divaw \t%0, %1, %2"
+ divaw\\t%0,%1,%2
+ divaw\\t%0,%1,%2
+ divaw\\t%0,%1,%2"
[(set_attr "length" "4,8,8")
(set_attr "type" "divaw,divaw,divaw")])
@@ -4692,9 +4691,9 @@ archs4x, archs4xd"
VUNSPEC_ARC_FLAG)]
""
"@
- flag%? %0
- flag %0
- flag%? %0"
+ flag%?\\t%0
+ flag\\t%0
+ flag%?\\t%0"
[(set_attr "length" "4,4,8")
(set_attr "type" "misc,misc,misc")
(set_attr "predicable" "yes,no,yes")
@@ -4744,7 +4743,7 @@ archs4x, archs4xd"
[(unspec_volatile [(match_operand:SI 0 "nonmemory_operand" "Lr")]
VUNSPEC_ARC_SLEEP)]
""
- "sleep %0"
+ "sleep\\t%0"
[(set_attr "length" "4")
(set_attr "type" "misc")])
@@ -4768,8 +4767,8 @@ archs4x, archs4xd"
""
"*
if (check_if_valid_regno_const (operands, 1))
- return \"mov \tr%1, %0\";
- return \"mov \tr%1, %0\";
+ return \"mov\\tr%1,%0\";
+ return \"mov\\tr%1,%0\";
"
[(set_attr "length" "4")
(set_attr "type" "unary")])
@@ -4857,7 +4856,7 @@ archs4x, archs4xd"
{
if (which_alternative == 0)
{
- return \"trap_s %0\";
+ return \"trap_s\\t%0\";
}
/* Keep this message in sync with the one in arc.cc:arc_expand_builtin,
@@ -5062,7 +5061,7 @@ archs4x, archs4xd"
return \"br%d0%*\\t%1,%B2,%l3\";
/* FALLTHRU */
case 6: case 10:
- case 12:return \"cmp%? %1, %B2\\n\\tb%d0%*\\t%l3 ;br%d0 out of range\";
+ case 12:return \"cmp%?\\t%1,%B2\\n\\tb%d0%*\\t%l3 ;br%d0 out of range\";
default: fprintf (stderr, \"unexpected length %d\\n\", get_attr_length (insn)); fflush (stderr); gcc_unreachable ();
}
"
@@ -5124,9 +5123,9 @@ archs4x, archs4xd"
switch (get_attr_length (insn))
{
case 4: return (GET_CODE (operands[3]) == EQ
- ? \"bbit0%* %1,%2,%0\" : \"bbit1%* %1,%2,%0\");
+ ? \"bbit0%*\\t%1,%2,%0\" : \"bbit1%*\\t%1,%2,%0\");
case 6:
- case 8: return \"btst%? %1,%2\n\tb%d3%* %0; bbit out of range\";
+ case 8: return \"btst%?\\t%1,%2\n\tb%d3%*\\t%0; bbit out of range\";
default: gcc_unreachable ();
}
}
@@ -5423,7 +5422,7 @@ archs4x, archs4xd"
[(set (match_operand:SI 0 "register_operand" "= r,r")
(bswap:SI (match_operand:SI 1 "nonmemory_operand" "rL,Cal")))]
"TARGET_V2 && TARGET_SWAP"
- "swape %0, %1"
+ "swape\\t%0,%1"
[(set_attr "length" "4,8")
(set_attr "type" "two_cycle_core")])
@@ -5456,9 +5455,9 @@ archs4x, archs4xd"
"TARGET_HS"
{
if (INTVAL (operands[2]))
- return "prefetchw [%0, %1]";
+ return "prefetchw\\t[%0, %1]";
else
- return "prefetch [%0, %1]";
+ return "prefetch\\t[%0, %1]";
}
[(set_attr "type" "load")
(set_attr "length" "4,4,8")])
@@ -5471,9 +5470,9 @@ archs4x, archs4xd"
{
operands[0] = gen_rtx_MEM (SImode, operands[0]);
if (INTVAL (operands[1]))
- return "prefetchw%U0 %0";
+ return "prefetchw%U0\\t%0";
else
- return "prefetch%U0 %0";
+ return "prefetch%U0\\t%0";
}
[(set_attr "type" "load")
(set_attr "length" "8")])
@@ -5483,7 +5482,7 @@ archs4x, archs4xd"
(div:SI (match_operand:SI 1 "nonmemory_operand" "0,r,Cal,0,r,0, 0, r")
(match_operand:SI 2 "nonmemory_operand" "r,r, r,L,L,I,Cal,Cal")))]
"TARGET_DIVREM"
- "div%? %0, %1, %2"
+ "div%?\\t%0,%1,%2"
[(set_attr "length" "4,4,8,4,4,4,8,8")
(set_attr "iscompact" "false")
(set_attr "type" "div_rem")
@@ -5496,7 +5495,7 @@ archs4x, archs4xd"
(udiv:SI (match_operand:SI 1 "nonmemory_operand" "0,r,Cal,0,r,0, 0, r")
(match_operand:SI 2 "nonmemory_operand" "r,r, r,L,L,I,Cal,Cal")))]
"TARGET_DIVREM"
- "divu%? %0, %1, %2"
+ "divu%?\\t%0,%1,%2"
[(set_attr "length" "4,4,8,4,4,4,8,8")
(set_attr "iscompact" "false")
(set_attr "type" "div_rem")
@@ -5509,7 +5508,7 @@ archs4x, archs4xd"
(mod:SI (match_operand:SI 1 "nonmemory_operand" "0,r,Cal,0,r,0, 0, r")
(match_operand:SI 2 "nonmemory_operand" "r,r, r,L,L,I,Cal,Cal")))]
"TARGET_DIVREM"
- "rem%? %0, %1, %2"
+ "rem%?\\t%0,%1,%2"
[(set_attr "length" "4,4,8,4,4,4,8,8")
(set_attr "iscompact" "false")
(set_attr "type" "div_rem")
@@ -5522,7 +5521,7 @@ archs4x, archs4xd"
(umod:SI (match_operand:SI 1 "nonmemory_operand" "0,r,Cal,0,r,0, 0, r")
(match_operand:SI 2 "nonmemory_operand" "r,r, r,L,L,I,Cal,Cal")))]
"TARGET_DIVREM"
- "remu%? %0, %1, %2"
+ "remu%?\\t%0,%1,%2"
[(set_attr "length" "4,4,8,4,4,4,8,8")
(set_attr "iscompact" "false")
(set_attr "type" "div_rem")
@@ -5538,7 +5537,7 @@ archs4x, archs4xd"
(arcCC_cond:SI (match_operand:SI 1 "register_operand" "0,r,0,r,0,0,r")
(match_operand:SI 2 "nonmemory_operand" "r,r,L,L,I,n,n")))]
"TARGET_V2 && TARGET_CODE_DENSITY"
- "set<code>%? %0, %1, %2"
+ "set<code>%?\\t%0,%1,%2"
[(set_attr "length" "4,4,4,4,4,8,8")
(set_attr "iscompact" "false")
(set_attr "type" "compare")
@@ -5551,7 +5550,7 @@ archs4x, archs4xd"
(ltu:SI (match_operand:SI 1 "register_operand" "0,r,0,r,0, 0, r")
(match_operand:SI 2 "nonmemory_operand" "r,r,L,L,I, n, n")))]
"TARGET_V2 && TARGET_CODE_DENSITY"
- "setlo%? %0, %1, %2"
+ "setlo%?\\t%0,%1,%2"
[(set_attr "length" "4,4,4,4,4,8,8")
(set_attr "iscompact" "false")
(set_attr "type" "compare")
@@ -5564,7 +5563,7 @@ archs4x, archs4xd"
(geu:SI (match_operand:SI 1 "register_operand" "0,r,0,r,0, 0, r")
(match_operand:SI 2 "nonmemory_operand" "r,r,L,L,I, n, n")))]
"TARGET_V2 && TARGET_CODE_DENSITY"
- "seths%? %0, %1, %2"
+ "seths%?\\t%0,%1,%2"
[(set_attr "length" "4,4,4,4,4,8,8")
(set_attr "iscompact" "false")
(set_attr "type" "compare")
@@ -5578,7 +5577,7 @@ archs4x, archs4xd"
(gtu:SI (match_operand:SI 1 "register_operand" "r,r, r,r")
(match_operand:SI 2 "nonmemory_operand" "0,r,C62,n")))]
"TARGET_V2 && TARGET_CODE_DENSITY"
- "setlo%? %0, %2, %1"
+ "setlo%?\\t%0,%2,%1"
"reload_completed
&& CONST_INT_P (operands[2])
&& satisfies_constraint_C62 (operands[2])"
@@ -5601,7 +5600,7 @@ archs4x, archs4xd"
(leu:SI (match_operand:SI 1 "register_operand" "r,r, r,r")
(match_operand:SI 2 "nonmemory_operand" "0,r,C62,n")))]
"TARGET_V2 && TARGET_CODE_DENSITY"
- "seths%? %0, %2, %1"
+ "seths%?\\t%0,%2,%1"
"reload_completed
&& CONST_INT_P (operands[2])
&& satisfies_constraint_C62 (operands[2])"
@@ -5664,9 +5663,9 @@ archs4x, archs4xd"
VUNSPEC_ARC_KFLAG)]
"TARGET_V2"
"@
- kflag%? %0
- kflag %0
- kflag%? %0"
+ kflag%?\\t%0
+ kflag\\t%0
+ kflag%?\\t%0"
[(set_attr "length" "4,4,8")
(set_attr "type" "misc,misc,misc")
(set_attr "predicable" "yes,no,yes")
@@ -5677,7 +5676,7 @@ archs4x, archs4xd"
(unspec_volatile:SI [(match_operand:SI 1 "immediate_operand" "N")]
VUNSPEC_ARC_CLRI))]
"TARGET_V2"
- "clri %0"
+ "clri\\t%0"
[(set_attr "length" "4")
(set_attr "type" "misc")])
@@ -5939,7 +5938,7 @@ archs4x, archs4xd"
(match_operand:SI 2 "const_int_operand" "n")
(match_operand:SI 3 "const_int_operand" "n")))]
"TARGET_NPS_BITOPS && INTVAL (operands[2]) + INTVAL (operands[3]) <= 32"
- "movb.cl %0,%1,0,%3,%2"
+ "movb.cl\\t%0,%1,0,%3,%2"
[(set_attr "type" "shift")
(set_attr "length" "4")])
@@ -5965,8 +5964,8 @@ archs4x, archs4xd"
&& (register_operand (operands[3], SImode)
|| satisfies_constraint_C18 (operands[1]))"
"@
- movbi %0,%0,%3,%2,%1
- movb %0,%0,%3,%2,0,%1"
+ movbi\\t%0,%0,%3,%2,%1
+ movb\\t%0,%0,%3,%2,0,%1"
[(set_attr "type" "shift")
(set_attr "length" "4")])
@@ -5978,7 +5977,7 @@ archs4x, archs4xd"
(match_dup 1)
(match_operand:SI 4 "const_int_operand" "n")))]
"TARGET_NPS_BITOPS"
- "movb %0,%0,%3,%2,%4,%1"
+ "movb\\t%0,%0,%3,%2,%4,%1"
[(set_attr "type" "shift")
(set_attr "length" "4")])
@@ -5990,7 +5989,7 @@ archs4x, archs4xd"
(match_dup 1)
(match_operand:SI 4 "const_int_operand" "n")))]
"TARGET_NPS_BITOPS"
- "movb %0,%0,%3,%2,%4,%1"
+ "movb\\t%0,%0,%3,%2,%4,%1"
[(set_attr "type" "shift")
(set_attr "length" "4")])
@@ -6002,7 +6001,7 @@ archs4x, archs4xd"
(match_operand:SI 4 "const_int_operand" "n")))]
"TARGET_NPS_BITOPS
&& INTVAL (operands[4]) + INTVAL (operands[1]) <= 32"
- "movb %0,%0,%3,%2,%4,%1"
+ "movb\\t%0,%0,%3,%2,%4,%1"
[(set_attr "type" "shift")
(set_attr "length" "4")])
@@ -6017,7 +6016,7 @@ archs4x, archs4xd"
(match_operand:SI 4 "const_int_operand" "n")))]
"TARGET_NPS_BITOPS
&& INTVAL (operands[4]) + INTVAL (operands[1]) <= 32"
- "movb %0,%0,%3,%2,%4,%1"
+ "movb\\t%0,%0,%3,%2,%4,%1"
[(set_attr "type" "shift")
(set_attr "length" "4")])
@@ -6048,7 +6047,7 @@ archs4x, archs4xd"
(match_operand:SI 7 "const_int_operand" "n")))]
"TARGET_NPS_BITOPS"
{
- output_asm_insn ("mrgb %0,%0,%6,%2,%3,%1,%5,%7,%4", operands);
+ output_asm_insn ("mrgb\\t%0,%0,%6,%2,%3,%1,%5,%7,%4", operands);
/* The ;%? updates the known unalignment. */
return arc_short_long (insn, ";%?", "nop_s");
}
@@ -6167,7 +6166,7 @@ archs4x, archs4xd"
(ashift:SI (match_operand:SI 1 "register_operand" "q,c")
(const_int 1)))]
""
- "asl%? %0,%1"
+ "asl%?\\t%0,%1"
[(set_attr "type" "unary")
(set_attr "iscompact" "maybe,false")
(set_attr "length" "*,4")
@@ -6189,7 +6188,7 @@ archs4x, archs4xd"
(lshiftrt:SI (match_operand:SI 1 "register_operand" "q,c")
(const_int 1)))]
""
- "lsr%? %0,%1"
+ "lsr%?\\t%0,%1"
[(set_attr "type" "unary")
(set_attr "iscompact" "maybe,false")
(set_attr "predicable" "no,no")])
@@ -6211,7 +6210,7 @@ archs4x, archs4xd"
(ashiftrt:SI (match_operand:SI 1 "register_operand" "q,c")
(const_int 1)))]
""
- "asr%? %0,%1"
+ "asr%?\\t%0,%1"
[(set_attr "type" "unary")
(set_attr "iscompact" "maybe,false")
(set_attr "predicable" "no,no")])
@@ -6373,7 +6372,7 @@ archs4x, archs4xd"
(sign_extend:DI (match_dup 2)))
(reg:DI ARCV2_ACC)))]
"TARGET_PLUS_MACD"
- "macd %0,%1,%2"
+ "macd\\t%0,%1,%2"
[(set_attr "length" "4,4,8")
(set_attr "type" "multi")
(set_attr "predicable" "yes,no,no")
@@ -6416,7 +6415,7 @@ archs4x, archs4xd"
(reg:DI ARCV2_ACC))))
(clobber (reg:DI ARCV2_ACC))]
"TARGET_PLUS_DMPY"
- "mac %0,%1,%2"
+ "mac\\t%0,%1,%2"
[(set_attr "length" "4,8")
(set_attr "type" "multi")
(set_attr "predicable" "no")
@@ -6475,7 +6474,7 @@ archs4x, archs4xd"
(zero_extend:DI (match_dup 2)))
(reg:DI ARCV2_ACC)))]
"TARGET_PLUS_MACD"
- "macdu %0,%1,%2"
+ "macdu\\t%0,%1,%2"
[(set_attr "length" "4,4,8")
(set_attr "type" "multi")
(set_attr "predicable" "yes,no,no")
@@ -6518,7 +6517,7 @@ archs4x, archs4xd"
(reg:DI ARCV2_ACC))))
(clobber (reg:DI ARCV2_ACC))]
"TARGET_PLUS_DMPY"
- "macu %0,%1,%2"
+ "macu\\t%0,%1,%2"
[(set_attr "length" "4,8")
(set_attr "type" "multi")
(set_attr "predicable" "no")
diff --git a/gcc/config/arm/aarch-common-protos.h b/gcc/config/arm/aarch-common-protos.h
index f8cb656..6e44d29 100644
--- a/gcc/config/arm/aarch-common-protos.h
+++ b/gcc/config/arm/aarch-common-protos.h
@@ -155,7 +155,7 @@ struct cpu_cost_table
rtx_insn *arm_md_asm_adjust (vec<rtx> &outputs, vec<rtx> & /*inputs*/,
vec<machine_mode> & /*input_modes*/,
- vec<const char *> &constraints,
+ vec<const char *> &constraints, vec<rtx> &,
vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs,
location_t loc);
diff --git a/gcc/config/arm/aarch-common.cc b/gcc/config/arm/aarch-common.cc
index 5b96ff4..d68b704 100644
--- a/gcc/config/arm/aarch-common.cc
+++ b/gcc/config/arm/aarch-common.cc
@@ -534,7 +534,8 @@ arm_mac_accumulator_is_mul_result (rtx producer, rtx consumer)
rtx_insn *
arm_md_asm_adjust (vec<rtx> &outputs, vec<rtx> & /*inputs*/,
vec<machine_mode> & /*input_modes*/,
- vec<const char *> &constraints, vec<rtx> & /*clobbers*/,
+ vec<const char *> &constraints,
+ vec<rtx> & /*uses*/, vec<rtx> & /*clobbers*/,
HARD_REG_SET & /*clobbered_regs*/, location_t loc)
{
bool saw_asm_flag = false;
diff --git a/gcc/config/arm/arm.cc b/gcc/config/arm/arm.cc
index 25a1ad7..6e3e2e8 100644
--- a/gcc/config/arm/arm.cc
+++ b/gcc/config/arm/arm.cc
@@ -328,11 +328,11 @@ static HOST_WIDE_INT arm_constant_alignment (const_tree, HOST_WIDE_INT);
static rtx_insn *thumb1_md_asm_adjust (vec<rtx> &, vec<rtx> &,
vec<machine_mode> &,
vec<const char *> &, vec<rtx> &,
- HARD_REG_SET &, location_t);
+ vec<rtx> &, HARD_REG_SET &, location_t);
static const char *arm_identify_fpu_from_isa (sbitmap);
/* Table of machine attributes. */
-static const struct attribute_spec arm_attribute_table[] =
+static const attribute_spec arm_gnu_attributes[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
@@ -380,8 +380,17 @@ static const struct attribute_spec arm_attribute_table[] =
arm_handle_cmse_nonsecure_entry, NULL },
{ "cmse_nonsecure_call", 0, 0, false, false, false, true,
arm_handle_cmse_nonsecure_call, NULL },
- { "Advanced SIMD type", 1, 1, false, true, false, true, NULL, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
+ { "Advanced SIMD type", 1, 1, false, true, false, true, NULL, NULL }
+};
+
+static const scoped_attribute_specs arm_gnu_attribute_table =
+{
+ "gnu", { arm_gnu_attributes }
+};
+
+static const scoped_attribute_specs *const arm_attribute_table[] =
+{
+ &arm_gnu_attribute_table
};
/* Initialize the GCC target structure. */
@@ -34637,7 +34646,8 @@ arm_stack_protect_guard (void)
rtx_insn *
thumb1_md_asm_adjust (vec<rtx> &outputs, vec<rtx> & /*inputs*/,
vec<machine_mode> & /*input_modes*/,
- vec<const char *> &constraints, vec<rtx> & /*clobbers*/,
+ vec<const char *> &constraints,
+ vec<rtx> &, vec<rtx> & /*clobbers*/,
HARD_REG_SET & /*clobbered_regs*/, location_t /*loc*/)
{
for (unsigned i = 0, n = outputs.length (); i < n; ++i)
diff --git a/gcc/config/avr/avr.cc b/gcc/config/avr/avr.cc
index a297f4e..c5e9ccf 100644
--- a/gcc/config/avr/avr.cc
+++ b/gcc/config/avr/avr.cc
@@ -10442,7 +10442,7 @@ avr_eval_addr_attrib (rtx x)
/* AVR attributes. */
-static const struct attribute_spec avr_attribute_table[] =
+TARGET_GNU_ATTRIBUTES (avr_attribute_table,
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
@@ -10467,9 +10467,8 @@ static const struct attribute_spec avr_attribute_table[] =
{ "address", 1, 1, true, false, false, false,
avr_handle_addr_attribute, NULL },
{ "absdata", 0, 0, true, false, false, false,
- avr_handle_absdata_attribute, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ avr_handle_absdata_attribute, NULL }
+});
/* Return true if we support address space AS for the architecture in effect
@@ -15086,6 +15085,7 @@ static rtx_insn *
avr_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
vec<machine_mode> & /*input_modes*/,
vec<const char *> &/*constraints*/,
+ vec<rtx> &/*uses*/,
vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs,
location_t /*loc*/)
{
diff --git a/gcc/config/bfin/bfin.cc b/gcc/config/bfin/bfin.cc
index 5718bab..c02136f 100644
--- a/gcc/config/bfin/bfin.cc
+++ b/gcc/config/bfin/bfin.cc
@@ -4896,7 +4896,7 @@ bfin_handle_l2_attribute (tree *node, tree ARG_UNUSED (name),
}
/* Table of valid machine attributes. */
-static const struct attribute_spec bfin_attribute_table[] =
+TARGET_GNU_ATTRIBUTES (bfin_attribute_table,
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
@@ -4921,9 +4921,8 @@ static const struct attribute_spec bfin_attribute_table[] =
bfin_handle_l1_data_attribute, NULL },
{ "l1_data_B", 0, 0, true, false, false, false,
bfin_handle_l1_data_attribute, NULL },
- { "l2", 0, 0, true, false, false, false, bfin_handle_l2_attribute, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ { "l2", 0, 0, true, false, false, false, bfin_handle_l2_attribute, NULL }
+});
/* Implementation of TARGET_ASM_INTEGER. When using FD-PIC, we need to
tell the assembler to generate pointers to function descriptors in
diff --git a/gcc/config/bpf/bpf.cc b/gcc/config/bpf/bpf.cc
index 223a43c..f7a5c77 100644
--- a/gcc/config/bpf/bpf.cc
+++ b/gcc/config/bpf/bpf.cc
@@ -140,7 +140,7 @@ bpf_handle_preserve_access_index_attribute (tree *node, tree name,
/* Target-specific attributes. */
-static const struct attribute_spec bpf_attribute_table[] =
+TARGET_GNU_ATTRIBUTES (bpf_attribute_table,
{
/* Syntax: { name, min_len, max_len, decl_required, type_required,
function_type_required, affects_type_identity, handler,
@@ -157,11 +157,8 @@ static const struct attribute_spec bpf_attribute_table[] =
/* Support for `naked' function attribute. */
{ "naked", 0, 1, false, false, false, false,
- bpf_handle_fndecl_attribute, NULL },
-
- /* The last attribute spec is set to be NULL. */
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ bpf_handle_fndecl_attribute, NULL }
+});
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE bpf_attribute_table
@@ -1100,6 +1097,61 @@ bpf_debug_unwind_info ()
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
+/* Implement target hook TARGET_ASM_NAMED_SECTION. */
+
+static void
+bpf_asm_named_section (const char *name, unsigned int flags,
+ tree decl)
+{
+ /* In BPF section names are used to encode the kind of BPF program
+ and other metadata, involving all sort of non alphanumeric
+ characters. This includes for example names like /foo//bar/baz.
+ This makes it necessary to quote section names to make sure the
+ assembler doesn't get confused. For example, the example above
+ would be interpreted unqouted as a section name "/foo" followed
+ by a line comment "//bar/baz".
+
+ Note that we only quote the section name if it contains any
+ character not in the set [0-9a-zA-Z_]. This is because
+ default_elf_asm_named_section generally expects unquoted names
+ and checks for particular names like
+ __patchable_function_entries. */
+
+ bool needs_quoting = false;
+
+ for (const char *p = name; *p != '\0'; ++p)
+ if (!(*p == '_'
+ || (*p >= '0' && *p <= '9')
+ || (*p >= 'a' && *p <= 'z')
+ || (*p >= 'A' && *p <= 'Z')))
+ needs_quoting = true;
+
+ if (needs_quoting)
+ {
+ char *quoted_name
+ = (char *) xcalloc (1, strlen (name) * 2 + 2);
+ char *q = quoted_name;
+
+ *(q++) = '"';
+ for (const char *p = name; *p != '\0'; ++p)
+ {
+ if (*p == '"' || *p == '\\')
+ *(q++) = '\\';
+ *(q++) = *p;
+ }
+ *(q++) = '"';
+ *(q++) = '\0';
+
+ default_elf_asm_named_section (quoted_name, flags, decl);
+ free (quoted_name);
+ }
+ else
+ default_elf_asm_named_section (name, flags, decl);
+}
+
+#undef TARGET_ASM_NAMED_SECTION
+#define TARGET_ASM_NAMED_SECTION bpf_asm_named_section
+
/* Implement target hook small_register_classes_for_mode_p. */
static bool
@@ -1117,6 +1169,22 @@ bpf_small_register_classes_for_mode_p (machine_mode mode)
#define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P \
bpf_small_register_classes_for_mode_p
+static bool
+bpf_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
+ unsigned int align ATTRIBUTE_UNUSED,
+ enum by_pieces_operation op,
+ bool speed_p)
+{
+ if (op != COMPARE_BY_PIECES)
+ return default_use_by_pieces_infrastructure_p (size, align, op, speed_p);
+
+ return size <= COMPARE_MAX_PIECES;
+}
+
+#undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
+#define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
+ bpf_use_by_pieces_infrastructure_p
+
/* Finally, build the GCC target. */
struct gcc_target targetm = TARGET_INITIALIZER;
diff --git a/gcc/config/bpf/bpf.h b/gcc/config/bpf/bpf.h
index 82702aa..d175e99 100644
--- a/gcc/config/bpf/bpf.h
+++ b/gcc/config/bpf/bpf.h
@@ -393,7 +393,7 @@ enum reg_class
/*** The Overall Framework of an Assembler File. */
-#define ASM_COMMENT_START ";"
+#define ASM_COMMENT_START "#"
/* Output to assembler file text saying following lines
may contain character constants, extra white space, comments, etc. */
@@ -489,6 +489,11 @@ enum reg_class
locations. */
#define MOVE_MAX 8
+/* Allow upto 1024 bytes moves to occur using by_pieces
+ infrastructure. This mimics clang behaviour when using
+ __builtin_memcmp. */
+#define COMPARE_MAX_PIECES 1024
+
/* An alias for the machine mode for pointers. */
#define Pmode DImode
diff --git a/gcc/config/bpf/core-builtins.cc b/gcc/config/bpf/core-builtins.cc
index a224847..1376c93 100644
--- a/gcc/config/bpf/core-builtins.cc
+++ b/gcc/config/bpf/core-builtins.cc
@@ -1473,8 +1473,6 @@ tree
bpf_resolve_overloaded_core_builtin (location_t loc, tree fndecl,
void *arglist)
{
- remove_parser_plugin ();
-
if (!bpf_require_core_support ())
return error_mark_node;
@@ -1613,7 +1611,7 @@ core_mark_as_access_index (tree expr)
|| TREE_CODE (expr) == INDIRECT_REF)
expr = TREE_OPERAND (expr, 0);
- if (bpf_enum_mappings->get (expr) == NULL)
+ if (core_access_index_map->get (expr) == NULL)
core_access_index_map->put (expr, NULL_TREE);
}
@@ -1688,6 +1686,7 @@ make_gimple_core_safe_access_index (tree *tp,
static unsigned int
execute_lower_bpf_core (void)
{
+ remove_parser_plugin ();
if (!TARGET_BPF_CORE)
return 0;
diff --git a/gcc/config/cris/cris.cc b/gcc/config/cris/cris.cc
index 8b0f82e..7705c25 100644
--- a/gcc/config/cris/cris.cc
+++ b/gcc/config/cris/cris.cc
@@ -152,7 +152,8 @@ static void cris_function_arg_advance (cumulative_args_t,
const function_arg_info &);
static rtx_insn *cris_md_asm_adjust (vec<rtx> &, vec<rtx> &,
vec<machine_mode> &, vec<const char *> &,
- vec<rtx> &, HARD_REG_SET &, location_t);
+ vec<rtx> &, vec<rtx> &,
+ HARD_REG_SET &, location_t);
static void cris_option_override (void);
@@ -3646,7 +3647,8 @@ cris_function_arg_advance (cumulative_args_t ca_v,
static rtx_insn *
cris_md_asm_adjust (vec<rtx> &outputs, vec<rtx> &inputs,
vec<machine_mode> & /*input_modes*/,
- vec<const char *> &constraints, vec<rtx> &clobbers,
+ vec<const char *> &constraints,
+ vec<rtx> &/*uses*/, vec<rtx> &clobbers,
HARD_REG_SET &clobbered_regs, location_t /*loc*/)
{
/* For the time being, all asms clobber condition codes.
diff --git a/gcc/config/csky/csky.cc b/gcc/config/csky/csky.cc
index 731f47c..ac089fe 100644
--- a/gcc/config/csky/csky.cc
+++ b/gcc/config/csky/csky.cc
@@ -211,16 +211,15 @@ const int csky_debugger_regno[FIRST_PSEUDO_REGISTER] =
/* Table of machine attributes. */
static tree csky_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree csky_handle_isr_attribute (tree *, tree, tree, int, bool *);
-static const struct attribute_spec csky_attribute_table[] =
+TARGET_GNU_ATTRIBUTES (csky_attribute_table,
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
{ "naked", 0, 0, true, false, false, false, csky_handle_fndecl_attribute, NULL },
/* Interrupt Service Routines have special prologue and epilogue requirements. */
{ "interrupt", 0, 1, false, false, false, false, csky_handle_isr_attribute, NULL },
- { "isr", 0, 1, false, false, false, false, csky_handle_isr_attribute, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ { "isr", 0, 1, false, false, false, false, csky_handle_isr_attribute, NULL }
+});
/* A C structure for machine-specific, per-function data.
This is added to the cfun structure. */
diff --git a/gcc/config/epiphany/epiphany.cc b/gcc/config/epiphany/epiphany.cc
index 68e748c..e10e64d 100644
--- a/gcc/config/epiphany/epiphany.cc
+++ b/gcc/config/epiphany/epiphany.cc
@@ -458,7 +458,7 @@ epiphany_init_reg_tables (void)
They unmask them while calling an interruptible
function, though. */
-static const struct attribute_spec epiphany_attribute_table[] =
+TARGET_GNU_ATTRIBUTES (epiphany_attribute_table,
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
@@ -468,9 +468,8 @@ static const struct attribute_spec epiphany_attribute_table[] =
epiphany_handle_forwarder_attribute, NULL },
{ "long_call", 0, 0, false, true, true, false, NULL, NULL },
{ "short_call", 0, 0, false, true, true, false, NULL, NULL },
- { "disinterrupt", 0, 0, false, true, true, true, NULL, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ { "disinterrupt", 0, 0, false, true, true, true, NULL, NULL }
+});
/* Handle an "interrupt" attribute; arguments as in
struct attribute_spec.handler. */
diff --git a/gcc/config/frv/frv.h b/gcc/config/frv/frv.h
index 9795611..93a7c6d 100644
--- a/gcc/config/frv/frv.h
+++ b/gcc/config/frv/frv.h
@@ -1241,6 +1241,7 @@ typedef struct frv_stack {
#if ! __FRV_FDPIC__
#define TRANSFER_FROM_TRAMPOLINE \
extern int Twrite (int, const void *, unsigned); \
+extern void exit (int); \
\
void \
__trampoline_setup (short * addr, int size, int fnaddr, int sc) \
@@ -1284,6 +1285,7 @@ __asm__("\n" \
#else
#define TRANSFER_FROM_TRAMPOLINE \
extern int Twrite (int, const void *, unsigned); \
+extern void exit (int); \
\
void \
__trampoline_setup (addr, size, fnaddr, sc) \
diff --git a/gcc/config/gcn/driver-gcn.cc b/gcc/config/gcn/driver-gcn.cc
deleted file mode 100644
index 837633a..0000000
--- a/gcc/config/gcn/driver-gcn.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-/* Subroutines for the gcc driver.
- Copyright (C) 2018-2023 Free Software Foundation, Inc.
-
-This file is part of GCC.
-
-GCC is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 3, or (at your option)
-any later version.
-
-GCC is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING3. If not see
-<http://www.gnu.org/licenses/>. */
-
-#include "config.h"
-#include "system.h"
-#include "coretypes.h"
-#include "tm.h"
-
-const char *
-last_arg_spec_function (int argc, const char **argv)
-{
- if (argc == 0)
- return NULL;
-
- return argv[argc-1];
-}
diff --git a/gcc/config/gcn/gcn-builtins.def b/gcc/config/gcn/gcn-builtins.def
index 636a8e7..471457d 100644
--- a/gcc/config/gcn/gcn-builtins.def
+++ b/gcc/config/gcn/gcn-builtins.def
@@ -164,6 +164,8 @@ DEF_BUILTIN (FIRST_CALL_THIS_THREAD_P, -1, "first_call_this_thread_p", B_INSN,
_A1 (GCN_BTI_BOOL), gcn_expand_builtin_1)
DEF_BUILTIN (KERNARG_PTR, -1, "kernarg_ptr", B_INSN, _A1 (GCN_BTI_VOIDPTR),
gcn_expand_builtin_1)
+DEF_BUILTIN (DISPATCH_PTR, -1, "dispatch_ptr", B_INSN, _A1 (GCN_BTI_VOIDPTR),
+ gcn_expand_builtin_1)
DEF_BUILTIN (GET_STACK_LIMIT, -1, "get_stack_limit", B_INSN,
_A1 (GCN_BTI_VOIDPTR), gcn_expand_builtin_1)
diff --git a/gcc/config/gcn/gcn-hsa.h b/gcc/config/gcn/gcn-hsa.h
index aa1294c..4d72299 100644
--- a/gcc/config/gcn/gcn-hsa.h
+++ b/gcc/config/gcn/gcn-hsa.h
@@ -88,7 +88,7 @@ extern unsigned int gcn_local_sym_hash (const char *name);
/* Use LLVM assembler and linker options. */
#define ASM_SPEC "-triple=amdgcn--amdhsa " \
- "%:last_arg(%{march=*:-mcpu=%*}) " \
+ "%{march=*:-mcpu=%*} " \
"%{!march=*|march=fiji:--amdhsa-code-object-version=3} " \
"%{" NO_XNACK XNACKOPT "}" \
"%{" NO_SRAM_ECC SRAMOPT "} " \
@@ -102,12 +102,6 @@ extern unsigned int gcn_local_sym_hash (const char *name);
#define ENDFILE_SPEC ""
#define STANDARD_STARTFILE_PREFIX_2 ""
-/* The LLVM assembler rejects multiple -mcpu options, so we must drop
- all but the last. */
-extern const char *last_arg_spec_function (int argc, const char **argv);
-#define EXTRA_SPEC_FUNCTIONS \
- { "last_arg", last_arg_spec_function },
-
#undef LOCAL_INCLUDE_DIR
/* FIXME: Review debug info settings.
diff --git a/gcc/config/gcn/gcn.cc b/gcc/config/gcn/gcn.cc
index 22d2b6e..031b405 100644
--- a/gcc/config/gcn/gcn.cc
+++ b/gcc/config/gcn/gcn.cc
@@ -110,7 +110,8 @@ gcn_init_machine_status (void)
f = ggc_cleared_alloc<machine_function> ();
- if (TARGET_GCN3)
+ // FIXME: re-enable global addressing with safety for LDS-flat addresses
+ //if (TARGET_GCN3)
f->use_flat_addressing = true;
return f;
@@ -358,14 +359,12 @@ gcn_handle_amdgpu_hsa_kernel_attribute (tree *node, tree name,
Create target-specific __attribute__ types. */
-static const struct attribute_spec gcn_attribute_table[] = {
+TARGET_GNU_ATTRIBUTES (gcn_attribute_table, {
/* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
affects_type_identity } */
{"amdgpu_hsa_kernel", 0, GCN_KERNEL_ARG_TYPES, false, true,
- true, true, gcn_handle_amdgpu_hsa_kernel_attribute, NULL},
- /* End element. */
- {NULL, 0, 0, false, false, false, false, NULL, NULL}
-};
+ true, true, gcn_handle_amdgpu_hsa_kernel_attribute, NULL}
+});
/* }}} */
/* {{{ Registers and modes. */
@@ -4881,6 +4880,19 @@ gcn_expand_builtin_1 (tree exp, rtx target, rtx /*subtarget */ ,
}
return ptr;
}
+ case GCN_BUILTIN_DISPATCH_PTR:
+ {
+ rtx ptr;
+ if (cfun->machine->args.reg[DISPATCH_PTR_ARG] >= 0)
+ ptr = gen_rtx_REG (DImode,
+ cfun->machine->args.reg[DISPATCH_PTR_ARG]);
+ else
+ {
+ ptr = gen_reg_rtx (DImode);
+ emit_move_insn (ptr, const0_rtx);
+ }
+ return ptr;
+ }
case GCN_BUILTIN_FIRST_CALL_THIS_THREAD_P:
{
/* Stash a marker in the unused upper 16 bits of s[0:1] to indicate
diff --git a/gcc/config/gcn/gcn.opt b/gcc/config/gcn/gcn.opt
index 7a852c5..e5db6df 100644
--- a/gcc/config/gcn/gcn.opt
+++ b/gcc/config/gcn/gcn.opt
@@ -44,11 +44,11 @@ EnumValue
Enum(gpu_type) String(gfx1030) Value(PROCESSOR_GFX1030)
march=
-Target RejectNegative Joined ToLower Enum(gpu_type) Var(gcn_arch) Init(PROCESSOR_FIJI)
+Target RejectNegative Negative(march=) Joined ToLower Enum(gpu_type) Var(gcn_arch) Init(PROCESSOR_FIJI)
Specify the name of the target GPU.
mtune=
-Target RejectNegative Joined ToLower Enum(gpu_type) Var(gcn_tune) Init(PROCESSOR_FIJI)
+Target RejectNegative Negative(mtune=) Joined ToLower Enum(gpu_type) Var(gcn_tune) Init(PROCESSOR_FIJI)
Specify the name of the target GPU.
m32
diff --git a/gcc/config/gcn/t-gcn-hsa b/gcc/config/gcn/t-gcn-hsa
index 18db707..e2aec71 100644
--- a/gcc/config/gcn/t-gcn-hsa
+++ b/gcc/config/gcn/t-gcn-hsa
@@ -16,10 +16,6 @@
GTM_H += $(HASH_TABLE_H)
-driver-gcn.o: $(srcdir)/config/gcn/driver-gcn.cc
- $(COMPILE) $<
- $(POSTCOMPILE)
-
CFLAGS-mkoffload.o += $(DRIVER_DEFINES) \
-DGCC_INSTALL_NAME=\"$(GCC_INSTALL_NAME)\"
mkoffload.o: $(srcdir)/config/gcn/mkoffload.cc
diff --git a/gcc/config/h8300/h8300.cc b/gcc/config/h8300/h8300.cc
index 4bbb1b7..5936cdc 100644
--- a/gcc/config/h8300/h8300.cc
+++ b/gcc/config/h8300/h8300.cc
@@ -4909,7 +4909,7 @@ h8300_insert_attributes (tree node, tree *attributes)
tiny_data: This variable lives in the tiny data area and can be
referenced with 16-bit absolute memory references. */
-static const struct attribute_spec h8300_attribute_table[] =
+TARGET_GNU_ATTRIBUTES (h8300_attribute_table,
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
@@ -4926,9 +4926,8 @@ static const struct attribute_spec h8300_attribute_table[] =
{ "eightbit_data", 0, 0, true, false, false, false,
h8300_handle_eightbit_data_attribute, NULL },
{ "tiny_data", 0, 0, true, false, false, false,
- h8300_handle_tiny_data_attribute, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ h8300_handle_tiny_data_attribute, NULL }
+});
/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
diff --git a/gcc/config/i386/gnu.h b/gcc/config/i386/gnu.h
index 8dc6d9e..e776144 100644
--- a/gcc/config/i386/gnu.h
+++ b/gcc/config/i386/gnu.h
@@ -27,12 +27,12 @@ along with GCC. If not, see <http://www.gnu.org/licenses/>.
#undef STARTFILE_SPEC
#if defined HAVE_LD_PIE
#define STARTFILE_SPEC \
- "%{!shared: %{pg|p|profile:%{static:gcrt0.o%s;:gcrt1.o%s};pie:Scrt1.o%s;static:crt0.o%s;:crt1.o%s}} \
- crti.o%s %{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}"
+ "%{!shared: %{pg|p|profile:%{static-pie:grcrt0.o%s;static:gcrt0.o%s;:gcrt1.o%s};static-pie:rcrt0.o%s;static:crt0.o%s;" PIE_SPEC ":Scrt1.o%s;:crt1.o%s}} \
+ crti.o%s %{static:crtbeginT.o%s;shared|static-pie|" PIE_SPEC ":crtbeginS.o%s;:crtbegin.o%s}"
#else
#define STARTFILE_SPEC \
"%{!shared: %{pg|p|profile:%{static:gcrt0.o%s;:gcrt1.o%s};static:crt0.o%s;:crt1.o%s}} \
- crti.o%s %{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}"
+ crti.o%s %{static:crtbeginT.o%s;shared:crtbeginS.o%s;:crtbegin.o%s}"
#endif
#ifdef TARGET_LIBC_PROVIDES_SSP
diff --git a/gcc/config/i386/gnu64.h b/gcc/config/i386/gnu64.h
index a411f0e..332372f 100644
--- a/gcc/config/i386/gnu64.h
+++ b/gcc/config/i386/gnu64.h
@@ -31,10 +31,10 @@ along with GCC. If not, see <http://www.gnu.org/licenses/>.
#undef STARTFILE_SPEC
#if defined HAVE_LD_PIE
#define STARTFILE_SPEC \
- "%{!shared: %{pg|p|profile:%{static:gcrt0.o%s;:gcrt1.o%s};pie:Scrt1.o%s;static:crt0.o%s;:crt1.o%s}} \
- crti.o%s %{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}"
+ "%{!shared: %{pg|p|profile:%{static-pie:grcrt0.o%s;static:gcrt0.o%s;:gcrt1.o%s};static-pie:rcrt0.o%s;static:crt0.o%s;" PIE_SPEC ":Scrt1.o%s;:crt1.o%s}} \
+ crti.o%s %{static:crtbeginT.o%s;shared|static-pie|" PIE_SPEC ":crtbeginS.o%s;:crtbegin.o%s}"
#else
#define STARTFILE_SPEC \
"%{!shared: %{pg|p|profile:%{static:gcrt0.o%s;:gcrt1.o%s};static:crt0.o%s;:crt1.o%s}} \
- crti.o%s %{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}"
+ crti.o%s %{static:crtbeginT.o%s;shared|static-pie|" PIE_SPEC ":crtbeginS.o%s;:crtbegin.o%s}"
#endif
diff --git a/gcc/config/i386/i386-features.cc b/gcc/config/i386/i386-features.cc
index 6fac67e..e6fc135 100644
--- a/gcc/config/i386/i386-features.cc
+++ b/gcc/config/i386/i386-features.cc
@@ -2627,10 +2627,11 @@ convert_scalars_to_vector (bool timode_p)
static unsigned int
rest_of_handle_insert_vzeroupper (void)
{
- /* vzeroupper instructions are inserted immediately after reload to
- account for possible spills from 256bit or 512bit registers. The pass
- reuses mode switching infrastructure by re-running mode insertion
- pass, so disable entities that have already been processed. */
+ /* vzeroupper instructions are inserted immediately after reload and
+ postreload_cse to clean up after it a little bit to account for possible
+ spills from 256bit or 512bit registers. The pass reuses mode switching
+ infrastructure by re-running mode insertion pass, so disable entities
+ that have already been processed. */
for (int i = 0; i < MAX_386_ENTITIES; i++)
ix86_optimize_mode_switching[i] = 0;
diff --git a/gcc/config/i386/i386-options.cc b/gcc/config/i386/i386-options.cc
index fb8638a..f86ad33 100644
--- a/gcc/config/i386/i386-options.cc
+++ b/gcc/config/i386/i386-options.cc
@@ -4086,7 +4086,7 @@ handle_nodirect_extern_access_attribute (tree *pnode, tree name,
}
/* Table of valid machine attributes. */
-const struct attribute_spec ix86_attribute_table[] =
+static const attribute_spec ix86_gnu_attributes[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
@@ -4166,10 +4166,12 @@ const struct attribute_spec ix86_attribute_table[] =
{ "cf_check", 0, 0, true, false, false, false,
ix86_handle_fndecl_attribute, NULL },
{ "nodirect_extern_access", 0, 0, true, false, false, false,
- handle_nodirect_extern_access_attribute, NULL },
+ handle_nodirect_extern_access_attribute, NULL }
+};
- /* End element. */
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
+const scoped_attribute_specs ix86_gnu_attribute_table =
+{
+ "gnu", { ix86_gnu_attributes }
};
#include "gt-i386-options.h"
diff --git a/gcc/config/i386/i386-options.h b/gcc/config/i386/i386-options.h
index 6866606..6274c59 100644
--- a/gcc/config/i386/i386-options.h
+++ b/gcc/config/i386/i386-options.h
@@ -82,7 +82,7 @@ void ix86_function_specific_print (FILE *, int,
struct cl_target_option *);
bool ix86_valid_target_attribute_p (tree, tree, tree, int);
-extern const struct attribute_spec ix86_attribute_table[];
+extern const struct scoped_attribute_specs ix86_gnu_attribute_table;
#endif /* GCC_I386_OPTIONS_H */
diff --git a/gcc/config/i386/i386-passes.def b/gcc/config/i386/i386-passes.def
index 90f2234..2d18981 100644
--- a/gcc/config/i386/i386-passes.def
+++ b/gcc/config/i386/i386-passes.def
@@ -24,7 +24,7 @@ along with GCC; see the file COPYING3. If not see
REPLACE_PASS (PASS, INSTANCE, TGT_PASS)
*/
- INSERT_PASS_AFTER (pass_reload, 1, pass_insert_vzeroupper);
+ INSERT_PASS_AFTER (pass_postreload_cse, 1, pass_insert_vzeroupper);
INSERT_PASS_AFTER (pass_combine, 1, pass_stv, false /* timode_p */);
/* Run the 64-bit STV pass before the CSE pass so that CONST0_RTX and
CONSTM1_RTX generated by the STV pass can be CSEed. */
diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc
index 9390f52..7c5cab4 100644
--- a/gcc/config/i386/i386.cc
+++ b/gcc/config/i386/i386.cc
@@ -8607,10 +8607,11 @@ ix86_elim_entry_set_got (rtx reg)
rtx pat = PATTERN (c_insn);
if (GET_CODE (pat) == PARALLEL)
{
- rtx vec = XVECEXP (pat, 0, 0);
- if (GET_CODE (vec) == SET
- && XINT (XEXP (vec, 1), 1) == UNSPEC_SET_GOT
- && REGNO (XEXP (vec, 0)) == REGNO (reg))
+ rtx set = XVECEXP (pat, 0, 0);
+ if (GET_CODE (set) == SET
+ && GET_CODE (SET_SRC (set)) == UNSPEC
+ && XINT (SET_SRC (set), 1) == UNSPEC_SET_GOT
+ && REGNO (SET_DEST (set)) == REGNO (reg))
delete_insn (c_insn);
}
}
@@ -23680,8 +23681,9 @@ static void map_egpr_constraints (vec<const char *> &constraints)
static rtx_insn *
ix86_md_asm_adjust (vec<rtx> &outputs, vec<rtx> & /*inputs*/,
vec<machine_mode> & /*input_modes*/,
- vec<const char *> &constraints, vec<rtx> &clobbers,
- HARD_REG_SET &clobbered_regs, location_t loc)
+ vec<const char *> &constraints, vec<rtx> &/*uses*/,
+ vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs,
+ location_t loc)
{
bool saw_asm_flag = false;
@@ -24562,15 +24564,34 @@ ix86_noce_conversion_profitable_p (rtx_insn *seq, struct noce_if_info *if_info)
/* x86-specific vector costs. */
class ix86_vector_costs : public vector_costs
{
- using vector_costs::vector_costs;
+public:
+ ix86_vector_costs (vec_info *, bool);
unsigned int add_stmt_cost (int count, vect_cost_for_stmt kind,
stmt_vec_info stmt_info, slp_tree node,
tree vectype, int misalign,
vect_cost_model_location where) override;
void finish_cost (const vector_costs *) override;
+
+private:
+
+ /* Estimate register pressure of the vectorized code. */
+ void ix86_vect_estimate_reg_pressure ();
+ /* Number of GENERAL_REGS/SSE_REGS used in the vectorizer, it's used for
+ estimation of register pressure.
+ ??? Currently it's only used by vec_construct/scalar_to_vec
+ where we know it's not loaded from memory. */
+ unsigned m_num_gpr_needed[3];
+ unsigned m_num_sse_needed[3];
};
+ix86_vector_costs::ix86_vector_costs (vec_info* vinfo, bool costing_for_scalar)
+ : vector_costs (vinfo, costing_for_scalar),
+ m_num_gpr_needed (),
+ m_num_sse_needed ()
+{
+}
+
/* Implement targetm.vectorize.create_costs. */
static vector_costs *
@@ -24748,8 +24769,7 @@ ix86_vector_costs::add_stmt_cost (int count, vect_cost_for_stmt kind,
}
else if ((kind == vec_construct || kind == scalar_to_vec)
&& node
- && SLP_TREE_DEF_TYPE (node) == vect_external_def
- && INTEGRAL_TYPE_P (TREE_TYPE (vectype)))
+ && SLP_TREE_DEF_TYPE (node) == vect_external_def)
{
stmt_cost = ix86_builtin_vectorization_cost (kind, vectype, misalign);
unsigned i;
@@ -24785,7 +24805,15 @@ ix86_vector_costs::add_stmt_cost (int count, vect_cost_for_stmt kind,
&& (gimple_assign_rhs_code (def) != BIT_FIELD_REF
|| !VECTOR_TYPE_P (TREE_TYPE
(TREE_OPERAND (gimple_assign_rhs1 (def), 0))))))
- stmt_cost += ix86_cost->sse_to_integer;
+ {
+ if (fp)
+ m_num_sse_needed[where]++;
+ else
+ {
+ m_num_gpr_needed[where]++;
+ stmt_cost += ix86_cost->sse_to_integer;
+ }
+ }
}
FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_OPS (node), i, op)
if (TREE_CODE (op) == SSA_NAME)
@@ -24822,6 +24850,24 @@ ix86_vector_costs::add_stmt_cost (int count, vect_cost_for_stmt kind,
}
void
+ix86_vector_costs::ix86_vect_estimate_reg_pressure ()
+{
+ unsigned gpr_spill_cost = COSTS_N_INSNS (ix86_cost->int_store [2]) / 2;
+ unsigned sse_spill_cost = COSTS_N_INSNS (ix86_cost->sse_store[0]) / 2;
+
+ /* Any better way to have target available fp registers, currently use SSE_REGS. */
+ unsigned target_avail_sse = TARGET_64BIT ? (TARGET_AVX512F ? 32 : 16) : 8;
+ for (unsigned i = 0; i != 3; i++)
+ {
+ if (m_num_gpr_needed[i] > target_avail_regs)
+ m_costs[i] += gpr_spill_cost * (m_num_gpr_needed[i] - target_avail_regs);
+ /* Only measure sse registers pressure. */
+ if (TARGET_SSE && (m_num_sse_needed[i] > target_avail_sse))
+ m_costs[i] += sse_spill_cost * (m_num_sse_needed[i] - target_avail_sse);
+ }
+}
+
+void
ix86_vector_costs::finish_cost (const vector_costs *scalar_costs)
{
loop_vec_info loop_vinfo = dyn_cast<loop_vec_info> (m_vinfo);
@@ -24843,6 +24889,8 @@ ix86_vector_costs::finish_cost (const vector_costs *scalar_costs)
m_costs[vect_body] = INT_MAX;
}
+ ix86_vect_estimate_reg_pressure ();
+
vector_costs::finish_cost (scalar_costs);
}
@@ -25968,6 +26016,11 @@ ix86_run_selftests (void)
#endif /* CHECKING_P */
+static const scoped_attribute_specs *const ix86_attribute_table[] =
+{
+ &ix86_gnu_attribute_table
+};
+
/* Initialize the GCC target structure. */
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index cb32de7..df7f917 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -2699,7 +2699,10 @@
(clobber (reg:CC FLAGS_REG))])]
{
int shift = ctz_hwi (UINTVAL (operands[1]));
- operands[1] = gen_int_mode (UINTVAL (operands[1]) >> shift, DImode);
+ rtx op1 = gen_int_mode (UINTVAL (operands[1]) >> shift, DImode);
+ if (ix86_endbr_immediate_operand (op1, VOIDmode))
+ FAIL;
+ operands[1] = op1;
operands[2] = gen_int_mode (shift, QImode);
})
@@ -23333,9 +23336,8 @@
(const_int 0))
(compare:CC (match_operand 4 "memory_operand")
(match_operand 5 "memory_operand"))
- (const_int 0)))
+ (reg:CC FLAGS_REG)))
(use (match_operand:SI 3 "immediate_operand"))
- (use (reg:CC FLAGS_REG))
(clobber (match_operand 0 "register_operand"))
(clobber (match_operand 1 "register_operand"))
(clobber (match_dup 2))])]
@@ -23351,9 +23353,8 @@
(const_int 0))
(compare:CC (mem:BLK (match_operand:P 4 "register_operand" "0"))
(mem:BLK (match_operand:P 5 "register_operand" "1")))
- (const_int 0)))
+ (reg:CC FLAGS_REG)))
(use (match_operand:SI 3 "immediate_operand" "i"))
- (use (reg:CC FLAGS_REG))
(clobber (match_operand:P 0 "register_operand" "=S"))
(clobber (match_operand:P 1 "register_operand" "=D"))
(clobber (match_operand:P 2 "register_operand" "=c"))]
@@ -23464,9 +23465,8 @@
(const_int 0))
(compare:CC (mem:BLK (match_operand 4 "register_operand"))
(mem:BLK (match_operand 5 "register_operand")))
- (const_int 0)))
+ (reg:CC FLAGS_REG)))
(use (match_operand:SI 3 "immediate_operand"))
- (use (reg:CC FLAGS_REG))
(clobber (match_operand 0 "register_operand"))
(clobber (match_operand 1 "register_operand"))
(clobber (match_operand 2 "register_operand"))])
@@ -23484,9 +23484,8 @@
(const_int 0))
(compare:CC (mem:BLK (match_dup 4))
(mem:BLK (match_dup 5)))
- (const_int 0)))
+ (reg:CC FLAGS_REG)))
(use (match_dup 3))
- (use (reg:CC FLAGS_REG))
(clobber (match_dup 0))
(clobber (match_dup 1))
(clobber (match_dup 2))])])
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index 4f51169..edd6f66 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -1297,6 +1297,11 @@
(V32QI "V16HI") (V16HI "V8SI") (V8SI "V4DI")
(V32HI "V16SI") (V64QI "V32HI") (V16SI "V8DI")])
+(define_mode_attr sseunpackmodelower
+ [(V16QI "v8hi") (V8HI "v4si") (V4SI "v2di")
+ (V32QI "v16hi") (V16HI "v8si") (V8SI "v4di")
+ (V32HI "v16si") (V64QI "v32hi") (V16SI "v8di")])
+
(define_mode_attr ssepackmode
[(V8HI "V16QI") (V4SI "V8HI") (V2DI "V4SI")
(V16HI "V32QI") (V8SI "V16HI") (V4DI "V8SI")
@@ -3480,11 +3485,12 @@
""
{
rtx tmp = gen_reg_rtx (<ssehalfvecmode>mode);
- emit_insn (gen_vec_extract_hi_<mode> (tmp, operands[1]));
rtx tmp2 = gen_reg_rtx (<ssehalfvecmode>mode);
- rtx tmp3 = gen_lowpart (<ssehalfvecmode>mode, operands[1]);
- emit_insn (gen_add<ssehalfvecmodelower>3 (tmp2, tmp, tmp3));
- emit_insn (gen_reduc_plus_scal_<ssehalfvecmodelower> (operands[0], tmp2));
+ rtx tmp3 = gen_reg_rtx (<ssehalfvecmode>mode);
+ emit_insn (gen_vec_extract_hi_<mode> (tmp, operands[1]));
+ emit_insn (gen_vec_extract_lo_<mode> (tmp2, operands[1]));
+ emit_insn (gen_add<ssehalfvecmodelower>3 (tmp3, tmp, tmp2));
+ emit_insn (gen_reduc_plus_scal_<ssehalfvecmodelower> (operands[0], tmp3));
DONE;
})
@@ -3528,11 +3534,12 @@
""
{
rtx tmp = gen_reg_rtx (<ssehalfvecmode>mode);
- emit_insn (gen_vec_extract_hi_<mode> (tmp, operands[1]));
rtx tmp2 = gen_reg_rtx (<ssehalfvecmode>mode);
- emit_insn (gen_<code><ssehalfvecmodelower>3
- (tmp2, tmp, gen_lowpart (<ssehalfvecmode>mode, operands[1])));
- emit_insn (gen_reduc_<code>_scal_<ssehalfvecmodelower> (operands[0], tmp2));
+ rtx tmp3 = gen_reg_rtx (<ssehalfvecmode>mode);
+ emit_insn (gen_vec_extract_hi_<mode> (tmp, operands[1]));
+ emit_insn (gen_vec_extract_lo_<mode> (tmp2, operands[1]));
+ emit_insn (gen_<code><ssehalfvecmodelower>3 (tmp3, tmp, tmp2));
+ emit_insn (gen_reduc_<code>_scal_<ssehalfvecmodelower> (operands[0], tmp3));
DONE;
})
@@ -3543,11 +3550,12 @@
"TARGET_AVX512F"
{
rtx tmp = gen_reg_rtx (<ssehalfvecmode>mode);
- emit_insn (gen_vec_extract_hi_<mode> (tmp, operands[1]));
rtx tmp2 = gen_reg_rtx (<ssehalfvecmode>mode);
- emit_insn (gen_<code><ssehalfvecmodelower>3
- (tmp2, tmp, gen_lowpart (<ssehalfvecmode>mode, operands[1])));
- emit_insn (gen_reduc_<code>_scal_<ssehalfvecmodelower> (operands[0], tmp2));
+ rtx tmp3 = gen_reg_rtx (<ssehalfvecmode>mode);
+ emit_insn (gen_vec_extract_hi_<mode> (tmp, operands[1]));
+ emit_insn (gen_vec_extract_lo_<mode> (tmp2, operands[1]));
+ emit_insn (gen_<code><ssehalfvecmodelower>3 (tmp3, tmp, tmp2));
+ emit_insn (gen_reduc_<code>_scal_<ssehalfvecmodelower> (operands[0], tmp3));
DONE;
})
@@ -3558,14 +3566,15 @@
"TARGET_AVX2"
{
rtx tmp = gen_reg_rtx (<ssehalfvecmode>mode);
- emit_insn (gen_vec_extract_hi_<mode> (tmp, operands[1]));
rtx tmp2 = gen_reg_rtx (<ssehalfvecmode>mode);
- emit_insn (gen_<code><ssehalfvecmodelower>3
- (tmp2, tmp, gen_lowpart (<ssehalfvecmode>mode, operands[1])));
rtx tmp3 = gen_reg_rtx (<ssehalfvecmode>mode);
- ix86_expand_reduc (gen_<code><ssehalfvecmodelower>3, tmp3, tmp2);
+ emit_insn (gen_vec_extract_hi_<mode> (tmp, operands[1]));
+ emit_insn (gen_vec_extract_lo_<mode> (tmp2, operands[1]));
+ emit_insn (gen_<code><ssehalfvecmodelower>3 (tmp3, tmp, tmp2));
+ rtx tmp4 = gen_reg_rtx (<ssehalfvecmode>mode);
+ ix86_expand_reduc (gen_<code><ssehalfvecmodelower>3, tmp4, tmp3);
emit_insn (gen_vec_extract<ssehalfvecmodelower><ssescalarmodelower>
- (operands[0], tmp3, const0_rtx));
+ (operands[0], tmp4, const0_rtx));
DONE;
})
@@ -3637,11 +3646,12 @@
""
{
rtx tmp = gen_reg_rtx (<ssehalfvecmode>mode);
- emit_insn (gen_vec_extract_hi_<mode> (tmp, operands[1]));
rtx tmp2 = gen_reg_rtx (<ssehalfvecmode>mode);
- rtx tmp3 = gen_lowpart (<ssehalfvecmode>mode, operands[1]);
- emit_insn (gen_<code><ssehalfvecmodelower>3 (tmp2, tmp, tmp3));
- emit_insn (gen_reduc_<code>_scal_<ssehalfvecmodelower> (operands[0], tmp2));
+ rtx tmp3 = gen_reg_rtx (<ssehalfvecmode>mode);
+ emit_insn (gen_vec_extract_hi_<mode> (tmp, operands[1]));
+ emit_insn (gen_vec_extract_lo_<mode> (tmp2, operands[1]));
+ emit_insn (gen_<code><ssehalfvecmodelower>3 (tmp3, tmp, tmp2));
+ emit_insn (gen_reduc_<code>_scal_<ssehalfvecmodelower> (operands[0], tmp3));
DONE;
})
@@ -5106,7 +5116,10 @@
(match_operand:VF1_AVX2 1 "register_operand") 0)
(match_dup 2)))]
"TARGET_SSE2"
- "operands[2] = GEN_INT (GET_MODE_UNIT_BITSIZE (<MODE>mode)-1);")
+{
+ operands[1] = force_reg (<MODE>mode, operands[1]);
+ operands[2] = GEN_INT (GET_MODE_UNIT_BITSIZE (<MODE>mode)-1);
+})
;; Also define scalar versions. These are used for abs, neg, and
;; conditional move. Using subregs into vector modes causes register
@@ -16601,6 +16614,18 @@
DONE;
})
+(define_split
+ [(set (match_operand:VI248_AVX2 0 "register_operand")
+ (eq:VI248_AVX2
+ (eq:VI248_AVX2
+ (lshiftrt:VI248_AVX2
+ (match_operand:VI248_AVX2 1 "register_operand")
+ (match_operand:SI 2 "const_int_operand"))
+ (match_operand:VI248_AVX2 3 "const0_operand"))
+ (match_operand:VI248_AVX2 4 "const0_operand")))]
+ "INTVAL (operands[2]) == GET_MODE_PRECISION (<ssescalarmode>mode) - 1"
+ [(set (match_dup 0) (ashiftrt:VI248_AVX2 (match_dup 1) (match_dup 2)))])
+
(define_expand "rotlv1ti3"
[(set (match_operand:V1TI 0 "register_operand")
(rotate:V1TI
@@ -30748,39 +30773,155 @@
(define_expand "sdot_prod<mode>"
[(match_operand:<ssedvecmode> 0 "register_operand")
- (match_operand:VI1 1 "register_operand")
- (match_operand:VI1 2 "register_operand")
+ (match_operand:VI1_AVX2 1 "register_operand")
+ (match_operand:VI1_AVX2 2 "register_operand")
(match_operand:<ssedvecmode> 3 "register_operand")]
- "TARGET_AVXVNNIINT8"
+ "TARGET_SSE2"
{
- operands[1] = lowpart_subreg (<ssedvecmode>mode,
- force_reg (<MODE>mode, operands[1]),
- <MODE>mode);
- operands[2] = lowpart_subreg (<ssedvecmode>mode,
- force_reg (<MODE>mode, operands[2]),
- <MODE>mode);
- emit_insn (gen_rtx_SET (operands[0], operands[3]));
- emit_insn (gen_vpdpbssd_<ssedvecmodelower> (operands[0], operands[3],
- operands[1], operands[2]));
+ if (TARGET_AVXVNNIINT8)
+ {
+ operands[1] = lowpart_subreg (<ssedvecmode>mode,
+ force_reg (<MODE>mode, operands[1]),
+ <MODE>mode);
+ operands[2] = lowpart_subreg (<ssedvecmode>mode,
+ force_reg (<MODE>mode, operands[2]),
+ <MODE>mode);
+ emit_insn (gen_rtx_SET (operands[0], operands[3]));
+ emit_insn (gen_vpdpbssd_<ssedvecmodelower> (operands[0], operands[3],
+ operands[1], operands[2]));
+ }
+ else
+ {
+ /* Emulate with vpdpwssd. */
+ rtx op1_lo = gen_reg_rtx (<sseunpackmode>mode);
+ rtx op1_hi = gen_reg_rtx (<sseunpackmode>mode);
+ rtx op2_lo = gen_reg_rtx (<sseunpackmode>mode);
+ rtx op2_hi = gen_reg_rtx (<sseunpackmode>mode);
+
+ emit_insn (gen_vec_unpacks_lo_<mode> (op1_lo, operands[1]));
+ emit_insn (gen_vec_unpacks_lo_<mode> (op2_lo, operands[2]));
+ emit_insn (gen_vec_unpacks_hi_<mode> (op1_hi, operands[1]));
+ emit_insn (gen_vec_unpacks_hi_<mode> (op2_hi, operands[2]));
+
+ rtx res1 = gen_reg_rtx (<ssedvecmode>mode);
+ rtx res2 = gen_reg_rtx (<ssedvecmode>mode);
+ rtx sum = gen_reg_rtx (<ssedvecmode>mode);
+
+ emit_move_insn (sum, CONST0_RTX (<ssedvecmode>mode));
+ emit_insn (gen_sdot_prod<sseunpackmodelower> (res1, op1_lo,
+ op2_lo, sum));
+ emit_insn (gen_sdot_prod<sseunpackmodelower> (res2, op1_hi,
+ op2_hi, operands[3]));
+ emit_insn (gen_add<ssedvecmodelower>3 (operands[0], res1, res2));
+ }
+
+ DONE;
+})
+
+(define_expand "sdot_prodv64qi"
+ [(match_operand:V16SI 0 "register_operand")
+ (match_operand:V64QI 1 "register_operand")
+ (match_operand:V64QI 2 "register_operand")
+ (match_operand:V16SI 3 "register_operand")]
+ "(TARGET_AVX512VNNI || TARGET_AVX512BW) && TARGET_EVEX512"
+{
+ /* Emulate with vpdpwssd. */
+ rtx op1_lo = gen_reg_rtx (V32HImode);
+ rtx op1_hi = gen_reg_rtx (V32HImode);
+ rtx op2_lo = gen_reg_rtx (V32HImode);
+ rtx op2_hi = gen_reg_rtx (V32HImode);
+
+ emit_insn (gen_vec_unpacks_lo_v64qi (op1_lo, operands[1]));
+ emit_insn (gen_vec_unpacks_lo_v64qi (op2_lo, operands[2]));
+ emit_insn (gen_vec_unpacks_hi_v64qi (op1_hi, operands[1]));
+ emit_insn (gen_vec_unpacks_hi_v64qi (op2_hi, operands[2]));
+
+ rtx res1 = gen_reg_rtx (V16SImode);
+ rtx res2 = gen_reg_rtx (V16SImode);
+ rtx sum = gen_reg_rtx (V16SImode);
+
+ emit_move_insn (sum, CONST0_RTX (V16SImode));
+ emit_insn (gen_sdot_prodv32hi (res1, op1_lo, op2_lo, sum));
+ emit_insn (gen_sdot_prodv32hi (res2, op1_hi, op2_hi, operands[3]));
+
+ emit_insn (gen_addv16si3 (operands[0], res1, res2));
DONE;
})
(define_expand "udot_prod<mode>"
[(match_operand:<ssedvecmode> 0 "register_operand")
- (match_operand:VI1 1 "register_operand")
- (match_operand:VI1 2 "register_operand")
+ (match_operand:VI1_AVX2 1 "register_operand")
+ (match_operand:VI1_AVX2 2 "register_operand")
(match_operand:<ssedvecmode> 3 "register_operand")]
- "TARGET_AVXVNNIINT8"
+ "TARGET_SSE2"
{
- operands[1] = lowpart_subreg (<ssedvecmode>mode,
- force_reg (<MODE>mode, operands[1]),
- <MODE>mode);
- operands[2] = lowpart_subreg (<ssedvecmode>mode,
- force_reg (<MODE>mode, operands[2]),
- <MODE>mode);
- emit_insn (gen_rtx_SET (operands[0], operands[3]));
- emit_insn (gen_vpdpbuud_<ssedvecmodelower> (operands[0], operands[3],
- operands[1], operands[2]));
+ if (TARGET_AVXVNNIINT8)
+ {
+ operands[1] = lowpart_subreg (<ssedvecmode>mode,
+ force_reg (<MODE>mode, operands[1]),
+ <MODE>mode);
+ operands[2] = lowpart_subreg (<ssedvecmode>mode,
+ force_reg (<MODE>mode, operands[2]),
+ <MODE>mode);
+ emit_insn (gen_rtx_SET (operands[0], operands[3]));
+ emit_insn (gen_vpdpbuud_<ssedvecmodelower> (operands[0], operands[3],
+ operands[1], operands[2]));
+ }
+ else
+ {
+ /* Emulate with vpdpwssd. */
+ rtx op1_lo = gen_reg_rtx (<sseunpackmode>mode);
+ rtx op1_hi = gen_reg_rtx (<sseunpackmode>mode);
+ rtx op2_lo = gen_reg_rtx (<sseunpackmode>mode);
+ rtx op2_hi = gen_reg_rtx (<sseunpackmode>mode);
+
+ emit_insn (gen_vec_unpacku_lo_<mode> (op1_lo, operands[1]));
+ emit_insn (gen_vec_unpacku_lo_<mode> (op2_lo, operands[2]));
+ emit_insn (gen_vec_unpacku_hi_<mode> (op1_hi, operands[1]));
+ emit_insn (gen_vec_unpacku_hi_<mode> (op2_hi, operands[2]));
+
+ rtx res1 = gen_reg_rtx (<ssedvecmode>mode);
+ rtx res2 = gen_reg_rtx (<ssedvecmode>mode);
+ rtx sum = gen_reg_rtx (<ssedvecmode>mode);
+
+ emit_move_insn (sum, CONST0_RTX (<ssedvecmode>mode));
+ emit_insn (gen_sdot_prod<sseunpackmodelower> (res1, op1_lo,
+ op2_lo, sum));
+ emit_insn (gen_sdot_prod<sseunpackmodelower> (res2, op1_hi,
+ op2_hi, operands[3]));
+ emit_insn (gen_add<ssedvecmodelower>3 (operands[0], res1, res2));
+ }
+
+ DONE;
+})
+
+(define_expand "udot_prodv64qi"
+ [(match_operand:V16SI 0 "register_operand")
+ (match_operand:V64QI 1 "register_operand")
+ (match_operand:V64QI 2 "register_operand")
+ (match_operand:V16SI 3 "register_operand")]
+ "(TARGET_AVX512VNNI || TARGET_AVX512BW) && TARGET_EVEX512"
+{
+ /* Emulate with vpdpwssd. */
+ rtx op1_lo = gen_reg_rtx (V32HImode);
+ rtx op1_hi = gen_reg_rtx (V32HImode);
+ rtx op2_lo = gen_reg_rtx (V32HImode);
+ rtx op2_hi = gen_reg_rtx (V32HImode);
+
+ emit_insn (gen_vec_unpacku_lo_v64qi (op1_lo, operands[1]));
+ emit_insn (gen_vec_unpacku_lo_v64qi (op2_lo, operands[2]));
+ emit_insn (gen_vec_unpacku_hi_v64qi (op1_hi, operands[1]));
+ emit_insn (gen_vec_unpacku_hi_v64qi (op2_hi, operands[2]));
+
+ rtx res1 = gen_reg_rtx (V16SImode);
+ rtx res2 = gen_reg_rtx (V16SImode);
+ rtx sum = gen_reg_rtx (V16SImode);
+
+ emit_move_insn (sum, CONST0_RTX (V16SImode));
+ emit_insn (gen_sdot_prodv32hi (res1, op1_lo, op2_lo, sum));
+ emit_insn (gen_sdot_prodv32hi (res2, op1_hi, op2_hi, operands[3]));
+
+ emit_insn (gen_addv16si3 (operands[0], res1, res2));
DONE;
})
diff --git a/gcc/config/i386/t-gnu64 b/gcc/config/i386/t-gnu64
new file mode 100644
index 0000000..23ee682
--- /dev/null
+++ b/gcc/config/i386/t-gnu64
@@ -0,0 +1,38 @@
+# Copyright (C) 2002-2023 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# On Debian, Ubuntu and other derivative distributions, the 32bit libraries
+# are found in /lib32 and /usr/lib32, /lib64 and /usr/lib64 are symlinks to
+# /lib and /usr/lib, while other distributions install libraries into /lib64
+# and /usr/lib64. The LSB does not enforce the use of /lib64 and /usr/lib64,
+# it doesn't tell anything about the 32bit libraries on those systems. Set
+# MULTILIB_OSDIRNAMES according to what is found on the target.
+
+# To support i386, x86-64 and x32 libraries, the directory structrue
+# should be:
+#
+# /lib has i386 libraries.
+# /lib64 has x86-64 libraries.
+# /libx32 has x32 libraries.
+#
+comma=,
+MULTILIB_OPTIONS = $(subst $(comma),/,$(TM_MULTILIB_CONFIG))
+MULTILIB_DIRNAMES = $(patsubst m%, %, $(subst /, ,$(MULTILIB_OPTIONS)))
+MULTILIB_OSDIRNAMES = m64=../lib64$(call if_multiarch,:x86_64-gnu)
+MULTILIB_OSDIRNAMES+= m32=$(if $(wildcard $(shell echo $(SYSTEM_HEADER_DIR))/../../usr/lib32),../lib32,../lib)$(call if_multiarch,:i386-gnu)
+MULTILIB_OSDIRNAMES+= mx32=../libx32$(call if_multiarch,:x86_64-gnux32)
diff --git a/gcc/config/ia64/ia64.cc b/gcc/config/ia64/ia64.cc
index c241e1a..ac566ef 100644
--- a/gcc/config/ia64/ia64.cc
+++ b/gcc/config/ia64/ia64.cc
@@ -358,7 +358,7 @@ static bool ia64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d);
/* Table of valid machine attributes. */
-static const struct attribute_spec ia64_attribute_table[] =
+static const attribute_spec ia64_gnu_attributes[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
@@ -370,8 +370,17 @@ static const struct attribute_spec ia64_attribute_table[] =
ia64_vms_common_object_attribute, NULL },
#endif
{ "version_id", 1, 1, true, false, false, false,
- ia64_handle_version_id_attribute, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
+ ia64_handle_version_id_attribute, NULL }
+};
+
+static const scoped_attribute_specs ia64_gnu_attribute_table =
+{
+ "gnu", { ia64_gnu_attributes }
+};
+
+static const scoped_attribute_specs *const ia64_attribute_table[] =
+{
+ &ia64_gnu_attribute_table
};
/* Initialize the GCC target structure. */
diff --git a/gcc/config/linux.h b/gcc/config/linux.h
index 79b6537..73f39d3 100644
--- a/gcc/config/linux.h
+++ b/gcc/config/linux.h
@@ -215,7 +215,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# undef TARGET_LIBM_FUNCTION_MAX_ERROR
# define TARGET_LIBM_FUNCTION_MAX_ERROR linux_libm_function_max_error
-#endif
-
#undef TARGET_FORTIFY_SOURCE_DEFAULT_LEVEL
#define TARGET_FORTIFY_SOURCE_DEFAULT_LEVEL linux_fortify_source_default_level
+
+#endif
diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
index 2e11f06..116b30c 100644
--- a/gcc/config/loongarch/lasx.md
+++ b/gcc/config/loongarch/lasx.md
@@ -53,7 +53,6 @@
UNSPEC_LASX_XVFCMP_SULT
UNSPEC_LASX_XVFCMP_SUN
UNSPEC_LASX_XVFCMP_SUNE
- UNSPEC_LASX_XVFTINT_S
UNSPEC_LASX_XVFTINT_U
UNSPEC_LASX_XVCLO
UNSPEC_LASX_XVSAT_S
@@ -69,8 +68,6 @@
UNSPEC_LASX_BRANCH
UNSPEC_LASX_BRANCH_V
- UNSPEC_LASX_XVMUH_S
- UNSPEC_LASX_XVMUH_U
UNSPEC_LASX_MXVEXTW_U
UNSPEC_LASX_XVSLLWIL_S
UNSPEC_LASX_XVSLLWIL_U
@@ -92,12 +89,6 @@
UNSPEC_LASX_XVEXTRINS
UNSPEC_LASX_XVMSKLTZ
UNSPEC_LASX_XVSIGNCOV
- UNSPEC_LASX_XVFTINTRNE_W_S
- UNSPEC_LASX_XVFTINTRNE_L_D
- UNSPEC_LASX_XVFTINTRP_W_S
- UNSPEC_LASX_XVFTINTRP_L_D
- UNSPEC_LASX_XVFTINTRM_W_S
- UNSPEC_LASX_XVFTINTRM_L_D
UNSPEC_LASX_XVFTINT_W_D
UNSPEC_LASX_XVFFINT_S_L
UNSPEC_LASX_XVFTINTRZ_W_D
@@ -116,14 +107,6 @@
UNSPEC_LASX_XVFTINTRML_L_S
UNSPEC_LASX_XVFTINTRNEL_L_S
UNSPEC_LASX_XVFTINTRNEH_L_S
- UNSPEC_LASX_XVFRINTRNE_S
- UNSPEC_LASX_XVFRINTRNE_D
- UNSPEC_LASX_XVFRINTRZ_S
- UNSPEC_LASX_XVFRINTRZ_D
- UNSPEC_LASX_XVFRINTRP_S
- UNSPEC_LASX_XVFRINTRP_D
- UNSPEC_LASX_XVFRINTRM_S
- UNSPEC_LASX_XVFRINTRM_D
UNSPEC_LASX_XVREPLVE0_Q
UNSPEC_LASX_XVPERM_W
UNSPEC_LASX_XVPERMI_Q
@@ -155,7 +138,6 @@
UNSPEC_LASX_XVHSUBW_Q_D
UNSPEC_LASX_XVHADDW_QU_DU
UNSPEC_LASX_XVHSUBW_QU_DU
- UNSPEC_LASX_XVROTR
UNSPEC_LASX_XVADD_Q
UNSPEC_LASX_XVSUB_Q
UNSPEC_LASX_XVREPLVE
@@ -206,9 +188,6 @@
;; Only used for copy256_{u,s}.w.
(define_mode_iterator LASX_W [V8SI V8SF])
-;; Only integer modes in LASX.
-(define_mode_iterator ILASX [V4DI V8SI V16HI V32QI])
-
;; As ILASX but excludes V32QI.
(define_mode_iterator ILASX_DWH [V4DI V8SI V16HI])
@@ -224,9 +203,6 @@
;; Only integer modes smaller than a word.
(define_mode_iterator ILASX_HB [V16HI V32QI])
-;; Only floating-point modes in LASX.
-(define_mode_iterator FLASX [V4DF V8SF])
-
;; Only used for immediate set shuffle elements instruction.
(define_mode_iterator LASX_WHB_W [V8SI V16HI V32QI V8SF])
@@ -500,37 +476,6 @@
(V16HI "w")
(V32QI "w")])
-(define_int_iterator FRINT256_S [UNSPEC_LASX_XVFRINTRP_S
- UNSPEC_LASX_XVFRINTRZ_S
- UNSPEC_LASX_XVFRINT
- UNSPEC_LASX_XVFRINTRM_S])
-
-(define_int_iterator FRINT256_D [UNSPEC_LASX_XVFRINTRP_D
- UNSPEC_LASX_XVFRINTRZ_D
- UNSPEC_LASX_XVFRINT
- UNSPEC_LASX_XVFRINTRM_D])
-
-(define_int_attr frint256_pattern_s
- [(UNSPEC_LASX_XVFRINTRP_S "ceil")
- (UNSPEC_LASX_XVFRINTRZ_S "btrunc")
- (UNSPEC_LASX_XVFRINT "rint")
- (UNSPEC_LASX_XVFRINTRM_S "floor")])
-
-(define_int_attr frint256_pattern_d
- [(UNSPEC_LASX_XVFRINTRP_D "ceil")
- (UNSPEC_LASX_XVFRINTRZ_D "btrunc")
- (UNSPEC_LASX_XVFRINT "rint")
- (UNSPEC_LASX_XVFRINTRM_D "floor")])
-
-(define_int_attr frint256_suffix
- [(UNSPEC_LASX_XVFRINTRP_S "rp")
- (UNSPEC_LASX_XVFRINTRP_D "rp")
- (UNSPEC_LASX_XVFRINTRZ_S "rz")
- (UNSPEC_LASX_XVFRINTRZ_D "rz")
- (UNSPEC_LASX_XVFRINT "")
- (UNSPEC_LASX_XVFRINTRM_S "rm")
- (UNSPEC_LASX_XVFRINTRM_D "rm")])
-
(define_expand "vec_init<mode><unitmode>"
[(match_operand:LASX 0 "register_operand")
(match_operand:LASX 1 "")]
@@ -1688,15 +1633,6 @@
[(set_attr "type" "simd_fdiv")
(set_attr "mode" "<MODE>")])
-(define_insn "lasx_xvfrint_<flasxfmt>"
- [(set (match_operand:FLASX 0 "register_operand" "=f")
- (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")]
- UNSPEC_LASX_XVFRINT))]
- "ISA_HAS_LASX"
- "xvfrint.<flasxfmt>\t%u0,%u1"
- [(set_attr "type" "simd_fcvt")
- (set_attr "mode" "<MODE>")])
-
(define_insn "lasx_xvfrsqrt_<flasxfmt>"
[(set (match_operand:FLASX 0 "register_operand" "=f")
(unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")]
@@ -1706,16 +1642,6 @@
[(set_attr "type" "simd_fdiv")
(set_attr "mode" "<MODE>")])
-(define_insn "lasx_xvftint_s_<ilasxfmt>_<flasxfmt>"
- [(set (match_operand:<VIMODE256> 0 "register_operand" "=f")
- (unspec:<VIMODE256> [(match_operand:FLASX 1 "register_operand" "f")]
- UNSPEC_LASX_XVFTINT_S))]
- "ISA_HAS_LASX"
- "xvftint.<ilasxfmt>.<flasxfmt>\t%u0,%u1"
- [(set_attr "type" "simd_fcvt")
- (set_attr "cnv_mode" "<FINTCNV256_2>")
- (set_attr "mode" "<MODE>")])
-
(define_insn "lasx_xvftint_u_<ilasxfmt_u>_<flasxfmt>"
[(set (match_operand:<VIMODE256> 0 "register_operand" "=f")
(unspec:<VIMODE256> [(match_operand:FLASX 1 "register_operand" "f")]
@@ -1726,18 +1652,6 @@
(set_attr "cnv_mode" "<FINTCNV256_2>")
(set_attr "mode" "<MODE>")])
-
-
-(define_insn "fix_trunc<FLASX:mode><mode256_i>2"
- [(set (match_operand:<VIMODE256> 0 "register_operand" "=f")
- (fix:<VIMODE256> (match_operand:FLASX 1 "register_operand" "f")))]
- "ISA_HAS_LASX"
- "xvftintrz.<ilasxfmt>.<flasxfmt>\t%u0,%u1"
- [(set_attr "type" "simd_fcvt")
- (set_attr "cnv_mode" "<FINTCNV256_2>")
- (set_attr "mode" "<MODE>")])
-
-
(define_insn "fixuns_trunc<FLASX:mode><mode256_i>2"
[(set (match_operand:<VIMODE256> 0 "register_operand" "=f")
(unsigned_fix:<VIMODE256> (match_operand:FLASX 1 "register_operand" "f")))]
@@ -2906,26 +2820,6 @@
[(set_attr "type" "simd_logic")
(set_attr "mode" "<MODE>")])
-(define_insn "lasx_xvmuh_s_<lasxfmt>"
- [(set (match_operand:ILASX 0 "register_operand" "=f")
- (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
- (match_operand:ILASX 2 "register_operand" "f")]
- UNSPEC_LASX_XVMUH_S))]
- "ISA_HAS_LASX"
- "xvmuh.<lasxfmt>\t%u0,%u1,%u2"
- [(set_attr "type" "simd_int_arith")
- (set_attr "mode" "<MODE>")])
-
-(define_insn "lasx_xvmuh_u_<lasxfmt_u>"
- [(set (match_operand:ILASX 0 "register_operand" "=f")
- (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
- (match_operand:ILASX 2 "register_operand" "f")]
- UNSPEC_LASX_XVMUH_U))]
- "ISA_HAS_LASX"
- "xvmuh.<lasxfmt_u>\t%u0,%u1,%u2"
- [(set_attr "type" "simd_int_arith")
- (set_attr "mode" "<MODE>")])
-
(define_insn "lasx_xvsllwil_s_<dlasxfmt>_<lasxfmt>"
[(set (match_operand:<VDMODE256> 0 "register_operand" "=f")
(unspec:<VDMODE256> [(match_operand:ILASX_WHB 1 "register_operand" "f")
@@ -3245,60 +3139,6 @@
[(set_attr "type" "simd_fmadd")
(set_attr "mode" "<MODE>")])
-(define_insn "lasx_xvftintrne_w_s"
- [(set (match_operand:V8SI 0 "register_operand" "=f")
- (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")]
- UNSPEC_LASX_XVFTINTRNE_W_S))]
- "ISA_HAS_LASX"
- "xvftintrne.w.s\t%u0,%u1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V8SF")])
-
-(define_insn "lasx_xvftintrne_l_d"
- [(set (match_operand:V4DI 0 "register_operand" "=f")
- (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")]
- UNSPEC_LASX_XVFTINTRNE_L_D))]
- "ISA_HAS_LASX"
- "xvftintrne.l.d\t%u0,%u1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V4DF")])
-
-(define_insn "lasx_xvftintrp_w_s"
- [(set (match_operand:V8SI 0 "register_operand" "=f")
- (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")]
- UNSPEC_LASX_XVFTINTRP_W_S))]
- "ISA_HAS_LASX"
- "xvftintrp.w.s\t%u0,%u1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V8SF")])
-
-(define_insn "lasx_xvftintrp_l_d"
- [(set (match_operand:V4DI 0 "register_operand" "=f")
- (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")]
- UNSPEC_LASX_XVFTINTRP_L_D))]
- "ISA_HAS_LASX"
- "xvftintrp.l.d\t%u0,%u1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V4DF")])
-
-(define_insn "lasx_xvftintrm_w_s"
- [(set (match_operand:V8SI 0 "register_operand" "=f")
- (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")]
- UNSPEC_LASX_XVFTINTRM_W_S))]
- "ISA_HAS_LASX"
- "xvftintrm.w.s\t%u0,%u1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V8SF")])
-
-(define_insn "lasx_xvftintrm_l_d"
- [(set (match_operand:V4DI 0 "register_operand" "=f")
- (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")]
- UNSPEC_LASX_XVFTINTRM_L_D))]
- "ISA_HAS_LASX"
- "xvftintrm.l.d\t%u0,%u1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V4DF")])
-
(define_insn "lasx_xvftint_w_d"
[(set (match_operand:V8SI 0 "register_operand" "=f")
(unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f")
@@ -3467,108 +3307,6 @@
[(set_attr "type" "simd_shift")
(set_attr "mode" "V8SF")])
-(define_insn "lasx_xvfrintrne_s"
- [(set (match_operand:V8SF 0 "register_operand" "=f")
- (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")]
- UNSPEC_LASX_XVFRINTRNE_S))]
- "ISA_HAS_LASX"
- "xvfrintrne.s\t%u0,%u1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V8SF")])
-
-(define_insn "lasx_xvfrintrne_d"
- [(set (match_operand:V4DF 0 "register_operand" "=f")
- (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")]
- UNSPEC_LASX_XVFRINTRNE_D))]
- "ISA_HAS_LASX"
- "xvfrintrne.d\t%u0,%u1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V4DF")])
-
-(define_insn "lasx_xvfrintrz_s"
- [(set (match_operand:V8SF 0 "register_operand" "=f")
- (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")]
- UNSPEC_LASX_XVFRINTRZ_S))]
- "ISA_HAS_LASX"
- "xvfrintrz.s\t%u0,%u1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V8SF")])
-
-(define_insn "lasx_xvfrintrz_d"
- [(set (match_operand:V4DF 0 "register_operand" "=f")
- (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")]
- UNSPEC_LASX_XVFRINTRZ_D))]
- "ISA_HAS_LASX"
- "xvfrintrz.d\t%u0,%u1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V4DF")])
-
-(define_insn "lasx_xvfrintrp_s"
- [(set (match_operand:V8SF 0 "register_operand" "=f")
- (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")]
- UNSPEC_LASX_XVFRINTRP_S))]
- "ISA_HAS_LASX"
- "xvfrintrp.s\t%u0,%u1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V8SF")])
-
-(define_insn "lasx_xvfrintrp_d"
- [(set (match_operand:V4DF 0 "register_operand" "=f")
- (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")]
- UNSPEC_LASX_XVFRINTRP_D))]
- "ISA_HAS_LASX"
- "xvfrintrp.d\t%u0,%u1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V4DF")])
-
-(define_insn "lasx_xvfrintrm_s"
- [(set (match_operand:V8SF 0 "register_operand" "=f")
- (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")]
- UNSPEC_LASX_XVFRINTRM_S))]
- "ISA_HAS_LASX"
- "xvfrintrm.s\t%u0,%u1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V8SF")])
-
-(define_insn "lasx_xvfrintrm_d"
- [(set (match_operand:V4DF 0 "register_operand" "=f")
- (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")]
- UNSPEC_LASX_XVFRINTRM_D))]
- "ISA_HAS_LASX"
- "xvfrintrm.d\t%u0,%u1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V4DF")])
-
-;; Vector versions of the floating-point frint patterns.
-;; Expands to btrunc, ceil, floor, rint.
-(define_insn "<FRINT256_S:frint256_pattern_s>v8sf2"
- [(set (match_operand:V8SF 0 "register_operand" "=f")
- (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")]
- FRINT256_S))]
- "ISA_HAS_LASX"
- "xvfrint<FRINT256_S:frint256_suffix>.s\t%u0,%u1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V8SF")])
-
-(define_insn "<FRINT256_D:frint256_pattern_d>v4df2"
- [(set (match_operand:V4DF 0 "register_operand" "=f")
- (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")]
- FRINT256_D))]
- "ISA_HAS_LASX"
- "xvfrint<FRINT256_D:frint256_suffix>.d\t%u0,%u1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V4DF")])
-
-;; Expands to round.
-(define_insn "round<mode>2"
- [(set (match_operand:FLASX 0 "register_operand" "=f")
- (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")]
- UNSPEC_LASX_XVFRINT))]
- "ISA_HAS_LASX"
- "xvfrint.<flasxfmt>\t%u0,%u1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "<MODE>")])
-
;; Offset load and broadcast
(define_expand "lasx_xvldrepl_<lasxfmt_f>"
[(match_operand:LASX 0 "register_operand")
@@ -4493,18 +4231,6 @@
[(set_attr "type" "simd_int_arith")
(set_attr "mode" "V4DI")])
-;;XVROTR.B XVROTR.H XVROTR.W XVROTR.D
-;;TODO-478
-(define_insn "lasx_xvrotr_<lasxfmt>"
- [(set (match_operand:ILASX 0 "register_operand" "=f")
- (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
- (match_operand:ILASX 2 "register_operand" "f")]
- UNSPEC_LASX_XVROTR))]
- "ISA_HAS_LASX"
- "xvrotr.<lasxfmt>\t%u0,%u1,%u2"
- [(set_attr "type" "simd_int_arith")
- (set_attr "mode" "<MODE>")])
-
;;XVADD.Q
;;TODO2
(define_insn "lasx_xvadd_q"
@@ -4687,15 +4413,6 @@
[(set_attr "type" "simd_fcvt")
(set_attr "mode" "V4DI")])
-(define_insn "lasx_xvrotri_<lasxfmt>"
- [(set (match_operand:ILASX 0 "register_operand" "=f")
- (rotatert:ILASX (match_operand:ILASX 1 "register_operand" "f")
- (match_operand 2 "const_<bitimm256>_operand" "")))]
- "ISA_HAS_LASX"
- "xvrotri.<lasxfmt>\t%u0,%u1,%2"
- [(set_attr "type" "simd_shf")
- (set_attr "mode" "<MODE>")])
-
(define_insn "lasx_xvextl_q_d"
[(set (match_operand:V4DI 0 "register_operand" "=f")
(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")]
diff --git a/gcc/config/loongarch/loongarch-builtins.cc b/gcc/config/loongarch/loongarch-builtins.cc
index db02aac..5d037ab 100644
--- a/gcc/config/loongarch/loongarch-builtins.cc
+++ b/gcc/config/loongarch/loongarch-builtins.cc
@@ -319,6 +319,14 @@ AVAIL_ALL (lasx, ISA_HAS_LASX)
#define CODE_FOR_lsx_vmod_hu CODE_FOR_umodv8hi3
#define CODE_FOR_lsx_vmod_wu CODE_FOR_umodv4si3
#define CODE_FOR_lsx_vmod_du CODE_FOR_umodv2di3
+#define CODE_FOR_lsx_vmuh_b CODE_FOR_smulv16qi3_highpart
+#define CODE_FOR_lsx_vmuh_h CODE_FOR_smulv8hi3_highpart
+#define CODE_FOR_lsx_vmuh_w CODE_FOR_smulv4si3_highpart
+#define CODE_FOR_lsx_vmuh_d CODE_FOR_smulv2di3_highpart
+#define CODE_FOR_lsx_vmuh_bu CODE_FOR_umulv16qi3_highpart
+#define CODE_FOR_lsx_vmuh_hu CODE_FOR_umulv8hi3_highpart
+#define CODE_FOR_lsx_vmuh_wu CODE_FOR_umulv4si3_highpart
+#define CODE_FOR_lsx_vmuh_du CODE_FOR_umulv2di3_highpart
#define CODE_FOR_lsx_vmul_b CODE_FOR_mulv16qi3
#define CODE_FOR_lsx_vmul_h CODE_FOR_mulv8hi3
#define CODE_FOR_lsx_vmul_w CODE_FOR_mulv4si3
@@ -361,6 +369,14 @@ AVAIL_ALL (lasx, ISA_HAS_LASX)
#define CODE_FOR_lsx_vsrli_h CODE_FOR_vlshrv8hi3
#define CODE_FOR_lsx_vsrli_w CODE_FOR_vlshrv4si3
#define CODE_FOR_lsx_vsrli_d CODE_FOR_vlshrv2di3
+#define CODE_FOR_lsx_vrotr_b CODE_FOR_vrotrv16qi3
+#define CODE_FOR_lsx_vrotr_h CODE_FOR_vrotrv8hi3
+#define CODE_FOR_lsx_vrotr_w CODE_FOR_vrotrv4si3
+#define CODE_FOR_lsx_vrotr_d CODE_FOR_vrotrv2di3
+#define CODE_FOR_lsx_vrotri_b CODE_FOR_rotrv16qi3
+#define CODE_FOR_lsx_vrotri_h CODE_FOR_rotrv8hi3
+#define CODE_FOR_lsx_vrotri_w CODE_FOR_rotrv4si3
+#define CODE_FOR_lsx_vrotri_d CODE_FOR_rotrv2di3
#define CODE_FOR_lsx_vsub_b CODE_FOR_subv16qi3
#define CODE_FOR_lsx_vsub_h CODE_FOR_subv8hi3
#define CODE_FOR_lsx_vsub_w CODE_FOR_subv4si3
@@ -419,8 +435,6 @@ AVAIL_ALL (lasx, ISA_HAS_LASX)
#define CODE_FOR_lsx_vabsd_hu CODE_FOR_lsx_vabsd_u_hu
#define CODE_FOR_lsx_vabsd_wu CODE_FOR_lsx_vabsd_u_wu
#define CODE_FOR_lsx_vabsd_du CODE_FOR_lsx_vabsd_u_du
-#define CODE_FOR_lsx_vftint_w_s CODE_FOR_lsx_vftint_s_w_s
-#define CODE_FOR_lsx_vftint_l_d CODE_FOR_lsx_vftint_s_l_d
#define CODE_FOR_lsx_vftint_wu_s CODE_FOR_lsx_vftint_u_wu_s
#define CODE_FOR_lsx_vftint_lu_d CODE_FOR_lsx_vftint_u_lu_d
#define CODE_FOR_lsx_vandn_v CODE_FOR_vandnv16qi3
@@ -441,14 +455,6 @@ AVAIL_ALL (lasx, ISA_HAS_LASX)
#define CODE_FOR_lsx_vfnmsub_s CODE_FOR_vfnmsubv4sf4_nmsub4
#define CODE_FOR_lsx_vfnmsub_d CODE_FOR_vfnmsubv2df4_nmsub4
-#define CODE_FOR_lsx_vmuh_b CODE_FOR_lsx_vmuh_s_b
-#define CODE_FOR_lsx_vmuh_h CODE_FOR_lsx_vmuh_s_h
-#define CODE_FOR_lsx_vmuh_w CODE_FOR_lsx_vmuh_s_w
-#define CODE_FOR_lsx_vmuh_d CODE_FOR_lsx_vmuh_s_d
-#define CODE_FOR_lsx_vmuh_bu CODE_FOR_lsx_vmuh_u_bu
-#define CODE_FOR_lsx_vmuh_hu CODE_FOR_lsx_vmuh_u_hu
-#define CODE_FOR_lsx_vmuh_wu CODE_FOR_lsx_vmuh_u_wu
-#define CODE_FOR_lsx_vmuh_du CODE_FOR_lsx_vmuh_u_du
#define CODE_FOR_lsx_vsllwil_h_b CODE_FOR_lsx_vsllwil_s_h_b
#define CODE_FOR_lsx_vsllwil_w_h CODE_FOR_lsx_vsllwil_s_w_h
#define CODE_FOR_lsx_vsllwil_d_w CODE_FOR_lsx_vsllwil_s_d_w
@@ -590,6 +596,14 @@ AVAIL_ALL (lasx, ISA_HAS_LASX)
#define CODE_FOR_lasx_xvmul_h CODE_FOR_mulv16hi3
#define CODE_FOR_lasx_xvmul_w CODE_FOR_mulv8si3
#define CODE_FOR_lasx_xvmul_d CODE_FOR_mulv4di3
+#define CODE_FOR_lasx_xvmuh_b CODE_FOR_smulv32qi3_highpart
+#define CODE_FOR_lasx_xvmuh_h CODE_FOR_smulv16hi3_highpart
+#define CODE_FOR_lasx_xvmuh_w CODE_FOR_smulv8si3_highpart
+#define CODE_FOR_lasx_xvmuh_d CODE_FOR_smulv4di3_highpart
+#define CODE_FOR_lasx_xvmuh_bu CODE_FOR_umulv32qi3_highpart
+#define CODE_FOR_lasx_xvmuh_hu CODE_FOR_umulv16hi3_highpart
+#define CODE_FOR_lasx_xvmuh_wu CODE_FOR_umulv8si3_highpart
+#define CODE_FOR_lasx_xvmuh_du CODE_FOR_umulv4di3_highpart
#define CODE_FOR_lasx_xvclz_b CODE_FOR_clzv32qi2
#define CODE_FOR_lasx_xvclz_h CODE_FOR_clzv16hi2
#define CODE_FOR_lasx_xvclz_w CODE_FOR_clzv8si2
@@ -628,6 +642,14 @@ AVAIL_ALL (lasx, ISA_HAS_LASX)
#define CODE_FOR_lasx_xvsrli_h CODE_FOR_vlshrv16hi3
#define CODE_FOR_lasx_xvsrli_w CODE_FOR_vlshrv8si3
#define CODE_FOR_lasx_xvsrli_d CODE_FOR_vlshrv4di3
+#define CODE_FOR_lasx_xvrotr_b CODE_FOR_vrotrv32qi3
+#define CODE_FOR_lasx_xvrotr_h CODE_FOR_vrotrv16hi3
+#define CODE_FOR_lasx_xvrotr_w CODE_FOR_vrotrv8si3
+#define CODE_FOR_lasx_xvrotr_d CODE_FOR_vrotrv4di3
+#define CODE_FOR_lasx_xvrotri_b CODE_FOR_rotrv32qi3
+#define CODE_FOR_lasx_xvrotri_h CODE_FOR_rotrv16hi3
+#define CODE_FOR_lasx_xvrotri_w CODE_FOR_rotrv8si3
+#define CODE_FOR_lasx_xvrotri_d CODE_FOR_rotrv4di3
#define CODE_FOR_lasx_xvsub_b CODE_FOR_subv32qi3
#define CODE_FOR_lasx_xvsub_h CODE_FOR_subv16hi3
#define CODE_FOR_lasx_xvsub_w CODE_FOR_subv8si3
@@ -699,14 +721,6 @@ AVAIL_ALL (lasx, ISA_HAS_LASX)
#define CODE_FOR_lasx_xvavgr_hu CODE_FOR_lasx_xvavgr_u_hu
#define CODE_FOR_lasx_xvavgr_wu CODE_FOR_lasx_xvavgr_u_wu
#define CODE_FOR_lasx_xvavgr_du CODE_FOR_lasx_xvavgr_u_du
-#define CODE_FOR_lasx_xvmuh_b CODE_FOR_lasx_xvmuh_s_b
-#define CODE_FOR_lasx_xvmuh_h CODE_FOR_lasx_xvmuh_s_h
-#define CODE_FOR_lasx_xvmuh_w CODE_FOR_lasx_xvmuh_s_w
-#define CODE_FOR_lasx_xvmuh_d CODE_FOR_lasx_xvmuh_s_d
-#define CODE_FOR_lasx_xvmuh_bu CODE_FOR_lasx_xvmuh_u_bu
-#define CODE_FOR_lasx_xvmuh_hu CODE_FOR_lasx_xvmuh_u_hu
-#define CODE_FOR_lasx_xvmuh_wu CODE_FOR_lasx_xvmuh_u_wu
-#define CODE_FOR_lasx_xvmuh_du CODE_FOR_lasx_xvmuh_u_du
#define CODE_FOR_lasx_xvssran_b_h CODE_FOR_lasx_xvssran_s_b_h
#define CODE_FOR_lasx_xvssran_h_w CODE_FOR_lasx_xvssran_s_h_w
#define CODE_FOR_lasx_xvssran_w_d CODE_FOR_lasx_xvssran_s_w_d
@@ -725,8 +739,6 @@ AVAIL_ALL (lasx, ISA_HAS_LASX)
#define CODE_FOR_lasx_xvssrlrn_bu_h CODE_FOR_lasx_xvssrlrn_u_bu_h
#define CODE_FOR_lasx_xvssrlrn_hu_w CODE_FOR_lasx_xvssrlrn_u_hu_w
#define CODE_FOR_lasx_xvssrlrn_wu_d CODE_FOR_lasx_xvssrlrn_u_wu_d
-#define CODE_FOR_lasx_xvftint_w_s CODE_FOR_lasx_xvftint_s_w_s
-#define CODE_FOR_lasx_xvftint_l_d CODE_FOR_lasx_xvftint_s_l_d
#define CODE_FOR_lasx_xvftint_wu_s CODE_FOR_lasx_xvftint_u_wu_s
#define CODE_FOR_lasx_xvftint_lu_d CODE_FOR_lasx_xvftint_u_lu_d
#define CODE_FOR_lasx_xvsllwil_h_b CODE_FOR_lasx_xvsllwil_s_h_b
diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
index d3896d7..3545e66 100644
--- a/gcc/config/loongarch/loongarch.cc
+++ b/gcc/config/loongarch/loongarch.cc
@@ -7840,15 +7840,13 @@ loongarch_handle_model_attribute (tree *node, tree name, tree arg, int,
return NULL_TREE;
}
-static const struct attribute_spec loongarch_attribute_table[] =
+TARGET_GNU_ATTRIBUTES (loongarch_attribute_table,
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
{ "model", 1, 1, true, false, false, false,
- loongarch_handle_model_attribute, NULL },
- /* The last attribute spec is set to be NULL. */
- {}
-};
+ loongarch_handle_model_attribute, NULL }
+});
bool
loongarch_use_anchors_for_symbol_p (const_rtx symbol)
@@ -8673,6 +8671,12 @@ loongarch_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel)
}
static bool
+loongarch_is_odd_extraction (struct expand_vec_perm_d *);
+
+static bool
+loongarch_is_even_extraction (struct expand_vec_perm_d *);
+
+static bool
loongarch_try_expand_lsx_vshuf_const (struct expand_vec_perm_d *d)
{
int i;
@@ -8694,6 +8698,24 @@ loongarch_try_expand_lsx_vshuf_const (struct expand_vec_perm_d *d)
if (d->testing_p)
return true;
+ /* If match extract-even and extract-odd permutations pattern, use
+ * vselect much better than vshuf. */
+ if (loongarch_is_odd_extraction (d)
+ || loongarch_is_even_extraction (d))
+ {
+ if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1,
+ d->perm, d->nelt))
+ return true;
+
+ unsigned char perm2[MAX_VECT_LEN];
+ for (i = 0; i < d->nelt; ++i)
+ perm2[i] = (d->perm[i] + d->nelt) & (2 * d->nelt - 1);
+
+ if (loongarch_expand_vselect_vconcat (d->target, d->op1, d->op0,
+ perm2, d->nelt))
+ return true;
+ }
+
for (i = 0; i < d->nelt; i += 1)
{
rperm[i] = GEN_INT (d->perm[i]);
@@ -8878,7 +8900,7 @@ loongarch_is_even_extraction (struct expand_vec_perm_d *d)
result = false;
break;
}
- buf += 1;
+ buf += 2;
}
return result;
@@ -8900,7 +8922,7 @@ loongarch_is_extraction_permutation (struct expand_vec_perm_d *d)
result = false;
break;
}
- buf += 2;
+ buf += 1;
}
return result;
@@ -9377,6 +9399,11 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d)
Selector after: { 1, 3, 1, 3 }.
Even extraction selector sample: E_V4DImode, { 0, 2, 4, 6 }
Selector after: { 0, 2, 0, 2 }. */
+
+ /* Better implement of extract-even and extract-odd permutations. */
+ if (loongarch_expand_vec_perm_even_odd (d))
+ return true;
+
for (i = 0; i < d->nelt / 2; i += 1)
{
idx = d->perm[i];
diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
index 115222e..fa8a3f5 100644
--- a/gcc/config/loongarch/loongarch.h
+++ b/gcc/config/loongarch/loongarch.h
@@ -288,10 +288,12 @@ along with GCC; see the file COPYING3. If not see
/* Define if loading short immediate values into registers sign extends. */
#define SHORT_IMMEDIATES_SIGN_EXTEND 1
-/* The clz.{w/d} instructions have the natural values at 0. */
+/* The clz.{w/d}, ctz.{w/d} instructions have the natural values at 0. */
#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2)
+#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
+ ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2)
/* Standard register usage. */
@@ -1239,8 +1241,3 @@ struct GTY (()) machine_function
#define TARGET_EXPLICIT_RELOCS \
(la_opt_explicit_relocs == EXPLICIT_RELOCS_ALWAYS)
-
-#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
- ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2)
-#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
- ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2)
diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
index cd4ed49..7a101dd 100644
--- a/gcc/config/loongarch/loongarch.md
+++ b/gcc/config/loongarch/loongarch.md
@@ -585,9 +585,6 @@
(define_int_attr lrint_submenmonic [(UNSPEC_FTINT "")
(UNSPEC_FTINTRM "rm")
(UNSPEC_FTINTRP "rp")])
-(define_int_attr lrint_allow_inexact [(UNSPEC_FTINT "1")
- (UNSPEC_FTINTRM "0")
- (UNSPEC_FTINTRP "0")])
;; Iterator and attributes for bytepick.d
(define_int_iterator bytepick_w_ashift_amount [8 16 24])
@@ -1515,7 +1512,30 @@
(set_attr "cnv_mode" "D2S")
(set_attr "mode" "SF")])
-
+;; In vector registers, popcount can be implemented directly through
+;; the vector instruction [X]VPCNT. For GP registers, we can implement
+;; it through the following method. Compared with loop implementation
+;; of popcount, the following method has better performance.
+
+;; This attribute used for get connection of scalar mode and corresponding
+;; vector mode.
+(define_mode_attr cntmap [(SI "v4si") (DI "v2di")])
+
+(define_expand "popcount<mode>2"
+ [(set (match_operand:GPR 0 "register_operand")
+ (popcount:GPR (match_operand:GPR 1 "register_operand")))]
+ "ISA_HAS_LSX"
+{
+ rtx in = operands[1];
+ rtx out = operands[0];
+ rtx vreg = <MODE>mode == SImode ? gen_reg_rtx (V4SImode) :
+ gen_reg_rtx (V2DImode);
+ emit_insn (gen_lsx_vinsgr2vr_<size> (vreg, in, vreg, GEN_INT (1)));
+ emit_insn (gen_popcount<cntmap>2 (vreg, vreg));
+ emit_insn (gen_lsx_vpickve2gr_<size> (out, vreg, GEN_INT (0)));
+ DONE;
+})
+
;;
;; ....................
;;
@@ -2384,7 +2404,7 @@
(unspec:ANYFI [(match_operand:ANYF 1 "register_operand" "f")]
LRINT))]
"TARGET_HARD_FLOAT &&
- (<lrint_allow_inexact>
+ (<LRINT> == UNSPEC_FTINT
|| flag_fp_int_builtin_inexact
|| !flag_trapping_math)"
"ftint<lrint_submenmonic>.<ANYFI:ifmt>.<ANYF:fmt> %0,%1"
@@ -3882,7 +3902,7 @@
(any_extend:SI (match_dup 3)))])]
"")
-
+
(define_mode_iterator QHSD [QI HI SI DI])
@@ -4026,11 +4046,8 @@
(include "generic.md")
(include "la464.md")
-; The LoongArch SX Instructions.
-(include "lsx.md")
-
-; The LoongArch ASX Instructions.
-(include "lasx.md")
+; The LoongArch SIMD Instructions.
+(include "simd.md")
(define_c_enum "unspec" [
UNSPEC_ADDRESS_FIRST
diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
index 5e8d8d7..2323999 100644
--- a/gcc/config/loongarch/lsx.md
+++ b/gcc/config/loongarch/lsx.md
@@ -55,7 +55,6 @@
UNSPEC_LSX_VFCMP_SULT
UNSPEC_LSX_VFCMP_SUN
UNSPEC_LSX_VFCMP_SUNE
- UNSPEC_LSX_VFTINT_S
UNSPEC_LSX_VFTINT_U
UNSPEC_LSX_VSAT_S
UNSPEC_LSX_VSAT_U
@@ -65,8 +64,6 @@
UNSPEC_LSX_VSRLR
UNSPEC_LSX_VSRLRI
UNSPEC_LSX_VSHUF
- UNSPEC_LSX_VMUH_S
- UNSPEC_LSX_VMUH_U
UNSPEC_LSX_VEXTW_S
UNSPEC_LSX_VEXTW_U
UNSPEC_LSX_VSLLWIL_S
@@ -89,9 +86,6 @@
UNSPEC_LSX_VEXTRINS
UNSPEC_LSX_VMSKLTZ
UNSPEC_LSX_VSIGNCOV
- UNSPEC_LSX_VFTINTRNE
- UNSPEC_LSX_VFTINTRP
- UNSPEC_LSX_VFTINTRM
UNSPEC_LSX_VFTINT_W_D
UNSPEC_LSX_VFFINT_S_L
UNSPEC_LSX_VFTINTRZ_W_D
@@ -110,14 +104,6 @@
UNSPEC_LSX_VFTINTRNEL_L_S
UNSPEC_LSX_VFTINTRNEH_L_S
UNSPEC_LSX_VFTINTH_L_H
- UNSPEC_LSX_VFRINTRNE_S
- UNSPEC_LSX_VFRINTRNE_D
- UNSPEC_LSX_VFRINTRZ_S
- UNSPEC_LSX_VFRINTRZ_D
- UNSPEC_LSX_VFRINTRP_S
- UNSPEC_LSX_VFRINTRP_D
- UNSPEC_LSX_VFRINTRM_S
- UNSPEC_LSX_VFRINTRM_D
UNSPEC_LSX_VSSRARN_S
UNSPEC_LSX_VSSRARN_U
UNSPEC_LSX_VSSRLN_U
@@ -155,7 +141,6 @@
UNSPEC_LSX_VMADDWOD
UNSPEC_LSX_VMADDWOD2
UNSPEC_LSX_VMADDWOD3
- UNSPEC_LSX_VROTR
UNSPEC_LSX_VADD_Q
UNSPEC_LSX_VSUB_Q
UNSPEC_LSX_VEXTH_Q_D
@@ -221,9 +206,6 @@
;; Only used for copy_{u,s}.w and vilvh.
(define_mode_iterator LSX_W [V4SI V4SF])
-;; Only integer modes.
-(define_mode_iterator ILSX [V2DI V4SI V8HI V16QI])
-
;; As ILSX but excludes V16QI.
(define_mode_iterator ILSX_DWH [V2DI V4SI V8HI])
@@ -242,21 +224,9 @@
;;;; Only integer modes for fixed-point madd_q/maddr_q.
;;(define_mode_iterator ILSX_WH [V4SI V8HI])
-;; Only floating-point modes.
-(define_mode_iterator FLSX [V2DF V4SF])
-
;; Only used for immediate set shuffle elements instruction.
(define_mode_iterator LSX_WHB_W [V4SI V8HI V16QI V4SF])
-;; The attribute gives the integer vector mode with same size.
-(define_mode_attr VIMODE
- [(V2DF "V2DI")
- (V4SF "V4SI")
- (V2DI "V2DI")
- (V4SI "V4SI")
- (V8HI "V8HI")
- (V16QI "V16QI")])
-
;; The attribute gives half modes for vector modes.
(define_mode_attr VHMODE
[(V8HI "V16QI")
@@ -392,46 +362,6 @@
(V8HI "exp_8")
(V16QI "exp_16")])
-;; This attribute is used to form an immediate operand constraint using
-;; "const_<bitimm>_operand".
-(define_mode_attr bitimm
- [(V16QI "uimm3")
- (V8HI "uimm4")
- (V4SI "uimm5")
- (V2DI "uimm6")])
-
-
-(define_int_iterator FRINT_S [UNSPEC_LSX_VFRINTRP_S
- UNSPEC_LSX_VFRINTRZ_S
- UNSPEC_LSX_VFRINT
- UNSPEC_LSX_VFRINTRM_S])
-
-(define_int_iterator FRINT_D [UNSPEC_LSX_VFRINTRP_D
- UNSPEC_LSX_VFRINTRZ_D
- UNSPEC_LSX_VFRINT
- UNSPEC_LSX_VFRINTRM_D])
-
-(define_int_attr frint_pattern_s
- [(UNSPEC_LSX_VFRINTRP_S "ceil")
- (UNSPEC_LSX_VFRINTRZ_S "btrunc")
- (UNSPEC_LSX_VFRINT "rint")
- (UNSPEC_LSX_VFRINTRM_S "floor")])
-
-(define_int_attr frint_pattern_d
- [(UNSPEC_LSX_VFRINTRP_D "ceil")
- (UNSPEC_LSX_VFRINTRZ_D "btrunc")
- (UNSPEC_LSX_VFRINT "rint")
- (UNSPEC_LSX_VFRINTRM_D "floor")])
-
-(define_int_attr frint_suffix
- [(UNSPEC_LSX_VFRINTRP_S "rp")
- (UNSPEC_LSX_VFRINTRP_D "rp")
- (UNSPEC_LSX_VFRINTRZ_S "rz")
- (UNSPEC_LSX_VFRINTRZ_D "rz")
- (UNSPEC_LSX_VFRINT "")
- (UNSPEC_LSX_VFRINTRM_S "rm")
- (UNSPEC_LSX_VFRINTRM_D "rm")])
-
(define_expand "vec_init<mode><unitmode>"
[(match_operand:LSX 0 "register_operand")
(match_operand:LSX 1 "")]
@@ -1616,15 +1546,6 @@
[(set_attr "type" "simd_fdiv")
(set_attr "mode" "<MODE>")])
-(define_insn "lsx_vfrint_<flsxfmt>"
- [(set (match_operand:FLSX 0 "register_operand" "=f")
- (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")]
- UNSPEC_LSX_VFRINT))]
- "ISA_HAS_LSX"
- "vfrint.<flsxfmt>\t%w0,%w1"
- [(set_attr "type" "simd_fcvt")
- (set_attr "mode" "<MODE>")])
-
(define_insn "lsx_vfrsqrt_<flsxfmt>"
[(set (match_operand:FLSX 0 "register_operand" "=f")
(unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")]
@@ -1634,16 +1555,6 @@
[(set_attr "type" "simd_fdiv")
(set_attr "mode" "<MODE>")])
-(define_insn "lsx_vftint_s_<ilsxfmt>_<flsxfmt>"
- [(set (match_operand:<VIMODE> 0 "register_operand" "=f")
- (unspec:<VIMODE> [(match_operand:FLSX 1 "register_operand" "f")]
- UNSPEC_LSX_VFTINT_S))]
- "ISA_HAS_LSX"
- "vftint.<ilsxfmt>.<flsxfmt>\t%w0,%w1"
- [(set_attr "type" "simd_fcvt")
- (set_attr "cnv_mode" "<FINTCNV_2>")
- (set_attr "mode" "<MODE>")])
-
(define_insn "lsx_vftint_u_<ilsxfmt_u>_<flsxfmt>"
[(set (match_operand:<VIMODE> 0 "register_operand" "=f")
(unspec:<VIMODE> [(match_operand:FLSX 1 "register_operand" "f")]
@@ -1654,15 +1565,6 @@
(set_attr "cnv_mode" "<FINTCNV_2>")
(set_attr "mode" "<MODE>")])
-(define_insn "fix_trunc<FLSX:mode><mode_i>2"
- [(set (match_operand:<VIMODE> 0 "register_operand" "=f")
- (fix:<VIMODE> (match_operand:FLSX 1 "register_operand" "f")))]
- "ISA_HAS_LSX"
- "vftintrz.<ilsxfmt>.<flsxfmt>\t%w0,%w1"
- [(set_attr "type" "simd_fcvt")
- (set_attr "cnv_mode" "<FINTCNV_2>")
- (set_attr "mode" "<MODE>")])
-
(define_insn "fixuns_trunc<FLSX:mode><mode_i>2"
[(set (match_operand:<VIMODE> 0 "register_operand" "=f")
(unsigned_fix:<VIMODE> (match_operand:FLSX 1 "register_operand" "f")))]
@@ -2593,26 +2495,6 @@
[(set_attr "type" "simd_logic")
(set_attr "mode" "<MODE>")])
-(define_insn "lsx_vmuh_s_<lsxfmt>"
- [(set (match_operand:ILSX 0 "register_operand" "=f")
- (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
- (match_operand:ILSX 2 "register_operand" "f")]
- UNSPEC_LSX_VMUH_S))]
- "ISA_HAS_LSX"
- "vmuh.<lsxfmt>\t%w0,%w1,%w2"
- [(set_attr "type" "simd_int_arith")
- (set_attr "mode" "<MODE>")])
-
-(define_insn "lsx_vmuh_u_<lsxfmt_u>"
- [(set (match_operand:ILSX 0 "register_operand" "=f")
- (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
- (match_operand:ILSX 2 "register_operand" "f")]
- UNSPEC_LSX_VMUH_U))]
- "ISA_HAS_LSX"
- "vmuh.<lsxfmt_u>\t%w0,%w1,%w2"
- [(set_attr "type" "simd_int_arith")
- (set_attr "mode" "<MODE>")])
-
(define_insn "lsx_vextw_s_d"
[(set (match_operand:V2DI 0 "register_operand" "=f")
(unspec:V2DI [(match_operand:V4SI 1 "register_operand" "f")]
@@ -2965,60 +2847,6 @@
[(set_attr "type" "simd_fmadd")
(set_attr "mode" "<MODE>")])
-(define_insn "lsx_vftintrne_w_s"
- [(set (match_operand:V4SI 0 "register_operand" "=f")
- (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")]
- UNSPEC_LSX_VFTINTRNE))]
- "ISA_HAS_LSX"
- "vftintrne.w.s\t%w0,%w1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V4SF")])
-
-(define_insn "lsx_vftintrne_l_d"
- [(set (match_operand:V2DI 0 "register_operand" "=f")
- (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")]
- UNSPEC_LSX_VFTINTRNE))]
- "ISA_HAS_LSX"
- "vftintrne.l.d\t%w0,%w1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V2DF")])
-
-(define_insn "lsx_vftintrp_w_s"
- [(set (match_operand:V4SI 0 "register_operand" "=f")
- (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")]
- UNSPEC_LSX_VFTINTRP))]
- "ISA_HAS_LSX"
- "vftintrp.w.s\t%w0,%w1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V4SF")])
-
-(define_insn "lsx_vftintrp_l_d"
- [(set (match_operand:V2DI 0 "register_operand" "=f")
- (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")]
- UNSPEC_LSX_VFTINTRP))]
- "ISA_HAS_LSX"
- "vftintrp.l.d\t%w0,%w1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V2DF")])
-
-(define_insn "lsx_vftintrm_w_s"
- [(set (match_operand:V4SI 0 "register_operand" "=f")
- (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")]
- UNSPEC_LSX_VFTINTRM))]
- "ISA_HAS_LSX"
- "vftintrm.w.s\t%w0,%w1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V4SF")])
-
-(define_insn "lsx_vftintrm_l_d"
- [(set (match_operand:V2DI 0 "register_operand" "=f")
- (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")]
- UNSPEC_LSX_VFTINTRM))]
- "ISA_HAS_LSX"
- "vftintrm.l.d\t%w0,%w1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V2DF")])
-
(define_insn "lsx_vftint_w_d"
[(set (match_operand:V4SI 0 "register_operand" "=f")
(unspec:V4SI [(match_operand:V2DF 1 "register_operand" "f")
@@ -3187,108 +3015,6 @@
[(set_attr "type" "simd_shift")
(set_attr "mode" "V4SF")])
-(define_insn "lsx_vfrintrne_s"
- [(set (match_operand:V4SF 0 "register_operand" "=f")
- (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")]
- UNSPEC_LSX_VFRINTRNE_S))]
- "ISA_HAS_LSX"
- "vfrintrne.s\t%w0,%w1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V4SF")])
-
-(define_insn "lsx_vfrintrne_d"
- [(set (match_operand:V2DF 0 "register_operand" "=f")
- (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")]
- UNSPEC_LSX_VFRINTRNE_D))]
- "ISA_HAS_LSX"
- "vfrintrne.d\t%w0,%w1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V2DF")])
-
-(define_insn "lsx_vfrintrz_s"
- [(set (match_operand:V4SF 0 "register_operand" "=f")
- (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")]
- UNSPEC_LSX_VFRINTRZ_S))]
- "ISA_HAS_LSX"
- "vfrintrz.s\t%w0,%w1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V4SF")])
-
-(define_insn "lsx_vfrintrz_d"
- [(set (match_operand:V2DF 0 "register_operand" "=f")
- (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")]
- UNSPEC_LSX_VFRINTRZ_D))]
- "ISA_HAS_LSX"
- "vfrintrz.d\t%w0,%w1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V2DF")])
-
-(define_insn "lsx_vfrintrp_s"
- [(set (match_operand:V4SF 0 "register_operand" "=f")
- (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")]
- UNSPEC_LSX_VFRINTRP_S))]
- "ISA_HAS_LSX"
- "vfrintrp.s\t%w0,%w1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V4SF")])
-
-(define_insn "lsx_vfrintrp_d"
- [(set (match_operand:V2DF 0 "register_operand" "=f")
- (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")]
- UNSPEC_LSX_VFRINTRP_D))]
- "ISA_HAS_LSX"
- "vfrintrp.d\t%w0,%w1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V2DF")])
-
-(define_insn "lsx_vfrintrm_s"
- [(set (match_operand:V4SF 0 "register_operand" "=f")
- (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")]
- UNSPEC_LSX_VFRINTRM_S))]
- "ISA_HAS_LSX"
- "vfrintrm.s\t%w0,%w1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V4SF")])
-
-(define_insn "lsx_vfrintrm_d"
- [(set (match_operand:V2DF 0 "register_operand" "=f")
- (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")]
- UNSPEC_LSX_VFRINTRM_D))]
- "ISA_HAS_LSX"
- "vfrintrm.d\t%w0,%w1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V2DF")])
-
-;; Vector versions of the floating-point frint patterns.
-;; Expands to btrunc, ceil, floor, rint.
-(define_insn "<FRINT_S:frint_pattern_s>v4sf2"
- [(set (match_operand:V4SF 0 "register_operand" "=f")
- (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")]
- FRINT_S))]
- "ISA_HAS_LSX"
- "vfrint<FRINT_S:frint_suffix>.s\t%w0,%w1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V4SF")])
-
-(define_insn "<FRINT_D:frint_pattern_d>v2df2"
- [(set (match_operand:V2DF 0 "register_operand" "=f")
- (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")]
- FRINT_D))]
- "ISA_HAS_LSX"
- "vfrint<FRINT_D:frint_suffix>.d\t%w0,%w1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "V2DF")])
-
-;; Expands to round.
-(define_insn "round<mode>2"
- [(set (match_operand:FLSX 0 "register_operand" "=f")
- (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")]
- UNSPEC_LSX_VFRINT))]
- "ISA_HAS_LSX"
- "vfrint.<flsxfrint>\t%w0,%w1"
- [(set_attr "type" "simd_shift")
- (set_attr "mode" "<MODE>")])
-
;; Offset load and broadcast
(define_expand "lsx_vldrepl_<lsxfmt_f>"
[(match_operand:LSX 0 "register_operand")
@@ -4417,16 +4143,6 @@
[(set_attr "type" "simd_int_arith")
(set_attr "mode" "V2DI")])
-(define_insn "lsx_vrotr_<lsxfmt>"
- [(set (match_operand:ILSX 0 "register_operand" "=f")
- (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
- (match_operand:ILSX 2 "register_operand" "f")]
- UNSPEC_LSX_VROTR))]
- "ISA_HAS_LSX"
- "vrotr.<lsxfmt>\t%w0,%w1,%w2"
- [(set_attr "type" "simd_int_arith")
- (set_attr "mode" "<MODE>")])
-
(define_insn "lsx_vadd_q"
[(set (match_operand:V2DI 0 "register_operand" "=f")
(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
@@ -4520,15 +4236,6 @@
[(set_attr "type" "simd_fcvt")
(set_attr "mode" "V2DI")])
-(define_insn "lsx_vrotri_<lsxfmt>"
- [(set (match_operand:ILSX 0 "register_operand" "=f")
- (rotatert:ILSX (match_operand:ILSX 1 "register_operand" "f")
- (match_operand 2 "const_<bitimm>_operand" "")))]
- "ISA_HAS_LSX"
- "vrotri.<lsxfmt>\t%w0,%w1,%2"
- [(set_attr "type" "simd_shf")
- (set_attr "mode" "<MODE>")])
-
(define_insn "lsx_vextl_q_d"
[(set (match_operand:V2DI 0 "register_operand" "=f")
(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")]
diff --git a/gcc/config/loongarch/simd.md b/gcc/config/loongarch/simd.md
new file mode 100644
index 0000000..843b1a4
--- /dev/null
+++ b/gcc/config/loongarch/simd.md
@@ -0,0 +1,286 @@
+;; Machine Description for LoongArch SIMD instructions for GNU compiler.
+;; Copyright (C) 2023 Free Software Foundation, Inc.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Integer modes supported by LSX.
+(define_mode_iterator ILSX [V2DI V4SI V8HI V16QI])
+
+;; Integer modes supported by LASX.
+(define_mode_iterator ILASX [V4DI V8SI V16HI V32QI])
+
+;; FP modes supported by LSX
+(define_mode_iterator FLSX [V2DF V4SF])
+
+;; FP modes supported by LASX
+(define_mode_iterator FLASX [V4DF V8SF])
+
+;; All integer modes available
+(define_mode_iterator IVEC [(ILSX "ISA_HAS_LSX") (ILASX "ISA_HAS_LASX")])
+
+;; All FP modes available
+(define_mode_iterator FVEC [(FLSX "ISA_HAS_LSX") (FLASX "ISA_HAS_LASX")])
+
+;; Mnemonic prefix, "x" for LASX modes.
+(define_mode_attr x [(V2DI "") (V4SI "") (V8HI "") (V16QI "")
+ (V2DF "") (V4SF "")
+ (V4DI "x") (V8SI "x") (V16HI "x") (V32QI "x")
+ (V4DF "x") (V8SF "x")])
+
+;; Modifier for vector register, "w" for LSX modes, "u" for LASX modes.
+(define_mode_attr wu [(V2DI "w") (V4SI "w") (V8HI "w") (V16QI "w")
+ (V2DF "w") (V4SF "w")
+ (V4DI "u") (V8SI "u") (V16HI "u") (V32QI "u")
+ (V4DF "u") (V8SF "u")])
+
+;; define_insn name prefix, "lsx" or "lasx"
+(define_mode_attr simd_isa
+ [(V2DI "lsx") (V4SI "lsx") (V8HI "lsx") (V16QI "lsx")
+ (V2DF "lsx") (V4SF "lsx")
+ (V4DI "lasx") (V8SI "lasx") (V16HI "lasx") (V32QI "lasx")
+ (V4DF "lasx") (V8SF "lasx")])
+
+;; Widen integer modes for intermediate values in RTX pattern.
+(define_mode_attr WVEC [(V2DI "V2TI") (V4DI "V4TI")
+ (V4SI "V4DI") (V8SI "V8DI")
+ (V8HI "V8SI") (V16HI "V16SI")
+ (V16QI "V16HI") (V32QI "V32HI")])
+
+;; Integer vector modes with the same length and unit size as a mode.
+(define_mode_attr VIMODE [(V2DI "V2DI") (V4SI "V4SI")
+ (V8HI "V8HI") (V16QI "V16QI")
+ (V2DF "V2DI") (V4SF "V4SI")
+ (V4DI "V4DI") (V8SI "V8SI")
+ (V16HI "V16HI") (V32QI "V32QI")
+ (V4DF "V4DI") (V8SF "V8SI")])
+
+;; Lower-case version.
+(define_mode_attr vimode [(V2DF "v2di") (V4SF "v4si")
+ (V4DF "v4di") (V8SF "v8si")])
+
+;; Suffix for LSX or LASX instructions.
+(define_mode_attr simdfmt [(V2DF "d") (V4DF "d")
+ (V4SF "s") (V8SF "s")
+ (V2DI "d") (V4DI "d")
+ (V4SI "w") (V8SI "w")
+ (V8HI "h") (V16HI "h")
+ (V16QI "b") (V32QI "b")])
+
+;; Suffix for integer mode in LSX or LASX instructions with FP input but
+;; integer output.
+(define_mode_attr simdifmt_for_f [(V2DF "l") (V4DF "l")
+ (V4SF "w") (V8SF "w")])
+
+;; Size of vector elements in bits.
+(define_mode_attr elmbits [(V2DI "64") (V4DI "64")
+ (V4SI "32") (V8SI "32")
+ (V8HI "16") (V16HI "16")
+ (V16QI "8") (V32QI "8")])
+
+;; This attribute is used to form an immediate operand constraint using
+;; "const_<bitimm>_operand".
+(define_mode_attr bitimm [(V16QI "uimm3") (V32QI "uimm3")
+ (V8HI "uimm4") (V16HI "uimm4")
+ (V4SI "uimm5") (V8SI "uimm5")
+ (V2DI "uimm6") (V4DI "uimm6")])
+
+;; =======================================================================
+;; For many LASX instructions, the only difference of it from the LSX
+;; counterpart is the length of vector operands. Describe these LSX/LASX
+;; instruction here so we can avoid duplicating logics.
+;; =======================================================================
+
+;;
+;; FP vector rounding instructions
+;;
+
+(define_c_enum "unspec"
+ [UNSPEC_SIMD_FRINTRP
+ UNSPEC_SIMD_FRINTRZ
+ UNSPEC_SIMD_FRINT
+ UNSPEC_SIMD_FRINTRM
+ UNSPEC_SIMD_FRINTRNE])
+
+(define_int_iterator SIMD_FRINT
+ [UNSPEC_SIMD_FRINTRP
+ UNSPEC_SIMD_FRINTRZ
+ UNSPEC_SIMD_FRINT
+ UNSPEC_SIMD_FRINTRM
+ UNSPEC_SIMD_FRINTRNE])
+
+(define_int_attr simd_frint_rounding
+ [(UNSPEC_SIMD_FRINTRP "rp")
+ (UNSPEC_SIMD_FRINTRZ "rz")
+ (UNSPEC_SIMD_FRINT "")
+ (UNSPEC_SIMD_FRINTRM "rm")
+ (UNSPEC_SIMD_FRINTRNE "rne")])
+
+;; All these, but rint, are controlled by -ffp-int-builtin-inexact.
+;; Note: nearbyint is NOT allowed to raise FE_INEXACT even if
+;; -ffp-int-builtin-inexact, but rint is ALLOWED to raise it even if
+;; -fno-fp-int-builtin-inexact.
+(define_int_attr simd_frint_pattern
+ [(UNSPEC_SIMD_FRINTRP "ceil")
+ (UNSPEC_SIMD_FRINTRZ "btrunc")
+ (UNSPEC_SIMD_FRINT "rint")
+ (UNSPEC_SIMD_FRINTRNE "roundeven")
+ (UNSPEC_SIMD_FRINTRM "floor")])
+
+;; <x>vfrint.{/rp/rz/rm}
+(define_insn "<simd_isa>_<x>vfrint<simd_frint_rounding>_<simdfmt>"
+ [(set (match_operand:FVEC 0 "register_operand" "=f")
+ (unspec:FVEC [(match_operand:FVEC 1 "register_operand" "f")]
+ SIMD_FRINT))]
+ ""
+ "<x>vfrint<simd_frint_rounding>.<simdfmt>\t%<wu>0,%<wu>1"
+ [(set_attr "type" "simd_fcvt")
+ (set_attr "mode" "<MODE>")])
+
+;; Expand the standard-named patterns to <x>vfrint instructions if
+;; raising inexact exception is allowed.
+
+(define_expand "<simd_frint_pattern><mode>2"
+ [(set (match_operand:FVEC 0 "register_operand" "=f")
+ (unspec:FVEC [(match_operand:FVEC 1 "register_operand" "f")]
+ SIMD_FRINT))]
+ "<SIMD_FRINT> == UNSPEC_SIMD_FRINT ||
+ flag_fp_int_builtin_inexact ||
+ !flag_trapping_math")
+
+;; ftrunc is like btrunc, but it's allowed to raise inexact exception
+;; even if -fno-fp-int-builtin-inexact.
+(define_expand "ftrunc<mode>2"
+ [(set (match_operand:FVEC 0 "register_operand" "=f")
+ (unspec:FVEC [(match_operand:FVEC 1 "register_operand" "f")]
+ UNSPEC_SIMD_FRINTRZ))]
+ "")
+
+;; Use LSX for scalar ceil/floor/trunc/roundeven when -mlsx and -ffp-int-
+;; builtin-inexact. The base FP instruction set lacks these operations.
+;; Yes we are wasting 50% or even 75% of the CPU horsepower, but it's still
+;; much faster than calling a libc function: on LA464 and LA664 there is a
+;; 3x ~ 5x speed up.
+;;
+;; Note that a vreplvei instruction is needed or we'll also operate on the
+;; junk in high bits of the vector register and produce random FP exceptions.
+
+(define_int_iterator LSX_SCALAR_FRINT
+ [UNSPEC_SIMD_FRINTRP
+ UNSPEC_SIMD_FRINTRZ
+ UNSPEC_SIMD_FRINTRM
+ UNSPEC_SIMD_FRINTRNE])
+
+(define_mode_attr VLSX_FOR_FMODE [(DF "V2DF") (SF "V4SF")])
+
+(define_expand "<simd_frint_pattern><mode>2"
+ [(set (match_dup 2)
+ (vec_duplicate:<VLSX_FOR_FMODE>
+ (match_operand:ANYF 1 "register_operand")))
+ (set (match_dup 2)
+ (unspec:<VLSX_FOR_FMODE> [(match_dup 2)] LSX_SCALAR_FRINT))
+ (set (match_operand:ANYF 0 "register_operand")
+ (vec_select:ANYF (match_dup 2) (parallel [(const_int 0)])))]
+ "ISA_HAS_LSX && (flag_fp_int_builtin_inexact || !flag_trapping_math)"
+ "operands[2] = gen_reg_rtx (<VLSX_FOR_FMODE>mode);")
+
+;; <x>vftint.{/rp/rz/rm}
+(define_insn
+ "<simd_isa>_<x>vftint<simd_frint_rounding>_<simdifmt_for_f>_<simdfmt>"
+ [(set (match_operand:<VIMODE> 0 "register_operand" "=f")
+ (fix:<VIMODE>
+ (unspec:FVEC [(match_operand:FVEC 1 "register_operand" "f")]
+ SIMD_FRINT)))]
+ ""
+ "<x>vftint<simd_frint_rounding>.<simdifmt_for_f>.<simdfmt>\t%<wu>0,%<wu>1"
+ [(set_attr "type" "simd_fcvt")
+ (set_attr "mode" "<MODE>")])
+
+;; Expand the standard-named patterns to <x>vftint instructions if
+;; raising inexact exception.
+
+(define_expand "l<simd_frint_pattern><mode><vimode>2"
+ [(set (match_operand:<VIMODE> 0 "register_operand" "=f")
+ (fix:<VIMODE>
+ (unspec:FVEC [(match_operand:FVEC 1 "register_operand" "f")]
+ SIMD_FRINT)))]
+ "<SIMD_FRINT> == UNSPEC_SIMD_FRINT ||
+ flag_fp_int_builtin_inexact ||
+ !flag_trapping_math")
+
+;; fix_trunc is allowed to raise inexact exception even if
+;; -fno-fp-int-builtin-inexact. Because the middle end trys to match
+;; (FIX x) and it does not know (FIX (UNSPEC_SIMD_FRINTRZ x)), we need
+;; to use define_insn_and_split instead of define_expand (expanders are
+;; not considered during matching).
+(define_insn_and_split "fix_trunc<mode><vimode>2"
+ [(set (match_operand:<VIMODE> 0 "register_operand" "=f")
+ (fix:<VIMODE> (match_operand:FVEC 1 "register_operand" "f")))]
+ ""
+ "#"
+ ""
+ [(const_int 0)]
+ {
+ emit_insn (gen_<simd_isa>_<x>vftintrz_<simdifmt_for_f>_<simdfmt> (
+ operands[0], operands[1]));
+ DONE;
+ }
+ [(set_attr "type" "simd_fcvt")
+ (set_attr "mode" "<MODE>")])
+
+;; <x>vmuh.{b/h/w/d}
+
+(define_code_attr muh
+ [(sign_extend "smul_highpart")
+ (zero_extend "umul_highpart")])
+
+(define_insn "<su>mul<mode>3_highpart"
+ [(set (match_operand:IVEC 0 "register_operand" "=f")
+ (<muh>:IVEC (match_operand:IVEC 1 "register_operand" "f")
+ (match_operand:IVEC 2 "register_operand" "f")))
+ (any_extend (const_int 0))]
+ ""
+ "<x>vmuh.<simdfmt><u>\t%<wu>0,%<wu>1,%<wu>2"
+ [(set_attr "type" "simd_int_arith")
+ (set_attr "mode" "<MODE>")])
+
+;; <x>vrotr.{b/h/w/d}
+
+(define_insn "vrotr<mode>3"
+ [(set (match_operand:IVEC 0 "register_operand" "=f")
+ (rotatert:IVEC (match_operand:IVEC 1 "register_operand" "f")
+ (match_operand:IVEC 2 "register_operand" "f")))]
+ ""
+ "<x>vrotr.<simdfmt>\t%<wu>0,%<wu>1,%<wu>2"
+ [(set_attr "type" "simd_int_arith")
+ (set_attr "mode" "<MODE>")])
+
+;; <x>vrotri.{b/h/w/d}
+
+(define_insn "rotr<mode>3"
+ [(set (match_operand:IVEC 0 "register_operand" "=f")
+ (rotatert:IVEC (match_operand:IVEC 1 "register_operand" "f")
+ (match_operand:SI 2 "const_<bitimm>_operand")))]
+ ""
+ "<x>vrotri.<simdfmt>\t%<wu>0,%<wu>1,%2";
+ [(set_attr "type" "simd_int_arith")
+ (set_attr "mode" "<MODE>")])
+
+; The LoongArch SX Instructions.
+(include "lsx.md")
+
+; The LoongArch ASX Instructions.
+(include "lasx.md")
diff --git a/gcc/config/m32c/m32c.cc b/gcc/config/m32c/m32c.cc
index e18efc3..c63c75a 100644
--- a/gcc/config/m32c/m32c.cc
+++ b/gcc/config/m32c/m32c.cc
@@ -2999,7 +2999,7 @@ current_function_special_page_vector (rtx x)
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
-static const struct attribute_spec m32c_attribute_table[] = {
+TARGET_GNU_ATTRIBUTES (m32c_attribute_table, {
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
{ "interrupt", 0, 0, false, false, false, false, interrupt_handler, NULL },
@@ -3007,9 +3007,8 @@ static const struct attribute_spec m32c_attribute_table[] = {
{ "fast_interrupt", 0, 0, false, false, false, false,
interrupt_handler, NULL },
{ "function_vector", 1, 1, true, false, false, false,
- function_vector_handler, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ function_vector_handler, NULL }
+});
#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
diff --git a/gcc/config/m32r/m32r.cc b/gcc/config/m32r/m32r.cc
index 63a1798..1a9c8ef 100644
--- a/gcc/config/m32r/m32r.cc
+++ b/gcc/config/m32r/m32r.cc
@@ -112,15 +112,14 @@ static HOST_WIDE_INT m32r_starting_frame_offset (void);
/* M32R specific attributes. */
-static const struct attribute_spec m32r_attribute_table[] =
+TARGET_GNU_ATTRIBUTES (m32r_attribute_table,
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
{ "interrupt", 0, 0, true, false, false, false, NULL, NULL },
{ "model", 1, 1, true, false, false, false, m32r_handle_model_attribute,
- NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ NULL }
+});
/* Initialize the GCC target structure. */
#undef TARGET_ATTRIBUTE_TABLE
diff --git a/gcc/config/m68k/m68k.cc b/gcc/config/m68k/m68k.cc
index 145a92d..001cf5b 100644
--- a/gcc/config/m68k/m68k.cc
+++ b/gcc/config/m68k/m68k.cc
@@ -361,7 +361,7 @@ static void m68k_asm_final_postscan_insn (FILE *, rtx_insn *insn, rtx [], int);
#undef TARGET_ASM_FINAL_POSTSCAN_INSN
#define TARGET_ASM_FINAL_POSTSCAN_INSN m68k_asm_final_postscan_insn
-static const struct attribute_spec m68k_attribute_table[] =
+TARGET_GNU_ATTRIBUTES (m68k_attribute_table,
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
@@ -370,9 +370,8 @@ static const struct attribute_spec m68k_attribute_table[] =
{ "interrupt_handler", 0, 0, true, false, false, false,
m68k_handle_fndecl_attribute, NULL },
{ "interrupt_thread", 0, 0, true, false, false, false,
- m68k_handle_fndecl_attribute, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ m68k_handle_fndecl_attribute, NULL }
+});
struct gcc_target targetm = TARGET_INITIALIZER;
diff --git a/gcc/config/mcore/mcore.cc b/gcc/config/mcore/mcore.cc
index 6f1d7af..ca67254 100644
--- a/gcc/config/mcore/mcore.cc
+++ b/gcc/config/mcore/mcore.cc
@@ -151,16 +151,15 @@ static bool mcore_modes_tieable_p (machine_mode, machine_mode);
/* MCore specific attributes. */
-static const struct attribute_spec mcore_attribute_table[] =
+TARGET_GNU_ATTRIBUTES (mcore_attribute_table,
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
{ "dllexport", 0, 0, true, false, false, false, NULL, NULL },
{ "dllimport", 0, 0, true, false, false, false, NULL, NULL },
{ "naked", 0, 0, true, false, false, false,
- mcore_handle_naked_attribute, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ mcore_handle_naked_attribute, NULL }
+});
/* Initialize the GCC target structure. */
#undef TARGET_ASM_EXTERNAL_LIBCALL
diff --git a/gcc/config/microblaze/microblaze.cc b/gcc/config/microblaze/microblaze.cc
index 60ad551..3ea177b 100644
--- a/gcc/config/microblaze/microblaze.cc
+++ b/gcc/config/microblaze/microblaze.cc
@@ -218,15 +218,14 @@ int break_handler;
int fast_interrupt;
int save_volatiles;
-const struct attribute_spec microblaze_attribute_table[] = {
+TARGET_GNU_ATTRIBUTES (microblaze_attribute_table, {
/* name min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude */
{"interrupt_handler", 0, 0, true, false, false, false, NULL, NULL },
{"break_handler", 0, 0, true, false, false, false, NULL, NULL },
{"fast_interrupt", 0, 0, true, false, false, false, NULL, NULL },
- {"save_volatiles", 0, 0, true, false, false, false, NULL, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ {"save_volatiles", 0, 0, true, false, false, false, NULL, NULL }
+});
static int microblaze_interrupt_function_p (tree);
diff --git a/gcc/config/microblaze/microblaze.md b/gcc/config/microblaze/microblaze.md
index 671667b..a8ee886 100644
--- a/gcc/config/microblaze/microblaze.md
+++ b/gcc/config/microblaze/microblaze.md
@@ -1089,8 +1089,8 @@
"@
addik\t%0,r0,%1\t# %X1
addk\t%0,%1,r0
- lhui\t%0,%1
- lhui\t%0,%1
+ lhu%i1\t%0,%1
+ lhu%i1\t%0,%1
sh%i0\t%z1,%0
sh%i0\t%z1,%0"
[(set_attr "type" "arith,move,load,no_delay_load,store,no_delay_store")
diff --git a/gcc/config/mips/mips.cc b/gcc/config/mips/mips.cc
index cbd7d9b..9180dbb 100644
--- a/gcc/config/mips/mips.cc
+++ b/gcc/config/mips/mips.cc
@@ -611,7 +611,7 @@ static tree mips_handle_use_shadow_register_set_attr (tree *, tree, tree, int,
bool *);
/* The value of TARGET_ATTRIBUTE_TABLE. */
-static const struct attribute_spec mips_attribute_table[] = {
+TARGET_GNU_ATTRIBUTES (mips_attribute_table, {
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
{ "long_call", 0, 0, false, true, true, false, NULL, NULL },
@@ -636,9 +636,8 @@ static const struct attribute_spec mips_attribute_table[] = {
mips_handle_use_shadow_register_set_attr, NULL },
{ "keep_interrupts_masked", 0, 0, false, true, true, false, NULL, NULL },
{ "use_debug_exception_return", 0, 0, false, true, true, false, NULL, NULL },
- { "use_hazard_barrier_return", 0, 0, true, false, false, false, NULL, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ { "use_hazard_barrier_return", 0, 0, true, false, false, false, NULL, NULL }
+});
/* A table describing all the processors GCC knows about; see
mips-cpus.def for details. */
diff --git a/gcc/config/mn10300/mn10300.cc b/gcc/config/mn10300/mn10300.cc
index cd1de1b..d56247a 100644
--- a/gcc/config/mn10300/mn10300.cc
+++ b/gcc/config/mn10300/mn10300.cc
@@ -2850,7 +2850,8 @@ mn10300_conditional_register_usage (void)
static rtx_insn *
mn10300_md_asm_adjust (vec<rtx> & /*outputs*/, vec<rtx> & /*inputs*/,
vec<machine_mode> & /*input_modes*/,
- vec<const char *> & /*constraints*/, vec<rtx> &clobbers,
+ vec<const char *> & /*constraints*/,
+ vec<rtx> &/*uses*/, vec<rtx> &clobbers,
HARD_REG_SET &clobbered_regs, location_t /*loc*/)
{
clobbers.safe_push (gen_rtx_REG (CCmode, CC_REG));
diff --git a/gcc/config/msp430/msp430.cc b/gcc/config/msp430/msp430.cc
index 061a9c7..85f499f 100644
--- a/gcc/config/msp430/msp430.cc
+++ b/gcc/config/msp430/msp430.cc
@@ -2057,7 +2057,7 @@ static const struct attribute_spec::exclusions attr_either_exclusions[] =
#define TARGET_ATTRIBUTE_TABLE msp430_attribute_table
/* Table of MSP430-specific attributes. */
-const struct attribute_spec msp430_attribute_table[] =
+TARGET_GNU_ATTRIBUTES (msp430_attribute_table,
{
/* { name, min_num_args, max_num_args, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
@@ -2075,10 +2075,8 @@ const struct attribute_spec msp430_attribute_table[] =
{ ATTR_UPPER, 0, 0, true, false, false, false, msp430_section_attr,
attr_upper_exclusions },
{ ATTR_EITHER, 0, 0, true, false, false, false, msp430_section_attr,
- attr_either_exclusions },
-
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
- };
+ attr_either_exclusions }
+ });
#undef TARGET_HANDLE_GENERIC_ATTRIBUTE
#define TARGET_HANDLE_GENERIC_ATTRIBUTE msp430_handle_generic_attribute
diff --git a/gcc/config/nds32/nds32.cc b/gcc/config/nds32/nds32.cc
index 1f8de2a..921102d 100644
--- a/gcc/config/nds32/nds32.cc
+++ b/gcc/config/nds32/nds32.cc
@@ -288,7 +288,7 @@ static const int nds32_reg_alloc_order_for_speed[] =
};
/* Defining target-specific uses of __attribute__. */
-static const struct attribute_spec nds32_attribute_table[] =
+TARGET_GNU_ATTRIBUTES (nds32_attribute_table,
{
/* Syntax: { name, min_len, max_len, decl_required, type_required,
function_type_required, affects_type_identity, handler,
@@ -326,11 +326,8 @@ static const struct attribute_spec nds32_attribute_table[] =
/* FOR BACKWARD COMPATIBILITY,
this attribute also tells no prologue/epilogue. */
- { "no_prologue", 0, 0, false, false, false, false, NULL, NULL },
-
- /* The last attribute spec is set to be NULL. */
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ { "no_prologue", 0, 0, false, false, false, false, NULL, NULL }
+});
/* ------------------------------------------------------------------------ */
@@ -4203,8 +4200,8 @@ nds32_md_asm_adjust (vec<rtx> &outputs ATTRIBUTE_UNUSED,
vec<rtx> &inputs ATTRIBUTE_UNUSED,
vec<machine_mode> &input_modes ATTRIBUTE_UNUSED,
vec<const char *> &constraints ATTRIBUTE_UNUSED,
- vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs,
- location_t /*loc*/)
+ vec<rtx> &/*uses*/, vec<rtx> &clobbers,
+ HARD_REG_SET &clobbered_regs, location_t /*loc*/)
{
if (!flag_inline_asm_r15)
{
diff --git a/gcc/config/nvptx/nvptx.cc b/gcc/config/nvptx/nvptx.cc
index 570bcc7..ae20802 100644
--- a/gcc/config/nvptx/nvptx.cc
+++ b/gcc/config/nvptx/nvptx.cc
@@ -1791,7 +1791,7 @@ nvptx_get_drap_rtx (void)
argument to the next call. */
static void
-nvptx_call_args (rtx arg, tree fntype)
+nvptx_call_args (cumulative_args_t, rtx arg, tree fntype)
{
if (!cfun->machine->doing_call)
{
@@ -1819,7 +1819,7 @@ nvptx_call_args (rtx arg, tree fntype)
information we recorded. */
static void
-nvptx_end_call_args (void)
+nvptx_end_call_args (cumulative_args_t)
{
cfun->machine->doing_call = false;
free_EXPR_LIST_list (&cfun->machine->call_args);
@@ -5834,16 +5834,15 @@ nvptx_handle_shared_attribute (tree *node, tree name, tree ARG_UNUSED (args),
}
/* Table of valid machine attributes. */
-static const struct attribute_spec nvptx_attribute_table[] =
+TARGET_GNU_ATTRIBUTES (nvptx_attribute_table,
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
{ "kernel", 0, 0, true, false, false, false, nvptx_handle_kernel_attribute,
NULL },
{ "shared", 0, 0, true, false, false, false, nvptx_handle_shared_attribute,
- NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ NULL }
+});
/* Limit vector alignments to BIGGEST_ALIGNMENT. */
diff --git a/gcc/config/pdp11/pdp11.cc b/gcc/config/pdp11/pdp11.cc
index 78c1927..478297e 100644
--- a/gcc/config/pdp11/pdp11.cc
+++ b/gcc/config/pdp11/pdp11.cc
@@ -155,7 +155,8 @@ static int pdp11_addr_cost (rtx, machine_mode, addr_space_t, bool);
static int pdp11_insn_cost (rtx_insn *insn, bool speed);
static rtx_insn *pdp11_md_asm_adjust (vec<rtx> &, vec<rtx> &,
vec<machine_mode> &, vec<const char *> &,
- vec<rtx> &, HARD_REG_SET &, location_t);
+ vec<rtx> &, vec<rtx> &,
+ HARD_REG_SET &, location_t);
static bool pdp11_return_in_memory (const_tree, const_tree);
static rtx pdp11_function_value (const_tree, const_tree, bool);
static rtx pdp11_libcall_value (machine_mode, const_rtx);
@@ -2137,7 +2138,8 @@ pdp11_cmp_length (rtx *operands, int words)
static rtx_insn *
pdp11_md_asm_adjust (vec<rtx> & /*outputs*/, vec<rtx> & /*inputs*/,
vec<machine_mode> & /*input_modes*/,
- vec<const char *> & /*constraints*/, vec<rtx> &clobbers,
+ vec<const char *> & /*constraints*/,
+ vec<rtx> &/*uses*/, vec<rtx> &clobbers,
HARD_REG_SET &clobbered_regs, location_t /*loc*/)
{
clobbers.safe_push (gen_rtx_REG (CCmode, CC_REGNUM));
diff --git a/gcc/config/riscv/autovec.md b/gcc/config/riscv/autovec.md
index 2d727c2..55d3ae5 100644
--- a/gcc/config/riscv/autovec.md
+++ b/gcc/config/riscv/autovec.md
@@ -59,7 +59,7 @@
(match_operand:<RATIO64:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_p (<RATIO64I:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, true);
DONE;
@@ -74,7 +74,7 @@
(match_operand:<RATIO32:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_p (<RATIO32I:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, true);
DONE;
@@ -89,7 +89,7 @@
(match_operand:<RATIO16:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_p (<RATIO16I:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, true);
DONE;
@@ -104,7 +104,7 @@
(match_operand:<RATIO8:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_p (<RATIO8I:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, true);
DONE;
@@ -119,7 +119,7 @@
(match_operand:<RATIO4:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_p (<RATIO4I:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, true);
DONE;
@@ -134,7 +134,7 @@
(match_operand:<RATIO2:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_p (<RATIO2I:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, true);
DONE;
@@ -172,7 +172,7 @@
(match_operand:<RATIO64:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_p (<RATIO64I:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, false);
DONE;
@@ -187,7 +187,7 @@
(match_operand:<RATIO32:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_p (<RATIO32I:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, false);
DONE;
@@ -202,7 +202,7 @@
(match_operand:<RATIO16:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_p (<RATIO16I:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, false);
DONE;
@@ -217,7 +217,7 @@
(match_operand:<RATIO8:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_p (<RATIO8I:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, false);
DONE;
@@ -232,7 +232,7 @@
(match_operand:<RATIO4:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_p (<RATIO4I:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, false);
DONE;
@@ -247,7 +247,7 @@
(match_operand:<RATIO2:VM> 5 "vector_mask_operand")
(match_operand 6 "autovec_length_operand")
(match_operand 7 "const_0_operand")]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && riscv_vector::gather_scatter_valid_offset_p (<RATIO2I:MODE>mode)"
{
riscv_vector::expand_gather_scatter (operands, false);
DONE;
@@ -394,6 +394,22 @@
}
)
+;; Provide a vec_init for mask registers by initializing
+;; a QImode vector and comparing it against 0.
+(define_expand "vec_init<mode>qi"
+ [(match_operand:VB 0 "register_operand")
+ (match_operand 1 "")]
+ "TARGET_VECTOR"
+ {
+ machine_mode qimode = riscv_vector::get_vector_mode
+ (QImode, GET_MODE_NUNITS (<MODE>mode)).require ();
+ rtx tmp = gen_reg_rtx (qimode);
+ riscv_vector::expand_vec_init (tmp, operands[1]);
+ riscv_vector::expand_vec_cmp (operands[0], NE, tmp, CONST0_RTX (qimode));
+ DONE;
+ }
+)
+
;; Slide an RVV vector left and insert a scalar into element 0.
(define_expand "vec_shl_insert_<mode>"
[(match_operand:VI 0 "register_operand")
diff --git a/gcc/config/riscv/constraints.md b/gcc/config/riscv/constraints.md
index 68be451..9836fd3 100644
--- a/gcc/config/riscv/constraints.md
+++ b/gcc/config/riscv/constraints.md
@@ -169,6 +169,29 @@
(define_register_constraint "vm" "TARGET_VECTOR ? VM_REGS : NO_REGS"
"A vector mask register (if available).")
+;; These following constraints are used by RVV instructions with dest EEW > src EEW.
+;; RISC-V 'V' Spec 5.2. Vector Operands:
+;; The destination EEW is greater than the source EEW, the source EMUL is at least 1,
+;; and the overlap is in the highest-numbered part of the destination register group.
+;; (e.g., when LMUL=8, vzext.vf4 v0, v6 is legal, but a source of v0, v2, or v4 is not).
+(define_register_constraint "W21" "TARGET_VECTOR ? V_REGS : NO_REGS"
+ "A vector register has register number % 2 == 1." "regno % 2 == 1")
+
+(define_register_constraint "W42" "TARGET_VECTOR ? V_REGS : NO_REGS"
+ "A vector register has register number % 4 == 2." "regno % 4 == 2")
+
+(define_register_constraint "W84" "TARGET_VECTOR ? V_REGS : NO_REGS"
+ "A vector register has register number % 8 == 4." "regno % 8 == 4")
+
+(define_register_constraint "W43" "TARGET_VECTOR ? V_REGS : NO_REGS"
+ "A vector register has register number % 4 == 3." "regno % 4 == 3")
+
+(define_register_constraint "W86" "TARGET_VECTOR ? V_REGS : NO_REGS"
+ "A vector register has register number % 8 == 6." "regno % 8 == 6")
+
+(define_register_constraint "W87" "TARGET_VECTOR ? V_REGS : NO_REGS"
+ "A vector register has register number % 8 == 7." "regno % 8 == 7")
+
;; This constraint is used to match instruction "csrr %0, vlenb" which is generated in "mov<mode>".
;; VLENB is a run-time constant which represent the vector register length in bytes.
;; BYTES_PER_RISCV_VECTOR represent runtime invariant of vector register length in bytes.
diff --git a/gcc/config/riscv/riscv-cores.def b/gcc/config/riscv/riscv-cores.def
index 91deabb..34df59e 100644
--- a/gcc/config/riscv/riscv-cores.def
+++ b/gcc/config/riscv/riscv-cores.def
@@ -73,6 +73,7 @@ RISCV_CORE("sifive-s76", "rv64imafdc", "sifive-7-series")
RISCV_CORE("sifive-u54", "rv64imafdc", "sifive-5-series")
RISCV_CORE("sifive-u74", "rv64imafdc", "sifive-7-series")
+RISCV_CORE("sifive-x280", "rv64imafdcv_zfh_zba_zbb_zvfh_zvl512b", "sifive-7-series")
RISCV_CORE("thead-c906", "rv64imafdc_xtheadba_xtheadbb_xtheadbs_xtheadcmo_"
"xtheadcondmov_xtheadfmemidx_xtheadmac_"
diff --git a/gcc/config/riscv/riscv-opts.h b/gcc/config/riscv/riscv-opts.h
index e6e55ad..30efebb 100644
--- a/gcc/config/riscv/riscv-opts.h
+++ b/gcc/config/riscv/riscv-opts.h
@@ -104,15 +104,15 @@ enum riscv_entity
};
/* RISC-V stringop strategy. */
-enum riscv_stringop_strategy_enum {
- /* Use scalar or vector instructions. */
- USE_AUTO,
- /* Always use a library call. */
- USE_LIBCALL,
- /* Only use scalar instructions. */
- USE_SCALAR,
- /* Only use vector instructions. */
- USE_VECTOR
+enum stringop_strategy_enum {
+ /* No expansion. */
+ STRATEGY_LIBCALL = 1,
+ /* Use scalar expansion if possible. */
+ STRATEGY_SCALAR = 2,
+ /* Only vector expansion if possible. */
+ STRATEGY_VECTOR = 4,
+ /* Use any. */
+ STRATEGY_AUTO = STRATEGY_SCALAR | STRATEGY_VECTOR
};
#define TARGET_ZICOND_LIKE (TARGET_ZICOND || (TARGET_XVENTANACONDOPS && TARGET_64BIT))
diff --git a/gcc/config/riscv/riscv-protos.h b/gcc/config/riscv/riscv-protos.h
index 695ee24..bfbd2bf 100644
--- a/gcc/config/riscv/riscv-protos.h
+++ b/gcc/config/riscv/riscv-protos.h
@@ -606,6 +606,7 @@ enum vlmul_type get_vlmul (rtx_insn *);
int count_regno_occurrences (rtx_insn *, unsigned int);
bool imm_avl_p (machine_mode);
bool can_be_broadcasted_p (rtx);
+bool gather_scatter_valid_offset_p (machine_mode);
}
/* We classify builtin types into two classes:
diff --git a/gcc/config/riscv/riscv-string.cc b/gcc/config/riscv/riscv-string.cc
index 3b5e05e..594ff49 100644
--- a/gcc/config/riscv/riscv-string.cc
+++ b/gcc/config/riscv/riscv-string.cc
@@ -707,51 +707,68 @@ riscv_block_move_loop (rtx dest, rtx src, unsigned HOST_WIDE_INT length,
/* Expand a cpymemsi instruction, which copies LENGTH bytes from
memory reference SRC to memory reference DEST. */
-bool
-riscv_expand_block_move (rtx dest, rtx src, rtx length)
+static bool
+riscv_expand_block_move_scalar (rtx dest, rtx src, rtx length)
{
- if (riscv_memcpy_strategy == USE_LIBCALL
- || riscv_memcpy_strategy == USE_VECTOR)
+ if (!CONST_INT_P (length))
return false;
- if (CONST_INT_P (length))
- {
- unsigned HOST_WIDE_INT hwi_length = UINTVAL (length);
- unsigned HOST_WIDE_INT factor, align;
+ unsigned HOST_WIDE_INT hwi_length = UINTVAL (length);
+ unsigned HOST_WIDE_INT factor, align;
- align = MIN (MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), BITS_PER_WORD);
- factor = BITS_PER_WORD / align;
+ align = MIN (MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), BITS_PER_WORD);
+ factor = BITS_PER_WORD / align;
- if (optimize_function_for_size_p (cfun)
- && hwi_length * factor * UNITS_PER_WORD > MOVE_RATIO (false))
- return false;
+ if (optimize_function_for_size_p (cfun)
+ && hwi_length * factor * UNITS_PER_WORD > MOVE_RATIO (false))
+ return false;
- if (hwi_length <= (RISCV_MAX_MOVE_BYTES_STRAIGHT / factor))
+ if (hwi_length <= (RISCV_MAX_MOVE_BYTES_STRAIGHT / factor))
+ {
+ riscv_block_move_straight (dest, src, INTVAL (length));
+ return true;
+ }
+ else if (optimize && align >= BITS_PER_WORD)
+ {
+ unsigned min_iter_words
+ = RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER / UNITS_PER_WORD;
+ unsigned iter_words = min_iter_words;
+ unsigned HOST_WIDE_INT bytes = hwi_length;
+ unsigned HOST_WIDE_INT words = bytes / UNITS_PER_WORD;
+
+ /* Lengthen the loop body if it shortens the tail. */
+ for (unsigned i = min_iter_words; i < min_iter_words * 2 - 1; i++)
{
- riscv_block_move_straight (dest, src, INTVAL (length));
- return true;
+ unsigned cur_cost = iter_words + words % iter_words;
+ unsigned new_cost = i + words % i;
+ if (new_cost <= cur_cost)
+ iter_words = i;
}
- else if (optimize && align >= BITS_PER_WORD)
- {
- unsigned min_iter_words
- = RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER / UNITS_PER_WORD;
- unsigned iter_words = min_iter_words;
- unsigned HOST_WIDE_INT bytes = hwi_length;
- unsigned HOST_WIDE_INT words = bytes / UNITS_PER_WORD;
-
- /* Lengthen the loop body if it shortens the tail. */
- for (unsigned i = min_iter_words; i < min_iter_words * 2 - 1; i++)
- {
- unsigned cur_cost = iter_words + words % iter_words;
- unsigned new_cost = i + words % i;
- if (new_cost <= cur_cost)
- iter_words = i;
- }
- riscv_block_move_loop (dest, src, bytes, iter_words * UNITS_PER_WORD);
- return true;
- }
+ riscv_block_move_loop (dest, src, bytes, iter_words * UNITS_PER_WORD);
+ return true;
+ }
+
+ return false;
+}
+
+/* This function delegates block-move expansion to either the vector
+ implementation or the scalar one. Return TRUE if successful or FALSE
+ otherwise. */
+
+bool
+riscv_expand_block_move (rtx dest, rtx src, rtx length)
+{
+ if (TARGET_VECTOR && stringop_strategy & STRATEGY_VECTOR)
+ {
+ bool ok = riscv_vector::expand_block_move (dest, src, length);
+ if (ok)
+ return true;
}
+
+ if (stringop_strategy & STRATEGY_SCALAR)
+ return riscv_expand_block_move_scalar (dest, src, length);
+
return false;
}
@@ -777,9 +794,8 @@ expand_block_move (rtx dst_in, rtx src_in, rtx length_in)
bnez a2, loop # Any more?
ret # Return
*/
- if (!TARGET_VECTOR || riscv_memcpy_strategy == USE_LIBCALL
- || riscv_memcpy_strategy == USE_SCALAR)
- return false;
+ gcc_assert (TARGET_VECTOR);
+
HOST_WIDE_INT potential_ew
= (MIN (MIN (MEM_ALIGN (src_in), MEM_ALIGN (dst_in)), BITS_PER_WORD)
/ BITS_PER_UNIT);
@@ -866,6 +882,7 @@ expand_block_move (rtx dst_in, rtx src_in, rtx length_in)
if (TARGET_MIN_VLEN * lmul <= nunits * BITS_PER_UNIT
/* Avoid loosing the option of using vsetivli . */
&& (nunits <= 31 * lmul || nunits > 31 * 8)
+ && multiple_p (BYTES_PER_RISCV_VECTOR * lmul, potential_ew)
&& (riscv_vector::get_vector_mode
(elem_mode, exact_div (BYTES_PER_RISCV_VECTOR * lmul,
potential_ew)).exists (&vmode)))
@@ -1000,6 +1017,8 @@ expand_rawmemchr (machine_mode mode, rtx dst, rtx src, rtx pat)
machine_mode mask_mode = riscv_vector::get_mask_mode (vmode);
rtx cnt = gen_reg_rtx (Pmode);
+ emit_move_insn (cnt, CONST0_RTX (Pmode));
+
rtx end = gen_reg_rtx (Pmode);
rtx vec = gen_reg_rtx (vmode);
rtx mask = gen_reg_rtx (mask_mode);
@@ -1016,6 +1035,11 @@ expand_rawmemchr (machine_mode mode, rtx dst, rtx src, rtx pat)
rtx vsrc = change_address (src, vmode, src_addr);
+ /* Bump the pointer. */
+ rtx step = gen_reg_rtx (Pmode);
+ emit_insn (gen_rtx_SET (step, gen_rtx_ASHIFT (Pmode, cnt, GEN_INT (shift))));
+ emit_insn (gen_rtx_SET (src_addr, gen_rtx_PLUS (Pmode, src_addr, step)));
+
/* Emit a first-fault load. */
rtx vlops[] = {vec, vsrc};
emit_vlmax_insn (code_for_pred_fault_load (vmode),
@@ -1038,16 +1062,10 @@ expand_rawmemchr (machine_mode mode, rtx dst, rtx src, rtx pat)
emit_nonvlmax_insn (code_for_pred_ffs (mask_mode, Pmode),
riscv_vector::CPOP_OP, vfops, cnt);
- /* Bump the pointer. */
- emit_insn (gen_rtx_SET (src_addr, gen_rtx_PLUS (Pmode, src_addr, cnt)));
-
/* Emit the loop condition. */
rtx test = gen_rtx_LT (VOIDmode, end, const0_rtx);
emit_jump_insn (gen_cbranch4 (Pmode, test, end, const0_rtx, loop));
- /* We overran by CNT, subtract it. */
- emit_insn (gen_rtx_SET (src_addr, gen_rtx_MINUS (Pmode, src_addr, cnt)));
-
/* We found something at SRC + END * [1,2,4,8]. */
emit_insn (gen_rtx_SET (end, gen_rtx_ASHIFT (Pmode, end, GEN_INT (shift))));
emit_insn (gen_rtx_SET (dst, gen_rtx_PLUS (Pmode, src_addr, end)));
diff --git a/gcc/config/riscv/riscv-subset.h b/gcc/config/riscv/riscv-subset.h
index d2a4bd2..ad1cab2 100644
--- a/gcc/config/riscv/riscv-subset.h
+++ b/gcc/config/riscv/riscv-subset.h
@@ -79,6 +79,7 @@ private:
void handle_implied_ext (const char *);
bool check_implied_ext ();
void handle_combine_ext ();
+ void check_conflict_ext ();
public:
~riscv_subset_list ();
diff --git a/gcc/config/riscv/riscv-v.cc b/gcc/config/riscv/riscv-v.cc
index 983c037..71cb756 100644
--- a/gcc/config/riscv/riscv-v.cc
+++ b/gcc/config/riscv/riscv-v.cc
@@ -1728,7 +1728,12 @@ sew64_scalar_helper (rtx *operands, rtx *scalar_op, rtx vl,
}
if (CONST_INT_P (*scalar_op))
- *scalar_op = force_reg (scalar_mode, *scalar_op);
+ {
+ if (maybe_gt (GET_MODE_SIZE (scalar_mode), GET_MODE_SIZE (Pmode)))
+ *scalar_op = force_const_mem (scalar_mode, *scalar_op);
+ else
+ *scalar_op = force_reg (scalar_mode, *scalar_op);
+ }
rtx tmp = gen_reg_rtx (vector_mode);
rtx ops[] = {tmp, *scalar_op};
@@ -3364,6 +3369,15 @@ expand_vec_perm_const (machine_mode vmode, machine_mode op_mode, rtx target,
mask to do the iteration loop control. Just disable it directly. */
if (GET_MODE_CLASS (vmode) == MODE_VECTOR_BOOL)
return false;
+ /* FIXME: Explicitly disable VLA interleave SLP vectorization when we
+ may encounter ICE for poly size (1, 1) vectors in loop vectorizer.
+ Ideally, middle-end loop vectorizer should be able to disable it
+ itself, We can remove the codes here when middle-end code is able
+ to disable VLA SLP vectorization for poly size (1, 1) VF. */
+ if (!BYTES_PER_RISCV_VECTOR.is_constant ()
+ && maybe_lt (BYTES_PER_RISCV_VECTOR * TARGET_MAX_LMUL,
+ poly_int64 (16, 16)))
+ return false;
struct expand_vec_perm_d d;
@@ -4037,7 +4051,21 @@ vls_mode_valid_p (machine_mode vls_mode)
return false;
if (riscv_autovec_preference == RVV_SCALABLE)
- return true;
+ {
+ if (GET_MODE_CLASS (vls_mode) != MODE_VECTOR_BOOL
+ && !ordered_p (TARGET_MAX_LMUL * BITS_PER_RISCV_VECTOR,
+ GET_MODE_PRECISION (vls_mode)))
+ /* We enable VLS modes which are aligned with TARGET_MAX_LMUL and
+ BITS_PER_RISCV_VECTOR.
+
+ e.g. When TARGET_MAX_LMUL = 1 and BITS_PER_RISCV_VECTOR = (128,128).
+ We enable VLS modes have fixed size <= 128bit. Since ordered_p is
+ false between VLA modes with size = (128, 128) bits and VLS mode
+ with size = 128 bits, we will end up with multiple ICEs in
+ middle-end generic codes. */
+ return false;
+ return true;
+ }
if (riscv_autovec_preference == RVV_FIXED_VLMAX)
{
@@ -4677,4 +4705,22 @@ emit_vec_extract (rtx target, rtx src, poly_int64 index)
emit_move_insn (target, ops[0].value);
}
+/* Return true if the offset mode is valid mode that we use for gather/scatter
+ autovectorization. */
+bool
+gather_scatter_valid_offset_p (machine_mode mode)
+{
+ /* If the element size of offset mode is already >= Pmode size,
+ we don't need any extensions. */
+ if (known_ge (GET_MODE_SIZE (GET_MODE_INNER (mode)), UNITS_PER_WORD))
+ return true;
+
+ /* Since we will very likely extend the offset mode into vector Pmode,
+ disable gather/scatter autovectorization if we can't extend the offset
+ mode into vector Pmode. */
+ if (!get_vector_mode (Pmode, GET_MODE_NUNITS (mode)).exists ())
+ return false;
+ return true;
+}
+
} // namespace riscv_vector
diff --git a/gcc/config/riscv/riscv-vsetvl.cc b/gcc/config/riscv/riscv-vsetvl.cc
index 74367ec..68f0be7 100644
--- a/gcc/config/riscv/riscv-vsetvl.cc
+++ b/gcc/config/riscv/riscv-vsetvl.cc
@@ -987,11 +987,11 @@ public:
/* Determine the demand info of the RVV insn. */
m_max_sew = get_max_int_sew ();
- unsigned demand_flags = 0;
+ unsigned dflags = 0;
if (vector_config_insn_p (insn->rtl ()))
{
- demand_flags |= demand_flags::DEMAND_AVL_P;
- demand_flags |= demand_flags::DEMAND_RATIO_P;
+ dflags |= demand_flags::DEMAND_AVL_P;
+ dflags |= demand_flags::DEMAND_RATIO_P;
}
else
{
@@ -1006,39 +1006,39 @@ public:
available.
*/
if (has_non_zero_avl ())
- demand_flags |= demand_flags::DEMAND_NON_ZERO_AVL_P;
+ dflags |= demand_flags::DEMAND_NON_ZERO_AVL_P;
else
- demand_flags |= demand_flags::DEMAND_AVL_P;
+ dflags |= demand_flags::DEMAND_AVL_P;
}
else
- demand_flags |= demand_flags::DEMAND_AVL_P;
+ dflags |= demand_flags::DEMAND_AVL_P;
}
if (get_attr_ratio (insn->rtl ()) != INVALID_ATTRIBUTE)
- demand_flags |= demand_flags::DEMAND_RATIO_P;
+ dflags |= demand_flags::DEMAND_RATIO_P;
else
{
if (scalar_move_insn_p (insn->rtl ()) && m_ta)
{
- demand_flags |= demand_flags::DEMAND_GE_SEW_P;
+ dflags |= demand_flags::DEMAND_GE_SEW_P;
m_max_sew = get_attr_type (insn->rtl ()) == TYPE_VFMOVFV
? get_max_float_sew ()
: get_max_int_sew ();
}
else
- demand_flags |= demand_flags::DEMAND_SEW_P;
+ dflags |= demand_flags::DEMAND_SEW_P;
if (!ignore_vlmul_insn_p (insn->rtl ()))
- demand_flags |= demand_flags::DEMAND_LMUL_P;
+ dflags |= demand_flags::DEMAND_LMUL_P;
}
if (!m_ta)
- demand_flags |= demand_flags::DEMAND_TAIL_POLICY_P;
+ dflags |= demand_flags::DEMAND_TAIL_POLICY_P;
if (!m_ma)
- demand_flags |= demand_flags::DEMAND_MASK_POLICY_P;
+ dflags |= demand_flags::DEMAND_MASK_POLICY_P;
}
- normalize_demand (demand_flags);
+ normalize_demand (dflags);
/* Optimize AVL from the vsetvl instruction. */
insn_info *def_insn = extract_single_source (get_avl_def ());
@@ -1433,9 +1433,23 @@ private:
inline bool modify_or_use_vl_p (insn_info *i, const vsetvl_info &info)
{
- return info.has_vl ()
- && (find_access (i->uses (), REGNO (info.get_vl ()))
- || find_access (i->defs (), REGNO (info.get_vl ())));
+ if (info.has_vl ())
+ {
+ if (find_access (i->defs (), REGNO (info.get_vl ())))
+ return true;
+ if (find_access (i->uses (), REGNO (info.get_vl ())))
+ {
+ resource_info resource = full_register (REGNO (info.get_vl ()));
+ def_lookup dl1 = crtl->ssa->find_def (resource, i);
+ def_lookup dl2 = crtl->ssa->find_def (resource, info.get_insn ());
+ if (dl1.matching_set () || dl2.matching_set ())
+ return true;
+ /* If their VLs are coming from same def, we still want to fuse
+ their VSETVL demand info to gain better performance. */
+ return dl1.prev_def (i) != dl2.prev_def (i);
+ }
+ }
+ return false;
}
inline bool modify_avl_p (insn_info *i, const vsetvl_info &info)
{
@@ -1483,9 +1497,6 @@ private:
{
gcc_assert (prev.valid_p () && next.valid_p ());
- if (prev.get_ratio () != next.get_ratio ())
- return false;
-
if (next.has_vl () && next.vl_used_by_non_rvv_insn_p ())
return false;
@@ -1702,7 +1713,7 @@ public:
for (insn_info *i = next_insn->prev_nondebug_insn (); i != prev_insn;
i = i->prev_nondebug_insn ())
{
- // no def amd use of vl
+ // no def and use of vl
if (!ignore_vl && modify_or_use_vl_p (i, info))
return false;
@@ -2023,7 +2034,7 @@ private:
gcc_unreachable ();
}
- bool anticpatable_exp_p (const vsetvl_info &header_info)
+ bool anticipated_exp_p (const vsetvl_info &header_info)
{
if (!header_info.has_nonvlmax_reg_avl () && !header_info.has_vl ())
return true;
@@ -2174,7 +2185,7 @@ private:
return true;
}
- bool preds_has_same_avl_p (const vsetvl_info &curr_info)
+ bool preds_all_same_avl_and_ratio_p (const vsetvl_info &curr_info)
{
gcc_assert (
!bitmap_empty_p (m_vsetvl_def_in[curr_info.get_bb ()->index ()]));
@@ -2186,7 +2197,8 @@ private:
{
const vsetvl_info &prev_info = *m_vsetvl_def_exprs[expr_index];
if (!prev_info.valid_p ()
- || !m_dem.avl_available_p (prev_info, curr_info))
+ || !m_dem.avl_available_p (prev_info, curr_info)
+ || prev_info.get_ratio () != curr_info.get_ratio ())
return false;
}
@@ -2633,17 +2645,67 @@ pre_vsetvl::compute_lcm_local_properties ()
}
}
- for (const insn_info *insn : bb->real_nondebug_insns ())
+ for (insn_info *insn : bb->real_nondebug_insns ())
{
- if ((info.has_nonvlmax_reg_avl ()
- && find_access (insn->defs (), REGNO (info.get_avl ())))
- || (info.has_vl ()
- && find_access (insn->uses (),
- REGNO (info.get_vl ()))))
+ if (info.has_nonvlmax_reg_avl ()
+ && find_access (insn->defs (), REGNO (info.get_avl ())))
{
bitmap_clear_bit (m_transp[bb_index], i);
break;
}
+
+ if (info.has_vl ()
+ && reg_mentioned_p (info.get_vl (), insn->rtl ()))
+ {
+ if (find_access (insn->defs (), REGNO (info.get_vl ())))
+ /* We can't fuse vsetvl into the blocks that modify the
+ VL operand since successors of such blocks will need
+ the value those blocks are defining.
+
+ bb 4: def a5
+ / \
+ bb 5:use a5 bb 6:vsetvl a5, 5
+
+ The example above shows that we can't fuse vsetvl
+ from bb 6 into bb 4 since the successor bb 5 is using
+ the value defined in bb 4. */
+ ;
+ else
+ {
+ /* We can't fuse vsetvl into the blocks that use the
+ VL operand which has different value from the
+ vsetvl info.
+
+ bb 4: def a5
+ |
+ bb 5: use a5
+ |
+ bb 6: def a5
+ |
+ bb 7: use a5
+
+ The example above shows that we can't fuse vsetvl
+ from bb 6 into bb 5 since their value is different.
+ */
+ resource_info resource
+ = full_register (REGNO (info.get_vl ()));
+ def_lookup dl = crtl->ssa->find_def (resource, insn);
+ def_info *def
+ = dl.matching_set_or_last_def_of_prev_group ();
+ gcc_assert (def);
+ insn_info *def_insn = extract_single_source (
+ dyn_cast<set_info *> (def));
+ if (def_insn && vsetvl_insn_p (def_insn->rtl ()))
+ {
+ vsetvl_info def_info = vsetvl_info (def_insn);
+ if (m_dem.compatible_p (def_info, info))
+ continue;
+ }
+ }
+
+ bitmap_clear_bit (m_transp[bb_index], i);
+ break;
+ }
}
}
@@ -2654,7 +2716,7 @@ pre_vsetvl::compute_lcm_local_properties ()
vsetvl_info &footer_info = block_info.get_exit_info ();
if (header_info.valid_p ()
- && (anticpatable_exp_p (header_info) || block_info.full_available))
+ && (anticipated_exp_p (header_info) || block_info.full_available))
bitmap_set_bit (m_antloc[bb_index],
get_expr_index (m_exprs, header_info));
@@ -2911,6 +2973,13 @@ pre_vsetvl::earliest_fuse_vsetvl_info ()
|| eg->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
+ /* When there are multiple set bits in an earliest edge, such an edge
+ may have an infinite loop in its preds or succs, or multiple
+ conflicting vsetvl expressions, which makes the edge unrelated.
+ We don't perform fusion in such situations. */
+ if (bitmap_count_bits (e) != 1)
+ continue;
+
vsetvl_block_info &src_block_info = get_block_info (eg->src);
vsetvl_block_info &dest_block_info = get_block_info (eg->dest);
@@ -3160,7 +3229,7 @@ pre_vsetvl::pre_global_vsetvl_info ()
curr_info = block_info.local_infos[0];
}
if (curr_info.valid_p () && !curr_info.vl_used_by_non_rvv_insn_p ()
- && preds_has_same_avl_p (curr_info))
+ && preds_all_same_avl_and_ratio_p (curr_info))
curr_info.set_change_vtype_only ();
vsetvl_info prev_info = vsetvl_info ();
@@ -3168,7 +3237,8 @@ pre_vsetvl::pre_global_vsetvl_info ()
for (auto &curr_info : block_info.local_infos)
{
if (prev_info.valid_p () && curr_info.valid_p ()
- && m_dem.avl_available_p (prev_info, curr_info))
+ && m_dem.avl_available_p (prev_info, curr_info)
+ && prev_info.get_ratio () == curr_info.get_ratio ())
curr_info.set_change_vtype_only ();
prev_info = curr_info;
}
diff --git a/gcc/config/riscv/riscv.cc b/gcc/config/riscv/riscv.cc
index a4fc858..3f111fa 100644
--- a/gcc/config/riscv/riscv.cc
+++ b/gcc/config/riscv/riscv.cc
@@ -434,7 +434,7 @@ static tree riscv_handle_type_attribute (tree *, tree, tree, int, bool *);
static void riscv_legitimize_poly_move (machine_mode, rtx, rtx, rtx);
/* Defining target-specific uses of __attribute__. */
-static const struct attribute_spec riscv_attribute_table[] =
+TARGET_GNU_ATTRIBUTES (riscv_attribute_table,
{
/* Syntax: { name, min_len, max_len, decl_required, type_required,
function_type_required, affects_type_identity, handler,
@@ -450,11 +450,8 @@ static const struct attribute_spec riscv_attribute_table[] =
/* The following two are used for the built-in properties of the Vector type
and are not used externally */
{"RVV sizeless type", 4, 4, false, true, false, true, NULL, NULL},
- {"RVV type", 0, 0, false, true, false, true, NULL, NULL},
-
- /* The last attribute spec is set to be NULL. */
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ {"RVV type", 0, 0, false, true, false, true, NULL, NULL}
+});
/* Order for the CLOBBERs/USEs of gpr_save. */
static const unsigned gpr_save_reg_order[] = {
@@ -2605,41 +2602,64 @@ riscv_legitimize_move (machine_mode mode, rtx dest, rtx src)
unsigned int nunits = vmode_size > mode_size ? vmode_size / mode_size : 1;
scalar_mode smode = as_a<scalar_mode> (mode);
unsigned int index = SUBREG_BYTE (src).to_constant () / mode_size;
- unsigned int num = smode == DImode && !TARGET_VECTOR_ELEN_64 ? 2 : 1;
+ unsigned int num = known_eq (GET_MODE_SIZE (smode), 8)
+ && !TARGET_VECTOR_ELEN_64 ? 2 : 1;
+ bool need_int_reg_p = false;
if (num == 2)
{
/* If we want to extract 64bit value but ELEN < 64,
we use RVV vector mode with EEW = 32 to extract
the highpart and lowpart. */
+ need_int_reg_p = smode == DFmode;
smode = SImode;
nunits = nunits * 2;
}
- vmode = riscv_vector::get_vector_mode (smode, nunits).require ();
- rtx v = gen_lowpart (vmode, SUBREG_REG (src));
- for (unsigned int i = 0; i < num; i++)
+ if (riscv_vector::get_vector_mode (smode, nunits).exists (&vmode))
{
- rtx result;
- if (num == 1)
- result = dest;
- else if (i == 0)
- result = gen_lowpart (smode, dest);
- else
- result = gen_reg_rtx (smode);
- riscv_vector::emit_vec_extract (result, v, index + i);
+ rtx v = gen_lowpart (vmode, SUBREG_REG (src));
+ rtx int_reg = dest;
- if (i == 1)
+ if (need_int_reg_p)
{
- rtx tmp
- = expand_binop (Pmode, ashl_optab, gen_lowpart (Pmode, result),
- gen_int_mode (32, Pmode), NULL_RTX, 0,
- OPTAB_DIRECT);
- rtx tmp2 = expand_binop (Pmode, ior_optab, tmp, dest, NULL_RTX, 0,
- OPTAB_DIRECT);
- emit_move_insn (dest, tmp2);
+ int_reg = gen_reg_rtx (DImode);
+ emit_move_insn (int_reg, gen_lowpart (GET_MODE (int_reg), dest));
}
+
+ for (unsigned int i = 0; i < num; i++)
+ {
+ rtx result;
+ if (num == 1)
+ result = int_reg;
+ else if (i == 0)
+ result = gen_lowpart (smode, int_reg);
+ else
+ result = gen_reg_rtx (smode);
+
+ riscv_vector::emit_vec_extract (result, v, index + i);
+
+ if (i == 1)
+ {
+ rtx tmp = expand_binop (Pmode, ashl_optab,
+ gen_lowpart (Pmode, result),
+ gen_int_mode (32, Pmode), NULL_RTX, 0,
+ OPTAB_DIRECT);
+ rtx tmp2 = expand_binop (Pmode, ior_optab, tmp, int_reg,
+ NULL_RTX, 0,
+ OPTAB_DIRECT);
+ emit_move_insn (int_reg, tmp2);
+ }
+ }
+
+ if (need_int_reg_p)
+ emit_move_insn (dest, gen_lowpart (GET_MODE (dest), int_reg));
+ else
+ emit_move_insn (dest, int_reg);
}
+ else
+ gcc_unreachable ();
+
return true;
}
/* Expand
@@ -8671,10 +8691,12 @@ riscv_option_override (void)
/* RVE requires specific ABI. */
if (TARGET_RVE)
- if (!TARGET_64BIT && riscv_abi != ABI_ILP32E)
- error ("rv32e requires ilp32e ABI");
- else if (TARGET_64BIT && riscv_abi != ABI_LP64E)
- error ("rv64e requires lp64e ABI");
+ {
+ if (!TARGET_64BIT && riscv_abi != ABI_ILP32E)
+ error ("rv32e requires ilp32e ABI");
+ else if (TARGET_64BIT && riscv_abi != ABI_LP64E)
+ error ("rv64e requires lp64e ABI");
+ }
/* Zfinx require abi ilp32, ilp32e, lp64 or lp64e. */
if (TARGET_ZFINX
diff --git a/gcc/config/riscv/riscv.md b/gcc/config/riscv/riscv.md
index 935eeb7..0db659a 100644
--- a/gcc/config/riscv/riscv.md
+++ b/gcc/config/riscv/riscv.md
@@ -235,7 +235,6 @@
RVVM1x7DF,RVVM1x6DF,RVVM1x5DF,RVVM2x4DF,
RVVM1x4DF,RVVM2x3DF,RVVM1x3DF,RVVM4x2DF,
RVVM2x2DF,RVVM1x2DF,
- VNx2x1DF,VNx3x1DF,VNx4x1DF,VNx5x1DF,VNx6x1DF,VNx7x1DF,VNx8x1DF,
V1QI,V2QI,V4QI,V8QI,V16QI,V32QI,V64QI,V128QI,V256QI,V512QI,V1024QI,V2048QI,V4096QI,
V1HI,V2HI,V4HI,V8HI,V16HI,V32HI,V64HI,V128HI,V256HI,V512HI,V1024HI,V2048HI,
V1SI,V2SI,V4SI,V8SI,V16SI,V32SI,V64SI,V128SI,V256SI,V512SI,V1024SI,
@@ -501,6 +500,45 @@
]
(const_string "no")))
+;; Widening instructions have group-overlap constraints. Those are only
+;; valid for certain register-group sizes. This attribute marks the
+;; alternatives not matching the required register-group size as disabled.
+(define_attr "group_overlap" "none,W21,W42,W84,W43,W86,W87"
+ (const_string "none"))
+
+(define_attr "group_overlap_valid" "no,yes"
+ (cond [(eq_attr "group_overlap" "none")
+ (const_string "yes")
+
+ (and (eq_attr "group_overlap" "W21")
+ (match_test "riscv_get_v_regno_alignment (GET_MODE (operands[0])) != 2"))
+ (const_string "no")
+
+ (and (eq_attr "group_overlap" "W42")
+ (match_test "riscv_get_v_regno_alignment (GET_MODE (operands[0])) != 4"))
+ (const_string "no")
+
+ (and (eq_attr "group_overlap" "W84")
+ (match_test "riscv_get_v_regno_alignment (GET_MODE (operands[0])) != 8"))
+ (const_string "no")
+
+ ;; According to RVV ISA:
+ ;; The destination EEW is greater than the source EEW, the source EMUL is at least 1,
+ ;; and the overlap is in the highest-numbered part of the destination register group
+ ;; (e.g., when LMUL=8, vzext.vf4 v0, v6 is legal, but a source of v0, v2, or v4 is not).
+ ;; So the source operand should have LMUL >= 1.
+ (and (eq_attr "group_overlap" "W43")
+ (match_test "riscv_get_v_regno_alignment (GET_MODE (operands[0])) != 4
+ && riscv_get_v_regno_alignment (GET_MODE (operands[3])) >= 1"))
+ (const_string "no")
+
+ (and (eq_attr "group_overlap" "W86,W87")
+ (match_test "riscv_get_v_regno_alignment (GET_MODE (operands[0])) != 8
+ && riscv_get_v_regno_alignment (GET_MODE (operands[3])) >= 1"))
+ (const_string "no")
+ ]
+ (const_string "yes")))
+
;; Attribute to control enable or disable instructions.
(define_attr "enabled" "no,yes"
(cond [
@@ -509,6 +547,9 @@
(eq_attr "fp_vector_disabled" "yes")
(const_string "no")
+
+ (eq_attr "group_overlap_valid" "no")
+ (const_string "no")
]
(const_string "yes")))
@@ -2312,9 +2353,7 @@
(use (match_operand:SI 3 "const_int_operand"))])]
""
{
- if (riscv_vector::expand_block_move (operands[0], operands[1], operands[2]))
- DONE;
- else if (riscv_expand_block_move (operands[0], operands[1], operands[2]))
+ if (riscv_expand_block_move (operands[0], operands[1], operands[2]))
DONE;
else
FAIL;
@@ -2711,24 +2750,6 @@
DONE;
})
-;; Patterns for implementations that optimize short forward branches.
-
-(define_insn "*mov<GPR:mode><X:mode>cc"
- [(set (match_operand:GPR 0 "register_operand" "=r,r")
- (if_then_else:GPR
- (match_operator 5 "ordered_comparison_operator"
- [(match_operand:X 1 "register_operand" "r,r")
- (match_operand:X 2 "reg_or_0_operand" "rJ,rJ")])
- (match_operand:GPR 3 "register_operand" "0,0")
- (match_operand:GPR 4 "sfb_alu_operand" "rJ,IL")))]
- "TARGET_SFB_ALU"
- "@
- b%C5\t%1,%z2,1f\t# movcc\;mv\t%0,%z4\n1:
- b%C5\t%1,%z2,1f\t# movcc\;li\t%0,%4\n1:"
- [(set_attr "length" "8")
- (set_attr "type" "sfb_alu")
- (set_attr "mode" "<GPR:MODE>")])
-
;; Used to implement built-in functions.
(define_expand "condjump"
[(set (pc)
@@ -3748,5 +3769,6 @@
(include "generic-ooo.md")
(include "vector.md")
(include "zicond.md")
+(include "sfb.md")
(include "zc.md")
(include "corev.md")
diff --git a/gcc/config/riscv/riscv.opt b/gcc/config/riscv/riscv.opt
index 0c6517b..59ce710 100644
--- a/gcc/config/riscv/riscv.opt
+++ b/gcc/config/riscv/riscv.opt
@@ -319,6 +319,8 @@ Mask(ZVBB) Var(riscv_zvb_subext)
Mask(ZVBC) Var(riscv_zvb_subext)
+Mask(ZVKB) Var(riscv_zvb_subext)
+
TargetVariable
int riscv_zvk_subext
@@ -536,21 +538,21 @@ Enable the use of vector registers for function arguments and return value.
This is an experimental switch and may be subject to change in the future.
Enum
-Name(riscv_stringop_strategy) Type(enum riscv_stringop_strategy_enum)
-Valid arguments to -mmemcpy-strategy=:
+Name(stringop_strategy) Type(enum stringop_strategy_enum)
+Valid arguments to -mstringop-strategy=:
EnumValue
-Enum(riscv_stringop_strategy) String(auto) Value(USE_AUTO)
+Enum(stringop_strategy) String(auto) Value(STRATEGY_AUTO)
EnumValue
-Enum(riscv_stringop_strategy) String(libcall) Value(USE_LIBCALL)
+Enum(stringop_strategy) String(libcall) Value(STRATEGY_LIBCALL)
EnumValue
-Enum(riscv_stringop_strategy) String(scalar) Value(USE_SCALAR)
+Enum(stringop_strategy) String(scalar) Value(STRATEGY_SCALAR)
EnumValue
-Enum(riscv_stringop_strategy) String(vector) Value(USE_VECTOR)
+Enum(stringop_strategy) String(vector) Value(STRATEGY_VECTOR)
-mmemcpy-strategy=
-Target RejectNegative Joined Enum(riscv_stringop_strategy) Var(riscv_memcpy_strategy) Init(USE_AUTO)
-Specify memcpy expansion strategy.
+mstringop-strategy=
+Target RejectNegative Joined Enum(stringop_strategy) Var(stringop_strategy) Init(STRATEGY_AUTO)
+Specify stringop expansion strategy.
diff --git a/gcc/config/riscv/sfb.md b/gcc/config/riscv/sfb.md
new file mode 100644
index 0000000..52af4b1
--- /dev/null
+++ b/gcc/config/riscv/sfb.md
@@ -0,0 +1,37 @@
+;; Machine description for short forward branches(SFB).
+;; Copyright (C) 2023 Free Software Foundation, Inc.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+;; Patterns for implementations that optimize short forward branches.
+
+(define_insn "*mov<GPR:mode><X:mode>cc"
+ [(set (match_operand:GPR 0 "register_operand" "=r,r")
+ (if_then_else:GPR
+ (match_operator 5 "ordered_comparison_operator"
+ [(match_operand:X 1 "register_operand" "r,r")
+ (match_operand:X 2 "reg_or_0_operand" "rJ,rJ")])
+ (match_operand:GPR 3 "register_operand" "0,0")
+ (match_operand:GPR 4 "sfb_alu_operand" "rJ,IL")))]
+ "TARGET_SFB_ALU"
+ "@
+ b%C5\t%1,%z2,1f\t# movcc\;mv\t%0,%z4\n1:
+ b%C5\t%1,%z2,1f\t# movcc\;li\t%0,%4\n1:"
+ [(set_attr "length" "8")
+ (set_attr "type" "sfb_alu")
+ (set_attr "mode" "<GPR:MODE>")])
diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md
index ba9c9e5..ba0714a 100644
--- a/gcc/config/riscv/vector.md
+++ b/gcc/config/riscv/vector.md
@@ -1913,7 +1913,7 @@
(match_operand:V_VLSI_D 2 "register_operand" " vr,vr")
(match_operand:<VM> 4 "register_operand" " vm,vm"))
(match_operand:V_VLSI_D 1 "vector_merge_operand" " vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_64BIT"
"vmerge.vxm\t%0,%2,%3,%4"
[(set_attr "type" "vimerge")
(set_attr "mode" "<MODE>")])
@@ -2091,7 +2091,7 @@
(sign_extend:<VEL>
(match_operand:<VSUBEL> 3 "register_operand" " r, r, r, r")))
(match_operand:V_VLSI_D 2 "vector_merge_operand" "vu, 0, vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_64BIT"
"@
vmv.v.x\t%0,%3
vmv.v.x\t%0,%3
@@ -2223,67 +2223,70 @@
;; DEST eew is greater than SOURCE eew.
(define_insn "@pred_indexed_<order>load<mode>_x2_greater_eew"
- [(set (match_operand:VEEWEXT2 0 "register_operand" "=&vr, &vr")
+ [(set (match_operand:VEEWEXT2 0 "register_operand" "=vd, vr, vd, vr, vd, vr, vd, vr, vd, vr, vd, vr, ?&vr, ?&vr")
(if_then_else:VEEWEXT2
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
- (match_operand 8 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1,vmWc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
+ (match_operand 8 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(unspec:VEEWEXT2
- [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ")
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ")
(mem:BLK (scratch))
- (match_operand:<VINDEX_DOUBLE_TRUNC> 4 "register_operand" " vr, vr")] ORDER)
- (match_operand:VEEWEXT2 2 "vector_merge_operand" " vu, 0")))]
+ (match_operand:<VINDEX_DOUBLE_TRUNC> 4 "register_operand" "W21,W21,W21,W21,W42,W42,W42,W42,W84,W84,W84,W84, vr, vr")] ORDER)
+ (match_operand:VEEWEXT2 2 "vector_merge_operand" " vu, vu, 0, 0, vu, vu, 0, 0, vu, vu, 0, 0, vu, 0")))]
"TARGET_VECTOR"
"vl<order>xei<double_trunc_sew>.v\t%0,(%z3),%4%p1"
[(set_attr "type" "vld<order>x")
- (set_attr "mode" "<MODE>")])
+ (set_attr "mode" "<MODE>")
+ (set_attr "group_overlap" "W21,W21,W21,W21,W42,W42,W42,W42,W84,W84,W84,W84,none,none")])
(define_insn "@pred_indexed_<order>load<mode>_x4_greater_eew"
- [(set (match_operand:VEEWEXT4 0 "register_operand" "=&vr, &vr")
+ [(set (match_operand:VEEWEXT4 0 "register_operand" "=vd, vr, vd, vr, vd, vr, vd, vr, ?&vr, ?&vr")
(if_then_else:VEEWEXT4
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
- (match_operand 8 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1,vmWc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(unspec:VEEWEXT4
- [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ")
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ")
(mem:BLK (scratch))
- (match_operand:<VINDEX_QUAD_TRUNC> 4 "register_operand" " vr, vr")] ORDER)
- (match_operand:VEEWEXT4 2 "vector_merge_operand" " vu, 0")))]
+ (match_operand:<VINDEX_QUAD_TRUNC> 4 "register_operand" "W43,W43,W43,W43,W86,W86,W86,W86, vr, vr")] ORDER)
+ (match_operand:VEEWEXT4 2 "vector_merge_operand" " vu, vu, 0, 0, vu, vu, 0, 0, vu, 0")))]
"TARGET_VECTOR"
"vl<order>xei<quad_trunc_sew>.v\t%0,(%z3),%4%p1"
[(set_attr "type" "vld<order>x")
- (set_attr "mode" "<MODE>")])
+ (set_attr "mode" "<MODE>")
+ (set_attr "group_overlap" "W43,W43,W43,W43,W86,W86,W86,W86,none,none")])
(define_insn "@pred_indexed_<order>load<mode>_x8_greater_eew"
- [(set (match_operand:VEEWEXT8 0 "register_operand" "=&vr, &vr")
+ [(set (match_operand:VEEWEXT8 0 "register_operand" "=vd, vr, vd, vr, ?&vr, ?&vr")
(if_then_else:VEEWEXT8
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
- (match_operand 8 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, vm,Wc1,vmWc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(unspec:VEEWEXT8
- [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ")
+ [(match_operand 3 "pmode_reg_or_0_operand" " rJ, rJ, rJ, rJ, rJ, rJ")
(mem:BLK (scratch))
- (match_operand:<VINDEX_OCT_TRUNC> 4 "register_operand" " vr, vr")] ORDER)
- (match_operand:VEEWEXT8 2 "vector_merge_operand" " vu, 0")))]
+ (match_operand:<VINDEX_OCT_TRUNC> 4 "register_operand" "W87,W87,W87,W87, vr, vr")] ORDER)
+ (match_operand:VEEWEXT8 2 "vector_merge_operand" " vu, vu, 0, 0, vu, 0")))]
"TARGET_VECTOR"
"vl<order>xei<oct_trunc_sew>.v\t%0,(%z3),%4%p1"
[(set_attr "type" "vld<order>x")
- (set_attr "mode" "<MODE>")])
+ (set_attr "mode" "<MODE>")
+ (set_attr "group_overlap" "W87,W87,W87,W87,none,none")])
;; DEST eew is smaller than SOURCE eew.
(define_insn "@pred_indexed_<order>load<mode>_x2_smaller_eew"
@@ -2674,7 +2677,7 @@
(match_operand:<VSUBEL> 4 "reg_or_0_operand" "rJ,rJ, rJ, rJ")))
(match_operand:V_VLSI_D 3 "register_operand" "vr,vr, vr, vr"))
(match_operand:V_VLSI_D 2 "vector_merge_operand" "vu, 0, vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_64BIT"
"v<insn>.vx\t%0,%3,%z4%p1"
[(set_attr "type" "<int_binop_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -2750,7 +2753,7 @@
(sign_extend:<VEL>
(match_operand:<VSUBEL> 4 "reg_or_0_operand" "rJ,rJ, rJ, rJ"))))
(match_operand:V_VLSI_D 2 "vector_merge_operand" "vu, 0, vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_64BIT"
"v<insn>.vx\t%0,%3,%z4%p1"
[(set_attr "type" "<int_binop_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -2826,7 +2829,7 @@
(match_operand:<VSUBEL> 4 "reg_or_0_operand" "rJ,rJ, rJ, rJ")))
(match_operand:V_VLSI_D 3 "register_operand" "vr,vr, vr, vr"))
(match_operand:V_VLSI_D 2 "vector_merge_operand" "vu, 0, vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_64BIT"
"vrsub.vx\t%0,%3,%z4%p1"
[(set_attr "type" "vialu")
(set_attr "mode" "<MODE>")])
@@ -2944,7 +2947,7 @@
(match_operand:<VSUBEL> 4 "reg_or_0_operand" "rJ,rJ, rJ, rJ")))
(match_operand:VFULLI_D 3 "register_operand" "vr,vr, vr, vr")] VMULH)
(match_operand:VFULLI_D 2 "vector_merge_operand" "vu, 0, vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_64BIT"
"vmulh<v_su>.vx\t%0,%3,%z4%p1"
[(set_attr "type" "vimul")
(set_attr "mode" "<MODE>")])
@@ -3123,7 +3126,7 @@
(match_operand:VI_D 2 "register_operand" "vr,vr"))
(match_operand:<VM> 4 "register_operand" "vm,vm")] UNSPEC_VADC)
(match_operand:VI_D 1 "vector_merge_operand" "vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_64BIT"
"vadc.vxm\t%0,%2,%z3,%4"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
@@ -3207,7 +3210,7 @@
(match_operand:<VSUBEL> 3 "reg_or_0_operand" "rJ,rJ"))))
(match_operand:<VM> 4 "register_operand" "vm,vm")] UNSPEC_VSBC)
(match_operand:VI_D 1 "vector_merge_operand" "vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_64BIT"
"vsbc.vxm\t%0,%2,%z3,%4"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
@@ -3357,7 +3360,7 @@
(match_operand 5 "const_int_operand" " i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)] UNSPEC_VMADC))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_64BIT"
"vmadc.vxm\t%0,%1,%z2,%3"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
@@ -3427,7 +3430,7 @@
(match_operand 5 "const_int_operand" " i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)] UNSPEC_VMSBC))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_64BIT"
"vmsbc.vxm\t%0,%1,%z2,%3"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
@@ -3568,7 +3571,7 @@
(match_operand 4 "const_int_operand" " i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)] UNSPEC_OVERFLOW))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_64BIT"
"vmadc.vx\t%0,%1,%z2"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
@@ -3635,7 +3638,7 @@
(match_operand 4 "const_int_operand" " i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)] UNSPEC_OVERFLOW))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_64BIT"
"vmsbc.vx\t%0,%1,%z2"
[(set_attr "type" "vicalu")
(set_attr "mode" "<MODE>")
@@ -3683,63 +3686,66 @@
;; Vector Double-Widening Sign-extend and Zero-extend.
(define_insn "@pred_<optab><mode>_vf2"
- [(set (match_operand:VWEXTI 0 "register_operand" "=&vr,&vr")
+ [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vr, vd, vr, vd, vr, vd, vr, vd, vr, vd, vr, ?&vr, ?&vr")
(if_then_else:VWEXTI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 4 "vector_length_operand" " rK, rK")
- (match_operand 5 "const_int_operand" " i, i")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1,vmWc1,vmWc1")
+ (match_operand 4 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 5 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
+ (match_operand 6 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(any_extend:VWEXTI
- (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " vr, vr"))
- (match_operand:VWEXTI 2 "vector_merge_operand" " vu, 0")))]
+ (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" "W21,W21,W21,W21,W42,W42,W42,W42,W84,W84,W84,W84, vr, vr"))
+ (match_operand:VWEXTI 2 "vector_merge_operand" " vu, vu, 0, 0, vu, vu, 0, 0, vu, vu, 0, 0, vu, 0")))]
"TARGET_VECTOR"
"v<sz>ext.vf2\t%0,%3%p1"
[(set_attr "type" "vext")
- (set_attr "mode" "<MODE>")])
+ (set_attr "mode" "<MODE>")
+ (set_attr "group_overlap" "W21,W21,W21,W21,W42,W42,W42,W42,W84,W84,W84,W84,none,none")])
;; Vector Quad-Widening Sign-extend and Zero-extend.
(define_insn "@pred_<optab><mode>_vf4"
- [(set (match_operand:VQEXTI 0 "register_operand" "=&vr,&vr")
+ [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vr, vd, vr, vd, vr, vd, vr, ?&vr, ?&vr")
(if_then_else:VQEXTI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 4 "vector_length_operand" " rK, rK")
- (match_operand 5 "const_int_operand" " i, i")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1,vmWc1,vmWc1")
+ (match_operand 4 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 5 "const_int_operand" " i, i, i, i, i, i, i, i, i, i")
+ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(any_extend:VQEXTI
- (match_operand:<V_QUAD_TRUNC> 3 "register_operand" " vr, vr"))
- (match_operand:VQEXTI 2 "vector_merge_operand" " vu, 0")))]
+ (match_operand:<V_QUAD_TRUNC> 3 "register_operand" "W43,W43,W43,W43,W86,W86,W86,W86, vr, vr"))
+ (match_operand:VQEXTI 2 "vector_merge_operand" " vu, vu, 0, 0, vu, vu, 0, 0, vu, 0")))]
"TARGET_VECTOR"
"v<sz>ext.vf4\t%0,%3%p1"
[(set_attr "type" "vext")
- (set_attr "mode" "<MODE>")])
+ (set_attr "mode" "<MODE>")
+ (set_attr "group_overlap" "W43,W43,W43,W43,W86,W86,W86,W86,none,none")])
;; Vector Oct-Widening Sign-extend and Zero-extend.
(define_insn "@pred_<optab><mode>_vf8"
- [(set (match_operand:VOEXTI 0 "register_operand" "=&vr,&vr")
+ [(set (match_operand:VOEXTI 0 "register_operand" "=vd, vr, vd, vr, ?&vr, ?&vr")
(if_then_else:VOEXTI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 4 "vector_length_operand" " rK, rK")
- (match_operand 5 "const_int_operand" " i, i")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, vm,Wc1,vmWc1,vmWc1")
+ (match_operand 4 "vector_length_operand" " rK, rK, rK, rK, rK, rK")
+ (match_operand 5 "const_int_operand" " i, i, i, i, i, i")
+ (match_operand 6 "const_int_operand" " i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(any_extend:VOEXTI
- (match_operand:<V_OCT_TRUNC> 3 "register_operand" " vr, vr"))
- (match_operand:VOEXTI 2 "vector_merge_operand" " vu, 0")))]
+ (match_operand:<V_OCT_TRUNC> 3 "register_operand" "W87,W87,W87,W87, vr, vr"))
+ (match_operand:VOEXTI 2 "vector_merge_operand" " vu, vu, 0, 0, vu, 0")))]
"TARGET_VECTOR"
"v<sz>ext.vf8\t%0,%3%p1"
[(set_attr "type" "vext")
- (set_attr "mode" "<MODE>")])
+ (set_attr "mode" "<MODE>")
+ (set_attr "group_overlap" "W87,W87,W87,W87,none,none")])
;; Vector Widening Add/Subtract/Multiply.
(define_insn "@pred_dual_widen_<any_widen_binop:optab><any_extend:su><mode>"
@@ -3765,27 +3771,28 @@
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
(define_insn "@pred_dual_widen_<any_widen_binop:optab><any_extend:su><mode>_scalar"
- [(set (match_operand:VWEXTI 0 "register_operand" "=&vr,&vr")
+ [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vr, vd, vr, vd, vr, vd, vr, vd, vr, vd, vr, ?&vr, ?&vr")
(if_then_else:VWEXTI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
- (match_operand 8 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1,vmWc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
+ (match_operand 8 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(any_widen_binop:VWEXTI
(any_extend:VWEXTI
- (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " vr, vr"))
+ (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" "W21,W21,W21,W21,W42,W42,W42,W42,W84,W84,W84,W84, vr, vr"))
(any_extend:VWEXTI
(vec_duplicate:<V_DOUBLE_TRUNC>
- (match_operand:<VSUBEL> 4 "reg_or_0_operand" " rJ, rJ"))))
- (match_operand:VWEXTI 2 "vector_merge_operand" " vu, 0")))]
+ (match_operand:<VSUBEL> 4 "reg_or_0_operand" " rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ"))))
+ (match_operand:VWEXTI 2 "vector_merge_operand" " vu, vu, 0, 0, vu, vu, 0, 0, vu, vu, 0, 0, vu, 0")))]
"TARGET_VECTOR"
"vw<any_widen_binop:insn><any_extend:u>.vx\t%0,%3,%z4%p1"
[(set_attr "type" "vi<widen_binop_insn_type>")
- (set_attr "mode" "<V_DOUBLE_TRUNC>")])
+ (set_attr "mode" "<V_DOUBLE_TRUNC>")
+ (set_attr "group_overlap" "W21,W21,W21,W21,W42,W42,W42,W42,W84,W84,W84,W84,none,none")])
(define_insn "@pred_single_widen_sub<any_extend:su><mode>"
[(set (match_operand:VWEXTI 0 "register_operand" "=&vr,&vr")
@@ -3830,7 +3837,7 @@
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
(define_insn "@pred_single_widen_<plus_minus:optab><any_extend:su><mode>_scalar"
- [(set (match_operand:VWEXTI 0 "register_operand" "=&vr,&vr")
+ [(set (match_operand:VWEXTI 0 "register_operand" "=vr, vr")
(if_then_else:VWEXTI
(unspec:<VM>
[(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
@@ -3874,46 +3881,47 @@
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
(define_insn "@pred_widen_mulsu<mode>_scalar"
- [(set (match_operand:VWEXTI 0 "register_operand" "=&vr,&vr")
+ [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vr, vd, vr, vd, vr, vd, vr, vd, vr, vd, vr, ?&vr, ?&vr")
(if_then_else:VWEXTI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
- (match_operand 8 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1,vmWc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
+ (match_operand 8 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(mult:VWEXTI
(sign_extend:VWEXTI
- (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " vr, vr"))
+ (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" "W21,W21,W21,W21,W42,W42,W42,W42,W84,W84,W84,W84, vr, vr"))
(zero_extend:VWEXTI
(vec_duplicate:<V_DOUBLE_TRUNC>
- (match_operand:<VSUBEL> 4 "reg_or_0_operand" " rJ, rJ"))))
- (match_operand:VWEXTI 2 "vector_merge_operand" " vu, 0")))]
+ (match_operand:<VSUBEL> 4 "reg_or_0_operand" " rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ, rJ"))))
+ (match_operand:VWEXTI 2 "vector_merge_operand" " vu, vu, 0, 0, vu, vu, 0, 0, vu, vu, 0, 0, vu, 0")))]
"TARGET_VECTOR"
"vwmulsu.vx\t%0,%3,%z4%p1"
[(set_attr "type" "viwmul")
- (set_attr "mode" "<V_DOUBLE_TRUNC>")])
+ (set_attr "mode" "<V_DOUBLE_TRUNC>")
+ (set_attr "group_overlap" "W21,W21,W21,W21,W42,W42,W42,W42,W84,W84,W84,W84,none,none")])
;; vwcvt<u>.x.x.v
(define_insn "@pred_<optab><mode>"
- [(set (match_operand:VWEXTI 0 "register_operand" "=&vr,&vr")
+ [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vr, vd, vr, vd, vr, vd, vr, vd, vr, vd, vr, ?&vr, ?&vr")
(if_then_else:VWEXTI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 4 "vector_length_operand" " rK, rK")
- (match_operand 5 "const_int_operand" " i, i")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1,vmWc1,vmWc1")
+ (match_operand 4 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 5 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
+ (match_operand 6 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(plus:VWEXTI
(any_extend:VWEXTI
- (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " vr, vr"))
+ (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" "W21,W21,W21,W21,W42,W42,W42,W42,W84,W84,W84,W84, vr, vr"))
(vec_duplicate:VWEXTI
(reg:<VEL> X0_REGNUM)))
- (match_operand:VWEXTI 2 "vector_merge_operand" " vu, 0")))]
+ (match_operand:VWEXTI 2 "vector_merge_operand" " vu, vu, 0, 0, vu, vu, 0, 0, vu, vu, 0, 0, vu, 0")))]
"TARGET_VECTOR"
"vwcvt<u>.x.x.v\t%0,%3%p1"
[(set_attr "type" "viwalu")
@@ -3921,7 +3929,8 @@
(set_attr "vl_op_idx" "4")
(set (attr "ta") (symbol_ref "riscv_vector::get_ta(operands[5])"))
(set (attr "ma") (symbol_ref "riscv_vector::get_ma(operands[6])"))
- (set (attr "avl_type_idx") (const_int 7))])
+ (set (attr "avl_type_idx") (const_int 7))
+ (set_attr "group_overlap" "W21,W21,W21,W21,W42,W42,W42,W42,W84,W84,W84,W84,none,none")])
;; -------------------------------------------------------------------------------
;; ---- Predicated integer Narrowing operations
@@ -4153,7 +4162,7 @@
(match_operand:<VSUBEL> 4 "register_operand" " r, r, r, r")))
(match_operand:VI_D 3 "register_operand" " vr, vr, vr, vr"))
(match_operand:VI_D 2 "vector_merge_operand" " vu, 0, vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_64BIT"
"v<insn>.vx\t%0,%3,%4%p1"
[(set_attr "type" "<int_binop_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -4229,7 +4238,7 @@
(sign_extend:<VEL>
(match_operand:<VSUBEL> 4 "register_operand" " r, r, r, r"))))
(match_operand:VI_D 2 "vector_merge_operand" " vu, 0, vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_64BIT"
"v<insn>.vx\t%0,%3,%4%p1"
[(set_attr "type" "<int_binop_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -4377,7 +4386,7 @@
(sign_extend:<VEL>
(match_operand:<VSUBEL> 4 "reg_or_0_operand" " rJ, rJ, rJ, rJ"))] VSAT_ARITH_OP)
(match_operand:VI_D 2 "vector_merge_operand" " vu, 0, vu, 0")))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_64BIT"
"v<sat_op>.vx\t%0,%3,%z4%p1"
[(set_attr "type" "<sat_insn_type>")
(set_attr "mode" "<MODE>")])
@@ -4985,7 +4994,7 @@
(sign_extend:<VEL>
(match_operand:<VSUBEL> 4 "register_operand" " r")))])
(match_dup 1)))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_64BIT"
"vms%B2.vx\t%0,%3,%4,v0.t"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")
@@ -5011,7 +5020,7 @@
(sign_extend:<VEL>
(match_operand:<VSUBEL> 5 "register_operand" " r, r")))])
(match_operand:<VM> 2 "vector_merge_operand" " vu, 0")))]
- "TARGET_VECTOR && riscv_vector::cmp_lmul_le_one (<MODE>mode)"
+ "TARGET_VECTOR && riscv_vector::cmp_lmul_le_one (<MODE>mode) && !TARGET_64BIT"
"vms%B3.vx\t%0,%4,%5%p1"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")])
@@ -5032,7 +5041,7 @@
(sign_extend:<VEL>
(match_operand:<VSUBEL> 5 "register_operand" " r, r, r, r, r")))])
(match_operand:<VM> 2 "vector_merge_operand" " vu, vu, 0, vu, 0")))]
- "TARGET_VECTOR && riscv_vector::cmp_lmul_gt_one (<MODE>mode)"
+ "TARGET_VECTOR && riscv_vector::cmp_lmul_gt_one (<MODE>mode) && !TARGET_64BIT"
"vms%B3.vx\t%0,%4,%5%p1"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")])
@@ -5053,7 +5062,7 @@
(match_operand:<VSUBEL> 4 "register_operand" " r")))
(match_operand:V_VLSI_D 3 "register_operand" " vr")])
(match_dup 1)))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_64BIT"
"vms%B2.vx\t%0,%3,%4,v0.t"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")
@@ -5079,7 +5088,7 @@
(match_operand:<VSUBEL> 5 "register_operand" " r, r")))
(match_operand:V_VLSI_D 4 "register_operand" " vr, vr")])
(match_operand:<VM> 2 "vector_merge_operand" " vu, 0")))]
- "TARGET_VECTOR && riscv_vector::cmp_lmul_le_one (<MODE>mode)"
+ "TARGET_VECTOR && riscv_vector::cmp_lmul_le_one (<MODE>mode) && !TARGET_64BIT"
"vms%B3.vx\t%0,%4,%5%p1"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")])
@@ -5100,7 +5109,7 @@
(match_operand:<VSUBEL> 5 "register_operand" " r, r, r, r, r")))
(match_operand:V_VLSI_D 4 "register_operand" " vr, 0, 0, vr, vr")])
(match_operand:<VM> 2 "vector_merge_operand" " vu, vu, 0, vu, 0")))]
- "TARGET_VECTOR && riscv_vector::cmp_lmul_gt_one (<MODE>mode)"
+ "TARGET_VECTOR && riscv_vector::cmp_lmul_gt_one (<MODE>mode) && !TARGET_64BIT"
"vms%B3.vx\t%0,%4,%5%p1"
[(set_attr "type" "vicmp")
(set_attr "mode" "<MODE>")])
@@ -5480,7 +5489,7 @@
(match_operand:V_VLSI_D 3 "register_operand" " 0, vr, 0, vr"))
(match_operand:V_VLSI_D 4 "register_operand" " vr, vr, vr, vr"))
(match_dup 3)))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_64BIT"
"@
vmadd.vx\t%0,%2,%4%p1
vmv.v.v\t%0,%2\;vmadd.vx\t%0,%2,%4%p1
@@ -5513,7 +5522,7 @@
(match_operand:V_VLSI_D 3 "register_operand" " vr, vr, vr, vr"))
(match_operand:V_VLSI_D 4 "register_operand" " 0, vr, 0, vr"))
(match_dup 4)))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_64BIT"
"@
vmacc.vx\t%0,%2,%3%p1
vmv.v.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1
@@ -5778,7 +5787,7 @@
(match_operand:<VSUBEL> 2 "register_operand" " r, r, r, r")))
(match_operand:V_VLSI_D 3 "register_operand" " 0, vr, 0, vr")))
(match_dup 3)))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_64BIT"
"@
vnmsub.vx\t%0,%2,%4%p1
vmv.v.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1
@@ -5811,7 +5820,7 @@
(match_operand:<VSUBEL> 2 "register_operand" " r, r, r, r")))
(match_operand:V_VLSI_D 3 "register_operand" " vr, vr, vr, vr")))
(match_dup 4)))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_64BIT"
"@
vnmsac.vx\t%0,%2,%3%p1
vmv.v.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1
@@ -5857,29 +5866,30 @@
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
(define_insn "@pred_widen_mul_plus<su><mode>_scalar"
- [(set (match_operand:VWEXTI 0 "register_operand" "=&vr")
+ [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vr, vd, vr, vd, vr, ?&vr")
(if_then_else:VWEXTI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1")
- (match_operand 5 "vector_length_operand" " rK")
- (match_operand 6 "const_int_operand" " i")
- (match_operand 7 "const_int_operand" " i")
- (match_operand 8 "const_int_operand" " i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, vm,Wc1, vm,Wc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(plus:VWEXTI
(mult:VWEXTI
(any_extend:VWEXTI
(vec_duplicate:<V_DOUBLE_TRUNC>
- (match_operand:<VSUBEL> 3 "register_operand" " r")))
+ (match_operand:<VSUBEL> 3 "reg_or_0_operand" " rJ, rJ, rJ, rJ, rJ, rJ, rJ")))
(any_extend:VWEXTI
- (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " vr")))
- (match_operand:VWEXTI 2 "register_operand" " 0"))
+ (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" "W21,W21,W42,W42,W84,W84, vr")))
+ (match_operand:VWEXTI 2 "register_operand" " 0, 0, 0, 0, 0, 0, 0"))
(match_dup 2)))]
"TARGET_VECTOR"
- "vwmacc<u>.vx\t%0,%3,%4%p1"
+ "vwmacc<u>.vx\t%0,%z3,%4%p1"
[(set_attr "type" "viwmuladd")
- (set_attr "mode" "<V_DOUBLE_TRUNC>")])
+ (set_attr "mode" "<V_DOUBLE_TRUNC>")
+ (set_attr "group_overlap" "W21,W21,W42,W42,W84,W84,none")])
(define_insn "@pred_widen_mul_plussu<mode>"
[(set (match_operand:VWEXTI 0 "register_operand" "=&vr")
@@ -5906,54 +5916,56 @@
(set_attr "mode" "<V_DOUBLE_TRUNC>")])
(define_insn "@pred_widen_mul_plussu<mode>_scalar"
- [(set (match_operand:VWEXTI 0 "register_operand" "=&vr")
+ [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vr, vd, vr, vd, vr, ?&vr")
(if_then_else:VWEXTI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1")
- (match_operand 5 "vector_length_operand" " rK")
- (match_operand 6 "const_int_operand" " i")
- (match_operand 7 "const_int_operand" " i")
- (match_operand 8 "const_int_operand" " i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, vm,Wc1, vm,Wc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(plus:VWEXTI
(mult:VWEXTI
(sign_extend:VWEXTI
(vec_duplicate:<V_DOUBLE_TRUNC>
- (match_operand:<VSUBEL> 3 "register_operand" " r")))
+ (match_operand:<VSUBEL> 3 "reg_or_0_operand" " rJ, rJ, rJ, rJ, rJ, rJ, rJ")))
(zero_extend:VWEXTI
- (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " vr")))
- (match_operand:VWEXTI 2 "register_operand" " 0"))
+ (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" "W21,W21,W42,W42,W84,W84, vr")))
+ (match_operand:VWEXTI 2 "register_operand" " 0, 0, 0, 0, 0, 0, 0"))
(match_dup 2)))]
"TARGET_VECTOR"
- "vwmaccsu.vx\t%0,%3,%4%p1"
+ "vwmaccsu.vx\t%0,%z3,%4%p1"
[(set_attr "type" "viwmuladd")
- (set_attr "mode" "<V_DOUBLE_TRUNC>")])
+ (set_attr "mode" "<V_DOUBLE_TRUNC>")
+ (set_attr "group_overlap" "W21,W21,W42,W42,W84,W84,none")])
(define_insn "@pred_widen_mul_plusus<mode>_scalar"
- [(set (match_operand:VWEXTI 0 "register_operand" "=&vr")
+ [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vr, vd, vr, vd, vr, ?&vr")
(if_then_else:VWEXTI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1")
- (match_operand 5 "vector_length_operand" " rK")
- (match_operand 6 "const_int_operand" " i")
- (match_operand 7 "const_int_operand" " i")
- (match_operand 8 "const_int_operand" " i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, vm,Wc1, vm,Wc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(plus:VWEXTI
(mult:VWEXTI
(zero_extend:VWEXTI
(vec_duplicate:<V_DOUBLE_TRUNC>
- (match_operand:<VSUBEL> 3 "register_operand" " r")))
+ (match_operand:<VSUBEL> 3 "reg_or_0_operand" " rJ, rJ, rJ, rJ, rJ, rJ, rJ")))
(sign_extend:VWEXTI
- (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " vr")))
- (match_operand:VWEXTI 2 "register_operand" " 0"))
+ (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" "W21,W21,W42,W42,W84,W84, vr")))
+ (match_operand:VWEXTI 2 "register_operand" " 0, 0, 0, 0, 0, 0, 0"))
(match_dup 2)))]
"TARGET_VECTOR"
- "vwmaccus.vx\t%0,%3,%4%p1"
+ "vwmaccus.vx\t%0,%z3,%4%p1"
[(set_attr "type" "viwmuladd")
- (set_attr "mode" "<V_DOUBLE_TRUNC>")])
+ (set_attr "mode" "<V_DOUBLE_TRUNC>")
+ (set_attr "group_overlap" "W21,W21,W42,W42,W84,W84,none")])
;; -------------------------------------------------------------------------------
;; ---- Predicated BOOL mask operations
@@ -7033,31 +7045,32 @@
(symbol_ref "riscv_vector::get_frm_mode (operands[9])"))])
(define_insn "@pred_dual_widen_<optab><mode>_scalar"
- [(set (match_operand:VWEXTF 0 "register_operand" "=&vr, &vr")
+ [(set (match_operand:VWEXTF 0 "register_operand" "=vd, vr, vd, vr, vd, vr, vd, vr, vd, vr, vd, vr, ?&vr, ?&vr")
(if_then_else:VWEXTF
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 5 "vector_length_operand" " rK, rK")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
- (match_operand 8 "const_int_operand" " i, i")
- (match_operand 9 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1,vmWc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
+ (match_operand 8 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
+ (match_operand 9 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)
(reg:SI FRM_REGNUM)] UNSPEC_VPREDICATE)
(any_widen_binop:VWEXTF
(float_extend:VWEXTF
- (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " vr, vr"))
+ (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" "W21,W21,W21,W21,W42,W42,W42,W42,W84,W84,W84,W84, vr, vr"))
(float_extend:VWEXTF
(vec_duplicate:<V_DOUBLE_TRUNC>
- (match_operand:<VSUBEL> 4 "register_operand" " f, f"))))
- (match_operand:VWEXTF 2 "vector_merge_operand" " vu, 0")))]
+ (match_operand:<VSUBEL> 4 "register_operand" " f, f, f, f, f, f, f, f, f, f, f, f, f, f"))))
+ (match_operand:VWEXTF 2 "vector_merge_operand" " vu, vu, 0, 0, vu, vu, 0, 0, vu, vu, 0, 0, vu, 0")))]
"TARGET_VECTOR"
"vfw<insn>.vf\t%0,%3,%4%p1"
[(set_attr "type" "vf<widen_binop_insn_type>")
(set_attr "mode" "<V_DOUBLE_TRUNC>")
(set (attr "frm_mode")
- (symbol_ref "riscv_vector::get_frm_mode (operands[9])"))])
+ (symbol_ref "riscv_vector::get_frm_mode (operands[9])"))
+ (set_attr "group_overlap" "W21,W21,W21,W21,W42,W42,W42,W42,W84,W84,W84,W84,none,none")])
(define_insn "@pred_single_widen_add<mode>"
[(set (match_operand:VWEXTF 0 "register_operand" "=&vr, &vr")
@@ -7110,7 +7123,7 @@
(symbol_ref "riscv_vector::get_frm_mode (operands[9])"))])
(define_insn "@pred_single_widen_<plus_minus:optab><mode>_scalar"
- [(set (match_operand:VWEXTF 0 "register_operand" "=&vr, &vr")
+ [(set (match_operand:VWEXTF 0 "register_operand" "=vr, vr")
(if_then_else:VWEXTF
(unspec:<VM>
[(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
@@ -7171,15 +7184,15 @@
(symbol_ref "riscv_vector::get_frm_mode (operands[9])"))])
(define_insn "@pred_widen_mul_<optab><mode>_scalar"
- [(set (match_operand:VWEXTF 0 "register_operand" "=&vr")
+ [(set (match_operand:VWEXTF 0 "register_operand" "=vd, vr, vd, vr, vd, vr, ?&vr")
(if_then_else:VWEXTF
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1")
- (match_operand 5 "vector_length_operand" " rK")
- (match_operand 6 "const_int_operand" " i")
- (match_operand 7 "const_int_operand" " i")
- (match_operand 8 "const_int_operand" " i")
- (match_operand 9 "const_int_operand" " i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, vm,Wc1, vm,Wc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i")
+ (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)
(reg:SI FRM_REGNUM)] UNSPEC_VPREDICATE)
@@ -7187,17 +7200,18 @@
(mult:VWEXTF
(float_extend:VWEXTF
(vec_duplicate:<V_DOUBLE_TRUNC>
- (match_operand:<VSUBEL> 3 "register_operand" " f")))
+ (match_operand:<VSUBEL> 3 "register_operand" " f, f, f, f, f, f, f")))
(float_extend:VWEXTF
- (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " vr")))
- (match_operand:VWEXTF 2 "register_operand" " 0"))
+ (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" "W21,W21,W42,W42,W84,W84, vr")))
+ (match_operand:VWEXTF 2 "register_operand" " 0, 0, 0, 0, 0, 0, 0"))
(match_dup 2)))]
"TARGET_VECTOR"
"vfw<macc_msac>.vf\t%0,%3,%4%p1"
[(set_attr "type" "vfwmuladd")
(set_attr "mode" "<V_DOUBLE_TRUNC>")
(set (attr "frm_mode")
- (symbol_ref "riscv_vector::get_frm_mode (operands[9])"))])
+ (symbol_ref "riscv_vector::get_frm_mode (operands[9])"))
+ (set_attr "group_overlap" "W21,W21,W42,W42,W84,W84,none")])
(define_insn "@pred_widen_mul_neg_<optab><mode>"
[(set (match_operand:VWEXTF 0 "register_operand" "=&vr")
@@ -7229,15 +7243,15 @@
(symbol_ref "riscv_vector::get_frm_mode (operands[9])"))])
(define_insn "@pred_widen_mul_neg_<optab><mode>_scalar"
- [(set (match_operand:VWEXTF 0 "register_operand" "=&vr")
+ [(set (match_operand:VWEXTF 0 "register_operand" "=vd, vr, vd, vr, vd, vr, ?&vr")
(if_then_else:VWEXTF
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1")
- (match_operand 5 "vector_length_operand" " rK")
- (match_operand 6 "const_int_operand" " i")
- (match_operand 7 "const_int_operand" " i")
- (match_operand 8 "const_int_operand" " i")
- (match_operand 9 "const_int_operand" " i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, vm,Wc1, vm,Wc1,vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i")
+ (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i")
+ (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)
(reg:SI FRM_REGNUM)] UNSPEC_VPREDICATE)
@@ -7246,17 +7260,18 @@
(mult:VWEXTF
(float_extend:VWEXTF
(vec_duplicate:<V_DOUBLE_TRUNC>
- (match_operand:<VSUBEL> 3 "register_operand" " f")))
+ (match_operand:<VSUBEL> 3 "register_operand" " f, f, f, f, f, f, f")))
(float_extend:VWEXTF
- (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " vr"))))
- (match_operand:VWEXTF 2 "register_operand" " 0"))
+ (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" "W21,W21,W42,W42,W84,W84, vr"))))
+ (match_operand:VWEXTF 2 "register_operand" " 0, 0, 0, 0, 0, 0, 0"))
(match_dup 2)))]
"TARGET_VECTOR"
"vfw<nmsac_nmacc>.vf\t%0,%3,%4%p1"
[(set_attr "type" "vfwmuladd")
(set_attr "mode" "<V_DOUBLE_TRUNC>")
(set (attr "frm_mode")
- (symbol_ref "riscv_vector::get_frm_mode (operands[9])"))])
+ (symbol_ref "riscv_vector::get_frm_mode (operands[9])"))
+ (set_attr "group_overlap" "W21,W21,W42,W42,W84,W84,none")])
;; -------------------------------------------------------------------------------
;; ---- Predicated floating-point comparison operations
@@ -7620,84 +7635,88 @@
;; -------------------------------------------------------------------------------
(define_insn "@pred_widen_fcvt_x<v_su>_f<mode>"
- [(set (match_operand:VWCONVERTI 0 "register_operand" "=&vr, &vr")
+ [(set (match_operand:VWCONVERTI 0 "register_operand" "=vd, vr, vd, vr, vd, vr, vd, vr, vd, vr, vd, vr, ?&vr, ?&vr")
(if_then_else:VWCONVERTI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 4 "vector_length_operand" " rK, rK")
- (match_operand 5 "const_int_operand" " i, i")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
- (match_operand 8 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1,vmWc1,vmWc1")
+ (match_operand 4 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 5 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
+ (match_operand 6 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
+ (match_operand 8 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)
(reg:SI FRM_REGNUM)] UNSPEC_VPREDICATE)
(unspec:VWCONVERTI
- [(match_operand:<VNCONVERT> 3 "register_operand" " vr, vr")] VFCVTS)
- (match_operand:VWCONVERTI 2 "vector_merge_operand" " vu, 0")))]
+ [(match_operand:<VNCONVERT> 3 "register_operand" "W21,W21,W21,W21,W42,W42,W42,W42,W84,W84,W84,W84, vr, vr")] VFCVTS)
+ (match_operand:VWCONVERTI 2 "vector_merge_operand" " vu, vu, 0, 0, vu, vu, 0, 0, vu, vu, 0, 0, vu, 0")))]
"TARGET_VECTOR"
"vfwcvt.x<v_su>.f.v\t%0,%3%p1"
[(set_attr "type" "vfwcvtftoi")
(set_attr "mode" "<VNCONVERT>")
(set (attr "frm_mode")
- (symbol_ref "riscv_vector::get_frm_mode (operands[8])"))])
+ (symbol_ref "riscv_vector::get_frm_mode (operands[8])"))
+ (set_attr "group_overlap" "W21,W21,W21,W21,W42,W42,W42,W42,W84,W84,W84,W84,none,none")])
(define_insn "@pred_widen_<fix_cvt><mode>"
- [(set (match_operand:VWCONVERTI 0 "register_operand" "=&vr, &vr")
+ [(set (match_operand:VWCONVERTI 0 "register_operand" "=vd, vr, vd, vr, vd, vr, vd, vr, vd, vr, vd, vr, ?&vr, ?&vr")
(if_then_else:VWCONVERTI
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 4 "vector_length_operand" " rK, rK")
- (match_operand 5 "const_int_operand" " i, i")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1,vmWc1,vmWc1")
+ (match_operand 4 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 5 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
+ (match_operand 6 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(any_fix:VWCONVERTI
- (match_operand:<VNCONVERT> 3 "register_operand" " vr, vr"))
- (match_operand:VWCONVERTI 2 "vector_merge_operand" " vu, 0")))]
+ (match_operand:<VNCONVERT> 3 "register_operand" "W21,W21,W21,W21,W42,W42,W42,W42,W84,W84,W84,W84, vr, vr"))
+ (match_operand:VWCONVERTI 2 "vector_merge_operand" " vu, vu, 0, 0, vu, vu, 0, 0, vu, vu, 0, 0, vu, 0")))]
"TARGET_VECTOR"
"vfwcvt.rtz.x<u>.f.v\t%0,%3%p1"
[(set_attr "type" "vfwcvtftoi")
- (set_attr "mode" "<VNCONVERT>")])
+ (set_attr "mode" "<VNCONVERT>")
+ (set_attr "group_overlap" "W21,W21,W21,W21,W42,W42,W42,W42,W84,W84,W84,W84,none,none")])
(define_insn "@pred_widen_<float_cvt><mode>"
- [(set (match_operand:V_VLSF 0 "register_operand" "=&vr, &vr")
+ [(set (match_operand:V_VLSF 0 "register_operand" "=vd, vr, vd, vr, vd, vr, vd, vr, vd, vr, vd, vr, ?&vr, ?&vr")
(if_then_else:V_VLSF
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 4 "vector_length_operand" " rK, rK")
- (match_operand 5 "const_int_operand" " i, i")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1,vmWc1,vmWc1")
+ (match_operand 4 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 5 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
+ (match_operand 6 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(any_float:V_VLSF
- (match_operand:<VNCONVERT> 3 "register_operand" " vr, vr"))
- (match_operand:V_VLSF 2 "vector_merge_operand" " vu, 0")))]
+ (match_operand:<VNCONVERT> 3 "register_operand" "W21,W21,W21,W21,W42,W42,W42,W42,W84,W84,W84,W84, vr, vr"))
+ (match_operand:V_VLSF 2 "vector_merge_operand" " vu, vu, 0, 0, vu, vu, 0, 0, vu, vu, 0, 0, vu, 0")))]
"TARGET_VECTOR"
"vfwcvt.f.x<u>.v\t%0,%3%p1"
[(set_attr "type" "vfwcvtitof")
- (set_attr "mode" "<VNCONVERT>")])
+ (set_attr "mode" "<VNCONVERT>")
+ (set_attr "group_overlap" "W21,W21,W21,W21,W42,W42,W42,W42,W84,W84,W84,W84,none,none")])
(define_insn "@pred_extend<mode>"
- [(set (match_operand:VWEXTF_ZVFHMIN 0 "register_operand" "=&vr, &vr")
+ [(set (match_operand:VWEXTF_ZVFHMIN 0 "register_operand" "=vd, vr, vd, vr, vd, vr, vd, vr, vd, vr, vd, vr, ?&vr, ?&vr")
(if_then_else:VWEXTF_ZVFHMIN
(unspec:<VM>
- [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
- (match_operand 4 "vector_length_operand" " rK, rK")
- (match_operand 5 "const_int_operand" " i, i")
- (match_operand 6 "const_int_operand" " i, i")
- (match_operand 7 "const_int_operand" " i, i")
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1, vm,Wc1,vmWc1,vmWc1")
+ (match_operand 4 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK, rK")
+ (match_operand 5 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
+ (match_operand 6 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
+ (match_operand 7 "const_int_operand" "i, i, i, i, i, i, i, i, i, i, i, i, i, i")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(float_extend:VWEXTF_ZVFHMIN
- (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " vr, vr"))
- (match_operand:VWEXTF_ZVFHMIN 2 "vector_merge_operand" " vu, 0")))]
+ (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" "W21,W21,W21,W21,W42,W42,W42,W42,W84,W84,W84,W84, vr, vr"))
+ (match_operand:VWEXTF_ZVFHMIN 2 "vector_merge_operand" " vu, vu, 0, 0, vu, vu, 0, 0, vu, vu, 0, 0, vu, 0")))]
"TARGET_VECTOR"
"vfwcvt.f.f.v\t%0,%3%p1"
[(set_attr "type" "vfwcvtftof")
- (set_attr "mode" "<V_DOUBLE_TRUNC>")])
+ (set_attr "mode" "<V_DOUBLE_TRUNC>")
+ (set_attr "group_overlap" "W21,W21,W21,W21,W42,W42,W42,W42,W84,W84,W84,W84,none,none")])
;; -------------------------------------------------------------------------------
;; ---- Predicated floating-point narrow conversions
@@ -7847,7 +7866,7 @@
;; Integer Widen Reduction Sum (vwredsum[u].vs)
(define_insn "@pred_<reduc_op><mode>"
- [(set (match_operand:<V_EXT_LMUL1> 0 "register_operand" "=&vr,&vr")
+ [(set (match_operand:<V_EXT_LMUL1> 0 "register_operand" "=vr, vr")
(unspec:<V_EXT_LMUL1>
[(unspec:<VM>
[(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
@@ -7858,7 +7877,7 @@
(reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
(unspec:<V_EXT_LMUL1> [
(match_operand:VI_QHS 3 "register_operand" " vr, vr")
- (match_operand:<V_EXT_LMUL1> 4 "register_operand" " vr0, vr0")
+ (match_operand:<V_EXT_LMUL1> 4 "register_operand" " vr, vr")
] ANY_WREDUC)
(match_operand:<V_EXT_LMUL1> 2 "vector_merge_operand" " vu, 0")] UNSPEC_REDUC))]
"TARGET_VECTOR"
@@ -7914,7 +7933,7 @@
;; Float Widen Reduction Sum (vfwred[ou]sum.vs)
(define_insn "@pred_<reduc_op><mode>"
- [(set (match_operand:<V_EXT_LMUL1> 0 "register_operand" "=&vr, &vr")
+ [(set (match_operand:<V_EXT_LMUL1> 0 "register_operand" "=vr, vr")
(unspec:<V_EXT_LMUL1>
[(unspec:<VM>
[(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
@@ -7927,7 +7946,7 @@
(reg:SI FRM_REGNUM)] UNSPEC_VPREDICATE)
(unspec:<V_EXT_LMUL1> [
(match_operand:VF_HS 3 "register_operand" " vr, vr")
- (match_operand:<V_EXT_LMUL1> 4 "register_operand" " vr0, vr0")
+ (match_operand:<V_EXT_LMUL1> 4 "register_operand" " vr, vr")
] ANY_FWREDUC_SUM)
(match_operand:<V_EXT_LMUL1> 2 "vector_merge_operand" " vu, 0")] UNSPEC_REDUC))]
"TARGET_VECTOR"
@@ -8139,7 +8158,7 @@
(match_operand:V_VLSI_D 3 "register_operand" " vr, vr, vr, vr")
(sign_extend:<VEL>
(match_operand:<VSUBEL> 4 "reg_or_0_operand" " rJ, rJ, rJ, rJ"))] VSLIDES1))]
- "TARGET_VECTOR"
+ "TARGET_VECTOR && !TARGET_64BIT"
"vslide<ud>.vx\t%0,%3,%z4%p1"
[(set_attr "type" "vislide<ud>")
(set_attr "mode" "<MODE>")])
diff --git a/gcc/config/rl78/rl78.cc b/gcc/config/rl78/rl78.cc
index 7f13e83..5d8fddb 100644
--- a/gcc/config/rl78/rl78.cc
+++ b/gcc/config/rl78/rl78.cc
@@ -898,7 +898,7 @@ rl78_handle_vector_attribute (tree * node,
#define TARGET_ATTRIBUTE_TABLE rl78_attribute_table
/* Table of RL78-specific attributes. */
-const struct attribute_spec rl78_attribute_table[] =
+TARGET_GNU_ATTRIBUTES (rl78_attribute_table,
{
/* Name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude. */
@@ -911,9 +911,8 @@ const struct attribute_spec rl78_attribute_table[] =
{ "saddr", 0, 0, true, false, false, false,
rl78_handle_saddr_attribute, NULL },
{ "vector", 1, -1, true, false, false, false,
- rl78_handle_vector_attribute, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ rl78_handle_vector_attribute, NULL }
+});
diff --git a/gcc/config/rs6000/rs6000.cc b/gcc/config/rs6000/rs6000.cc
index 3dfd79c..2d8afc1 100644
--- a/gcc/config/rs6000/rs6000.cc
+++ b/gcc/config/rs6000/rs6000.cc
@@ -1255,7 +1255,7 @@ static const char alt_reg_names[][8] =
/* Table of valid machine attributes. */
-static const struct attribute_spec rs6000_attribute_table[] =
+static const attribute_spec rs6000_gnu_attributes[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
@@ -1272,7 +1272,16 @@ static const struct attribute_spec rs6000_attribute_table[] =
#ifdef SUBTARGET_ATTRIBUTE_TABLE
SUBTARGET_ATTRIBUTE_TABLE,
#endif
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
+};
+
+static const scoped_attribute_specs rs6000_gnu_attribute_table =
+{
+ "gnu", { rs6000_gnu_attributes }
+};
+
+static const scoped_attribute_specs *const rs6000_attribute_table[] =
+{
+ &rs6000_gnu_attribute_table
};
#ifndef TARGET_PROFILE_KERNEL
@@ -3378,7 +3387,8 @@ darwin_rs6000_override_options (void)
static rtx_insn *
rs6000_md_asm_adjust (vec<rtx> & /*outputs*/, vec<rtx> & /*inputs*/,
vec<machine_mode> & /*input_modes*/,
- vec<const char *> & /*constraints*/, vec<rtx> &clobbers,
+ vec<const char *> & /*constraints*/,
+ vec<rtx> &/*uses*/, vec<rtx> &clobbers,
HARD_REG_SET &clobbered_regs, location_t /*loc*/)
{
clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
@@ -24389,7 +24399,8 @@ invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const
&& VECTOR_TYPE_P (TREE_TYPE (val))
&& (funcdecl == NULL_TREE
|| (TREE_CODE (funcdecl) == FUNCTION_DECL
- && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
+ && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD
+ && !fndecl_built_in_p (funcdecl, BUILT_IN_CLASSIFY_TYPE))))
? N_("AltiVec argument passed to unprototyped function")
: NULL;
}
diff --git a/gcc/config/rs6000/rs6000.md b/gcc/config/rs6000/rs6000.md
index 2a1b5ec..28482e3 100644
--- a/gcc/config/rs6000/rs6000.md
+++ b/gcc/config/rs6000/rs6000.md
@@ -5358,7 +5358,7 @@
(set (match_dup 4)
(neg:SFDF (abs:SFDF (match_dup 1))))
(set (match_operand:SFDF 0 "gpc_reg_operand")
- (if_then_else:SFDF (ge (match_operand:SFDF 2 "gpc_reg_operand")
+ (if_then_else:SFDF (ge (match_operand:SFDF 2 "any_operand")
(match_dup 5))
(match_dup 3)
(match_dup 4)))]
@@ -5369,6 +5369,24 @@
|| TARGET_CMPB
|| VECTOR_UNIT_VSX_P (<MODE>mode))"
{
+ /* Middle-end canonicalizes -fabs (x) to copysign (x, -1),
+ but PowerPC prefers -fabs (x). */
+ if (CONST_DOUBLE_AS_FLOAT_P (operands[2]))
+ {
+ if (real_isneg (CONST_DOUBLE_REAL_VALUE (operands[2])))
+ {
+ operands[3] = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_abs<mode>2 (operands[3], operands[1]));
+ emit_insn (gen_neg<mode>2 (operands[0], operands[3]));
+ }
+ else
+ emit_insn (gen_abs<mode>2 (operands[0], operands[1]));
+ DONE;
+ }
+
+ if (!gpc_reg_operand (operands[2], <MODE>mode))
+ operands[2] = copy_to_mode_reg (<MODE>mode, operands[2]);
+
if (TARGET_CMPB || VECTOR_UNIT_VSX_P (<MODE>mode))
{
emit_insn (gen_copysign<mode>3_fcpsgn (operands[0], operands[1],
diff --git a/gcc/config/rx/rx.cc b/gcc/config/rx/rx.cc
index 245c6a4..0754e28 100644
--- a/gcc/config/rx/rx.cc
+++ b/gcc/config/rx/rx.cc
@@ -2760,7 +2760,7 @@ rx_handle_vector_attribute (tree * node,
}
/* Table of RX specific attributes. */
-const struct attribute_spec rx_attribute_table[] =
+TARGET_GNU_ATTRIBUTES (rx_attribute_table,
{
/* Name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude. */
@@ -2771,9 +2771,8 @@ const struct attribute_spec rx_attribute_table[] =
{ "naked", 0, 0, true, false, false, false,
rx_handle_func_attribute, NULL },
{ "vector", 1, -1, true, false, false, false,
- rx_handle_vector_attribute, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ rx_handle_vector_attribute, NULL }
+});
/* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE. */
diff --git a/gcc/config/s390/s390.cc b/gcc/config/s390/s390.cc
index 29b5dc9..044de87 100644
--- a/gcc/config/s390/s390.cc
+++ b/gcc/config/s390/s390.cc
@@ -1303,7 +1303,7 @@ s390_handle_string_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
return NULL_TREE;
}
-static const struct attribute_spec s390_attribute_table[] = {
+TARGET_GNU_ATTRIBUTES (s390_attribute_table, {
{ "hotpatch", 2, 2, true, false, false, false,
s390_handle_hotpatch_attribute, NULL },
{ "s390_vector_bool", 0, 0, false, true, false, true,
@@ -1319,11 +1319,8 @@ static const struct attribute_spec s390_attribute_table[] = {
{ "function_return_reg", 1, 1, true, false, false, false,
s390_handle_string_attribute, NULL },
{ "function_return_mem", 1, 1, true, false, false, false,
- s390_handle_string_attribute, NULL },
-
- /* End element. */
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ s390_handle_string_attribute, NULL }
+});
/* Return the alignment for LABEL. We default to the -falign-labels
value except for the literal pool base label. */
@@ -12650,7 +12647,8 @@ s390_invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl,
&& VECTOR_TYPE_P (TREE_TYPE (val))
&& (funcdecl == NULL_TREE
|| (TREE_CODE (funcdecl) == FUNCTION_DECL
- && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
+ && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD
+ && !fndecl_built_in_p (funcdecl, BUILT_IN_CLASSIFY_TYPE))))
? N_("vector argument passed to unprototyped function")
: NULL);
}
@@ -17515,7 +17513,8 @@ s390_hard_fp_reg_p (rtx x)
static rtx_insn *
s390_md_asm_adjust (vec<rtx> &outputs, vec<rtx> &inputs,
vec<machine_mode> &input_modes,
- vec<const char *> &constraints, vec<rtx> & /*clobbers*/,
+ vec<const char *> &constraints,
+ vec<rtx> &/*uses*/, vec<rtx> &/*clobbers*/,
HARD_REG_SET &clobbered_regs, location_t loc)
{
@@ -17604,6 +17603,10 @@ s390_md_asm_adjust (vec<rtx> &outputs, vec<rtx> &inputs,
outputs[i] = fprx2;
}
+ if (!TARGET_VXE)
+ /* Long doubles are stored in FPR pairs - nothing left to do. */
+ return after_md_seq;
+
for (unsigned i = 0; i < ninputs; i++)
{
if (GET_MODE (inputs[i]) != TFmode)
diff --git a/gcc/config/sh/sh.cc b/gcc/config/sh/sh.cc
index 6ec2eec..8c378b2 100644
--- a/gcc/config/sh/sh.cc
+++ b/gcc/config/sh/sh.cc
@@ -329,7 +329,7 @@ static bool sh_hard_regno_mode_ok (unsigned int, machine_mode);
static bool sh_modes_tieable_p (machine_mode, machine_mode);
static bool sh_can_change_mode_class (machine_mode, machine_mode, reg_class_t);
-static const struct attribute_spec sh_attribute_table[] =
+TARGET_GNU_ATTRIBUTES (sh_attribute_table,
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
@@ -348,9 +348,8 @@ static const struct attribute_spec sh_attribute_table[] =
{ "resbank", 0, 0, true, false, false, false,
sh_handle_resbank_handler_attribute, NULL },
{ "function_vector", 1, 1, true, false, false, false,
- sh2a_handle_function_vector_handler_attribute, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ sh2a_handle_function_vector_handler_attribute, NULL }
+});
/* Initialize the GCC target structure. */
#undef TARGET_ATTRIBUTE_TABLE
diff --git a/gcc/config/sparc/sparc.cc b/gcc/config/sparc/sparc.cc
index e90739d..c09dbcd 100644
--- a/gcc/config/sparc/sparc.cc
+++ b/gcc/config/sparc/sparc.cc
@@ -721,13 +721,12 @@ static HARD_REG_SET sparc_zero_call_used_regs (HARD_REG_SET);
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes. */
-static const struct attribute_spec sparc_attribute_table[] =
+TARGET_GNU_ATTRIBUTES (sparc_attribute_table,
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
do_diagnostic, handler, exclude } */
- SUBTARGET_ATTRIBUTE_TABLE,
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ SUBTARGET_ATTRIBUTE_TABLE
+});
#endif
char sparc_hard_reg_printed[8];
diff --git a/gcc/config/stormy16/stormy16.cc b/gcc/config/stormy16/stormy16.cc
index 1088715..071043b 100644
--- a/gcc/config/stormy16/stormy16.cc
+++ b/gcc/config/stormy16/stormy16.cc
@@ -2377,7 +2377,7 @@ static tree xstormy16_handle_interrupt_attribute
static tree xstormy16_handle_below100_attribute
(tree *, tree, tree, int, bool *);
-static const struct attribute_spec xstormy16_attribute_table[] =
+TARGET_GNU_ATTRIBUTES (xstormy16_attribute_table,
{
/* name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude. */
@@ -2386,9 +2386,8 @@ static const struct attribute_spec xstormy16_attribute_table[] =
{ "BELOW100", 0, 0, false, false, false, false,
xstormy16_handle_below100_attribute, NULL },
{ "below100", 0, 0, false, false, false, false,
- xstormy16_handle_below100_attribute, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ xstormy16_handle_below100_attribute, NULL }
+});
/* Handle an "interrupt" attribute;
arguments as in struct attribute_spec.handler. */
diff --git a/gcc/config/v850/v850.cc b/gcc/config/v850/v850.cc
index 416c284..50c91c6 100644
--- a/gcc/config/v850/v850.cc
+++ b/gcc/config/v850/v850.cc
@@ -3114,7 +3114,7 @@ v850_adjust_insn_length (rtx_insn *insn, int length)
/* V850 specific attributes. */
-static const struct attribute_spec v850_attribute_table[] =
+TARGET_GNU_ATTRIBUTES (v850_attribute_table,
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
@@ -3127,9 +3127,8 @@ static const struct attribute_spec v850_attribute_table[] =
{ "tda", 0, 0, true, false, false, false,
v850_handle_data_area_attribute, NULL },
{ "zda", 0, 0, true, false, false, false,
- v850_handle_data_area_attribute, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
-};
+ v850_handle_data_area_attribute, NULL }
+});
static void
v850_option_override (void)
diff --git a/gcc/config/vax/vax.cc b/gcc/config/vax/vax.cc
index 032de71..ccaf14b 100644
--- a/gcc/config/vax/vax.cc
+++ b/gcc/config/vax/vax.cc
@@ -58,7 +58,8 @@ static bool vax_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static machine_mode vax_cc_modes_compatible (machine_mode, machine_mode);
static rtx_insn *vax_md_asm_adjust (vec<rtx> &, vec<rtx> &,
vec<machine_mode> &, vec<const char *> &,
- vec<rtx> &, HARD_REG_SET &, location_t);
+ vec<rtx> &, vec<rtx> &, HARD_REG_SET &,
+ location_t);
static rtx vax_function_arg (cumulative_args_t, const function_arg_info &);
static void vax_function_arg_advance (cumulative_args_t,
const function_arg_info &);
@@ -1180,6 +1181,7 @@ vax_md_asm_adjust (vec<rtx> &outputs ATTRIBUTE_UNUSED,
vec<rtx> &inputs ATTRIBUTE_UNUSED,
vec<machine_mode> &input_modes ATTRIBUTE_UNUSED,
vec<const char *> &constraints ATTRIBUTE_UNUSED,
+ vec<rtx> &/*uses*/,
vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs,
location_t /*loc*/)
{
diff --git a/gcc/config/visium/visium.cc b/gcc/config/visium/visium.cc
index 5fadbc8..0691ea2 100644
--- a/gcc/config/visium/visium.cc
+++ b/gcc/config/visium/visium.cc
@@ -145,14 +145,13 @@ static inline bool current_function_has_lr_slot (void);
/* Supported attributes:
interrupt -- specifies this function is an interrupt handler. */
-static const struct attribute_spec visium_attribute_table[] =
+TARGET_GNU_ATTRIBUTES (visium_attribute_table,
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
{ "interrupt", 0, 0, true, false, false, false, visium_handle_interrupt_attr,
- NULL},
- { NULL, 0, 0, false, false, false, false, NULL, NULL },
-};
+ NULL}
+});
static struct machine_function *visium_init_machine_status (void);
@@ -190,7 +189,7 @@ static tree visium_build_builtin_va_list (void);
static rtx_insn *visium_md_asm_adjust (vec<rtx> &, vec<rtx> &,
vec<machine_mode> &,
vec<const char *> &, vec<rtx> &,
- HARD_REG_SET &, location_t);
+ vec<rtx> &, HARD_REG_SET &, location_t);
static bool visium_legitimate_constant_p (machine_mode, rtx);
@@ -795,7 +794,8 @@ visium_conditional_register_usage (void)
static rtx_insn *
visium_md_asm_adjust (vec<rtx> & /*outputs*/, vec<rtx> & /*inputs*/,
vec<machine_mode> & /*input_modes*/,
- vec<const char *> & /*constraints*/, vec<rtx> &clobbers,
+ vec<const char *> & /*constraints*/,
+ vec<rtx> &/*uses*/, vec<rtx> &clobbers,
HARD_REG_SET &clobbered_regs, location_t /*loc*/)
{
clobbers.safe_push (gen_rtx_REG (CCmode, FLAGS_REGNUM));
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index fb257ad..308d02b 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,270 @@
+2023-12-05 Jakub Jelinek <jakub@redhat.com>
+
+ PR c++/112795
+ * cp-tree.h (cp_check_pragma_unroll): Declare.
+ * semantics.cc (cp_check_pragma_unroll): New function.
+ * parser.cc (cp_parser_pragma_unroll): Use cp_check_pragma_unroll.
+ * pt.cc (tsubst_expr) <case ANNOTATE_EXPR>: Likewise.
+	(tsubst_stmt) <case RANGE_FOR_STMT>: Likewise.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * tree.cc (cxx_gnu_attribute_table): Add extra braces to work
+ around PR 16333 in older compilers.
+
+2023-12-05 Jakub Jelinek <jakub@redhat.com>
+
+ PR c++/110734
+ * parser.cc (cp_parser_block_declaration): Implement C++ DR 2262
+ - Attributes for asm-definition. Call cp_parser_asm_definition
+ even if RID_ASM token is only seen after sequence of standard
+ attributes.
+ (cp_parser_asm_definition): Parse standard attributes before
+ RID_ASM token and warn for them with -Wattributes.
+
+2023-12-05 Marek Polacek <polacek@redhat.com>
+
+ PR c++/107687
+ PR c++/110997
+ * call.cc (in_immediate_context): No longer static.
+ * constexpr.cc (cxx_eval_call_expression): Adjust assert.
+ * cp-gimplify.cc (deferred_escalating_exprs): New vec.
+ (remember_escalating_expr): New.
+ (enum fold_flags): Remove ff_fold_immediate.
+ (immediate_escalating_function_p): New.
+ (unchecked_immediate_escalating_function_p): New.
+ (promote_function_to_consteval): New.
+ (cp_fold_immediate): Move above. Return non-null if any errors were
+ emitted.
+ (maybe_explain_promoted_consteval): New.
+ (cp_gimplify_expr) <case CALL_EXPR>: Assert we've handled all
+ immediate invocations.
+ (taking_address_of_imm_fn_error): New.
+ (cp_fold_immediate_r): Merge ADDR_EXPR and PTRMEM_CST cases. Implement
+ P2564 - promoting functions to consteval.
+ <case CALL_EXPR>: Implement P2564 - promoting functions to consteval.
+ (cp_fold_r): If an expression turns into a CALL_EXPR after cp_fold,
+ call cp_fold_immediate_r on the CALL_EXPR.
+ (cp_fold_function): Set DECL_ESCALATION_CHECKED_P if
+ deferred_escalating_exprs does not contain current_function_decl.
+ (process_and_check_pending_immediate_escalating_fns): New.
+ * cp-tree.h (struct lang_decl_fn): Add escalated_p bit-field.
+ (DECL_ESCALATION_CHECKED_P): New.
+ (immediate_invocation_p): Declare.
+ (process_pending_immediate_escalating_fns): Likewise.
+ * decl2.cc (c_parse_final_cleanups): Set at_eof to 2 after all
+ templates have been instantiated; and to 3 at the end of the function.
+ Call process_pending_immediate_escalating_fns.
+ * error.cc (dump_template_bindings): Check at_eof against an updated
+ value.
+ * module.cc (trees_out::lang_decl_bools): Stream escalated_p.
+ (trees_in::lang_decl_bools): Likewise.
+ * pt.cc (push_tinst_level_loc): Set at_eof to 3, not 2.
+ * typeck.cc (cp_build_addr_expr_1): Don't check
+ DECL_IMMEDIATE_FUNCTION_P.
+
+2023-12-04 Jason Merrill <jason@redhat.com>
+
+ * constexpr.cc (potential_constant_expression_1): Fix
+ check for loading volatile lvalue.
+
+2023-12-04 Jakub Jelinek <jakub@redhat.com>
+
+ PR c++/112795
+ * cp-tree.h (cp_convert_range_for): Change UNROLL type from
+ unsigned short to tree.
+ (finish_while_stmt_cond, finish_do_stmt, finish_for_cond): Likewise.
+ * parser.cc (cp_parser_statement): Pass NULL_TREE rather than 0 to
+ cp_parser_iteration_statement UNROLL argument.
+ (cp_parser_for, cp_parser_c_for): Change UNROLL type from
+ unsigned short to tree.
+ (cp_parser_range_for): Likewise. Set RANGE_FOR_UNROLL to just UNROLL
+ rather than build_int_cst from it.
+ (cp_convert_range_for, cp_parser_iteration_statement): Change UNROLL
+ type from unsigned short to tree.
+ (cp_parser_omp_loop_nest): Pass NULL_TREE rather than 0 to
+ cp_parser_range_for UNROLL argument.
+ (cp_parser_pragma_unroll): Return tree rather than unsigned short.
+ If parsed expression is type dependent, just return it, don't diagnose
+ issues with value if it is value dependent.
+ (cp_parser_pragma): Change UNROLL type from unsigned short to tree.
+ * semantics.cc (finish_while_stmt_cond): Change UNROLL type from
+ unsigned short to tree. Build ANNOTATE_EXPR with UNROLL as its last
+ operand rather than build_int_cst from it.
+ (finish_do_stmt, finish_for_cond): Likewise.
+ * pt.cc (tsubst_stmt) <case RANGE_FOR_STMT>: Change UNROLL type from
+ unsigned short to tree and set it to RECUR on RANGE_FOR_UNROLL (t).
+ (tsubst_expr) <case ANNOTATE_EXPR>: For annot_expr_unroll_kind repeat
+ checks on UNROLL value from cp_parser_pragma_unroll.
+
+2023-12-02 Richard Sandiford <richard.sandiford@arm.com>
+
+ * cp-tree.h (cxx_attribute_table): Delete.
+ (cxx_gnu_attribute_table, std_attribute_table): Declare.
+ * cp-objcp-common.h (LANG_HOOKS_COMMON_ATTRIBUTE_TABLE): Delete.
+ (LANG_HOOKS_FORMAT_ATTRIBUTE_TABLE): Delete.
+ (cp_objcp_attribute_table): New table.
+ (LANG_HOOKS_ATTRIBUTE_TABLE): Redefine.
+ * tree.cc (cxx_attribute_table): Replace with...
+ (cxx_gnu_attributes, cxx_gnu_attribute_table): ...these globals.
+ (std_attribute_table): Change type to scoped_attribute_specs, using...
+ (std_attributes): ...this as the underlying array.
+ (init_tree): Remove call to register_scoped_attributes.
+
+2023-12-01 Jason Merrill <jason@redhat.com>
+
+ * mangle.cc (write_type): Mangle placeholder as its template.
+
+2023-12-01 Jason Merrill <jason@redhat.com>
+
+ * cp-tree.h (TEMPLATE_ARGS_TYPE_CONSTRAINT_P): New.
+ (get_concept_check_template): Declare.
+ * constraint.cc (combine_constraint_expressions)
+ (finish_shorthand_constraint): Use UNKNOWN_LOCATION.
+ * pt.cc (convert_generic_types_to_packs): Likewise.
+ * mangle.cc (write_constraint_expression)
+ (write_tparms_constraints, write_type_constraint)
+ (template_parm_natural_p, write_requirement)
+ (write_requires_expr): New.
+ (write_encoding): Mangle trailing requires-clause.
+ (write_name): Pass parms to write_template_args.
+ (write_template_param_decl): Factor out from...
+ (write_closure_template_head): ...here.
+ (write_template_args): Mangle non-natural parms
+ and requires-clause.
+ (write_expression): Handle REQUIRES_EXPR.
+
+2023-12-01 Jason Merrill <jason@redhat.com>
+
+ * semantics.cc (finish_non_static_data_member)
+ (finish_decltype_type, capture_decltype):
+ Handle deduced closure parameter.
+
+2023-11-30 Marek Polacek <polacek@redhat.com>
+
+ PR c++/112744
+ * typeck.cc (finish_class_member_access_expr): When accessing
+ a static data member, use ba_any for lookup_base.
+
+2023-11-30 Jakub Jelinek <jakub@redhat.com>
+
+ PR c++/110349
+ * cp-tree.h: Implement C++26 P2169R4 - Placeholder variables with no
+ name.
+ (OVL_NAME_INDEPENDENT_DECL_P): Define.
+ (add_capture): Add unsigned * argument.
+ (name_independent_decl_p): New inline function.
+ * name-lookup.cc (class name_lookup): Make ambiguous and
+ add_value members public.
+ (name_independent_linear_search): New function.
+ (get_class_binding_direct): Handle member_vec_binary_search
+ returning OVL_NAME_INDEPENDENT_DECL_P OVERLOAD. Use
+ name_independent_linear_search rather than fields_linear_search
+ for linear lookup of _ name if !want_type.
+ (member_name_cmp): Sort name-independent declarations first.
+ (member_vec_dedup): Handle name-independent declarations.
+ (pop_local_binding): Handle binding->value being a TREE_LIST for
+ ambiguous name-independent declarations.
+ (supplement_binding): Handle name-independent declarations.
+ (update_binding): Likewise.
+ (check_local_shadow): Return tree rather than void, normally
+ NULL_TREE but old for name-independent declarations which used
+ to conflict with outer scope declaration. Don't emit -Wshadow*
+ warnings for name-independent declarations.
+ (pushdecl): Handle name-independent declarations.
+ * search.cc (lookup_field_r): Handle nval being a TREE_LIST.
+ * lambda.cc (build_capture_proxy): Adjust for ___.<number>
+ names of members.
+ (add_capture): Add NAME_INDEPENDENT_CNT argument. Use ___.<number>
+ name rather than ___ for second and following capture with
+ _ name.
+ (add_default_capture): Adjust add_capture caller.
+ * decl.cc (poplevel): Don't warn about name-independent declarations.
+ (duplicate_decls): If in C++26 a _ named declaration conflicts with
+ earlier declarations, emit explaining note why the new declaration
+ is not name-independent.
+ (reshape_init_class): If field is a TREE_LIST, emit an ambiguity
+ error with list of candidates rather than error about non-existing
+ non-static data member.
+ * parser.cc (cp_parser_lambda_introducer): Adjust add_capture callers.
+ Allow name-independent capture redeclarations.
+ (cp_parser_decomposition_declaration): Set decl_specs.storage_class
+ to sc_static for static structured bindings.
+ * pt.cc (tsubst_lambda_expr): Adjust add_capture caller.
+
+2023-11-30 Alexandre Oliva <oliva@adacore.com>
+
+ * decl.cc (finish_enum_value_list): Set TYPE_PACKED if
+ use_short_enum, and propagate it to variants.
+
+2023-11-30 Jason Merrill <jason@redhat.com>
+
+ * cp-tree.h (LAMBDA_EXPR_MUTABLE_P): Remove.
+ * cp-tree.def: Remove documentation.
+ * lambda.cc (build_lambda_expr): Remove reference.
+ * parser.cc (cp_parser_lambda_declarator_opt): Likewise.
+ * pt.cc (tsubst_lambda_expr): Likewise.
+ * ptree.cc (cxx_print_lambda_node): Likewise.
+ * semantics.cc (capture_decltype): Get the object quals
+ from the object instead.
+
+2023-11-29 Marek Polacek <polacek@redhat.com>
+
+ PR c++/106650
+ * constexpr.cc (cxx_eval_constant_expression) <case PARM_DECL>: Allow
+ reference to unknown/this as per P2280.
+ <case VAR_DECL>: Allow reference to unknown as per P2280.
+
+2023-11-29 Patrick Palka <ppalka@redhat.com>
+
+ PR c++/112765
+ * pt.cc (tsubst_expr) <case MODOP_EXPR>: Look through implicit
+ INDIRECT_REF when propagating -Wparentheses warning suppression.
+ * semantics.cc (maybe_warn_unparenthesized_assignment): Replace
+ REFERENCE_REF_P handling with STRIP_REFERENCE_REF.
+ (finish_parenthesized_expr): Likewise.
+
+2023-11-29 Jakub Jelinek <jakub@redhat.com>
+
+ * semantics.cc (finish_static_assert): Free buf on error return.
+
+2023-11-29 Alexandre Oliva <oliva@adacore.com>
+
+ * decl.cc (maybe_prepare_return_this): Split out of...
+ (maybe_return_this): ... this.
+ * cp-tree.h (maybe_prepare_return_this): Declare.
+ * class.cc (build_clone): Call it.
+
+2023-11-29 Alexandre Oliva <oliva@adacore.com>
+
+ * contracts.cc (check_postcondition_result): Cope with
+ cdtor_return_this.
+
+2023-11-28 Jason Merrill <jason@redhat.com>
+
+ PR c++/94264
+ PR c++/53220
+ * call.cc (convert_like_internal): Remove obsolete comment.
+ * typeck.cc (decay_conversion): Allow array prvalue.
+ (maybe_warn_about_returning_address_of_local): Check
+ for returning pointer to temporary.
+
+2023-11-28 Jakub Jelinek <jakub@redhat.com>
+
+ * cp-objcp-common.cc (cp_feature_table): Evaluate
+ __has_extension (cxx_init_captures) to 1 even for -std=c++11.
+
+2023-11-27 Alex Coplan <alex.coplan@arm.com>
+ Iain Sandoe <iain@sandoe.co.uk>
+
+ PR c++/60512
+ * cp-lang.cc (c_family_register_lang_features): New.
+ * cp-objcp-common.cc (struct cp_feature_selector): New.
+ (cp_feature_selector::has_feature): New.
+ (struct cp_feature_info): New.
+ (cp_register_features): New.
+ * cp-objcp-common.h (cp_register_features): New.
+
2023-11-25 Nathaniel Shead <nathanieloshead@gmail.com>
* name-lookup.cc (check_can_export_using_decl): New.
diff --git a/gcc/cp/call.cc b/gcc/cp/call.cc
index 81b104f..c7efc5b 100644
--- a/gcc/cp/call.cc
+++ b/gcc/cp/call.cc
@@ -8578,8 +8578,6 @@ convert_like_internal (conversion *convs, tree expr, tree fn, int argnum,
array = finish_compound_literal (array, new_ctor, complain);
/* This is dubious now, should be blessed by P2752. */
DECL_MERGEABLE (TARGET_EXPR_SLOT (array)) = true;
- /* Take the address explicitly rather than via decay_conversion
- to avoid the error about taking the address of a temporary. */
array = cp_build_addr_expr (array, complain);
}
else
@@ -9744,7 +9742,7 @@ in_immediate_context ()
/* Return true if a call to FN with number of arguments NARGS
is an immediate invocation. */
-static bool
+bool
immediate_invocation_p (tree fn)
{
return (TREE_CODE (fn) == FUNCTION_DECL
diff --git a/gcc/cp/class.cc b/gcc/cp/class.cc
index 4766b7c..6fdb56a 100644
--- a/gcc/cp/class.cc
+++ b/gcc/cp/class.cc
@@ -5053,6 +5053,8 @@ build_clone (tree fn, tree name, bool need_vtt_parm_p,
clone = copy_fndecl_with_name (fn, name, ERROR_MARK,
need_vtt_parm_p, omit_inherited_parms_p);
DECL_CLONED_FUNCTION (clone) = fn;
+
+ maybe_prepare_return_this (clone);
}
/* Remember where this function came from. */
diff --git a/gcc/cp/constexpr.cc b/gcc/cp/constexpr.cc
index 344107d..58187a4 100644
--- a/gcc/cp/constexpr.cc
+++ b/gcc/cp/constexpr.cc
@@ -3128,11 +3128,11 @@ cxx_eval_call_expression (const constexpr_ctx *ctx, tree t,
/* OK */;
else if (!DECL_SAVED_TREE (fun))
{
- /* When at_eof >= 2, cgraph has started throwing away
+ /* When at_eof >= 3, cgraph has started throwing away
DECL_SAVED_TREE, so fail quietly. FIXME we get here because of
late code generation for VEC_INIT_EXPR, which needs to be
completely reconsidered. */
- gcc_assert (at_eof >= 2 && ctx->quiet);
+ gcc_assert (at_eof >= 3 && ctx->quiet);
*non_constant_p = true;
}
else if (tree copy = get_fundef_copy (new_call.fundef))
@@ -7336,7 +7336,9 @@ cxx_eval_constant_expression (const constexpr_ctx *ctx, tree t,
if (TREE_CODE (r) == TARGET_EXPR
&& TREE_CODE (TARGET_EXPR_INITIAL (r)) == CONSTRUCTOR)
r = TARGET_EXPR_INITIAL (r);
- if (DECL_P (r))
+ if (DECL_P (r)
+ /* P2280 allows references to unknown. */
+ && !(VAR_P (t) && TYPE_REF_P (TREE_TYPE (t))))
{
if (!ctx->quiet)
non_const_var_error (loc, r, /*fundef_p*/false);
@@ -7378,6 +7380,10 @@ cxx_eval_constant_expression (const constexpr_ctx *ctx, tree t,
r = build_constructor (TREE_TYPE (t), NULL);
TREE_CONSTANT (r) = true;
}
+ else if (TYPE_REF_P (TREE_TYPE (t)))
+ /* P2280 allows references to unknown... */;
+ else if (is_this_parameter (t))
+ /* ...as well as the this pointer. */;
else
{
if (!ctx->quiet)
@@ -9381,7 +9387,8 @@ potential_constant_expression_1 (tree t, bool want_rval, bool strict, bool now,
available, so we don't bother with switch tracking. */
return true;
- if (TREE_THIS_VOLATILE (t) && want_rval)
+ if (TREE_THIS_VOLATILE (t) && want_rval
+ && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (t)))
{
if (flags & tf_error)
constexpr_error (loc, fundef_p, "lvalue-to-rvalue conversion of "
diff --git a/gcc/cp/constraint.cc b/gcc/cp/constraint.cc
index 64b64e1..d9972d6 100644
--- a/gcc/cp/constraint.cc
+++ b/gcc/cp/constraint.cc
@@ -240,7 +240,9 @@ combine_constraint_expressions (tree lhs, tree rhs)
return rhs;
if (!rhs)
return lhs;
- return finish_constraint_and_expr (input_location, lhs, rhs);
+ /* Use UNKNOWN_LOCATION so write_template_args can tell the difference
+ between this and a && the user wrote. */
+ return finish_constraint_and_expr (UNKNOWN_LOCATION, lhs, rhs);
}
/* Extract the template-id from a concept check. For standard and variable
@@ -1605,9 +1607,11 @@ finish_shorthand_constraint (tree decl, tree constr)
check = ovl_make (tmpl);
check = build_concept_check (check, arg, args, tf_warning_or_error);
- /* Make the check a fold-expression if needed. */
+ /* Make the check a fold-expression if needed.
+ Use UNKNOWN_LOCATION so write_template_args can tell the
+ difference between this and a fold the user wrote. */
if (apply_to_each_p && declared_pack_p)
- check = finish_left_unary_fold_expr (DECL_SOURCE_LOCATION (decl),
+ check = finish_left_unary_fold_expr (UNKNOWN_LOCATION,
check, TRUTH_ANDIF_EXPR);
return check;
diff --git a/gcc/cp/contracts.cc b/gcc/cp/contracts.cc
index 66d2298..035ca48 100644
--- a/gcc/cp/contracts.cc
+++ b/gcc/cp/contracts.cc
@@ -636,7 +636,11 @@ make_postcondition_variable (cp_expr id)
bool
check_postcondition_result (tree decl, tree type, location_t loc)
{
- if (VOID_TYPE_P (type))
+ /* Do not be confused by targetm.cxx.cdtor_return_this ();
+ conceptually, cdtors have no return value. */
+ if (VOID_TYPE_P (type)
+ || DECL_CONSTRUCTOR_P (decl)
+ || DECL_DESTRUCTOR_P (decl))
{
error_at (loc,
DECL_CONSTRUCTOR_P (decl)
diff --git a/gcc/cp/cp-gimplify.cc b/gcc/cp/cp-gimplify.cc
index 795c811..5abb91b 100644
--- a/gcc/cp/cp-gimplify.cc
+++ b/gcc/cp/cp-gimplify.cc
@@ -43,6 +43,21 @@ along with GCC; see the file COPYING3. If not see
#include "omp-general.h"
#include "opts.h"
+/* Keep track of forward references to immediate-escalating functions in
+ case they become consteval. This vector contains ADDR_EXPRs and
+ PTRMEM_CSTs; it also stores FUNCTION_DECLs that had an escalating
+ function call in them, to check that they can be evaluated to a constant,
+ and immediate-escalating functions that may become consteval. */
+static GTY(()) hash_set<tree> *deferred_escalating_exprs;
+
+static void
+remember_escalating_expr (tree t)
+{
+ if (!deferred_escalating_exprs)
+ deferred_escalating_exprs = hash_set<tree>::create_ggc (37);
+ deferred_escalating_exprs->add (t);
+}
+
/* Flags for cp_fold and cp_fold_r. */
enum fold_flags {
@@ -53,8 +68,6 @@ enum fold_flags {
definitely not in a manifestly constant-evaluated
context. */
ff_mce_false = 1 << 1,
- /* Whether we're being called from cp_fold_immediate. */
- ff_fold_immediate = 1 << 2,
};
using fold_flags_t = int;
@@ -72,6 +85,7 @@ static tree cp_genericize_r (tree *, int *, void *);
static tree cp_fold_r (tree *, int *, void *);
static void cp_genericize_tree (tree*, bool);
static tree cp_fold (tree, fold_flags_t);
+static tree cp_fold_immediate_r (tree *, int *, void *);
/* Genericize a TRY_BLOCK. */
@@ -428,6 +442,104 @@ lvalue_has_side_effects (tree e)
return TREE_SIDE_EFFECTS (e);
}
+/* Return true if FN is an immediate-escalating function. */
+
+static bool
+immediate_escalating_function_p (tree fn)
+{
+ if (!fn || !flag_immediate_escalation)
+ return false;
+
+ gcc_checking_assert (TREE_CODE (fn) == FUNCTION_DECL);
+
+ if (DECL_IMMEDIATE_FUNCTION_P (fn))
+ return false;
+
+ /* An immediate-escalating function is
+ -- the call operator of a lambda that is not declared with the consteval
+ specifier */
+ if (LAMBDA_FUNCTION_P (fn))
+ return true;
+ /* -- a defaulted special member function that is not declared with the
+ consteval specifier */
+ special_function_kind sfk = special_memfn_p (fn);
+ if (sfk != sfk_none && DECL_DEFAULTED_FN (fn))
+ return true;
+ /* -- a function that results from the instantiation of a templated entity
+ defined with the constexpr specifier. */
+ return is_instantiation_of_constexpr (fn);
+}
+
+/* Return true if FN is an immediate-escalating function that has not been
+   checked for escalating expressions.  */
+
+static bool
+unchecked_immediate_escalating_function_p (tree fn)
+{
+ return (immediate_escalating_function_p (fn)
+ && !DECL_ESCALATION_CHECKED_P (fn));
+}
+
+/* Promote FN to an immediate function, including its clones. */
+
+static void
+promote_function_to_consteval (tree fn)
+{
+ SET_DECL_IMMEDIATE_FUNCTION_P (fn);
+ DECL_ESCALATION_CHECKED_P (fn) = true;
+ tree clone;
+ FOR_EACH_CLONE (clone, fn)
+ {
+ SET_DECL_IMMEDIATE_FUNCTION_P (clone);
+ DECL_ESCALATION_CHECKED_P (clone) = true;
+ }
+}
+
+/* A wrapper around cp_fold_immediate_r. Return a non-null tree if
+ we found a non-constant immediate function, or taking the address
+ of an immediate function. */
+
+tree
+cp_fold_immediate (tree *tp, mce_value manifestly_const_eval,
+ tree decl /*= current_function_decl*/)
+{
+ if (cxx_dialect <= cxx17)
+ return NULL_TREE;
+
+ temp_override<tree> cfd (current_function_decl, decl);
+
+ fold_flags_t flags = ff_none;
+ if (manifestly_const_eval == mce_false)
+ flags |= ff_mce_false;
+
+ cp_fold_data data (flags);
+ int save_errorcount = errorcount;
+ tree r = cp_walk_tree_without_duplicates (tp, cp_fold_immediate_r, &data);
+ if (errorcount > save_errorcount)
+ return integer_one_node;
+ return r;
+}
+
+/* Maybe say that FN (a function decl with DECL_IMMEDIATE_FUNCTION_P set)
+ was initially not an immediate function, but was promoted to one because
+ its body contained an immediate-escalating expression or conversion. */
+
+static void
+maybe_explain_promoted_consteval (location_t loc, tree fn)
+{
+ if (DECL_ESCALATION_CHECKED_P (fn))
+ {
+ /* See if we can figure out what made the function consteval. */
+ tree x = cp_fold_immediate (&DECL_SAVED_TREE (fn), mce_unknown, NULL_TREE);
+ if (x)
+ inform (cp_expr_loc_or_loc (x, loc),
+ "%qD was promoted to an immediate function because its "
+ "body contains an immediate-escalating expression %qE", fn, x);
+ else
+ inform (loc, "%qD was promoted to an immediate function", fn);
+ }
+}
+
/* Gimplify *EXPR_P as rvalue into an expression that can't be modified
by expressions with side-effects in other operands. */
@@ -746,7 +858,9 @@ cp_gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
if (ret != GS_ERROR)
{
tree decl = cp_get_callee_fndecl_nofold (*expr_p);
- if (decl && fndecl_built_in_p (decl, BUILT_IN_FRONTEND))
+ if (!decl)
+ break;
+ if (fndecl_built_in_p (decl, BUILT_IN_FRONTEND))
switch (DECL_FE_FUNCTION_CODE (decl))
{
case CP_BUILT_IN_IS_CONSTANT_EVALUATED:
@@ -771,10 +885,12 @@ cp_gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
default:
break;
}
- else if (decl
- && fndecl_built_in_p (decl, BUILT_IN_CLZG, BUILT_IN_CTZG))
+ else if (fndecl_built_in_p (decl, BUILT_IN_CLZG, BUILT_IN_CTZG))
ret = (enum gimplify_status) c_gimplify_expr (expr_p, pre_p,
post_p);
+ else
+ /* All consteval functions should have been processed by now. */
+ gcc_checking_assert (!immediate_invocation_p (decl));
}
break;
@@ -1035,6 +1151,20 @@ struct cp_genericize_data
bool handle_invisiref_parm_p;
};
+/* Emit an error about taking the address of an immediate function.
+ EXPR is the whole expression; DECL is the immediate function. */
+
+static void
+taking_address_of_imm_fn_error (tree expr, tree decl)
+{
+ auto_diagnostic_group d;
+ const location_t loc = (TREE_CODE (expr) == PTRMEM_CST
+ ? PTRMEM_CST_LOCATION (expr)
+ : EXPR_LOCATION (expr));
+ error_at (loc, "taking address of an immediate function %qD", decl);
+ maybe_explain_promoted_consteval (loc, decl);
+}
+
/* A subroutine of cp_fold_r to handle immediate functions. */
static tree
@@ -1045,90 +1175,128 @@ cp_fold_immediate_r (tree *stmt_p, int *walk_subtrees, void *data_)
/* The purpose of this is not to emit errors for mce_unknown. */
const tsubst_flags_t complain = (data->flags & ff_mce_false
? tf_error : tf_none);
+ const tree_code code = TREE_CODE (stmt);
/* No need to look into types or unevaluated operands.
NB: This affects cp_fold_r as well. */
- if (TYPE_P (stmt) || unevaluated_p (TREE_CODE (stmt)))
+ if (TYPE_P (stmt) || unevaluated_p (code) || in_immediate_context ())
{
*walk_subtrees = 0;
return NULL_TREE;
}
- switch (TREE_CODE (stmt))
- {
- case PTRMEM_CST:
- if (TREE_CODE (PTRMEM_CST_MEMBER (stmt)) == FUNCTION_DECL
- && DECL_IMMEDIATE_FUNCTION_P (PTRMEM_CST_MEMBER (stmt)))
- {
- if (!data->pset.add (stmt) && (complain & tf_error))
- {
- error_at (PTRMEM_CST_LOCATION (stmt),
- "taking address of an immediate function %qD",
- PTRMEM_CST_MEMBER (stmt));
- *stmt_p = build_zero_cst (TREE_TYPE (stmt));
- }
- return error_mark_node;
- }
- break;
+ tree decl = NULL_TREE;
+ bool call_p = false;
- /* Expand immediate invocations. */
+ /* We are looking for &fn or fn(). */
+ switch (code)
+ {
case CALL_EXPR:
case AGGR_INIT_EXPR:
if (tree fn = cp_get_callee (stmt))
if (TREE_CODE (fn) != ADDR_EXPR || ADDR_EXPR_DENOTES_CALL_P (fn))
- if (tree fndecl = cp_get_fndecl_from_callee (fn, /*fold*/false))
- if (DECL_IMMEDIATE_FUNCTION_P (fndecl))
- {
- stmt = cxx_constant_value (stmt, complain);
- if (stmt == error_mark_node)
- {
- if (complain & tf_error)
- *stmt_p = error_mark_node;
- return error_mark_node;
- }
- *stmt_p = stmt;
- }
+ decl = cp_get_fndecl_from_callee (fn, /*fold*/false);
+ call_p = true;
+ break;
+ case PTRMEM_CST:
+ decl = PTRMEM_CST_MEMBER (stmt);
break;
-
case ADDR_EXPR:
- if (TREE_CODE (TREE_OPERAND (stmt, 0)) == FUNCTION_DECL
- && DECL_IMMEDIATE_FUNCTION_P (TREE_OPERAND (stmt, 0))
- && !ADDR_EXPR_DENOTES_CALL_P (stmt))
- {
- if (complain & tf_error)
- {
- error_at (EXPR_LOCATION (stmt),
- "taking address of an immediate function %qD",
- TREE_OPERAND (stmt, 0));
- *stmt_p = build_zero_cst (TREE_TYPE (stmt));
- }
- return error_mark_node;
- }
+ if (!ADDR_EXPR_DENOTES_CALL_P (stmt))
+ decl = TREE_OPERAND (stmt, 0);
break;
-
default:
- break;
+ return NULL_TREE;
}
- return NULL_TREE;
-}
+ if (!decl || TREE_CODE (decl) != FUNCTION_DECL)
+ return NULL_TREE;
-/* A wrapper around cp_fold_immediate_r. Return true if we found
- a non-constant immediate function, or taking the address of an
- immediate function. */
+ /* Fully escalate once all templates have been instantiated. What we're
+ calling is not a consteval function but it may become one. This
+ requires recursing; DECL may be promoted to consteval because it
+ contains an escalating expression E, but E itself may have to be
+ promoted first, etc. */
+ if (at_eof > 1 && unchecked_immediate_escalating_function_p (decl))
+ {
+ /* Set before the actual walk to avoid endless recursion. */
+ DECL_ESCALATION_CHECKED_P (decl) = true;
+ /* We're only looking for the first escalating expression. Let us not
+ walk more trees than necessary, hence mce_unknown. */
+ cp_fold_immediate (&DECL_SAVED_TREE (decl), mce_unknown, decl);
+ }
-bool
-cp_fold_immediate (tree *tp, mce_value manifestly_const_eval)
-{
- if (cxx_dialect <= cxx17)
- return false;
+ /* [expr.const]p16 "An expression or conversion is immediate-escalating if
+ it is not initially in an immediate function context and it is either
+ -- an immediate invocation that is not a constant expression and is not
+ a subexpression of an immediate invocation."
- fold_flags_t flags = ff_fold_immediate;
- if (manifestly_const_eval == mce_false)
- flags |= ff_mce_false;
+ If we are in an immediate-escalating function, the immediate-escalating
+ expression or conversion makes it an immediate function. So STMT does
+ not need to produce a constant expression. */
+ if (DECL_IMMEDIATE_FUNCTION_P (decl))
+ {
+ tree e = cxx_constant_value (stmt, tf_none);
+ if (e == error_mark_node)
+ {
+ /* This takes care of, e.g.,
+ template <typename T>
+ constexpr int f(T t)
+ {
+ return id(t);
+ }
+ where id (consteval) causes f<int> to be promoted. */
+ if (immediate_escalating_function_p (current_function_decl))
+ promote_function_to_consteval (current_function_decl);
+ else if (complain & tf_error)
+ {
+ if (call_p)
+ {
+ auto_diagnostic_group d;
+ location_t loc = cp_expr_loc_or_input_loc (stmt);
+ error_at (loc, "call to consteval function %qE is "
+ "not a constant expression", stmt);
+ /* Explain why it's not a constant expression. */
+ *stmt_p = cxx_constant_value (stmt, complain);
+ maybe_explain_promoted_consteval (loc, decl);
+ }
+ else if (!data->pset.add (stmt))
+ {
+ taking_address_of_imm_fn_error (stmt, decl);
+ *stmt_p = build_zero_cst (TREE_TYPE (stmt));
+ }
+ /* If we're giving hard errors, continue the walk rather than
+ bailing out after the first error. */
+ return NULL_TREE;
+ }
+ *walk_subtrees = 0;
+ return stmt;
+ }
+ /* We've evaluated the consteval function call. */
+ if (call_p)
+ *stmt_p = e;
+ }
+ /* We've encountered a function call that may turn out to be consteval
+ later. Store its caller so that we can ensure that the call is
+ a constant expression. */
+ else if (unchecked_immediate_escalating_function_p (decl))
+ {
+ /* Make sure we're not inserting new elements while walking
+ the deferred_escalating_exprs hash table; if we are, it's
+ likely that a function wasn't properly marked checked for
+ i-e expressions. */
+ gcc_checking_assert (at_eof <= 1);
+ if (current_function_decl)
+ remember_escalating_expr (current_function_decl);
+ /* auto p = &f<int>; in the global scope won't be ensconced in
+ a function we could store for later at this point. (If there's
+ no c_f_d at this point and we're dealing with a call, we should
+ see the call when cp_fold_function __static_i_and_d.) */
+ else if (!call_p)
+ remember_escalating_expr (stmt);
+ }
- cp_fold_data data (flags);
- return !!cp_walk_tree_without_duplicates (tp, cp_fold_immediate_r, &data);
+ return NULL_TREE;
}
/* Perform any pre-gimplification folding of C++ front end trees to
@@ -1178,11 +1346,19 @@ cp_fold_r (tree *stmt_p, int *walk_subtrees, void *data_)
*walk_subtrees = 0;
/* Don't return yet, still need the cp_fold below. */
}
- cp_fold_immediate_r (stmt_p, walk_subtrees, data);
+ else
+ cp_fold_immediate_r (stmt_p, walk_subtrees, data);
}
*stmt_p = stmt = cp_fold (*stmt_p, data->flags);
+ /* For certain trees, like +foo(), the cp_fold above will remove the +,
+ and the subsequent tree walk would go straight down to the CALL_EXPR's
+ operands, meaning that cp_fold_immediate_r would never see the
+ CALL_EXPR. Ew :(. */
+ if (TREE_CODE (stmt) == CALL_EXPR && code != CALL_EXPR)
+ cp_fold_immediate_r (stmt_p, walk_subtrees, data);
+
if (data->pset.add (stmt))
{
/* Don't walk subtrees of stmts we've already walked once, otherwise
@@ -1304,6 +1480,44 @@ cp_fold_function (tree fndecl)
pass ff_mce_false. */
cp_fold_data data (ff_genericize | ff_mce_false);
cp_walk_tree (&DECL_SAVED_TREE (fndecl), cp_fold_r, &data, NULL);
+
+ /* This is merely an optimization: if FNDECL has no i-e expressions,
+ we'll not save c_f_d, and we can safely say that FNDECL will not
+ be promoted to consteval. */
+ if (deferred_escalating_exprs
+ && !deferred_escalating_exprs->contains (current_function_decl))
+ DECL_ESCALATION_CHECKED_P (fndecl) = true;
+}
+
+/* We've stashed immediate-escalating functions. Now see if they indeed
+ ought to be promoted to consteval. */
+
+void
+process_and_check_pending_immediate_escalating_fns ()
+{
+ /* This will be null for -fno-immediate-escalation. */
+ if (!deferred_escalating_exprs)
+ return;
+
+ for (auto e : *deferred_escalating_exprs)
+ if (TREE_CODE (e) == FUNCTION_DECL && !DECL_ESCALATION_CHECKED_P (e))
+ cp_fold_immediate (&DECL_SAVED_TREE (e), mce_false, e);
+
+ /* We've escalated every function that could have been promoted to
+ consteval. Check that we are not taking the address of a consteval
+ function. */
+ for (auto e : *deferred_escalating_exprs)
+ {
+ if (TREE_CODE (e) == FUNCTION_DECL)
+ continue;
+ tree decl = (TREE_CODE (e) == PTRMEM_CST
+ ? PTRMEM_CST_MEMBER (e)
+ : TREE_OPERAND (e, 0));
+ if (DECL_IMMEDIATE_FUNCTION_P (decl))
+ taking_address_of_imm_fn_error (e, decl);
+ }
+
+ deferred_escalating_exprs = nullptr;
}
/* Turn SPACESHIP_EXPR EXPR into GENERIC. */
diff --git a/gcc/cp/cp-objcp-common.cc b/gcc/cp/cp-objcp-common.cc
index 70f9e4a..9439c4d 100644
--- a/gcc/cp/cp-objcp-common.cc
+++ b/gcc/cp/cp-objcp-common.cc
@@ -145,7 +145,7 @@ static constexpr cp_feature_info cp_feature_table[] =
{ "cxx_contextual_conversions", { cxx14, cxx98 } },
{ "cxx_decltype_auto", cxx14 },
{ "cxx_aggregate_nsdmi", cxx14 },
- { "cxx_init_captures", cxx14 },
+ { "cxx_init_captures", { cxx14, cxx11 } },
{ "cxx_generic_lambdas", cxx14 },
{ "cxx_relaxed_constexpr", cxx14 },
{ "cxx_return_type_deduction", cxx14 },
diff --git a/gcc/cp/cp-objcp-common.h b/gcc/cp/cp-objcp-common.h
index 5b175b0..b53d11e 100644
--- a/gcc/cp/cp-objcp-common.h
+++ b/gcc/cp/cp-objcp-common.h
@@ -123,13 +123,16 @@ extern tree cxx_simulate_record_decl (location_t, const char *,
#undef LANG_HOOKS_FINALIZE_EARLY_DEBUG
#define LANG_HOOKS_FINALIZE_EARLY_DEBUG c_common_finalize_early_debug
-/* Attribute hooks. */
-#undef LANG_HOOKS_COMMON_ATTRIBUTE_TABLE
-#define LANG_HOOKS_COMMON_ATTRIBUTE_TABLE c_common_attribute_table
-#undef LANG_HOOKS_FORMAT_ATTRIBUTE_TABLE
-#define LANG_HOOKS_FORMAT_ATTRIBUTE_TABLE c_common_format_attribute_table
+static const scoped_attribute_specs *const cp_objcp_attribute_table[] =
+{
+ &std_attribute_table,
+ &cxx_gnu_attribute_table,
+ &c_common_gnu_attribute_table,
+ &c_common_format_attribute_table
+};
+
#undef LANG_HOOKS_ATTRIBUTE_TABLE
-#define LANG_HOOKS_ATTRIBUTE_TABLE cxx_attribute_table
+#define LANG_HOOKS_ATTRIBUTE_TABLE cp_objcp_attribute_table
#undef LANG_HOOKS_TREE_INLINING_VAR_MOD_TYPE_P
#define LANG_HOOKS_TREE_INLINING_VAR_MOD_TYPE_P cp_var_mod_type_p
diff --git a/gcc/cp/cp-tree.def b/gcc/cp/cp-tree.def
index bf3bcd1..fe47b0a 100644
--- a/gcc/cp/cp-tree.def
+++ b/gcc/cp/cp-tree.def
@@ -446,8 +446,7 @@ DEFTREECODE (TRAIT_TYPE, "trait_type", tcc_type, 0)
LAMBDA_EXPR_CAPTURE_LIST holds the capture-list, including `this'.
LAMBDA_EXPR_THIS_CAPTURE goes straight to the capture of `this', if it exists.
LAMBDA_EXPR_PENDING_PROXIES is a vector of capture proxies which need to
- be pushed once scope returns to the lambda.
- LAMBDA_EXPR_MUTABLE_P signals whether this lambda was declared mutable. */
+ be pushed once scope returns to the lambda. */
DEFTREECODE (LAMBDA_EXPR, "lambda_expr", tcc_exceptional, 0)
/* The declared type of an expression. This is a C++0x extension.
diff --git a/gcc/cp/cp-tree.h b/gcc/cp/cp-tree.h
index 7b0b7c6..795152c 100644
--- a/gcc/cp/cp-tree.h
+++ b/gcc/cp/cp-tree.h
@@ -461,7 +461,6 @@ extern GTY(()) tree cp_global_trees[CPTI_MAX];
TYPENAME_IS_CLASS_P (in TYPENAME_TYPE)
STMT_IS_FULL_EXPR_P (in _STMT)
TARGET_EXPR_LIST_INIT_P (in TARGET_EXPR)
- LAMBDA_EXPR_MUTABLE_P (in LAMBDA_EXPR)
DECL_FINAL_P (in FUNCTION_DECL)
QUALIFIED_NAME_IS_TEMPLATE (in SCOPE_REF)
CONSTRUCTOR_IS_DEPENDENT (in CONSTRUCTOR)
@@ -523,6 +522,7 @@ extern GTY(()) tree cp_global_trees[CPTI_MAX];
RANGE_FOR_IVDEP (in RANGE_FOR_STMT)
CALL_EXPR_OPERATOR_SYNTAX (in CALL_EXPR, AGGR_INIT_EXPR)
CONSTRUCTOR_IS_DESIGNATED_INIT (in CONSTRUCTOR)
+ OVL_NAME_INDEPENDENT_DECL_P (in OVERLOAD)
Usage of TYPE_LANG_FLAG_?:
0: TYPE_DEPENDENT_P
@@ -815,6 +815,9 @@ typedef struct ptrmem_cst * ptrmem_cst_t;
#define OVL_LOOKUP_P(NODE) TREE_LANG_FLAG_4 (OVERLOAD_CHECK (NODE))
/* If set, this OVL_USING_P overload is exported. */
#define OVL_EXPORT_P(NODE) TREE_LANG_FLAG_5 (OVERLOAD_CHECK (NODE))
+/* If set, this overload includes name-independent declarations. */
+#define OVL_NAME_INDEPENDENT_DECL_P(NODE) \
+ TREE_LANG_FLAG_6 (OVERLOAD_CHECK (NODE))
/* The first decl of an overload. */
#define OVL_FIRST(NODE) ovl_first (NODE)
@@ -1478,10 +1481,6 @@ enum cp_lambda_default_capture_mode_type {
#define LAMBDA_EXPR_CAPTURES_THIS_P(NODE) \
LAMBDA_EXPR_THIS_CAPTURE(NODE)
-/* Predicate tracking whether the lambda was declared 'mutable'. */
-#define LAMBDA_EXPR_MUTABLE_P(NODE) \
- TREE_LANG_FLAG_1 (LAMBDA_EXPR_CHECK (NODE))
-
/* True iff uses of a const variable capture were optimized away. */
#define LAMBDA_EXPR_CAPTURE_OPTIMIZED(NODE) \
TREE_LANG_FLAG_2 (LAMBDA_EXPR_CHECK (NODE))
@@ -2947,8 +2946,9 @@ struct GTY(()) lang_decl_fn {
unsigned maybe_deleted : 1;
unsigned coroutine_p : 1;
unsigned implicit_constexpr : 1;
+ unsigned escalated_p : 1;
- unsigned spare : 9;
+ unsigned spare : 8;
/* 32-bits padding on 64-bit host. */
@@ -3400,6 +3400,14 @@ struct GTY(()) lang_decl {
#define DECL_MAYBE_DELETED(NODE) \
(LANG_DECL_FN_CHECK (NODE)->maybe_deleted)
+/* Nonzero for FUNCTION_DECL means that this function's body has been
+ checked for immediate-escalating expressions and maybe promoted. It
+ does *not* mean the function is consteval. It must not be set in
+ a function that was marked consteval by the user, so that we can
+ distinguish between explicitly consteval functions and promoted consteval
+ functions. */
+#define DECL_ESCALATION_CHECKED_P(NODE) (LANG_DECL_FN_CHECK (NODE)->escalated_p)
+
/* True (in a FUNCTION_DECL) if NODE is a virtual function that is an
invalid overrider for a function from a base class. Once we have
complained about an invalid overrider we avoid complaining about it
@@ -3800,6 +3808,12 @@ struct GTY(()) lang_decl {
: TREE_VEC_LENGTH (INNERMOST_TEMPLATE_ARGS (NODE))
#endif
+/* True iff NODE represents the template args for a type-constraint,
+ in which case the first one represents the constrained type.
+ Currently only set during mangling. */
+#define TEMPLATE_ARGS_TYPE_CONSTRAINT_P(NODE) \
+ TREE_PRIVATE (TREE_VEC_CHECK (NODE))
+
/* The list of access checks that were deferred during parsing
which need to be performed at template instantiation time.
@@ -5877,7 +5891,8 @@ extern GTY(()) vec<tree, va_gc> *keyed_classes;
/* Nonzero if we're done parsing and into end-of-file activities.
- Two if we're done with front-end processing. */
+ 2 if all templates have been instantiated.
+ 3 if we're done with front-end processing. */
extern int at_eof;
@@ -6769,6 +6784,7 @@ extern tree perform_direct_initialization_if_possible (tree, tree, bool,
extern vec<tree,va_gc> *resolve_args (vec<tree,va_gc>*, tsubst_flags_t);
extern tree in_charge_arg_for_name (tree);
extern bool in_immediate_context ();
+extern bool immediate_invocation_p (tree);
extern tree build_cxx_call (tree, int, tree *,
tsubst_flags_t,
tree = NULL_TREE);
@@ -6979,6 +6995,7 @@ extern tree lookup_enumerator (tree, tree);
extern bool start_preparsed_function (tree, tree, int);
extern bool start_function (cp_decl_specifier_seq *,
const cp_declarator *, tree);
+extern tree maybe_prepare_return_this (tree);
extern void maybe_return_this (void);
extern tree begin_function_body (void);
extern void finish_function_body (tree);
@@ -7371,7 +7388,7 @@ extern bool maybe_clone_body (tree);
/* In parser.cc */
extern tree cp_convert_range_for (tree, tree, tree, cp_decomp *, bool,
- unsigned short, bool);
+ tree, bool);
extern void cp_convert_omp_range_for (tree &, tree &, tree &,
tree &, tree &, tree &, tree &, tree &);
extern void cp_finish_omp_range_for (tree, tree);
@@ -7692,19 +7709,16 @@ extern void begin_else_clause (tree);
extern void finish_else_clause (tree);
extern void finish_if_stmt (tree);
extern tree begin_while_stmt (void);
-extern void finish_while_stmt_cond (tree, tree, bool, unsigned short,
- bool);
+extern void finish_while_stmt_cond (tree, tree, bool, tree, bool);
extern void finish_while_stmt (tree);
extern tree begin_do_stmt (void);
extern void finish_do_body (tree);
-extern void finish_do_stmt (tree, tree, bool, unsigned short,
- bool);
+extern void finish_do_stmt (tree, tree, bool, tree, bool);
extern tree finish_return_stmt (tree);
extern tree begin_for_scope (tree *);
extern tree begin_for_stmt (tree, tree);
extern void finish_init_stmt (tree);
-extern void finish_for_cond (tree, tree, bool, unsigned short,
- bool);
+extern void finish_for_cond (tree, tree, bool, tree, bool);
extern void finish_for_expr (tree, tree);
extern void finish_for_stmt (tree);
extern tree begin_range_for_stmt (tree, tree);
@@ -7863,7 +7877,7 @@ extern tree lambda_capture_field_type (tree, bool, bool);
extern tree lambda_proxy_type (tree);
extern tree lambda_function (tree);
extern void apply_deduced_return_type (tree, tree);
-extern tree add_capture (tree, tree, tree, bool, bool);
+extern tree add_capture (tree, tree, tree, bool, bool, unsigned *);
extern tree add_default_capture (tree, tree, tree);
extern void insert_capture_proxy (tree);
extern void insert_pending_capture_proxies (void);
@@ -7904,6 +7918,7 @@ extern tree most_general_lambda (tree);
extern tree finish_omp_target (location_t, tree, tree, bool);
extern void finish_omp_target_clauses (location_t, tree, tree *);
extern void maybe_warn_unparenthesized_assignment (tree, tsubst_flags_t);
+extern tree cp_check_pragma_unroll (location_t, tree);
/* in tree.cc */
extern int cp_tree_operand_length (const_tree);
@@ -8028,7 +8043,8 @@ extern tree maybe_dummy_object (tree, tree *);
extern bool is_dummy_object (const_tree);
extern bool is_byte_access_type (tree);
extern bool is_byte_access_type_not_plain_char (tree);
-extern const struct attribute_spec cxx_attribute_table[];
+extern const struct scoped_attribute_specs cxx_gnu_attribute_table;
+extern const struct scoped_attribute_specs std_attribute_table;
extern tree make_ptrmem_cst (tree, tree);
extern tree cp_build_type_attribute_variant (tree, tree);
extern tree cp_build_reference_type (tree, bool);
@@ -8411,7 +8427,9 @@ extern tree process_stmt_assume_attribute (tree, tree, location_t);
extern bool simple_empty_class_p (tree, tree, tree_code);
extern tree fold_builtin_source_location (const_tree);
extern tree get_source_location_impl_type ();
-extern bool cp_fold_immediate (tree *, mce_value);
+extern tree cp_fold_immediate (tree *, mce_value,
+ tree = current_function_decl);
+extern void process_and_check_pending_immediate_escalating_fns ();
/* in name-lookup.cc */
extern tree strip_using_decl (tree);
@@ -8509,6 +8527,7 @@ struct processing_constraint_expression_sentinel
extern bool processing_constraint_expression_p ();
extern tree unpack_concept_check (tree);
+extern tree get_concept_check_template (tree);
extern tree evaluate_concept_check (tree);
extern bool constraints_satisfied_p (tree, tree = NULL_TREE);
extern bool* lookup_subsumption_result (tree, tree);
@@ -8929,6 +8948,18 @@ extended_float_type_p (tree type)
return false;
}
+/* True if DECL is name-independent declaration. */
+
+inline bool
+name_independent_decl_p (tree decl)
+{
+ return ((VAR_P (decl) || TREE_CODE (decl) == FIELD_DECL)
+ && DECL_NAME (decl)
+ && id_equal (DECL_NAME (decl), "_")
+ && !TREE_STATIC (decl)
+ && !DECL_EXTERNAL (decl));
+}
+
#if CHECKING_P
namespace selftest {
extern void run_cp_tests (void);
diff --git a/gcc/cp/decl.cc b/gcc/cp/decl.cc
index e269f68..4b68527 100644
--- a/gcc/cp/decl.cc
+++ b/gcc/cp/decl.cc
@@ -680,6 +680,8 @@ poplevel (int keep, int reverse, int functionbody)
subobjects. */
&& (DECL_DECOMPOSITION_P (decl) ? !DECL_DECOMP_BASE (decl)
: (DECL_NAME (decl) && !DECL_ARTIFICIAL (decl)))
+ /* Don't warn about name-independent declarations. */
+ && !name_independent_decl_p (decl)
&& type != error_mark_node
&& (!CLASS_TYPE_P (type)
|| !TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type)
@@ -2063,6 +2065,44 @@ duplicate_decls (tree newdecl, tree olddecl, bool hiding, bool was_hidden)
(DECL_INITIAL (olddecl) && namespace_bindings_p ())
? G_("%q#D previously defined here")
: G_("%q#D previously declared here"), olddecl);
+ if (cxx_dialect >= cxx26
+ && DECL_NAME (newdecl)
+ && id_equal (DECL_NAME (newdecl), "_")
+ && !name_independent_decl_p (newdecl))
+ {
+ if (TREE_CODE (newdecl) == PARM_DECL)
+ inform (newdecl_loc,
+ "parameter declaration is not name-independent");
+ else if (DECL_DECOMPOSITION_P (newdecl))
+ {
+ if (at_namespace_scope_p ())
+ inform (newdecl_loc,
+ "structured binding at namespace scope is not "
+ "name-independent");
+ else if (TREE_STATIC (newdecl))
+ inform (newdecl_loc,
+ "static structured binding is not "
+ "name-independent");
+ else if (DECL_EXTERNAL (newdecl))
+ inform (newdecl_loc,
+ "extern structured binding is not "
+ "name-independent");
+ }
+ else if (at_class_scope_p ()
+ && VAR_P (newdecl)
+ && TREE_STATIC (newdecl))
+ inform (newdecl_loc,
+ "static data member is not name-independent");
+ else if (VAR_P (newdecl) && at_namespace_scope_p ())
+ inform (newdecl_loc,
+ "variable at namespace scope is not name-independent");
+ else if (VAR_P (newdecl) && TREE_STATIC (newdecl))
+ inform (newdecl_loc,
+ "static variable is not name-independent");
+ else if (VAR_P (newdecl) && DECL_EXTERNAL (newdecl))
+ inform (newdecl_loc,
+ "extern variable is not name-independent");
+ }
return error_mark_node;
}
else if (TREE_CODE (olddecl) == FUNCTION_DECL
@@ -6869,8 +6909,17 @@ reshape_init_class (tree type, reshape_iter *d, bool first_initializer_p,
if (!field || TREE_CODE (field) != FIELD_DECL)
{
if (complain & tf_error)
- error ("%qT has no non-static data member named %qD", type,
- d->cur->index);
+ {
+ if (field && TREE_CODE (field) == TREE_LIST)
+ {
+ error ("request for member %qD is ambiguous",
+ d->cur->index);
+ print_candidates (field);
+ }
+ else
+ error ("%qT has no non-static data member named %qD", type,
+ d->cur->index);
+ }
return error_mark_node;
}
@@ -16913,6 +16962,12 @@ finish_enum_value_list (tree enumtype)
/* If -fstrict-enums, still constrain TYPE_MIN/MAX_VALUE. */
if (flag_strict_enums)
set_min_and_max_values_for_integral_type (enumtype, precision, sgn);
+
+ if (use_short_enum)
+ {
+ TYPE_PACKED (enumtype) = use_short_enum;
+ fixup_attribute_variants (enumtype);
+ }
}
else
underlying_type = ENUM_UNDERLYING_TYPE (enumtype);
@@ -17926,16 +17981,31 @@ store_parm_decls (tree current_function_parms)
}
+/* Mark CDTOR's implicit THIS argument for returning, if required by
+ the ABI.. Return the decl for THIS, if it is to be returned, and
+ NULL otherwise. */
+
+tree
+maybe_prepare_return_this (tree cdtor)
+{
+ if (targetm.cxx.cdtor_returns_this ())
+ if (tree val = DECL_ARGUMENTS (cdtor))
+ {
+ suppress_warning (val, OPT_Wuse_after_free);
+ return val;
+ }
+
+ return NULL_TREE;
+}
+
/* Set the return value of the [cd]tor if the ABI wants that. */
void
-maybe_return_this (void)
+maybe_return_this ()
{
- if (targetm.cxx.cdtor_returns_this ())
+ if (tree val = maybe_prepare_return_this (current_function_decl))
{
/* Return the address of the object. */
- tree val = DECL_ARGUMENTS (current_function_decl);
- suppress_warning (val, OPT_Wuse_after_free);
val = fold_convert (TREE_TYPE (DECL_RESULT (current_function_decl)), val);
val = build2 (MODIFY_EXPR, TREE_TYPE (val),
DECL_RESULT (current_function_decl), val);
diff --git a/gcc/cp/decl2.cc b/gcc/cp/decl2.cc
index 9e666e5..bee8487 100644
--- a/gcc/cp/decl2.cc
+++ b/gcc/cp/decl2.cc
@@ -169,7 +169,9 @@ typedef hash_map<unsigned/*Priority*/, tree/*List*/,
one for init. The fini table is only ever used when !cxa_atexit. */
static GTY(()) priority_map_t *static_init_fini_fns[2];
-/* Nonzero if we're done parsing and into end-of-file activities. */
+/* Nonzero if we're done parsing and into end-of-file activities.
+ 2 if all templates have been instantiated.
+ 3 if we're done with front-end processing. */
int at_eof;
@@ -4987,6 +4989,7 @@ c_parse_final_cleanups (void)
tree decl;
locus_at_end_of_parsing = input_location;
+ /* We're done parsing. */
at_eof = 1;
/* Bad parse errors. Just forget about it. */
@@ -5252,6 +5255,9 @@ c_parse_final_cleanups (void)
reconsider = true;
}
+ /* All templates have been instantiated. */
+ at_eof = 2;
+
void *module_cookie = finish_module_processing (parse_in);
lower_var_init ();
@@ -5294,7 +5300,11 @@ c_parse_final_cleanups (void)
if (static_init_fini_fns[true])
for (auto iter : *static_init_fini_fns[true])
iter.second = nreverse (iter.second);
-
+
+ /* Now we've instantiated all templates. Now we can escalate the functions
+ we squirreled away earlier. */
+ process_and_check_pending_immediate_escalating_fns ();
+
/* Then, do the Objective-C stuff. This is where all the
Objective-C module stuff gets generated (symtab,
class/protocol/selector lists etc). This must be done after C++
@@ -5376,7 +5386,7 @@ c_parse_final_cleanups (void)
timevar_start (TV_PHASE_PARSING);
/* Indicate that we're done with front end processing. */
- at_eof = 2;
+ at_eof = 3;
}
/* Perform any post compilation-proper cleanups for the C++ front-end.
diff --git a/gcc/cp/error.cc b/gcc/cp/error.cc
index 785909c..3b1b5de 100644
--- a/gcc/cp/error.cc
+++ b/gcc/cp/error.cc
@@ -478,7 +478,7 @@ dump_template_bindings (cxx_pretty_printer *pp, tree parms, tree args,
/* Don't try to do this once cgraph starts throwing away front-end
information. */
- if (at_eof >= 2)
+ if (at_eof >= 3)
return;
FOR_EACH_VEC_SAFE_ELT (typenames, i, t)
diff --git a/gcc/cp/lambda.cc b/gcc/cp/lambda.cc
index 34d0190..5990a6de 100644
--- a/gcc/cp/lambda.cc
+++ b/gcc/cp/lambda.cc
@@ -44,7 +44,6 @@ build_lambda_expr (void)
LAMBDA_EXPR_THIS_CAPTURE (lambda) = NULL_TREE;
LAMBDA_EXPR_REGEN_INFO (lambda) = NULL_TREE;
LAMBDA_EXPR_PENDING_PROXIES (lambda) = NULL;
- LAMBDA_EXPR_MUTABLE_P (lambda) = false;
return lambda;
}
@@ -412,7 +411,11 @@ build_capture_proxy (tree member, tree init)
object = TREE_OPERAND (object, 0);
/* Remove the __ inserted by add_capture. */
- name = get_identifier (IDENTIFIER_POINTER (DECL_NAME (member)) + 2);
+ if (IDENTIFIER_POINTER (DECL_NAME (member))[2] == '_'
+ && IDENTIFIER_POINTER (DECL_NAME (member))[3] == '.')
+ name = get_identifier ("_");
+ else
+ name = get_identifier (IDENTIFIER_POINTER (DECL_NAME (member)) + 2);
type = lambda_proxy_type (object);
@@ -516,7 +519,7 @@ vla_capture_type (tree array_type)
tree
add_capture (tree lambda, tree id, tree orig_init, bool by_reference_p,
- bool explicit_init_p)
+ bool explicit_init_p, unsigned *name_independent_cnt)
{
char *buf;
tree type, member, name;
@@ -610,11 +613,28 @@ add_capture (tree lambda, tree id, tree orig_init, bool by_reference_p,
won't find the field with name lookup. We can't just leave the name
unset because template instantiation uses the name to find
instantiated fields. */
- buf = (char *) alloca (IDENTIFIER_LENGTH (id) + 3);
- buf[1] = buf[0] = '_';
- memcpy (buf + 2, IDENTIFIER_POINTER (id),
- IDENTIFIER_LENGTH (id) + 1);
- name = get_identifier (buf);
+ if (id_equal (id, "_") && name_independent_cnt)
+ {
+ if (*name_independent_cnt == 0)
+ name = get_identifier ("___");
+ else
+ {
+ /* For 2nd and later name-independent capture use
+ unique names. */
+ char buf2[5 + (HOST_BITS_PER_INT + 2) / 3];
+ sprintf (buf2, "___.%u", *name_independent_cnt);
+ name = get_identifier (buf2);
+ }
+ name_independent_cnt[0]++;
+ }
+ else
+ {
+ buf = XALLOCAVEC (char, IDENTIFIER_LENGTH (id) + 3);
+ buf[1] = buf[0] = '_';
+ memcpy (buf + 2, IDENTIFIER_POINTER (id),
+ IDENTIFIER_LENGTH (id) + 1);
+ name = get_identifier (buf);
+ }
if (variadic)
{
@@ -718,7 +738,7 @@ add_default_capture (tree lambda_stack, tree id, tree initializer)
(this_capture_p
|| (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda)
== CPLD_REFERENCE)),
- /*explicit_init_p=*/false);
+ /*explicit_init_p=*/false, NULL);
initializer = convert_from_reference (var);
/* Warn about deprecated implicit capture of this via [=]. */
diff --git a/gcc/cp/mangle.cc b/gcc/cp/mangle.cc
index 5137305..0684f0e 100644
--- a/gcc/cp/mangle.cc
+++ b/gcc/cp/mangle.cc
@@ -221,7 +221,7 @@ static void write_function_type (const tree);
static void write_bare_function_type (const tree, const int, const tree);
static void write_method_parms (tree, const int, const tree);
static void write_class_enum_type (const tree);
-static void write_template_args (tree);
+static void write_template_args (tree, tree = NULL_TREE);
static void write_expression (tree);
static void write_template_arg_literal (const tree);
static void write_template_arg (tree);
@@ -842,6 +842,70 @@ mangle_return_type_p (tree decl)
&& maybe_template_info (decl));
}
+/* <constraint-expression> ::= <expression> */
+
+static void
+write_constraint_expression (tree expr)
+{
+ write_expression (expr);
+}
+
+/* Mangle a requires-clause following a template-head, if any.
+
+ Q <constraint_expression> E */
+
+static void
+write_tparms_constraints (tree constraints)
+{
+ /* In a declaration with shorthand constraints in the template-head, followed
+ by a requires-clause, followed by shorthand constraints in the
+ function-parameter-list, the full constraints will be some && with the
+ parameter constraints on the RHS, around an && with the requires-clause on
+ the RHS. Find the requires-clause, if any.
+
+ This logic relies on the && and ... from combine_constraint_expressions,
+ finish_shorthand_constraint, and convert_generic_types_to_packs having
+ UNKNOWN_LOCATION. If they need to have an actual location, we could move
+ to using a TREE_LANG_FLAG. */
+ if (constraints && abi_check (19))
+ {
+ tree probe = constraints;
+ while (probe
+ && !EXPR_LOCATION (probe)
+ && TREE_CODE (probe) == TRUTH_ANDIF_EXPR)
+ {
+ tree op1 = TREE_OPERAND (probe, 1);
+ probe = (EXPR_LOCATION (op1) ? op1
+ : TREE_OPERAND (probe, 0));
+ }
+ if (probe && EXPR_LOCATION (probe))
+ {
+ write_char ('Q');
+ write_constraint_expression (probe);
+ }
+ }
+}
+
+/* <type-constraint> ::= <name> */
+
+static void
+write_type_constraint (tree cnst)
+{
+ if (!cnst) return;
+
+ cnst = unpack_concept_check (cnst);
+ gcc_checking_assert (TREE_CODE (cnst) == TEMPLATE_ID_EXPR);
+
+ tree concept_decl = get_concept_check_template (cnst);
+ write_name (concept_decl, 0);
+ tree args = TREE_OPERAND (cnst, 1);
+ if (TREE_VEC_LENGTH (args) > 1)
+ {
+ TEMPLATE_ARGS_TYPE_CONSTRAINT_P (args) = true;
+ write_template_args (args);
+ }
+}
+
/* <encoding> ::= <function name> <bare-function-type>
::= <data name> */
@@ -886,6 +950,14 @@ write_encoding (const tree decl)
mangle_return_type_p (decl),
d);
+ if (tree c = get_trailing_function_requirements (decl))
+ if (abi_check (19))
+ {
+ ++G.parm_depth;
+ write_char ('Q');
+ write_constraint_expression (c);
+ --G.parm_depth;
+ }
}
}
@@ -1037,7 +1109,13 @@ write_name (tree decl, const int ignore_local_scope)
{
/* Yes: use <unscoped-template-name>. */
write_unscoped_template_name (TI_TEMPLATE (info));
- write_template_args (TI_ARGS (info));
+ /* Pass down the parms of a function template in case we need to
+ mangle them; we don't mangle the parms of a non-overloadable
+ template. */
+ tree parms = (TREE_CODE (decl) == FUNCTION_DECL
+ ? DECL_TEMPLATE_PARMS (TI_TEMPLATE (info))
+ : NULL_TREE);
+ write_template_args (TI_ARGS (info), parms);
}
else
/* Everything else gets an <unqualified-name>. */
@@ -1722,10 +1800,136 @@ write_unnamed_type_name (const tree type)
write_compact_number (discriminator);
}
+/* ABI issue #47: if a function template parameter is not "natural" for its
+ argument we must mangle the parameter. */
+
+static bool
+template_parm_natural_p (tree arg, tree parm)
+{
+ tree decl = TREE_VALUE (parm);
+
+ /* A template parameter is "natural" if: */
+
+ if (template_parameter_pack_p (decl))
+ {
+ tree args = ARGUMENT_PACK_ARGS (arg);
+ if (TREE_VEC_LENGTH (args) == 0)
+ {
+#if 0
+ /* the argument is an empty pack and the parameter is an
+ unconstrained template type parameter pack; */
+ if (TREE_CODE (decl) != TYPE_DECL)
+ return false;
+#else
+ /* Defer changing the mangling of C++11 code like
+ template <int i> int max();
+ template <int i, int j, int... rest> int max(); */
+ return true;
+#endif
+ }
+ else
+ /* the argument is a non-empty pack and a non-pack variant of the
+ parameter would be natural for the first element of the pack; */
+ arg = TREE_VEC_ELT (args, 0);
+ }
+
+ /* the argument is a template and the parameter has the exact
+ same template head; */
+ if (TREE_CODE (decl) == TEMPLATE_DECL)
+ return template_heads_equivalent_p (arg, decl);
+
+ /* the argument is a type and the parameter is unconstrained; or */
+ else if (TREE_CODE (decl) == TYPE_DECL)
+ return !TEMPLATE_PARM_CONSTRAINTS (parm);
+
+ /* the argument is a non-type template argument and the declared parameter
+ type neither is instantiation dependent nor contains deduced types. */
+ else if (TREE_CODE (decl) == PARM_DECL)
+ {
+#if 0
+ return !uses_template_parms (TREE_TYPE (decl));
+#else
+ /* Defer changing the mangling of C++98 code like
+ template <class T, T V> .... */
+ return !type_uses_auto (TREE_TYPE (decl));
+#endif
+ }
+
+ gcc_unreachable ();
+}
+
+/* Used for lambda template head and non-natural function template parameters.
+
+ <template-param-decl> ::= Ty # template type parameter
+ ::= Tk <type-constraint> # constrained type parameter
+ ::= Tn <type> # template non-type parameter
+ ::= Tt <template-param-decl>* [Q <constraint-expression] E # ttp
+ ::= Tp <non-pack template-param-decl> # template parameter pack */
+
+static void
+write_template_param_decl (tree parm)
+{
+ tree decl = TREE_VALUE (parm);
+
+ if (template_parameter_pack_p (decl))
+ write_string ("Tp");
+
+ switch (TREE_CODE (decl))
+ {
+ case PARM_DECL:
+ {
+ write_string ("Tn");
+
+ tree type = TREE_TYPE (decl);
+ if (tree c = (is_auto (type)
+ ? PLACEHOLDER_TYPE_CONSTRAINTS (type)
+ : NULL_TREE))
+ {
+ if (AUTO_IS_DECLTYPE (type))
+ write_string ("DK");
+ else
+ write_string ("Dk");
+ write_type_constraint (c);
+ }
+ else
+ write_type (type);
+ }
+ break;
+
+ case TEMPLATE_DECL:
+ {
+ write_string ("Tt");
+ tree parms = DECL_INNERMOST_TEMPLATE_PARMS (decl);
+ for (tree node : tree_vec_range (parms))
+ write_template_param_decl (node);
+ write_char ('E');
+ }
+ break;
+
+ case TYPE_DECL:
+ if (tree c = TEMPLATE_PARM_CONSTRAINTS (parm))
+ {
+ if (TREE_CODE (c) == UNARY_LEFT_FOLD_EXPR)
+ {
+ c = FOLD_EXPR_PACK (c);
+ c = PACK_EXPANSION_PATTERN (c);
+ }
+ if (TREE_CODE (decl) == TYPE_DECL)
+ {
+ write_string ("Tk");
+ write_type_constraint (c);
+ }
+ }
+ else
+ write_string ("Ty");
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
// A template head, for templated lambdas.
-// <template-head> ::= Tp* Ty
-// Tp* Tn <type>
-// Tp* Tt <template-head> E
// New in ABI=18. Returns true iff we emitted anything -- used for ABI
// version warning.
@@ -1735,50 +1939,26 @@ write_closure_template_head (tree tmpl)
bool any = false;
// We only need one level of template parms
- tree inner = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (tmpl));
+ tree parms = DECL_TEMPLATE_PARMS (tmpl);
+ tree inner = INNERMOST_TEMPLATE_PARMS (parms);
for (int ix = 0, len = TREE_VEC_LENGTH (inner); ix != len; ix++)
{
tree parm = TREE_VEC_ELT (inner, ix);
if (parm == error_mark_node)
continue;
- parm = TREE_VALUE (parm);
- if (DECL_IMPLICIT_TEMPLATE_PARM_P (parm))
+ if (DECL_IMPLICIT_TEMPLATE_PARM_P (TREE_VALUE (parm)))
// A synthetic parm, we're done.
break;
any = true;
if (abi_version_at_least (18))
- {
- if (TREE_CODE (parm) == PARM_DECL
- ? TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm))
- : TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (parm)))
- write_string ("Tp");
-
- switch (TREE_CODE (parm))
- {
- default:
- gcc_unreachable ();
-
- case TYPE_DECL:
- write_string ("Ty");
- break;
-
- case PARM_DECL:
- write_string ("Tn");
- write_type (TREE_TYPE (parm));
- break;
-
- case TEMPLATE_DECL:
- write_string ("Tt");
- write_closure_template_head (parm);
- write_string ("E");
- break;
- }
- }
+ write_template_param_decl (parm);
}
+ write_tparms_constraints (TEMPLATE_PARMS_CONSTRAINTS (parms));
+
return any;
}
@@ -2359,6 +2539,16 @@ write_type (tree type)
case TEMPLATE_TYPE_PARM:
if (is_auto (type))
{
+ if (template_placeholder_p (type)
+ && abi_check (19))
+ {
+ /* ABI #109: placeholder is mangled as its template. */
+ type = CLASS_PLACEHOLDER_TEMPLATE (type);
+ if (find_substitution (type))
+ return;
+ write_name (type, 0);
+ break;
+ }
if (AUTO_IS_DECLTYPE (type))
write_identifier ("Dc");
else
@@ -2893,13 +3083,84 @@ write_class_enum_type (const tree type)
write_name (TYPE_NAME (type), /*ignore_local_scope=*/0);
}
+/* Mangle a requirement REQ in a requires-expression. */
+
+static void
+write_requirement (tree req)
+{
+ tree op = TREE_OPERAND (req, 0);
+
+ switch (tree_code code = TREE_CODE (req))
+ {
+ /* # simple-requirement or compound-requirement
+ <requirement> ::= X <expression> [ N ] [ R <type-constraint> ] */
+ case SIMPLE_REQ:
+ case COMPOUND_REQ:
+ write_char ('X');
+ write_expression (op);
+ if (code == SIMPLE_REQ)
+ break;
+ if (COMPOUND_REQ_NOEXCEPT_P (req))
+ write_char ('N');
+ if (tree constr = TREE_OPERAND (req, 1))
+ {
+ write_char ('R');
+ write_type_constraint (PLACEHOLDER_TYPE_CONSTRAINTS (constr));
+ }
+ break;
+
+ /* <requirement> ::= T <type> # type-requirement */
+ case TYPE_REQ:
+ write_char ('T');
+ write_type (op);
+ break;
+
+ /* <requirement> ::= Q <constraint-expression> # nested-requirement */
+ case NESTED_REQ:
+ write_char ('Q');
+ write_constraint_expression (op);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* # requires { ... }
+ <expression> ::= rq <requirement>+ E
+ # requires (...) { ... }
+ <expression> ::= rQ <bare-function-type> _ <requirement>+ E */
+
+static void
+write_requires_expr (tree expr)
+{
+ tree parms = REQUIRES_EXPR_PARMS (expr);
+ if (parms)
+ {
+ write_string ("rQ");
+ ++G.parm_depth;
+ for (; parms; parms = DECL_CHAIN (parms))
+ write_type (cv_unqualified (TREE_TYPE (parms)));
+ --G.parm_depth;
+ write_char ('_');
+ }
+ else
+ write_string ("rq");
+
+ for (tree reqs = REQUIRES_EXPR_REQS (expr); reqs;
+ reqs = TREE_CHAIN (reqs))
+ write_requirement (TREE_VALUE (reqs));
+
+ write_char ('E');
+}
+
/* Non-terminal <template-args>. ARGS is a TREE_VEC of template
arguments.
- <template-args> ::= I <template-arg>* E */
+ <template-args> ::= I <template-arg>* [Q <constraint-expr>] E */
static void
-write_template_args (tree args)
+write_template_args (tree args, tree parms /*= NULL_TREE*/)
{
int i;
int length = 0;
@@ -2911,6 +3172,13 @@ write_template_args (tree args)
if (args)
length = TREE_VEC_LENGTH (args);
+ tree constraints = NULL_TREE;
+ if (parms)
+ {
+ constraints = TEMPLATE_PARMS_CONSTRAINTS (parms);
+ parms = INNERMOST_TEMPLATE_PARMS (parms);
+ }
+
if (args && length && TREE_CODE (TREE_VEC_ELT (args, 0)) == TREE_VEC)
{
/* We have nested template args. We want the innermost template
@@ -2918,8 +3186,38 @@ write_template_args (tree args)
args = TREE_VEC_ELT (args, length - 1);
length = TREE_VEC_LENGTH (args);
}
- for (i = 0; i < length; ++i)
- write_template_arg (TREE_VEC_ELT (args, i));
+ if (TEMPLATE_ARGS_TYPE_CONSTRAINT_P (args))
+ /* Skip the constrained type. */
+ i = 1;
+ else
+ i = 0;
+ bool implicit_parm_scope = false;
+ for (; i < length; ++i)
+ {
+ tree arg = TREE_VEC_ELT (args, i);
+ if (parms)
+ {
+ tree parm = TREE_VEC_ELT (parms, i);
+ tree decl = TREE_VALUE (parm);
+ if (DECL_IMPLICIT_TEMPLATE_PARM_P (decl)
+ && !implicit_parm_scope)
+ {
+ /* The rest of the template parameters are based on generic
+ function parameters, so any expressions in their
+ type-constraints are in parameter scope. */
+ implicit_parm_scope = true;
+ ++G.parm_depth;
+ }
+ if (!template_parm_natural_p (arg, parm)
+ && abi_check (19))
+ write_template_param_decl (parm);
+ }
+ write_template_arg (arg);
+ }
+ if (implicit_parm_scope)
+ --G.parm_depth;
+
+ write_tparms_constraints (constraints);
write_char ('E');
}
@@ -3107,6 +3405,7 @@ write_expression (tree expr)
write_char ('f');
if (delta != 0)
{
+ gcc_checking_assert (delta > 0);
if (abi_check (5))
{
/* Let L be the number of function prototype scopes from the
@@ -3431,6 +3730,8 @@ write_expression (tree expr)
write_type (LAMBDA_EXPR_CLOSURE (expr));
write_char ('E');
}
+ else if (code == REQUIRES_EXPR)
+ write_requires_expr (expr);
else if (dependent_name (expr))
{
tree name = dependent_name (expr);
diff --git a/gcc/cp/module.cc b/gcc/cp/module.cc
index 33fcf39..1b57fbe 100644
--- a/gcc/cp/module.cc
+++ b/gcc/cp/module.cc
@@ -5683,6 +5683,8 @@ trees_out::lang_decl_bools (tree t)
WB (lang->u.fn.has_dependent_explicit_spec_p);
WB (lang->u.fn.immediate_fn_p);
WB (lang->u.fn.maybe_deleted);
+ WB (lang->u.fn.escalated_p);
+ /* We do not stream lang->u.fn.implicit_constexpr. */
goto lds_min;
case lds_decomp: /* lang_decl_decomp. */
@@ -5751,6 +5753,8 @@ trees_in::lang_decl_bools (tree t)
RB (lang->u.fn.has_dependent_explicit_spec_p);
RB (lang->u.fn.immediate_fn_p);
RB (lang->u.fn.maybe_deleted);
+ RB (lang->u.fn.escalated_p);
+ /* We do not stream lang->u.fn.implicit_constexpr. */
goto lds_min;
case lds_decomp: /* lang_decl_decomp. */
diff --git a/gcc/cp/name-lookup.cc b/gcc/cp/name-lookup.cc
index d19ea5d..76f1d44 100644
--- a/gcc/cp/name-lookup.cc
+++ b/gcc/cp/name-lookup.cc
@@ -511,10 +511,11 @@ private:
void preserve_state ();
void restore_state ();
-private:
+public:
static tree ambiguous (tree thing, tree current);
- void add_overload (tree fns);
void add_value (tree new_val);
+private:
+ void add_overload (tree fns);
void add_type (tree new_type);
bool process_binding (tree val_bind, tree type_bind);
unsigned process_module_binding (tree val_bind, tree type_bind, unsigned);
@@ -1806,6 +1807,71 @@ fields_linear_search (tree klass, tree name, bool want_type)
return NULL_TREE;
}
+/* Like fields_linear_search, but specific for "_" name. There can be multiple
+ name-independent non-static data members and in that case a TREE_LIST with the
+ ambiguous decls should be returned. */
+
+static tree
+name_independent_linear_search (tree val, tree klass, tree name)
+{
+ for (tree fields = TYPE_FIELDS (klass); fields; fields = DECL_CHAIN (fields))
+ {
+ tree decl = fields;
+
+ if (TREE_CODE (decl) == FIELD_DECL
+ && ANON_AGGR_TYPE_P (TREE_TYPE (decl)))
+ {
+ if (tree temp = search_anon_aggr (TREE_TYPE (decl), name, false))
+ {
+ decl = temp;
+ goto add;
+ }
+ }
+
+ if (DECL_NAME (decl) != name)
+ continue;
+
+ if (TREE_CODE (decl) == USING_DECL)
+ {
+ decl = strip_using_decl (decl);
+ if (is_overloaded_fn (decl))
+ continue;
+ }
+
+ if (DECL_DECLARES_FUNCTION_P (decl))
+ /* Functions are found separately. */
+ continue;
+
+ add:
+ if (val == NULL_TREE)
+ val = decl;
+ else
+ {
+ if (TREE_CODE (val) != TREE_LIST)
+ {
+ if (TREE_CODE (val) == OVERLOAD
+ && OVL_DEDUP_P (val)
+ && TREE_CODE (decl) == USING_DECL)
+ {
+ val = ovl_make (decl, val);
+ continue;
+ }
+ val = tree_cons (NULL_TREE, val, NULL_TREE);
+ TREE_TYPE (val) = error_mark_node;
+ }
+ if (TREE_CODE (decl) == TREE_LIST)
+ val = chainon (decl, val);
+ else
+ {
+ val = tree_cons (NULL_TREE, decl, val);
+ TREE_TYPE (val) = error_mark_node;
+ }
+ }
+ }
+
+ return val;
+}
+
/* Look for NAME member inside of anonymous aggregate ANON. Although
such things should only contain FIELD_DECLs, we check that too
late, and would give very confusing errors if we weren't
@@ -1843,6 +1909,50 @@ get_class_binding_direct (tree klass, tree name, bool want_type)
val = member_vec_binary_search (member_vec, lookup);
if (!val)
;
+ else if (TREE_CODE (val) == OVERLOAD
+ && OVL_NAME_INDEPENDENT_DECL_P (val))
+ {
+ if (want_type)
+ {
+ while (TREE_CODE (val) == OVERLOAD
+ && OVL_NAME_INDEPENDENT_DECL_P (val))
+ val = OVL_CHAIN (val);
+ if (STAT_HACK_P (val))
+ val = STAT_TYPE (val);
+ else if (!DECL_DECLARES_TYPE_P (val))
+ val = NULL_TREE;
+ }
+ else
+ {
+ /* OVERLOAD with a special OVL_NAME_INDEPENDENT_DECL_P
+ flag is used under the hood to represent lookup
+ results which include name-independent declarations,
+ and get_class_binding_direct is turning that into
+ TREE_LIST representation (which the callers expect for
+ ambiguous lookups) instead.
+ There are 2 reasons for that:
+ 1) in order to keep the member_vec binary search fast, I
+ think it is better to keep OVL_NAME usable on all elements
+ because having to special case TREE_LIST would slow
+ everything down;
+ 2) the callers need to be able to chain the results anyway
+ and so need an unshared TREE_LIST they can tweak/destroy. */
+ tree ovl = val;
+ val = NULL_TREE;
+ while (TREE_CODE (ovl) == OVERLOAD
+ && OVL_NAME_INDEPENDENT_DECL_P (ovl))
+ {
+ val = tree_cons (NULL_TREE, OVL_FUNCTION (ovl), val);
+ TREE_TYPE (val) = error_mark_node;
+ ovl = OVL_CHAIN (ovl);
+ }
+ if (STAT_HACK_P (ovl))
+ val = tree_cons (NULL_TREE, STAT_DECL (ovl), val);
+ else
+ val = tree_cons (NULL_TREE, ovl, val);
+ TREE_TYPE (val) = error_mark_node;
+ }
+ }
else if (STAT_HACK_P (val))
val = want_type ? STAT_TYPE (val) : STAT_DECL (val);
else if (want_type && !DECL_DECLARES_TYPE_P (val))
@@ -1853,7 +1963,9 @@ get_class_binding_direct (tree klass, tree name, bool want_type)
if (member_vec && !want_type)
val = member_vec_linear_search (member_vec, lookup);
- if (!val || (TREE_CODE (val) == OVERLOAD && OVL_DEDUP_P (val)))
+ if (id_equal (lookup, "_") && !want_type)
+ val = name_independent_linear_search (val, klass, lookup);
+ else if (!val || (TREE_CODE (val) == OVERLOAD && OVL_DEDUP_P (val)))
/* Dependent using declarations are a 'field', make sure we
return that even if we saw an overload already. */
if (tree field_val = fields_linear_search (klass, lookup, want_type))
@@ -2049,6 +2161,25 @@ member_name_cmp (const void *a_p, const void *b_p)
if (TREE_CODE (b) == OVERLOAD)
b = OVL_FUNCTION (b);
+ if (id_equal (name_a, "_"))
+ {
+ /* Sort name-independent members first. */
+ if (name_independent_decl_p (a))
+ {
+ if (name_independent_decl_p (b))
+ {
+ if (DECL_UID (a) != DECL_UID (b))
+ return DECL_UID (a) < DECL_UID (b) ? -1 : +1;
+ gcc_assert (a == b);
+ return 0;
+ }
+ else
+ return -1;
+ }
+ else if (name_independent_decl_p (b))
+ return +1;
+ }
+
/* We're in STAT_HACK or USING_DECL territory (or possibly error-land). */
if (TREE_CODE (a) != TREE_CODE (b))
{
@@ -2183,14 +2314,15 @@ member_vec_append_enum_values (vec<tree, va_gc> *member_vec, tree enumtype)
/* MEMBER_VEC has just had new DECLs added to it, but is sorted.
DeDup adjacent DECLS of the same name. We already dealt with
conflict resolution when adding the fields or methods themselves.
- There are three cases (which could all be combined):
+ There are four cases (which could all be combined):
1) a TYPE_DECL and non TYPE_DECL. Deploy STAT_HACK as appropriate.
2) a USING_DECL and an overload. If the USING_DECL is dependent,
it wins. Otherwise the OVERLOAD does.
- 3) two USING_DECLS. ...
+ 3) two USING_DECLS.
+ 4) name-independent members plus others. ...
member_name_cmp will have ordered duplicates as
- <fns><using><type> */
+ <name_independent><fns><using><type> */
static void
member_vec_dedup (vec<tree, va_gc> *member_vec)
@@ -2208,6 +2340,7 @@ member_vec_dedup (vec<tree, va_gc> *member_vec)
tree to_type = NULL_TREE;
tree to_using = NULL_TREE;
tree marker = NULL_TREE;
+ unsigned name_independent = ix;
for (jx = ix; jx < len; jx++)
{
@@ -2251,7 +2384,9 @@ member_vec_dedup (vec<tree, va_gc> *member_vec)
continue;
}
- if (!current)
+ if (name_independent_decl_p (next))
+ name_independent = jx + 1;
+ else if (!current)
current = next;
}
@@ -2271,6 +2406,17 @@ member_vec_dedup (vec<tree, va_gc> *member_vec)
current = stat_hack (current, to_type);
}
+ for (unsigned kx = name_independent; kx > ix; --kx)
+ if (!current)
+ current = (*member_vec)[kx - 1];
+ else if (current == to_type)
+ current = stat_hack ((*member_vec)[kx - 1], to_type);
+ else
+ {
+ current = ovl_make ((*member_vec)[kx - 1], current);
+ OVL_NAME_INDEPENDENT_DECL_P (current) = 1;
+ }
+
if (current)
{
if (marker)
@@ -2479,10 +2625,27 @@ pop_local_binding (tree id, tree decl)
away. */
if (binding->value == decl)
binding->value = NULL_TREE;
+ else if (binding->type == decl)
+ binding->type = NULL_TREE;
else
{
- gcc_checking_assert (binding->type == decl);
- binding->type = NULL_TREE;
+ /* Name-independent variable was found after at least one declaration
+ with the same name. */
+ gcc_assert (TREE_CODE (binding->value) == TREE_LIST);
+ if (TREE_VALUE (binding->value) != decl)
+ {
+ binding->value = nreverse (binding->value);
+ /* Skip over TREE_LISTs added in pushdecl for check_local_shadow
+ detected declarations, formerly at the tail, now at the start
+ of the list. */
+ while (TREE_PURPOSE (binding->value) == error_mark_node)
+ binding->value = TREE_CHAIN (binding->value);
+ }
+ gcc_assert (TREE_VALUE (binding->value) == decl);
+ binding->value = TREE_CHAIN (binding->value);
+ while (binding->value
+ && TREE_PURPOSE (binding->value) == error_mark_node)
+ binding->value = TREE_CHAIN (binding->value);
}
if (!binding->value && !binding->type)
@@ -2579,6 +2742,10 @@ supplement_binding (cxx_binding *binding, tree decl)
tree bval = binding->value;
bool ok = true;
+ if (bval
+ && TREE_CODE (bval) == TREE_LIST
+ && name_independent_decl_p (TREE_VALUE (bval)))
+ bval = TREE_VALUE (bval);
tree target_bval = strip_using_decl (bval);
tree target_decl = strip_using_decl (decl);
@@ -2682,6 +2849,14 @@ supplement_binding (cxx_binding *binding, tree decl)
&& CONST_DECL_USING_P (decl))
/* Let the clone hide the using-decl that introduced it. */
binding->value = decl;
+ else if (name_independent_decl_p (decl))
+ {
+ if (cxx_dialect < cxx26)
+ pedwarn (DECL_SOURCE_LOCATION (decl), OPT_Wc__26_extensions,
+ "name-independent declarations only available with "
+ "%<-std=c++2c%> or %<-std=gnu++2c%>");
+ binding->value = name_lookup::ambiguous (decl, binding->value);
+ }
else
{
if (!error_operand_p (bval))
@@ -2786,6 +2961,7 @@ update_binding (cp_binding_level *level, cxx_binding *binding, tree *slot,
tree old_type = NULL_TREE;
bool hide_type = false;
bool hide_value = false;
+ bool name_independent_p = false;
if (!slot)
{
@@ -2793,6 +2969,7 @@ update_binding (cp_binding_level *level, cxx_binding *binding, tree *slot,
hide_type = HIDDEN_TYPE_BINDING_P (binding);
if (!old_type)
hide_value = hide_type, hide_type = false;
+ name_independent_p = name_independent_decl_p (decl);
}
else if (STAT_HACK_P (*slot))
{
@@ -2888,7 +3065,9 @@ update_binding (cp_binding_level *level, cxx_binding *binding, tree *slot,
}
else if (old)
{
- if (TREE_CODE (old) != TREE_CODE (decl))
+ if (name_independent_p)
+ to_val = name_lookup::ambiguous (decl, old);
+ else if (TREE_CODE (old) != TREE_CODE (decl))
/* Different kinds of decls conflict. */
goto conflict;
else if (TREE_CODE (old) == TYPE_DECL)
@@ -3088,13 +3267,13 @@ inform_shadowed (tree shadowed)
/* DECL is being declared at a local scope. Emit suitable shadow
warnings. */
-static void
+static tree
check_local_shadow (tree decl)
{
/* Don't complain about the parms we push and then pop
while tentatively parsing a function declarator. */
if (TREE_CODE (decl) == PARM_DECL && !DECL_CONTEXT (decl))
- return;
+ return NULL_TREE;
tree old = NULL_TREE;
cp_binding_level *old_scope = NULL;
@@ -3129,7 +3308,7 @@ check_local_shadow (tree decl)
error_at (DECL_SOURCE_LOCATION (old),
"lambda parameter %qD "
"previously declared as a capture", old);
- return;
+ return NULL_TREE;
}
/* Don't complain if it's from an enclosing function. */
else if (DECL_CONTEXT (old) == current_function_decl
@@ -3153,6 +3332,9 @@ check_local_shadow (tree decl)
in the outermost block of the function definition. */
if (b->kind == sk_function_parms)
{
+ if (name_independent_decl_p (decl))
+ return old;
+
auto_diagnostic_group d;
bool emit = true;
if (DECL_EXTERNAL (decl))
@@ -3165,7 +3347,7 @@ check_local_shadow (tree decl)
if (emit)
inform (DECL_SOURCE_LOCATION (old),
"%q#D previously declared here", old);
- return;
+ return NULL_TREE;
}
}
@@ -3177,7 +3359,7 @@ check_local_shadow (tree decl)
scope != old_scope; scope = scope->level_chain)
if (scope->kind == sk_class
&& !LAMBDA_TYPE_P (scope->this_entity))
- return;
+ return NULL_TREE;
}
/* Error if redeclaring a local declared in a
init-statement or in the condition of an if or
@@ -3189,6 +3371,9 @@ check_local_shadow (tree decl)
&& old_scope == current_binding_level->level_chain
&& (old_scope->kind == sk_cond || old_scope->kind == sk_for))
{
+ if (name_independent_decl_p (decl))
+ return old;
+
auto_diagnostic_group d;
bool emit = true;
if (DECL_EXTERNAL (decl))
@@ -3200,7 +3385,7 @@ check_local_shadow (tree decl)
if (emit)
inform (DECL_SOURCE_LOCATION (old),
"%q#D previously declared here", old);
- return;
+ return NULL_TREE;
}
/* C++11:
3.3.3/3: The name declared in an exception-declaration (...)
@@ -3212,6 +3397,9 @@ check_local_shadow (tree decl)
&& old_scope == current_binding_level->level_chain
&& old_scope->kind == sk_catch)
{
+ if (name_independent_decl_p (decl))
+ return old;
+
auto_diagnostic_group d;
bool emit;
if (DECL_EXTERNAL (decl))
@@ -3223,9 +3411,13 @@ check_local_shadow (tree decl)
if (emit)
inform (DECL_SOURCE_LOCATION (old),
"%q#D previously declared here", old);
- return;
+ return NULL_TREE;
}
+ /* Don't emit -Wshadow* warnings for name-independent decls. */
+ if (name_independent_decl_p (decl) || name_independent_decl_p (old))
+ return NULL_TREE;
+
/* If '-Wshadow=compatible-local' is specified without other
-Wshadow= flags, we will warn only when the type of the
shadowing variable (DECL) can be converted to that of the
@@ -3278,15 +3470,19 @@ check_local_shadow (tree decl)
auto_diagnostic_group d;
if (warning_at (DECL_SOURCE_LOCATION (decl), warning_code, msg, decl))
inform_shadowed (old);
- return;
+ return NULL_TREE;
}
if (!warn_shadow)
- return;
+ return NULL_TREE;
+
+ /* Don't emit -Wshadow for name-independent decls. */
+ if (name_independent_decl_p (decl))
+ return NULL_TREE;
/* Don't warn for artificial things that are not implicit typedefs. */
if (DECL_ARTIFICIAL (decl) && !DECL_IMPLICIT_TYPEDEF_P (decl))
- return;
+ return NULL_TREE;
if (nonlambda_method_basetype ())
if (tree member = lookup_member (current_nonlambda_class_type (),
@@ -3314,7 +3510,7 @@ check_local_shadow (tree decl)
suppress_warning (decl, OPT_Wshadow);
}
}
- return;
+ return NULL_TREE;
}
/* Now look for a namespace shadow. */
@@ -3337,10 +3533,10 @@ check_local_shadow (tree decl)
inform_shadowed (old);
suppress_warning (decl, OPT_Wshadow);
}
- return;
+ return NULL_TREE;
}
- return;
+ return NULL_TREE;
}
/* DECL is being pushed inside function CTX. Set its context, if
@@ -3659,6 +3855,8 @@ pushdecl (tree decl, bool hiding)
tree *slot = NULL; /* Binding slot in namespace. */
tree *mslot = NULL; /* Current module slot in namespace. */
tree old = NULL_TREE;
+ bool name_independent_p = false;
+ bool name_independent_diagnosed_p = false;
if (level->kind == sk_namespace)
{
@@ -3682,56 +3880,82 @@ pushdecl (tree decl, bool hiding)
binding = find_local_binding (level, name);
if (binding)
old = binding->value;
+ name_independent_p = name_independent_decl_p (decl);
}
if (old == error_mark_node)
old = NULL_TREE;
- for (ovl_iterator iter (old); iter; ++iter)
- if (iter.using_p ())
- ; /* Ignore using decls here. */
- else if (iter.hidden_p ()
- && TREE_CODE (*iter) == FUNCTION_DECL
- && DECL_LANG_SPECIFIC (*iter)
- && DECL_MODULE_IMPORT_P (*iter))
- ; /* An undeclared builtin imported from elsewhere. */
- else if (tree match
- = duplicate_decls (decl, *iter, hiding, iter.hidden_p ()))
- {
- if (match == error_mark_node)
- ;
- else if (TREE_CODE (match) == TYPE_DECL)
- gcc_checking_assert (REAL_IDENTIFIER_TYPE_VALUE (name)
- == (level->kind == sk_namespace
- ? NULL_TREE : TREE_TYPE (match)));
- else if (iter.hidden_p () && !hiding)
+ tree oldi, oldn;
+ for (oldi = old; oldi; oldi = oldn)
+ {
+ if (TREE_CODE (oldi) == TREE_LIST)
+ {
+ gcc_checking_assert (level->kind != sk_namespace
+ && name_independent_decl_p
+ (TREE_VALUE (old)));
+ oldn = TREE_CHAIN (oldi);
+ oldi = TREE_VALUE (oldi);
+ }
+ else
+ oldn = NULL_TREE;
+ for (ovl_iterator iter (oldi); iter; ++iter)
+ if (iter.using_p ())
+ ; /* Ignore using decls here. */
+ else if (iter.hidden_p ()
+ && TREE_CODE (*iter) == FUNCTION_DECL
+ && DECL_LANG_SPECIFIC (*iter)
+ && DECL_MODULE_IMPORT_P (*iter))
+ ; /* An undeclared builtin imported from elsewhere. */
+ else if (name_independent_p)
+ {
+ /* Ignore name-independent declarations. */
+ if (cxx_dialect < cxx26 && !name_independent_diagnosed_p)
+ pedwarn (DECL_SOURCE_LOCATION (decl), OPT_Wc__26_extensions,
+ "name-independent declarations only available with "
+ "%<-std=c++2c%> or %<-std=gnu++2c%>");
+ name_independent_diagnosed_p = true;
+ }
+ else if (tree match
+ = duplicate_decls (decl, *iter, hiding, iter.hidden_p ()))
{
- /* Unhiding a previously hidden decl. */
- tree head = iter.reveal_node (old);
- if (head != old)
+ if (match == error_mark_node)
+ ;
+ else if (TREE_CODE (match) == TYPE_DECL)
+ gcc_checking_assert (REAL_IDENTIFIER_TYPE_VALUE (name)
+ == (level->kind == sk_namespace
+ ? NULL_TREE : TREE_TYPE (match)));
+ else if (iter.hidden_p () && !hiding)
+ {
+ /* Unhiding a previously hidden decl. */
+ tree head = iter.reveal_node (oldi);
+ if (head != oldi)
+ {
+ gcc_checking_assert (ns);
+ if (STAT_HACK_P (*slot))
+ STAT_DECL (*slot) = head;
+ else
+ *slot = head;
+ }
+ if (DECL_EXTERN_C_P (match))
+ /* We need to check and register the decl now. */
+ check_extern_c_conflict (match);
+ }
+ else if (slot
+ && !hiding
+ && STAT_HACK_P (*slot)
+ && STAT_DECL_HIDDEN_P (*slot))
{
- gcc_checking_assert (ns);
- if (STAT_HACK_P (*slot))
- STAT_DECL (*slot) = head;
+ /* Unhide the non-function. */
+ gcc_checking_assert (oldi == match);
+ if (!STAT_TYPE (*slot))
+ *slot = match;
else
- *slot = head;
+ STAT_DECL (*slot) = match;
}
- if (DECL_EXTERN_C_P (match))
- /* We need to check and register the decl now. */
- check_extern_c_conflict (match);
- }
- else if (slot && !hiding
- && STAT_HACK_P (*slot) && STAT_DECL_HIDDEN_P (*slot))
- {
- /* Unhide the non-function. */
- gcc_checking_assert (old == match);
- if (!STAT_TYPE (*slot))
- *slot = match;
- else
- STAT_DECL (*slot) = match;
+ return match;
}
- return match;
- }
+ }
/* Check for redeclaring an import. */
if (slot && *slot && TREE_CODE (*slot) == BINDING_VECTOR)
@@ -3780,7 +4004,28 @@ pushdecl (tree decl, bool hiding)
if (level->kind != sk_namespace)
{
- check_local_shadow (decl);
+ tree local_shadow = check_local_shadow (decl);
+ if (name_independent_p && local_shadow)
+ {
+ if (cxx_dialect < cxx26 && !name_independent_diagnosed_p)
+ pedwarn (DECL_SOURCE_LOCATION (decl), OPT_Wc__26_extensions,
+ "name-independent declarations only available with "
+ "%<-std=c++2c%> or %<-std=gnu++2c%>");
+ name_independent_diagnosed_p = true;
+ /* When a name-independent declaration is pushed into a scope
+ which itself does not contain a _ named declaration yet (so
+ _ name lookups wouldn't be normally ambiguous), but it
+ shadows a _ declaration in some outer scope in cases
+ described in [basic.scope.block]/2 where if the names of
+ the shadowed and shadowing declarations were different it
+ would be ill-formed program, arrange for _ name lookups
+ in this scope to be ambiguous. */
+ if (old == NULL_TREE)
+ {
+ old = build_tree_list (error_mark_node, local_shadow);
+ TREE_TYPE (old) = error_mark_node;
+ }
+ }
if (TREE_CODE (decl) == NAMESPACE_DECL)
/* A local namespace alias. */
diff --git a/gcc/cp/parser.cc b/gcc/cp/parser.cc
index 2464d1a..732d2a9 100644
--- a/gcc/cp/parser.cc
+++ b/gcc/cp/parser.cc
@@ -2391,15 +2391,15 @@ static tree cp_parser_selection_statement
static tree cp_parser_condition
(cp_parser *);
static tree cp_parser_iteration_statement
- (cp_parser *, bool *, bool, unsigned short, bool);
+ (cp_parser *, bool *, bool, tree, bool);
static bool cp_parser_init_statement
(cp_parser *, tree *decl);
static tree cp_parser_for
- (cp_parser *, bool, unsigned short, bool);
+ (cp_parser *, bool, tree, bool);
static tree cp_parser_c_for
- (cp_parser *, tree, tree, bool, unsigned short, bool);
+ (cp_parser *, tree, tree, bool, tree, bool);
static tree cp_parser_range_for
- (cp_parser *, tree, tree, tree, bool, unsigned short, bool, bool);
+ (cp_parser *, tree, tree, tree, bool, tree, bool, bool);
static void do_range_for_auto_deduction
(tree, tree, cp_decomp *);
static tree cp_parser_perform_range_for_lookup
@@ -11381,6 +11381,7 @@ cp_parser_lambda_introducer (cp_parser* parser, tree lambda_expr)
hash_set<tree, true> ids;
tree first_capture_id = NULL_TREE;
+ unsigned name_independent_cnt = 0;
while (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_SQUARE))
{
cp_token* capture_token;
@@ -11425,7 +11426,7 @@ cp_parser_lambda_introducer (cp_parser* parser, tree lambda_expr)
else
add_capture (lambda_expr, /*id=*/this_identifier,
/*initializer=*/finish_this_expr (),
- /*by_reference_p=*/true, explicit_init_p);
+ /*by_reference_p=*/true, explicit_init_p, NULL);
continue;
}
@@ -11447,7 +11448,7 @@ cp_parser_lambda_introducer (cp_parser* parser, tree lambda_expr)
else
add_capture (lambda_expr, /*id=*/this_identifier,
/*initializer=*/finish_this_expr (),
- /*by_reference_p=*/false, explicit_init_p);
+ /*by_reference_p=*/false, explicit_init_p, NULL);
continue;
}
@@ -11634,13 +11635,15 @@ cp_parser_lambda_introducer (cp_parser* parser, tree lambda_expr)
ids.add (first_capture_id);
ids.add (capture_id);
}
+ if (found && explicit_init_p && id_equal (capture_id, "_"))
+ found = false;
if (found)
pedwarn (input_location, 0,
"already captured %qD in lambda expression", capture_id);
else
add_capture (lambda_expr, capture_id, capture_init_expr,
/*by_reference_p=*/capture_kind == BY_REFERENCE,
- explicit_init_p);
+ explicit_init_p, &name_independent_cnt);
/* If there is any qualification still in effect, clear it
now; we will be starting fresh with the next capture. */
@@ -11770,7 +11773,6 @@ cp_parser_lambda_declarator_opt (cp_parser* parser, tree lambda_expr)
if (lambda_specs.storage_class == sc_mutable)
{
- LAMBDA_EXPR_MUTABLE_P (lambda_expr) = 1;
quals = TYPE_UNQUALIFIED;
}
else if (lambda_specs.storage_class == sc_static)
@@ -12519,8 +12521,8 @@ cp_parser_statement (cp_parser* parser, tree in_statement_expr,
case RID_DO:
case RID_FOR:
std_attrs = process_stmt_hotness_attribute (std_attrs, attrs_loc);
- statement = cp_parser_iteration_statement (parser, if_p, false, 0,
- false);
+ statement = cp_parser_iteration_statement (parser, if_p, false,
+ NULL_TREE, false);
break;
case RID_BREAK:
@@ -13804,8 +13806,7 @@ cp_parser_condition (cp_parser* parser)
not included. */
static tree
-cp_parser_for (cp_parser *parser, bool ivdep, unsigned short unroll,
- bool novector)
+cp_parser_for (cp_parser *parser, bool ivdep, tree unroll, bool novector)
{
tree init, scope, decl;
bool is_range_for;
@@ -13842,7 +13843,7 @@ cp_parser_for (cp_parser *parser, bool ivdep, unsigned short unroll,
static tree
cp_parser_c_for (cp_parser *parser, tree scope, tree init, bool ivdep,
- unsigned short unroll, bool novector)
+ tree unroll, bool novector)
{
/* Normal for loop */
tree condition = NULL_TREE;
@@ -13893,8 +13894,7 @@ cp_parser_c_for (cp_parser *parser, tree scope, tree init, bool ivdep,
static tree
cp_parser_range_for (cp_parser *parser, tree scope, tree init, tree range_decl,
- bool ivdep, unsigned short unroll, bool novector,
- bool is_omp)
+ bool ivdep, tree unroll, bool novector, bool is_omp)
{
tree stmt, range_expr;
auto_vec <cxx_binding *, 16> bindings;
@@ -13967,7 +13967,7 @@ cp_parser_range_for (cp_parser *parser, tree scope, tree init, tree range_decl,
if (ivdep)
RANGE_FOR_IVDEP (stmt) = 1;
if (unroll)
- RANGE_FOR_UNROLL (stmt) = build_int_cst (integer_type_node, unroll);
+ RANGE_FOR_UNROLL (stmt) = unroll;
if (novector)
RANGE_FOR_NOVECTOR (stmt) = 1;
finish_range_for_decl (stmt, range_decl, range_expr);
@@ -14156,7 +14156,7 @@ warn_for_range_copy (tree decl, tree expr)
tree
cp_convert_range_for (tree statement, tree range_decl, tree range_expr,
- cp_decomp *decomp, bool ivdep, unsigned short unroll,
+ cp_decomp *decomp, bool ivdep, tree unroll,
bool novector)
{
tree begin, end;
@@ -14381,7 +14381,7 @@ cp_parser_range_for_member_function (tree range, tree identifier)
static tree
cp_parser_iteration_statement (cp_parser* parser, bool *if_p, bool ivdep,
- unsigned short unroll, bool novector)
+ tree unroll, bool novector)
{
cp_token *token;
enum rid keyword;
@@ -15398,7 +15398,6 @@ cp_parser_block_declaration (cp_parser *parser,
/* Peek at the next token to figure out which kind of declaration is
present. */
cp_token *token1 = cp_lexer_peek_token (parser->lexer);
- size_t attr_idx;
/* If the next keyword is `asm', we have an asm-definition. */
if (token1->keyword == RID_ASM)
@@ -15452,22 +15451,36 @@ cp_parser_block_declaration (cp_parser *parser,
/* If the next token is `static_assert' we have a static assertion. */
else if (token1->keyword == RID_STATIC_ASSERT)
cp_parser_static_assert (parser, /*member_p=*/false);
- /* If the next tokens after attributes is `using namespace', then we have
- a using-directive. */
- else if ((attr_idx = cp_parser_skip_std_attribute_spec_seq (parser, 1)) != 1
- && cp_lexer_nth_token_is_keyword (parser->lexer, attr_idx,
- RID_USING)
- && cp_lexer_nth_token_is_keyword (parser->lexer, attr_idx + 1,
- RID_NAMESPACE))
+ else
{
- if (statement_p)
- cp_parser_commit_to_tentative_parse (parser);
- cp_parser_using_directive (parser);
+ size_t attr_idx = cp_parser_skip_std_attribute_spec_seq (parser, 1);
+ cp_token *after_attr = NULL;
+ if (attr_idx != 1)
+ after_attr = cp_lexer_peek_nth_token (parser->lexer, attr_idx);
+ /* If the next tokens after attributes is `using namespace', then we have
+ a using-directive. */
+ if (after_attr
+ && after_attr->keyword == RID_USING
+ && cp_lexer_nth_token_is_keyword (parser->lexer, attr_idx + 1,
+ RID_NAMESPACE))
+ {
+ if (statement_p)
+ cp_parser_commit_to_tentative_parse (parser);
+ cp_parser_using_directive (parser);
+ }
+ /* If the next token after attributes is `asm', then we have
+ an asm-definition. */
+ else if (after_attr && after_attr->keyword == RID_ASM)
+ {
+ if (statement_p)
+ cp_parser_commit_to_tentative_parse (parser);
+ cp_parser_asm_definition (parser);
+ }
+ /* Anything else must be a simple-declaration. */
+ else
+ cp_parser_simple_declaration (parser, !statement_p,
+ /*maybe_range_for_decl*/NULL);
}
- /* Anything else must be a simple-declaration. */
- else
- cp_parser_simple_declaration (parser, !statement_p,
- /*maybe_range_for_decl*/NULL);
}
/* Parse a simple-declaration.
@@ -15874,6 +15887,8 @@ cp_parser_decomposition_declaration (cp_parser *parser,
cp_decl_specifier_seq decl_specs;
clear_decl_specs (&decl_specs);
decl_specs.type = make_auto ();
+ if (decl_specifiers->storage_class == sc_static)
+ decl_specs.storage_class = sc_static;
tree prev = decl;
FOR_EACH_VEC_ELT (v, i, e)
{
@@ -22422,6 +22437,7 @@ cp_parser_asm_definition (cp_parser* parser)
bool invalid_inputs_p = false;
bool invalid_outputs_p = false;
required_token missing = RT_NONE;
+ tree std_attrs = cp_parser_std_attribute_spec_seq (parser);
location_t asm_loc = cp_lexer_peek_token (parser->lexer)->location;
/* Look for the `asm' keyword. */
@@ -22655,6 +22671,10 @@ cp_parser_asm_definition (cp_parser* parser)
else
symtab->finalize_toplevel_asm (string);
}
+
+ if (std_attrs)
+ warning_at (asm_loc, OPT_Wattributes,
+ "attributes ignored on %<asm%> declaration");
}
/* Given the type TYPE of a declaration with declarator DECLARATOR, return the
@@ -44299,7 +44319,7 @@ cp_parser_omp_loop_nest (cp_parser *parser, bool *if_p)
cp_parser_require (parser, CPP_COLON, RT_COLON);
init = cp_parser_range_for (parser, NULL_TREE, NULL_TREE, decl,
- false, 0, false, true);
+ false, NULL_TREE, false, true);
cp_convert_omp_range_for (this_pre_body, sl, decl,
orig_decl, init, orig_init,
@@ -50236,30 +50256,12 @@ cp_parser_pragma_ivdep (cp_parser *parser, cp_token *pragma_tok)
/* Parse a pragma GCC unroll. */
-static unsigned short
+static tree
cp_parser_pragma_unroll (cp_parser *parser, cp_token *pragma_tok)
{
location_t location = cp_lexer_peek_token (parser->lexer)->location;
- tree expr = cp_parser_constant_expression (parser);
- unsigned short unroll;
- expr = maybe_constant_value (expr);
- HOST_WIDE_INT lunroll = 0;
- if (!INTEGRAL_TYPE_P (TREE_TYPE (expr))
- || TREE_CODE (expr) != INTEGER_CST
- || (lunroll = tree_to_shwi (expr)) < 0
- || lunroll >= USHRT_MAX)
- {
- error_at (location, "%<#pragma GCC unroll%> requires an"
- " assignment-expression that evaluates to a non-negative"
- " integral constant less than %u", USHRT_MAX);
- unroll = 0;
- }
- else
- {
- unroll = (unsigned short)lunroll;
- if (unroll == 0)
- unroll = 1;
- }
+ tree unroll = cp_parser_constant_expression (parser);
+ unroll = cp_check_pragma_unroll (location, fold_non_dependent_expr (unroll));
cp_parser_skip_to_pragma_eol (parser, pragma_tok);
return unroll;
}
@@ -50593,7 +50595,7 @@ cp_parser_pragma (cp_parser *parser, enum pragma_context context, bool *if_p)
case PRAGMA_NOVECTOR:
{
bool ivdep;
- unsigned short unroll = 0;
+ tree unroll = NULL_TREE;
bool novector = false;
const char *pragma_str;
diff --git a/gcc/cp/pt.cc b/gcc/cp/pt.cc
index 092e6fd..669d2ad 100644
--- a/gcc/cp/pt.cc
+++ b/gcc/cp/pt.cc
@@ -11107,7 +11107,7 @@ push_tinst_level_loc (tree tldcl, tree targs, location_t loc)
if (tinst_depth >= max_tinst_depth)
{
/* Tell error.cc not to try to instantiate any templates. */
- at_eof = 2;
+ at_eof = 3;
fatal_error (input_location,
"template instantiation depth exceeds maximum of %d"
" (use %<-ftemplate-depth=%> to increase the maximum)",
@@ -18407,23 +18407,24 @@ tsubst_stmt (tree t, tree args, tsubst_flags_t complain, tree in_decl)
complain, in_decl, decomp);
}
+ tree unroll = RECUR (RANGE_FOR_UNROLL (t));
+ if (unroll)
+ unroll
+ = cp_check_pragma_unroll (EXPR_LOCATION (RANGE_FOR_UNROLL (t)),
+ unroll);
if (processing_template_decl)
{
RANGE_FOR_IVDEP (stmt) = RANGE_FOR_IVDEP (t);
- RANGE_FOR_UNROLL (stmt) = RANGE_FOR_UNROLL (t);
+ RANGE_FOR_UNROLL (stmt) = unroll;
RANGE_FOR_NOVECTOR (stmt) = RANGE_FOR_NOVECTOR (t);
finish_range_for_decl (stmt, decl, expr);
if (decomp && decl != error_mark_node)
cp_finish_decomp (decl, decomp);
}
else
- {
- unsigned short unroll = (RANGE_FOR_UNROLL (t)
- ? tree_to_uhwi (RANGE_FOR_UNROLL (t)) : 0);
- stmt = cp_convert_range_for (stmt, decl, expr, decomp,
- RANGE_FOR_IVDEP (t), unroll,
- RANGE_FOR_NOVECTOR (t));
- }
+ stmt = cp_convert_range_for (stmt, decl, expr, decomp,
+ RANGE_FOR_IVDEP (t), unroll,
+ RANGE_FOR_NOVECTOR (t));
bool prev = note_iteration_stmt_body_start ();
RECUR (RANGE_FOR_BODY (t));
@@ -19341,7 +19342,6 @@ tsubst_lambda_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl)
= LAMBDA_EXPR_LOCATION (t);
LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (r)
= LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (t);
- LAMBDA_EXPR_MUTABLE_P (r) = LAMBDA_EXPR_MUTABLE_P (t);
if (tree ti = LAMBDA_EXPR_REGEN_INFO (t))
LAMBDA_EXPR_REGEN_INFO (r)
= build_template_info (t, add_to_template_args (TI_ARGS (ti),
@@ -19354,7 +19354,7 @@ tsubst_lambda_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl)
&& LAMBDA_EXPR_PENDING_PROXIES (t) == NULL);
vec<tree,va_gc>* field_packs = NULL;
-
+ unsigned name_independent_cnt = 0;
for (tree cap = LAMBDA_EXPR_CAPTURE_LIST (t); cap;
cap = TREE_CHAIN (cap))
{
@@ -19384,7 +19384,8 @@ tsubst_lambda_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl)
bool by_ref = (TYPE_REF_P (ftype)
|| (TREE_CODE (ftype) == DECLTYPE_TYPE
&& DECLTYPE_FOR_REF_CAPTURE (ftype)));
- add_capture (r, name, init, by_ref, !DECL_NORMAL_CAPTURE_P (ofield));
+ add_capture (r, name, init, by_ref, !DECL_NORMAL_CAPTURE_P (ofield),
+ &name_independent_cnt);
continue;
}
@@ -20279,7 +20280,7 @@ tsubst_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl)
build_x_modify_expr sets it and it must not be reset
here. */
if (warning_suppressed_p (t, OPT_Wparentheses))
- suppress_warning (r, OPT_Wparentheses);
+ suppress_warning (STRIP_REFERENCE_REF (r), OPT_Wparentheses);
RETURN (r);
}
@@ -21501,11 +21502,17 @@ tsubst_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl)
}
case ANNOTATE_EXPR:
- op1 = RECUR (TREE_OPERAND (t, 0));
- RETURN (build3_loc (EXPR_LOCATION (t), ANNOTATE_EXPR,
- TREE_TYPE (op1), op1,
- RECUR (TREE_OPERAND (t, 1)),
- RECUR (TREE_OPERAND (t, 2))));
+ {
+ op1 = RECUR (TREE_OPERAND (t, 0));
+ tree op2 = RECUR (TREE_OPERAND (t, 1));
+ tree op3 = RECUR (TREE_OPERAND (t, 2));
+ if (TREE_CODE (op2) == INTEGER_CST
+ && wi::to_widest (op2) == (int) annot_expr_unroll_kind)
+ op3 = cp_check_pragma_unroll (EXPR_LOCATION (TREE_OPERAND (t, 2)),
+ op3);
+ RETURN (build3_loc (EXPR_LOCATION (t), ANNOTATE_EXPR,
+ TREE_TYPE (op1), op1, op2, op3));
+ }
default:
/* Handle Objective-C++ constructs, if appropriate. */
@@ -25191,27 +25198,61 @@ more_specialized_fn (tree pat1, tree pat2, int len)
bool lose1 = false;
bool lose2 = false;
- /* Remove the this parameter from non-static member functions. If
- one is a non-static member function and the other is not a static
- member function, remove the first parameter from that function
- also. This situation occurs for operator functions where we
- locate both a member function (with this pointer) and non-member
- operator (with explicit first operand). */
- if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl1))
+ /* C++17 [temp.func.order]/3 (CWG532)
+
+ If only one of the function templates M is a non-static member of some
+ class A, M is considered to have a new first parameter inserted in its
+ function parameter list. Given cv as the cv-qualifiers of M (if any), the
+ new parameter is of type "rvalue reference to cv A" if the optional
+ ref-qualifier of M is && or if M has no ref-qualifier and the first
+ parameter of the other template has rvalue reference type. Otherwise, the
+ new parameter is of type "lvalue reference to cv A". */
+
+ if (DECL_STATIC_FUNCTION_P (decl1) || DECL_STATIC_FUNCTION_P (decl2))
{
- len--; /* LEN is the number of significant arguments for DECL1 */
- args1 = TREE_CHAIN (args1);
- if (!DECL_STATIC_FUNCTION_P (decl2))
+ /* Note C++20 DR2445 extended the above to static member functions, but
+	 I think the old G++ behavior of just skipping the object
+ parameter when comparing to a static member function was better, so
+ let's stick with that for now. This is CWG2834. --jason 2023-12 */
+ if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl1)) /* FIXME or explicit */
+ {
+ len--; /* LEN is the number of significant arguments for DECL1 */
+ args1 = TREE_CHAIN (args1);
+ }
+ else if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl2)) /* FIXME or explicit */
args2 = TREE_CHAIN (args2);
}
- else if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl2))
+ else if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl1) /* FIXME implicit only */
+ && DECL_NONSTATIC_MEMBER_FUNCTION_P (decl2))
{
+ /* Note DR2445 also (IMO wrongly) removed the "only one" above, which
+ would break e.g. cpp1y/lambda-generic-variadic5.C. */
+ len--;
+ args1 = TREE_CHAIN (args1);
args2 = TREE_CHAIN (args2);
- if (!DECL_STATIC_FUNCTION_P (decl1))
+ }
+ else if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl1) /* FIXME implicit only */
+ || DECL_NONSTATIC_MEMBER_FUNCTION_P (decl2))
+ {
+ /* The other is a non-member or explicit object member function;
+ rewrite the implicit object parameter to a reference. */
+ tree ns = DECL_NONSTATIC_MEMBER_FUNCTION_P (decl2) ? decl2 : decl1;
+ tree &nsargs = ns == decl2 ? args2 : args1;
+ tree obtype = TREE_TYPE (TREE_VALUE (nsargs));
+
+ nsargs = TREE_CHAIN (nsargs);
+
+ cp_ref_qualifier rqual = type_memfn_rqual (TREE_TYPE (ns));
+ if (rqual == REF_QUAL_NONE)
{
- len--;
- args1 = TREE_CHAIN (args1);
+ tree otherfirst = ns == decl1 ? args2 : args1;
+ otherfirst = TREE_VALUE (otherfirst);
+ if (TREE_CODE (otherfirst) == REFERENCE_TYPE
+ && TYPE_REF_IS_RVALUE (otherfirst))
+ rqual = REF_QUAL_RVALUE;
}
+ obtype = cp_build_reference_type (obtype, rqual == REF_QUAL_RVALUE);
+ nsargs = tree_cons (NULL_TREE, obtype, nsargs);
}
/* If only one is a conversion operator, they are unordered. */
@@ -30951,7 +30992,9 @@ convert_generic_types_to_packs (tree parm, int start_idx, int end_idx)
{
tree id = unpack_concept_check (constr);
TREE_VEC_ELT (TREE_OPERAND (id, 1), 0) = t;
- location_t loc = DECL_SOURCE_LOCATION (TYPE_NAME (t));
+ /* Use UNKNOWN_LOCATION so write_template_args can tell the
+ difference between this and a fold the user wrote. */
+ location_t loc = UNKNOWN_LOCATION;
tree fold = finish_left_unary_fold_expr (loc, constr,
TRUTH_ANDIF_EXPR);
TEMPLATE_PARM_CONSTRAINTS (node) = fold;
diff --git a/gcc/cp/ptree.cc b/gcc/cp/ptree.cc
index 32c5b52..d1f5892 100644
--- a/gcc/cp/ptree.cc
+++ b/gcc/cp/ptree.cc
@@ -265,8 +265,6 @@ cxx_print_identifier (FILE *file, tree node, int indent)
void
cxx_print_lambda_node (FILE *file, tree node, int indent)
{
- if (LAMBDA_EXPR_MUTABLE_P (node))
- fprintf (file, " /mutable");
fprintf (file, " default_capture_mode=[");
switch (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (node))
{
diff --git a/gcc/cp/search.cc b/gcc/cp/search.cc
index cd80f28..ac79b62 100644
--- a/gcc/cp/search.cc
+++ b/gcc/cp/search.cc
@@ -1091,13 +1091,24 @@ lookup_field_r (tree binfo, void *data)
}
/* Add the new value. */
- lfi->ambiguous = tree_cons (NULL_TREE, nval, lfi->ambiguous);
- TREE_TYPE (lfi->ambiguous) = error_mark_node;
+ if (TREE_CODE (nval) == TREE_LIST)
+ lfi->ambiguous = chainon (nval, lfi->ambiguous);
+ else
+ {
+ lfi->ambiguous = tree_cons (NULL_TREE, nval, lfi->ambiguous);
+ TREE_TYPE (lfi->ambiguous) = error_mark_node;
+ }
}
}
else
{
- lfi->rval = nval;
+ if (TREE_CODE (nval) == TREE_LIST)
+ {
+ lfi->ambiguous = chainon (nval, lfi->ambiguous);
+ lfi->rval = TREE_VALUE (nval);
+ }
+ else
+ lfi->rval = nval;
lfi->rval_binfo = binfo;
}
diff --git a/gcc/cp/semantics.cc b/gcc/cp/semantics.cc
index 3bf5864..6634acf 100644
--- a/gcc/cp/semantics.cc
+++ b/gcc/cp/semantics.cc
@@ -871,8 +871,7 @@ is_assignment_op_expr_p (tree t)
void
maybe_warn_unparenthesized_assignment (tree t, tsubst_flags_t complain)
{
- if (REFERENCE_REF_P (t))
- t = TREE_OPERAND (t, 0);
+ t = STRIP_REFERENCE_REF (t);
if ((complain & tf_warning)
&& warn_parentheses
@@ -1167,7 +1166,7 @@ begin_while_stmt (void)
void
finish_while_stmt_cond (tree cond, tree while_stmt, bool ivdep,
- unsigned short unroll, bool novector)
+ tree unroll, bool novector)
{
cond = maybe_convert_cond (cond);
finish_cond (&WHILE_COND (while_stmt), cond);
@@ -1185,8 +1184,7 @@ finish_while_stmt_cond (tree cond, tree while_stmt, bool ivdep,
WHILE_COND (while_stmt),
build_int_cst (integer_type_node,
annot_expr_unroll_kind),
- build_int_cst (integer_type_node,
- unroll));
+ unroll);
if (novector && cond != error_mark_node)
WHILE_COND (while_stmt) = build3 (ANNOTATE_EXPR,
TREE_TYPE (WHILE_COND (while_stmt)),
@@ -1238,7 +1236,7 @@ finish_do_body (tree do_stmt)
COND is as indicated. */
void
-finish_do_stmt (tree cond, tree do_stmt, bool ivdep, unsigned short unroll,
+finish_do_stmt (tree cond, tree do_stmt, bool ivdep, tree unroll,
bool novector)
{
cond = maybe_convert_cond (cond);
@@ -1255,7 +1253,7 @@ finish_do_stmt (tree cond, tree do_stmt, bool ivdep, unsigned short unroll,
if (unroll && cond != error_mark_node)
cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
build_int_cst (integer_type_node, annot_expr_unroll_kind),
- build_int_cst (integer_type_node, unroll));
+ unroll);
if (novector && cond != error_mark_node)
cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
build_int_cst (integer_type_node, annot_expr_no_vector_kind),
@@ -1358,7 +1356,8 @@ finish_init_stmt (tree for_stmt)
FOR_STMT. */
void
-finish_for_cond (tree cond, tree for_stmt, bool ivdep, unsigned short unroll, bool novector)
+finish_for_cond (tree cond, tree for_stmt, bool ivdep, tree unroll,
+ bool novector)
{
cond = maybe_convert_cond (cond);
finish_cond (&FOR_COND (for_stmt), cond);
@@ -1376,8 +1375,7 @@ finish_for_cond (tree cond, tree for_stmt, bool ivdep, unsigned short unroll, bo
FOR_COND (for_stmt),
build_int_cst (integer_type_node,
annot_expr_unroll_kind),
- build_int_cst (integer_type_node,
- unroll));
+ unroll);
if (novector && cond != error_mark_node)
FOR_COND (for_stmt) = build3 (ANNOTATE_EXPR,
TREE_TYPE (FOR_COND (for_stmt)),
@@ -2176,8 +2174,7 @@ finish_parenthesized_expr (cp_expr expr)
{
/* This inhibits warnings in maybe_warn_unparenthesized_assignment
and c_common_truthvalue_conversion. */
- tree inner = REFERENCE_REF_P (expr) ? TREE_OPERAND (expr, 0) : *expr;
- suppress_warning (inner, OPT_Wparentheses);
+ suppress_warning (STRIP_REFERENCE_REF (*expr), OPT_Wparentheses);
}
if (TREE_CODE (expr) == OFFSET_REF
@@ -2264,6 +2261,16 @@ finish_non_static_data_member (tree decl, tree object, tree qualifying_scope,
else if (PACK_EXPANSION_P (type))
/* Don't bother trying to represent this. */
type = NULL_TREE;
+ else if (WILDCARD_TYPE_P (TREE_TYPE (object)))
+ /* We don't know what the eventual quals will be, so punt until
+ instantiation time.
+
+ This can happen when called from build_capture_proxy for an explicit
+ object lambda. It's a bit marginal to call this function in that
+ case, since this function is for references to members of 'this',
+ but the deduced type is required to be derived from the closure
+ type, so it works. */
+ type = NULL_TREE;
else
{
/* Set the cv qualifiers. */
@@ -11582,6 +11589,7 @@ finish_static_assert (tree condition, tree message, location_t location,
error_at (location,
"%<static_assert%> message %<data()[%d]%> "
"must be a constant expression", i);
+ XDELETEVEC (buf);
return;
}
if (msg == NULL)
@@ -11683,6 +11691,7 @@ finish_decltype_type (tree expr, bool id_expression_or_member_access_p,
A<decltype(sizeof(T))>::U doesn't require 'typename'. */
if (instantiation_dependent_uneval_expression_p (expr))
{
+ dependent:
type = cxx_make_type (DECLTYPE_TYPE);
DECLTYPE_TYPE_EXPR (type) = expr;
DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P (type)
@@ -11857,7 +11866,11 @@ finish_decltype_type (tree expr, bool id_expression_or_member_access_p,
if (outer_automatic_var_p (STRIP_REFERENCE_REF (expr))
&& current_function_decl
&& LAMBDA_FUNCTION_P (current_function_decl))
- type = capture_decltype (STRIP_REFERENCE_REF (expr));
+ {
+ type = capture_decltype (STRIP_REFERENCE_REF (expr));
+ if (!type)
+ goto dependent;
+ }
else if (error_operand_p (expr))
type = error_mark_node;
else if (expr == current_class_ptr)
@@ -12755,7 +12768,8 @@ apply_deduced_return_type (tree fco, tree return_type)
/* DECL is a local variable or parameter from the surrounding scope of a
lambda-expression. Returns the decltype for a use of the capture field
- for DECL even if it hasn't been captured yet. */
+ for DECL even if it hasn't been captured yet. Or NULL_TREE if we can't give
+ a correct answer at this point and we should build a DECLTYPE_TYPE. */
static tree
capture_decltype (tree decl)
@@ -12793,9 +12807,14 @@ capture_decltype (tree decl)
if (!TYPE_REF_P (type))
{
- if (!LAMBDA_EXPR_MUTABLE_P (lam))
- type = cp_build_qualified_type (type, (cp_type_quals (type)
- |TYPE_QUAL_CONST));
+ tree obtype = TREE_TYPE (DECL_ARGUMENTS (current_function_decl));
+ if (WILDCARD_TYPE_P (non_reference (obtype)))
+ /* We don't know what the eventual obtype quals will be. */
+ return NULL_TREE;
+ int quals = cp_type_quals (type);
+ if (INDIRECT_TYPE_P (obtype))
+ quals |= cp_type_quals (TREE_TYPE (obtype));
+ type = cp_build_qualified_type (type, quals);
type = build_reference_type (type);
}
return type;
@@ -12997,4 +13016,33 @@ cp_build_bit_cast (location_t loc, tree type, tree arg,
return ret;
}
+/* Diagnose invalid #pragma GCC unroll argument and adjust
+ it if needed. */
+
+tree
+cp_check_pragma_unroll (location_t loc, tree unroll)
+{
+ HOST_WIDE_INT lunroll = 0;
+ if (type_dependent_expression_p (unroll))
+ ;
+ else if (!INTEGRAL_TYPE_P (TREE_TYPE (unroll))
+ || (!value_dependent_expression_p (unroll)
+ && (!tree_fits_shwi_p (unroll)
+ || (lunroll = tree_to_shwi (unroll)) < 0
+ || lunroll >= USHRT_MAX)))
+ {
+ error_at (loc, "%<#pragma GCC unroll%> requires an"
+ " assignment-expression that evaluates to a non-negative"
+ " integral constant less than %u", USHRT_MAX);
+ unroll = integer_one_node;
+ }
+ else if (TREE_CODE (unroll) == INTEGER_CST)
+ {
+ unroll = fold_convert (integer_type_node, unroll);
+ if (integer_zerop (unroll))
+ unroll = integer_one_node;
+ }
+ return unroll;
+}
+
#include "gt-cp-semantics.h"
diff --git a/gcc/cp/tree.cc b/gcc/cp/tree.cc
index 5279579..da4d5c5 100644
--- a/gcc/cp/tree.cc
+++ b/gcc/cp/tree.cc
@@ -5086,7 +5086,7 @@ handle_likeliness_attribute (tree *node, tree name, tree args,
}
/* Table of valid C++ attributes. */
-const struct attribute_spec cxx_attribute_table[] =
+static const attribute_spec cxx_gnu_attributes[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
@@ -5094,11 +5094,15 @@ const struct attribute_spec cxx_attribute_table[] =
handle_init_priority_attribute, NULL },
{ "abi_tag", 1, -1, false, false, false, true,
handle_abi_tag_attribute, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
+};
+
+const scoped_attribute_specs cxx_gnu_attribute_table =
+{
+ "gnu", { cxx_gnu_attributes }
};
/* Table of C++ standard attributes. */
-const struct attribute_spec std_attribute_table[] =
+static const attribute_spec std_attributes[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
@@ -5119,8 +5123,12 @@ const struct attribute_spec std_attribute_table[] =
{ "pre", 0, -1, false, false, false, false,
handle_contract_attribute, NULL },
{ "post", 0, -1, false, false, false, false,
- handle_contract_attribute, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
+ handle_contract_attribute, NULL }
+};
+
+const scoped_attribute_specs std_attribute_table =
+{
+ nullptr, { std_attributes }
};
/* Handle an "init_priority" attribute; arguments as in
@@ -5718,7 +5726,6 @@ void
init_tree (void)
{
list_hash_table = hash_table<list_hasher>::create_ggc (61);
- register_scoped_attributes (std_attribute_table, NULL);
}
/* Returns the kind of special function that DECL (a FUNCTION_DECL)
diff --git a/gcc/cp/typeck.cc b/gcc/cp/typeck.cc
index e995fb6..8e4cfae 100644
--- a/gcc/cp/typeck.cc
+++ b/gcc/cp/typeck.cc
@@ -2534,15 +2534,6 @@ decay_conversion (tree exp,
return error_mark_node;
}
- /* Don't let an array compound literal decay to a pointer. It can
- still be used to initialize an array or bind to a reference. */
- if (TREE_CODE (exp) == TARGET_EXPR)
- {
- if (complain & tf_error)
- error_at (loc, "taking address of temporary array");
- return error_mark_node;
- }
-
ptrtype = build_pointer_type (TREE_TYPE (type));
if (VAR_P (exp))
@@ -3476,7 +3467,7 @@ finish_class_member_access_expr (cp_expr object, tree name, bool template_p,
name, scope);
return error_mark_node;
}
-
+
if (TREE_SIDE_EFFECTS (object))
val = build2 (COMPOUND_EXPR, TREE_TYPE (val), object, val);
return val;
@@ -3493,9 +3484,24 @@ finish_class_member_access_expr (cp_expr object, tree name, bool template_p,
return error_mark_node;
}
+ /* NAME may refer to a static data member, in which case there is
+ one copy of the data member that is shared by all the objects of
+ the class. So NAME can be unambiguously referred to even if
+ there are multiple indirect base classes containing NAME. */
+ const base_access ba = [scope, name] ()
+ {
+ if (identifier_p (name))
+ {
+ tree m = lookup_member (scope, name, /*protect=*/0,
+ /*want_type=*/false, tf_none);
+ if (!m || shared_member_p (m))
+ return ba_any;
+ }
+ return ba_check;
+ } ();
+
/* Find the base of OBJECT_TYPE corresponding to SCOPE. */
- access_path = lookup_base (object_type, scope, ba_check,
- NULL, complain);
+ access_path = lookup_base (object_type, scope, ba, NULL, complain);
if (access_path == error_mark_node)
return error_mark_node;
if (!access_path)
@@ -7263,11 +7269,9 @@ cp_build_addr_expr_1 (tree arg, bool strict_lvalue, tsubst_flags_t complain)
complain);
}
- /* For addresses of immediate functions ensure we have EXPR_LOCATION
- set for possible later diagnostics. */
+ /* Ensure we have EXPR_LOCATION set for possible later diagnostics. */
if (TREE_CODE (val) == ADDR_EXPR
- && TREE_CODE (TREE_OPERAND (val, 0)) == FUNCTION_DECL
- && DECL_IMMEDIATE_FUNCTION_P (TREE_OPERAND (val, 0)))
+ && TREE_CODE (TREE_OPERAND (val, 0)) == FUNCTION_DECL)
SET_EXPR_LOCATION (val, input_location);
return val;
@@ -10535,6 +10539,9 @@ maybe_warn_about_returning_address_of_local (tree retval, location_t loc)
if (TYPE_REF_P (valtype))
warning_at (loc, OPT_Wreturn_local_addr,
"returning reference to temporary");
+ else if (TYPE_PTR_P (valtype))
+ warning_at (loc, OPT_Wreturn_local_addr,
+ "returning pointer to temporary");
else if (is_std_init_list (valtype))
warning_at (loc, OPT_Winit_list_lifetime,
"returning temporary %<initializer_list%> does not extend "
diff --git a/gcc/d/ChangeLog b/gcc/d/ChangeLog
index 2454da4..b002b45 100644
--- a/gcc/d/ChangeLog
+++ b/gcc/d/ChangeLog
@@ -1,3 +1,26 @@
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * d-attribs.cc (d_langhook_common_attribute_table): Add extra braces
+ to work around PR 16333 in older compilers.
+ (d_langhook_gnu_attribute_table): Likewise.
+
+2023-12-02 Richard Sandiford <richard.sandiford@arm.com>
+
+ * d-tree.h (d_langhook_attribute_table): Replace with...
+ (d_langhook_gnu_attribute_table): ...this.
+ (d_langhook_common_attribute_table): Change type to
+ scoped_attribute_specs.
+ * d-attribs.cc (d_langhook_common_attribute_table): Change type to
+ scoped_attribute_specs, using...
+ (d_langhook_common_attributes): ...this as the underlying array.
+ (d_langhook_attribute_table): Replace with...
+ (d_langhook_gnu_attributes, d_langhook_gnu_attribute_table): ...these
+ new globals.
+ (uda_attribute_p): Update accordingly, and update for new
+ targetm.attribute_table type.
+ * d-lang.cc (d_langhook_attribute_table): New global.
+ (LANG_HOOKS_COMMON_ATTRIBUTE_TABLE): Delete.
+
2023-11-21 Iain Buclaw <ibuclaw@gdcproject.org>
* dmd/MERGE: Merge upstream dmd ff57fec515.
diff --git a/gcc/d/d-attribs.cc b/gcc/d/d-attribs.cc
index c0dc0e2..3b69c53 100644
--- a/gcc/d/d-attribs.cc
+++ b/gcc/d/d-attribs.cc
@@ -162,7 +162,7 @@ extern const struct attribute_spec::exclusions attr_cold_hot_exclusions[] =
/* Table of machine-independent attributes.
For internal use (marking of built-ins) only. */
-const attribute_spec d_langhook_common_attribute_table[] =
+static const attribute_spec d_langhook_common_attributes[] =
{
ATTR_SPEC ("noreturn", 0, 0, true, false, false, false,
handle_noreturn_attribute, attr_noreturn_exclusions),
@@ -190,11 +190,15 @@ const attribute_spec d_langhook_common_attribute_table[] =
handle_fnspec_attribute, NULL),
ATTR_SPEC ("omp declare simd", 0, -1, true, false, false, false,
handle_omp_declare_simd_attribute, NULL),
- ATTR_SPEC (NULL, 0, 0, false, false, false, false, NULL, NULL),
+};
+
+const scoped_attribute_specs d_langhook_common_attribute_table =
+{
+ "gnu", { d_langhook_common_attributes }
};
/* Table of D language attributes exposed by `gcc.attribute' UDAs. */
-const attribute_spec d_langhook_attribute_table[] =
+static const attribute_spec d_langhook_gnu_attributes[] =
{
ATTR_SPEC ("noinline", 0, 0, true, false, false, false,
d_handle_noinline_attribute, attr_noinline_exclusions),
@@ -238,9 +242,12 @@ const attribute_spec d_langhook_attribute_table[] =
d_handle_used_attribute, NULL),
ATTR_SPEC ("visibility", 1, 1, false, false, false, false,
d_handle_visibility_attribute, NULL),
- ATTR_SPEC (NULL, 0, 0, false, false, false, false, NULL, NULL),
};
+const scoped_attribute_specs d_langhook_gnu_attribute_table =
+{
+ "gnu", { d_langhook_gnu_attributes }
+};
/* Insert the type attribute ATTRNAME with value VALUE into TYPE.
Returns a new variant of the original type declaration. */
@@ -283,20 +290,14 @@ uda_attribute_p (const char *name)
/* Search both our language, and target attribute tables.
Common and format attributes are kept internal. */
- for (const attribute_spec *p = d_langhook_attribute_table; p->name; p++)
- {
- if (get_identifier (p->name) == ident)
- return true;
- }
+ for (const attribute_spec &p : d_langhook_gnu_attributes)
+ if (get_identifier (p.name) == ident)
+ return true;
- if (targetm.attribute_table)
- {
- for (const attribute_spec *p = targetm.attribute_table; p->name; p++)
- {
- if (get_identifier (p->name) == ident)
- return true;
- }
- }
+ for (auto scoped_attributes : targetm.attribute_table)
+ for (const attribute_spec &p : scoped_attributes->attributes)
+ if (get_identifier (p.name) == ident)
+ return true;
return false;
}
diff --git a/gcc/d/d-lang.cc b/gcc/d/d-lang.cc
index 61fc160..dcbffec 100644
--- a/gcc/d/d-lang.cc
+++ b/gcc/d/d-lang.cc
@@ -1927,6 +1927,12 @@ d_get_sarif_source_language (const char *)
return "d";
}
+const scoped_attribute_specs *const d_langhook_attribute_table[] =
+{
+ &d_langhook_gnu_attribute_table,
+ &d_langhook_common_attribute_table,
+};
+
/* Definitions for our language-specific hooks. */
#undef LANG_HOOKS_NAME
@@ -1938,7 +1944,6 @@ d_get_sarif_source_language (const char *)
#undef LANG_HOOKS_HANDLE_OPTION
#undef LANG_HOOKS_POST_OPTIONS
#undef LANG_HOOKS_PARSE_FILE
-#undef LANG_HOOKS_COMMON_ATTRIBUTE_TABLE
#undef LANG_HOOKS_ATTRIBUTE_TABLE
#undef LANG_HOOKS_GET_ALIAS_SET
#undef LANG_HOOKS_TYPES_COMPATIBLE_P
@@ -1971,7 +1976,6 @@ d_get_sarif_source_language (const char *)
#define LANG_HOOKS_HANDLE_OPTION d_handle_option
#define LANG_HOOKS_POST_OPTIONS d_post_options
#define LANG_HOOKS_PARSE_FILE d_parse_file
-#define LANG_HOOKS_COMMON_ATTRIBUTE_TABLE d_langhook_common_attribute_table
#define LANG_HOOKS_ATTRIBUTE_TABLE d_langhook_attribute_table
#define LANG_HOOKS_GET_ALIAS_SET d_get_alias_set
#define LANG_HOOKS_TYPES_COMPATIBLE_P d_types_compatible_p
diff --git a/gcc/d/d-tree.h b/gcc/d/d-tree.h
index d19c3f5..46a2873 100644
--- a/gcc/d/d-tree.h
+++ b/gcc/d/d-tree.h
@@ -520,8 +520,8 @@ extern tree insert_decl_attribute (tree, const char *, tree = NULL_TREE);
extern void apply_user_attributes (Dsymbol *, tree);
/* In d-builtins.cc. */
-extern const attribute_spec d_langhook_attribute_table[];
-extern const attribute_spec d_langhook_common_attribute_table[];
+extern const struct scoped_attribute_specs d_langhook_gnu_attribute_table;
+extern const struct scoped_attribute_specs d_langhook_common_attribute_table;
extern Type *build_frontend_type (tree);
extern tree d_builtin_function (tree);
diff --git a/gcc/df-scan.cc b/gcc/df-scan.cc
index 95157407..934c9ca 100644
--- a/gcc/df-scan.cc
+++ b/gcc/df-scan.cc
@@ -3720,6 +3720,16 @@ df_get_exit_block_use_set (bitmap exit_block_uses)
}
#endif
+#ifdef EH_RETURN_TAKEN_RTX
+ if ((!targetm.have_epilogue () || ! epilogue_completed)
+ && crtl->calls_eh_return)
+ {
+ rtx tmp = EH_RETURN_TAKEN_RTX;
+ if (tmp && REG_P (tmp))
+ df_mark_reg (tmp, exit_block_uses);
+ }
+#endif
+
if ((!targetm.have_epilogue () || ! epilogue_completed)
&& crtl->calls_eh_return)
{
diff --git a/gcc/diagnostic-core.h b/gcc/diagnostic-core.h
index 04eba3d..965c9e9 100644
--- a/gcc/diagnostic-core.h
+++ b/gcc/diagnostic-core.h
@@ -123,6 +123,12 @@ extern bool emit_diagnostic (diagnostic_t, rich_location *, int,
const char *, ...) ATTRIBUTE_GCC_DIAG(4,5);
extern bool emit_diagnostic_valist (diagnostic_t, location_t, int, const char *,
va_list *) ATTRIBUTE_GCC_DIAG (4,0);
+extern bool emit_diagnostic_valist (diagnostic_t,
+ rich_location *,
+ const diagnostic_metadata *,
+ int,
+ const char *,
+ va_list *) ATTRIBUTE_GCC_DIAG (5,0);
extern bool seen_error (void);
#ifdef BUFSIZ
diff --git a/gcc/diagnostic-format-sarif.cc b/gcc/diagnostic-format-sarif.cc
index 1bb7286..6777592 100644
--- a/gcc/diagnostic-format-sarif.cc
+++ b/gcc/diagnostic-format-sarif.cc
@@ -569,16 +569,20 @@ sarif_builder::make_result_object (diagnostic_context *context,
free (rule_id);
}
- /* "taxa" property (SARIF v2.1.0 section 3.27.8). */
if (diagnostic->metadata)
- if (int cwe_id = diagnostic->metadata->get_cwe ())
- {
- json::array *taxa_arr = new json::array ();
- json::object *cwe_id_obj
- = make_reporting_descriptor_reference_object_for_cwe_id (cwe_id);
- taxa_arr->append (cwe_id_obj);
- result_obj->set ("taxa", taxa_arr);
- }
+ {
+ /* "taxa" property (SARIF v2.1.0 section 3.27.8). */
+ if (int cwe_id = diagnostic->metadata->get_cwe ())
+ {
+ json::array *taxa_arr = new json::array ();
+ json::object *cwe_id_obj
+ = make_reporting_descriptor_reference_object_for_cwe_id (cwe_id);
+ taxa_arr->append (cwe_id_obj);
+ result_obj->set ("taxa", taxa_arr);
+ }
+
+ diagnostic->metadata->maybe_add_sarif_properties (*result_obj);
+ }
/* "level" property (SARIF v2.1.0 section 3.27.10). */
if (const char *sarif_level = maybe_get_sarif_level (diagnostic->kind))
diff --git a/gcc/diagnostic-metadata.h b/gcc/diagnostic-metadata.h
index 8e06c89..1af80fd 100644
--- a/gcc/diagnostic-metadata.h
+++ b/gcc/diagnostic-metadata.h
@@ -21,6 +21,8 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_DIAGNOSTIC_METADATA_H
#define GCC_DIAGNOSTIC_METADATA_H
+class sarif_object;
+
/* A bundle of additional metadata that can be associated with a
diagnostic.
@@ -63,6 +65,14 @@ class diagnostic_metadata
};
diagnostic_metadata () : m_cwe (0) {}
+ virtual ~diagnostic_metadata () {}
+
+ /* Hook for SARIF output to allow for adding diagnostic-specific
+ properties to the result object's property bag. */
+ virtual void
+ maybe_add_sarif_properties (sarif_object &/*result_obj*/) const
+ {
+ }
void add_cwe (int cwe) { m_cwe = cwe; }
int get_cwe () const { return m_cwe; }
diff --git a/gcc/diagnostic-show-locus.cc b/gcc/diagnostic-show-locus.cc
index 563d282..55e7166 100644
--- a/gcc/diagnostic-show-locus.cc
+++ b/gcc/diagnostic-show-locus.cc
@@ -1295,6 +1295,15 @@ layout::maybe_add_location_range (const location_range *loc_range,
sanely relative to the primary location. */
return false;
+ /* If there's no column information, then don't try to print
+ annotation lines for this range. */
+ enum range_display_kind range_display_kind
+ = loc_range->m_range_display_kind;
+ if (start.column == 0
+ || finish.column == 0
+ || caret.column == 0)
+ range_display_kind = SHOW_LINES_WITHOUT_RANGE;
+
/* Everything is now known to be in the correct source file,
but it may require further sanitization. */
layout_range ri (exploc_with_display_col (m_file_cache,
@@ -1303,7 +1312,7 @@ layout::maybe_add_location_range (const location_range *loc_range,
exploc_with_display_col (m_file_cache,
finish, m_policy,
LOCATION_ASPECT_FINISH),
- loc_range->m_range_display_kind,
+ range_display_kind,
exploc_with_display_col (m_file_cache,
caret, m_policy,
LOCATION_ASPECT_CARET),
@@ -3297,6 +3306,20 @@ test_one_liner_simple_caret ()
pp_formatted_text (dc.printer));
}
+/* No column information (column == 0).
+ No annotation line should be printed. */
+
+static void
+test_one_liner_no_column ()
+{
+ test_diagnostic_context dc;
+ location_t caret = linemap_position_for_column (line_table, 0);
+ rich_location richloc (line_table, caret);
+ diagnostic_show_locus (&dc, &richloc, DK_ERROR);
+ ASSERT_STREQ (" foo = bar.field;\n",
+ pp_formatted_text (dc.printer));
+}
+
/* Caret and range. */
static void
@@ -3848,6 +3871,7 @@ test_diagnostic_show_locus_one_liner (const line_table_case &case_)
ASSERT_EQ (16, LOCATION_COLUMN (line_end));
test_one_liner_simple_caret ();
+ test_one_liner_no_column ();
test_one_liner_caret_and_range ();
test_one_liner_multiple_carets_and_ranges ();
test_one_liner_fixit_insert_before ();
diff --git a/gcc/diagnostic.cc b/gcc/diagnostic.cc
index b4ebcd2..2e3d37b 100644
--- a/gcc/diagnostic.cc
+++ b/gcc/diagnostic.cc
@@ -558,14 +558,12 @@ maybe_line_and_column (int line, int col)
return result;
}
-/* Return a malloc'd string describing a location e.g. "foo.c:42:10".
- The caller is responsible for freeing the memory. */
+/* Return a string describing a location e.g. "foo.c:42:10". */
-static char *
-diagnostic_get_location_text (diagnostic_context *context,
- expanded_location s)
+label_text
+diagnostic_context::get_location_text (const expanded_location &s) const
{
- pretty_printer *pp = context->printer;
+ pretty_printer *pp = this->printer;
const char *locus_cs = colorize_start (pp_show_color (pp), "locus");
const char *locus_ce = colorize_stop (pp_show_color (pp));
const char *file = s.file ? s.file : progname;
@@ -574,13 +572,13 @@ diagnostic_get_location_text (diagnostic_context *context,
if (strcmp (file, special_fname_builtin ()))
{
line = s.line;
- if (context->m_show_column)
- col = context->converted_column (s);
+ if (m_show_column)
+ col = this->converted_column (s);
}
const char *line_col = maybe_line_and_column (line, col);
- return build_message_string ("%s%s%s:%s", locus_cs, file,
- line_col, locus_ce);
+ return label_text::take (build_message_string ("%s%s%s:%s", locus_cs, file,
+ line_col, locus_ce));
}
static const char *const diagnostic_kind_text[] = {
@@ -610,12 +608,11 @@ diagnostic_build_prefix (diagnostic_context *context,
text_ce = colorize_stop (pp_show_color (pp));
}
- expanded_location s = diagnostic_expand_location (diagnostic);
- char *location_text = diagnostic_get_location_text (context, s);
+ const expanded_location s = diagnostic_expand_location (diagnostic);
+ label_text location_text = context->get_location_text (s);
- char *result = build_message_string ("%s %s%s%s", location_text,
+ char *result = build_message_string ("%s %s%s%s", location_text.get (),
text_cs, text, text_ce);
- free (location_text);
return result;
}
@@ -1091,9 +1088,8 @@ void
default_diagnostic_start_span_fn (diagnostic_context *context,
expanded_location exploc)
{
- char *text = diagnostic_get_location_text (context, exploc);
- pp_string (context->printer, text);
- free (text);
+ label_text text = context->get_location_text (exploc);
+ pp_string (context->printer, text.get ());
pp_newline (context->printer);
}
@@ -1838,6 +1834,18 @@ emit_diagnostic_valist (diagnostic_t kind, location_t location, int opt,
return diagnostic_impl (&richloc, NULL, opt, gmsgid, ap, kind);
}
+/* As above, but with rich_location and metadata. */
+
+bool
+emit_diagnostic_valist (diagnostic_t kind,
+ rich_location *richloc,
+ const diagnostic_metadata *metadata,
+ int opt,
+ const char *gmsgid, va_list *ap)
+{
+ return diagnostic_impl (richloc, metadata, opt, gmsgid, ap, kind);
+}
+
/* An informative note at LOCATION. Use this for additional details on an error
message. */
void
@@ -2852,9 +2860,8 @@ assert_location_text (const char *expected_loc_text,
xloc.data = NULL;
xloc.sysp = false;
- char *actual_loc_text = diagnostic_get_location_text (&dc, xloc);
- ASSERT_STREQ (expected_loc_text, actual_loc_text);
- free (actual_loc_text);
+ label_text actual_loc_text = dc.get_location_text (xloc);
+ ASSERT_STREQ (expected_loc_text, actual_loc_text.get ());
}
/* Verify that diagnostic_get_location_text works as expected. */
diff --git a/gcc/diagnostic.h b/gcc/diagnostic.h
index cbd2554..b57556f 100644
--- a/gcc/diagnostic.h
+++ b/gcc/diagnostic.h
@@ -541,6 +541,8 @@ public:
return m_option_callbacks.m_lang_mask;
}
+ label_text get_location_text (const expanded_location &s) const;
+
private:
bool includes_seen_p (const line_map_ordinary *map);
diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
index 1ae589a..af782b3 100644
--- a/gcc/doc/extend.texi
+++ b/gcc/doc/extend.texi
@@ -77,6 +77,7 @@ extensions, accepted by GCC in C90 mode and in C++.
* Function Names:: Printable strings which are the name of the current
function.
* Return Address:: Getting the return or frame address of a function.
+* Stack Scrubbing:: Stack scrubbing internal interfaces.
* Vector Extensions:: Using vector instructions through built-in functions.
* Offsetof:: Special syntax for implementing @code{offsetof}.
* __sync Builtins:: Legacy built-in functions for atomic memory access.
@@ -4204,9 +4205,8 @@ pointers. In the latter case, any function used as an initializer of
such a callback field will be treated as being called with tainted
arguments.
-The analyzer will pay particular attention to such functions when both
-@option{-fanalyzer} and @option{-fanalyzer-checker=taint} are supplied,
-potentially issuing warnings guarded by
+The analyzer will pay particular attention to such functions when
+@option{-fanalyzer} is supplied, potentially issuing warnings guarded by
@option{-Wanalyzer-tainted-allocation-size},
@option{-Wanalyzer-tainted-array-index},
@option{-Wanalyzer-tainted-divisor},
@@ -8962,6 +8962,71 @@ initialization will result in future breakage.
GCC emits warnings based on this attribute by default; use
@option{-Wno-designated-init} to suppress them.
+@cindex @code{hardbool} type attribute
+@item hardbool
+@itemx hardbool (@var{false_value})
+@itemx hardbool (@var{false_value}, @var{true_value})
+This attribute may only be applied to integral types in C, to introduce
+hardened boolean types. It turns the integral type into a boolean-like
+type with the same size and precision, that uses the specified values as
+representations for @code{false} and @code{true}. Underneath, it is
+actually an enumerated type, but its observable behavior is like that of
+@code{_Bool}, except for the strict internal representations, verified
+by runtime checks.
+
+If @var{true_value} is omitted, the bitwise negation of
+@var{false_value} is used. If @var{false_value} is omitted, zero is
+used. The named representation values must be different when converted
+to the original integral type. Narrower bitfields are rejected if the
+representations become indistinguishable.
+
+Values of such types automatically decay to @code{_Bool}, at which
+point, the selected representation values are mapped to the
+corresponding @code{_Bool} values. When the represented value is not
+determined, at compile time, to be either @var{false_value} or
+@var{true_value}, runtime verification calls @code{__builtin_trap} if it
+is neither. This is what makes them hardened boolean types.
+
+When converting scalar types to such hardened boolean types, implicitly
+or explicitly, behavior corresponds to a conversion to @code{_Bool},
+followed by a mapping from @code{false} and @code{true} to
+@var{false_value} and @var{true_value}, respectively.
+
+@smallexample
+typedef char __attribute__ ((__hardbool__ (0x5a))) hbool;
+hbool first = 0; /* False, stored as (char)0x5a. */
+hbool second = !first; /* True, stored as ~(char)0x5a. */
+
+static hbool zeroinit; /* False, stored as (char)0x5a. */
+auto hbool uninit; /* Undefined, may trap. */
+@end smallexample
+
+When zero-initializing a variable or field of hardened boolean type
+(presumably held in static storage) the implied zero initializer gets
+converted to @code{_Bool}, and then to the hardened boolean type, so
+that the initial value is the hardened representation for @code{false}.
+Using that value is well defined. This is @emph{not} the case when
+variables and fields of such types are uninitialized (presumably held in
+automatic or dynamic storage): their values are indeterminate, and using
+them invokes undefined behavior. Using them may trap or not, depending
+on the bits held in the storage (re)used for the variable, if any, and
+on optimizations the compiler may perform on the grounds that using
+uninitialized values invokes undefined behavior.
+
+Users of @option{-ftrivial-auto-var-init} should be aware that the bit
+patterns used as initializers are @emph{not} converted to
+@code{hardbool} types, so using a @code{hardbool} variable that is
+implicitly initialized by @option{-ftrivial-auto-var-init} may trap
+if the representation values chosen for @code{false} and @code{true} do
+not match the initializer.
+
+Since this is a language extension only available in C, interoperation
+with other languages may pose difficulties. It should interoperate with
+Ada Booleans defined with the same size and equivalent representation
+clauses, and with enumerations or other languages' integral types that
+correspond to C's chosen integral type.
+
+
@cindex @code{may_alias} type attribute
@item may_alias
Accesses through pointers to types with this attribute are not subject
@@ -9161,6 +9226,268 @@ pid_t wait (wait_status_ptr_t p)
@}
@end smallexample
+@cindex @code{strub} type attribute
+@item strub
+This attribute defines stack-scrubbing properties of functions and
+variables, so that functions that access sensitive data can have their
+stack frames zeroed-out upon returning or propagating exceptions. This
+may be enabled explicitly, by selecting certain @code{strub} modes for
+specific functions, or implicitly, by means of @code{strub} variables.
+
+Being a type attribute, it attaches to types, even when specified in
+function and variable declarations. When applied to function types, it
+takes an optional string argument. When applied to a
+pointer-to-function type, if the optional argument is given, it gets
+propagated to the function type.
+
+@smallexample
+/* A strub variable. */
+int __attribute__ ((strub)) var;
+/* A strub variable that happens to be a pointer. */
+__attribute__ ((strub)) int *strub_ptr_to_int;
+/* A pointer type that may point to a strub variable. */
+typedef int __attribute__ ((strub)) *ptr_to_strub_int_type;
+
+/* A declaration of a strub function. */
+extern int __attribute__ ((strub)) foo (void);
+/* A pointer to that strub function. */
+int __attribute__ ((strub ("at-calls"))) (*ptr_to_strub_fn)(void) = foo;
+@end smallexample
+
+A function associated with @code{at-calls} @code{strub} mode
+(@code{strub("at-calls")}, or just @code{strub}) undergoes interface
+changes. Its callers are adjusted to match the changes, and to scrub
+(overwrite with zeros) the stack space used by the called function after
+it returns. The interface change makes the function type incompatible
+with an unadorned but otherwise equivalent type, so @emph{every}
+declaration and every type that may be used to call the function must be
+associated with this strub mode.
+
+A function associated with @code{internal} @code{strub} mode
+(@code{strub("internal")}) retains an unmodified, type-compatible
+interface, but it may be turned into a wrapper that calls the wrapped
+body using a custom interface. The wrapper then scrubs the stack space
+used by the wrapped body. Though the wrapped body has its stack space
+scrubbed, the wrapper does not, so arguments and return values may
+remain unscrubbed even when such a function is called by another
+function that enables @code{strub}. This is why, when compiling with
+@option{-fstrub=strict}, a @code{strub} context is not allowed to call
+@code{internal} @code{strub} functions.
+
+@smallexample
+/* A declaration of an internal-strub function. */
+extern int __attribute__ ((strub ("internal"))) bar (void);
+
+int __attribute__ ((strub))
+baz (void)
+@{
+ /* Ok, foo was declared above as an at-calls strub function. */
+ foo ();
+ /* Not allowed in strict mode, otherwise allowed. */
+ bar ();
+@}
+@end smallexample
+
+An automatically-allocated variable associated with the @code{strub}
+attribute causes the (immediately) enclosing function to have
+@code{strub} enabled.
+
+A statically-allocated variable associated with the @code{strub}
+attribute causes functions that @emph{read} it, through its @code{strub}
+data type, to have @code{strub} enabled. Reading data by dereferencing
+a pointer to a @code{strub} data type has the same effect. Note: The
+attribute does not carry over from a composite type to the types of its
+components, so the intended effect may not be obtained with non-scalar
+types.
+
+When selecting a @code{strub}-enabled mode for a function that is not
+explicitly associated with one, because of @code{strub} variables or
+data pointers, the function must satisfy @code{internal} mode viability
+requirements (see below), even when @code{at-calls} mode is also viable
+and, being more efficient, ends up selected as an optimization.
+
+@smallexample
+/* zapme is implicitly strub-enabled because of strub variables.
+ Optimization may change its strub mode, but not the requirements. */
+static int
+zapme (int i)
+@{
+ /* A local strub variable enables strub. */
+ int __attribute__ ((strub)) lvar;
+ /* Reading strub data through a pointer-to-strub enables strub. */
+ lvar = * (ptr_to_strub_int_type) &i;
+ /* Writing to a global strub variable does not enable strub. */
+ var = lvar;
+ /* Reading from a global strub variable enables strub. */
+ return var;
+@}
+@end smallexample
+
+A @code{strub} context is the body (as opposed to the interface) of a
+function that has @code{strub} enabled, be it explicitly, by
+@code{at-calls} or @code{internal} mode, or implicitly, due to
+@code{strub} variables or command-line options.
+
+A function of a type associated with the @code{disabled} @code{strub}
+mode (@code{strub("disabled")}) will not have its own stack space
+scrubbed. Such functions @emph{cannot} be called from within
+@code{strub} contexts.
+
+In order to enable a function to be called from within @code{strub}
+contexts without having its stack space scrubbed, associate it with the
+@code{callable} @code{strub} mode (@code{strub("callable")}).
+
+When a function is not assigned a @code{strub} mode, explicitly or
+implicitly, the mode defaults to @code{callable}, except when compiling
+with @option{-fstrub=strict}, that causes @code{strub} mode to default
+to @code{disabled}.
+
+@example
+extern int __attribute__ ((strub ("callable"))) bac (void);
+extern int __attribute__ ((strub ("disabled"))) bad (void);
+ /* Implicitly disabled with -fstrub=strict, otherwise callable. */
+extern int bah (void);
+
+int __attribute__ ((strub))
+bal (void)
+@{
+ /* Not allowed, bad is not strub-callable. */
+ bad ();
+ /* Ok, bac is strub-callable. */
+ bac ();
+ /* Not allowed with -fstrub=strict, otherwise allowed. */
+ bah ();
+@}
+@end example
+
+Function types marked @code{callable} and @code{disabled} are not
+mutually compatible types, but the underlying interfaces are compatible,
+so it is safe to convert pointers between them, and to use such pointers
+or alternate declarations to call them. Interfaces are also
+interchangeable between them and @code{internal} (but not
+@code{at-calls}!), but adding @code{internal} to a pointer type will not
+cause the pointed-to function to perform stack scrubbing.
+
+@example
+void __attribute__ ((strub))
+bap (void)
+@{
+ /* Assign a callable function to pointer-to-disabled.
+ Flagged as not quite compatible with -Wpedantic. */
+ int __attribute__ ((strub ("disabled"))) (*d_p) (void) = bac;
+ /* Not allowed: calls disabled type in a strub context. */
+ d_p ();
+
+ /* Assign a disabled function to pointer-to-callable.
+ Flagged as not quite compatible with -Wpedantic. */
+ int __attribute__ ((strub ("callable"))) (*c_p) (void) = bad;
+ /* Ok, safe. */
+ c_p ();
+
+ /* Assign an internal function to pointer-to-callable.
+ Flagged as not quite compatible with -Wpedantic. */
+ c_p = bar;
+ /* Ok, safe. */
+ c_p ();
+
+ /* Assign an at-calls function to pointer-to-callable.
+     Flagged as incompatible.  */
+ c_p = bal;
+ /* The call through an interface-incompatible type will not use the
+ modified interface expected by the at-calls function, so it is
+ likely to misbehave at runtime. */
+ c_p ();
+@}
+@end example
+
+@code{Strub} contexts are never inlined into non-@code{strub} contexts.
+When an @code{internal}-strub function is split up, the wrapper can
+often be inlined, but the wrapped body @emph{never} is. A function
+marked as @code{always_inline}, even if explicitly assigned
+@code{internal} strub mode, will not undergo wrapping, so its body gets
+inlined as required.
+
+@example
+inline int __attribute__ ((strub ("at-calls")))
+inl_atc (void)
+@{
+ /* This body may get inlined into strub contexts. */
+@}
+
+inline int __attribute__ ((strub ("internal")))
+inl_int (void)
+@{
+ /* This body NEVER gets inlined, though its wrapper may. */
+@}
+
+inline int __attribute__ ((strub ("internal"), always_inline))
+inl_int_ali (void)
+@{
+ /* No internal wrapper, so this body ALWAYS gets inlined,
+ but it cannot be called from non-strub contexts. */
+@}
+
+void __attribute__ ((strub ("disabled")))
+bat (void)
+@{
+ /* Not allowed, cannot inline into a non-strub context. */
+ inl_int_ali ();
+@}
+@end example
+
+@cindex strub eligibility and viability
+Some @option{-fstrub=*} command line options enable @code{strub} modes
+implicitly where viable. A @code{strub} mode is only viable for a
+function if the function is eligible for that mode, and if other
+conditions, detailed below, are satisfied. If it's not eligible for a
+mode, attempts to explicitly associate it with that mode are rejected
+with an error message. If it is eligible, that mode may be assigned
+explicitly through this attribute, but implicit assignment through
+command-line options may involve additional viability requirements.
+
+A function is ineligible for @code{at-calls} @code{strub} mode if a
+different @code{strub} mode is explicitly requested, if attribute
+@code{noipa} is present, or if it calls @code{__builtin_apply_args}.
+@code{At-calls} @code{strub} mode, if not requested through the function
+type, is only viable for an eligible function if the function is not
+visible to other translation units, if it doesn't have its address
+taken, and if it is never called with a function type overrider.
+
+@smallexample
+/* bav is eligible for at-calls strub mode,
+ but not viable for that mode because it is visible to other units.
+ It is eligible and viable for internal strub mode. */
+void bav () @{@}
+
+/* setp is eligible for at-calls strub mode,
+ but not viable for that mode because its address is taken.
+ It is eligible and viable for internal strub mode. */
+void setp (void) @{ static void (*p)(void) = setp; @}
+@end smallexample
+
+A function is ineligible for @code{internal} @code{strub} mode if a
+different @code{strub} mode is explicitly requested, or if attribute
+@code{noipa} is present. For an @code{always_inline} function, meeting
+these requirements is enough to make it eligible. Any function that has
+attribute @code{noclone}, that uses such extensions as non-local labels,
+computed gotos, alternate variable argument passing interfaces,
+@code{__builtin_next_arg}, or @code{__builtin_return_address}, or that
+takes too many (about 64Ki) arguments is ineligible, unless it is
+@code{always_inline}. For @code{internal} @code{strub} mode, all
+eligible functions are viable.
+
+@smallexample
+/* flop is not eligible, thus not viable, for at-calls strub mode.
+ Likewise for internal strub mode. */
+__attribute__ ((noipa)) void flop (void) @{@}
+
+/* flip is eligible and viable for at-calls strub mode.
+ It would be ineligible for internal strub mode, because of noclone,
+ if it weren't for always_inline. With always_inline, noclone is not
+ an obstacle, so it is also eligible and viable for internal strub mode. */
+inline __attribute__ ((noclone, always_inline)) void flip (void) @{@}
+@end smallexample
+
@cindex @code{unused} type attribute
@item unused
When attached to a type (including a @code{union} or a @code{struct}),
@@ -12311,6 +12638,55 @@ option is in effect. Such calls should only be made in debugging
situations.
@enddefbuiltin
+@deftypefn {Built-in Function} {void *} __builtin_stack_address ()
+This function returns the value of the stack pointer register.
+@end deftypefn
+
+@node Stack Scrubbing
+@section Stack scrubbing internal interfaces
+
+Stack scrubbing involves cooperation between a @code{strub} context,
+i.e., a function whose stack frame is to be zeroed-out, and its callers.
+The caller initializes a stack watermark, the @code{strub} context
+updates the watermark according to its stack use, and the caller zeroes
+it out once it regains control, whether by the callee's returning or by
+an exception.
+
+Each of these steps is performed by a different builtin function call.
+Calls to these builtins are introduced automatically, in response to
+@code{strub} attributes and command-line options; they are not expected
+to be explicitly called by source code.
+
+The functions that implement the builtins are available in libgcc but,
+depending on optimization levels, they are expanded internally, adjusted
+to account for inlining, and sometimes combined/deferred (e.g. passing
+the caller-supplied watermark on to callees, refraining from erasing
+stack areas that the caller will) to enable tail calls and to optimize
+for code size.
+
+@deftypefn {Built-in Function} {void} __builtin___strub_enter (void **@var{wmptr})
+This function initializes a stack @var{watermark} variable with the
+current top of the stack. A call to this builtin function is introduced
+before entering a @code{strub} context. It remains as a function call
+if optimization is not enabled.
+@end deftypefn
+
+@deftypefn {Built-in Function} {void} __builtin___strub_update (void **@var{wmptr})
+This function updates a stack @var{watermark} variable with the current
+top of the stack, if it tops the previous watermark. A call to this
+builtin function is inserted within @code{strub} contexts, whenever
+additional stack space may have been used. It remains as a function
+call at optimization levels lower than 2.
+@end deftypefn
+
+@deftypefn {Built-in Function} {void} __builtin___strub_leave (void **@var{wmptr})
+This function overwrites the memory area between the current top of the
+stack, and the @var{watermark}ed address. A call to this builtin
+function is inserted after leaving a @code{strub} context. It remains
+as a function call at optimization levels lower than 3, and it is guarded by
+a condition at level 2.
+@end deftypefn
+
@node Vector Extensions
@section Using Vector Instructions through Built-in Functions
@@ -13173,9 +13549,9 @@ after addition, conditional jump on carry etc.
@enddefbuiltin
-@defbuiltin{unsigned int __builtin_addc (unsigned int a, unsigned int b, unsigned int carry_in, unsigned int *carry_out)}
-@defbuiltinx{unsigned long int __builtin_addcl (unsigned long int a, unsigned long int b, unsigned int carry_in, unsigned long int *carry_out)}
-@defbuiltinx{unsigned long long int __builtin_addcll (unsigned long long int a, unsigned long long int b, unsigned long long int carry_in, unsigned long long int *carry_out)}
+@defbuiltin{{unsigned int} __builtin_addc (unsigned int a, unsigned int b, unsigned int carry_in, unsigned int *carry_out)}
+@defbuiltinx{{unsigned long int} __builtin_addcl (unsigned long int a, unsigned long int b, unsigned int carry_in, unsigned long int *carry_out)}
+@defbuiltinx{{unsigned long long int} __builtin_addcll (unsigned long long int a, unsigned long long int b, unsigned long long int carry_in, unsigned long long int *carry_out)}
These built-in functions are equivalent to:
@smallexample
@@ -13195,9 +13571,9 @@ emitted if one of them (preferrably the third one) has only values
@enddefbuiltin
-@defbuiltin{unsigned int __builtin_subc (unsigned int a, unsigned int b, unsigned int carry_in, unsigned int *carry_out)}
-@defbuiltinx{unsigned long int __builtin_subcl (unsigned long int a, unsigned long int b, unsigned int carry_in, unsigned long int *carry_out)}
-@defbuiltinx{unsigned long long int __builtin_subcll (unsigned long long int a, unsigned long long int b, unsigned long long int carry_in, unsigned long long int *carry_out)}
+@defbuiltin{{unsigned int} __builtin_subc (unsigned int a, unsigned int b, unsigned int carry_in, unsigned int *carry_out)}
+@defbuiltinx{{unsigned long int} __builtin_subcl (unsigned long int a, unsigned long int b, unsigned int carry_in, unsigned long int *carry_out)}
+@defbuiltinx{{unsigned long long int} __builtin_subcll (unsigned long long int a, unsigned long long int b, unsigned long long int carry_in, unsigned long long int *carry_out)}
These built-in functions are equivalent to:
@smallexample
@@ -15102,7 +15478,7 @@ where @var{prec} is bit width of @var{type}, except that side-effects
in @var{arg} are evaluated just once.
@enddefbuiltin
-@defbuiltin{unsigned int __builtin_stdc_bit_width (@var{type} @var{arg})}
+@defbuiltin{{unsigned int} __builtin_stdc_bit_width (@var{type} @var{arg})}
The @code{__builtin_stdc_bit_width} function is available only
in C. It is type-generic, the argument can be any unsigned integer
(standard, extended or bit-precise). No integral argument promotions are
@@ -15111,7 +15487,7 @@ performed on the argument. It is equivalent to
where @var{prec} is bit width of @var{type}.
@enddefbuiltin
-@defbuiltin{unsigned int __builtin_stdc_count_ones (@var{type} @var{arg})}
+@defbuiltin{{unsigned int} __builtin_stdc_count_ones (@var{type} @var{arg})}
The @code{__builtin_stdc_count_ones} function is available only
in C. It is type-generic, the argument can be any unsigned integer
(standard, extended or bit-precise). No integral argument promotions are
@@ -15119,7 +15495,7 @@ performed on the argument. It is equivalent to
@code{(unsigned int) __builtin_popcountg (@var{arg})}
@enddefbuiltin
-@defbuiltin{unsigned int __builtin_stdc_count_zeros (@var{type} @var{arg})}
+@defbuiltin{{unsigned int} __builtin_stdc_count_zeros (@var{type} @var{arg})}
The @code{__builtin_stdc_count_zeros} function is available only
in C. It is type-generic, the argument can be any unsigned integer
(standard, extended or bit-precise). No integral argument promotions are
@@ -15127,7 +15503,7 @@ performed on the argument. It is equivalent to
@code{(unsigned int) __builtin_popcountg ((@var{type}) ~@var{arg})}
@enddefbuiltin
-@defbuiltin{unsigned int __builtin_stdc_first_leading_one (@var{type} @var{arg})}
+@defbuiltin{{unsigned int} __builtin_stdc_first_leading_one (@var{type} @var{arg})}
The @code{__builtin_stdc_first_leading_one} function is available only
in C. It is type-generic, the argument can be any unsigned integer
(standard, extended or bit-precise). No integral argument promotions are
@@ -15135,7 +15511,7 @@ performed on the argument. It is equivalent to
@code{__builtin_clzg (@var{arg}, -1) + 1U}
@enddefbuiltin
-@defbuiltin{unsigned int __builtin_stdc_first_leading_zero (@var{type} @var{arg})}
+@defbuiltin{{unsigned int} __builtin_stdc_first_leading_zero (@var{type} @var{arg})}
The @code{__builtin_stdc_first_leading_zero} function is available only
in C. It is type-generic, the argument can be any unsigned integer
(standard, extended or bit-precise). No integral argument promotions are
@@ -15143,7 +15519,7 @@ performed on the argument. It is equivalent to
@code{__builtin_clzg ((@var{type}) ~@var{arg}, -1) + 1U}
@enddefbuiltin
-@defbuiltin{unsigned int __builtin_stdc_first_trailing_one (@var{type} @var{arg})}
+@defbuiltin{{unsigned int} __builtin_stdc_first_trailing_one (@var{type} @var{arg})}
The @code{__builtin_stdc_first_trailing_one} function is available only
in C. It is type-generic, the argument can be any unsigned integer
(standard, extended or bit-precise). No integral argument promotions are
@@ -15151,7 +15527,7 @@ performed on the argument. It is equivalent to
@code{__builtin_ctzg (@var{arg}, -1) + 1U}
@enddefbuiltin
-@defbuiltin{unsigned int __builtin_stdc_first_trailing_zero (@var{type} @var{arg})}
+@defbuiltin{{unsigned int} __builtin_stdc_first_trailing_zero (@var{type} @var{arg})}
The @code{__builtin_stdc_first_trailing_zero} function is available only
in C. It is type-generic, the argument can be any unsigned integer
(standard, extended or bit-precise). No integral argument promotions are
@@ -15159,7 +15535,7 @@ performed on the argument. It is equivalent to
@code{__builtin_ctzg ((@var{type}) ~@var{arg}, -1) + 1U}
@enddefbuiltin
-@defbuiltin{unsigned int __builtin_stdc_has_single_bit (@var{type} @var{arg})}
+@defbuiltin{{unsigned int} __builtin_stdc_has_single_bit (@var{type} @var{arg})}
The @code{__builtin_stdc_has_single_bit} function is available only
in C. It is type-generic, the argument can be any unsigned integer
(standard, extended or bit-precise). No integral argument promotions are
@@ -15167,7 +15543,7 @@ performed on the argument. It is equivalent to
@code{(_Bool) (__builtin_popcountg (@var{arg}) == 1)}
@enddefbuiltin
-@defbuiltin{unsigned int __builtin_stdc_leading_ones (@var{type} @var{arg})}
+@defbuiltin{{unsigned int} __builtin_stdc_leading_ones (@var{type} @var{arg})}
The @code{__builtin_stdc_leading_ones} function is available only
in C. It is type-generic, the argument can be any unsigned integer
(standard, extended or bit-precise). No integral argument promotions are
@@ -15175,7 +15551,7 @@ performed on the argument. It is equivalent to
@code{(unsigned int) __builtin_clzg ((@var{type}) ~@var{arg}, @var{prec})}
@enddefbuiltin
-@defbuiltin{unsigned int __builtin_stdc_leading_zeros (@var{type} @var{arg})}
+@defbuiltin{{unsigned int} __builtin_stdc_leading_zeros (@var{type} @var{arg})}
The @code{__builtin_stdc_leading_zeros} function is available only
in C. It is type-generic, the argument can be any unsigned integer
(standard, extended or bit-precise). No integral argument promotions are
@@ -15183,7 +15559,7 @@ performed on the argument. It is equivalent to
@code{(unsigned int) __builtin_clzg (@var{arg}, @var{prec})}
@enddefbuiltin
-@defbuiltin{unsigned int __builtin_stdc_trailing_ones (@var{type} @var{arg})}
+@defbuiltin{{unsigned int} __builtin_stdc_trailing_ones (@var{type} @var{arg})}
The @code{__builtin_stdc_trailing_ones} function is available only
in C. It is type-generic, the argument can be any unsigned integer
(standard, extended or bit-precise). No integral argument promotions are
@@ -15191,7 +15567,7 @@ performed on the argument. It is equivalent to
@code{(unsigned int) __builtin_ctzg ((@var{type}) ~@var{arg}, @var{prec})}
@enddefbuiltin
-@defbuiltin{unsigned int __builtin_stdc_trailing_zeros (@var{type} @var{arg})}
+@defbuiltin{{unsigned int} __builtin_stdc_trailing_zeros (@var{type} @var{arg})}
The @code{__builtin_stdc_trailing_zeros} function is available only
in C. It is type-generic, the argument can be any unsigned integer
(standard, extended or bit-precise). No integral argument promotions are
@@ -15268,6 +15644,8 @@ instructions, but allow the compiler to schedule those calls.
* BPF Built-in Functions::
* FR-V Built-in Functions::
* LoongArch Base Built-in Functions::
+* LoongArch SX Vector Intrinsics::
+* LoongArch ASX Vector Intrinsics::
* MIPS DSP Built-in Functions::
* MIPS Paired-Single Support::
* MIPS Loongson Built-in Functions::
@@ -17052,6 +17430,1666 @@ Returns the value that is currently set in the @samp{tp} register.
void * __builtin_thread_pointer (void)
@end smallexample
+@node LoongArch SX Vector Intrinsics
+@subsection LoongArch SX Vector Intrinsics
+
+GCC provides intrinsics to access the LSX (Loongson SIMD Extension) instructions.
+The interface is made available by including @code{<lsxintrin.h>} and using
+@option{-mlsx}.
+
+The following vector typedefs are included in @code{lsxintrin.h}:
+
+@itemize
+@item @code{__m128i}, a 128-bit vector of fixed point;
+@item @code{__m128}, a 128-bit vector of single precision floating point;
+@item @code{__m128d}, a 128-bit vector of double precision floating point.
+@end itemize
+
+Instructions and corresponding built-ins may have additional restrictions and/or
+input/output values manipulated:
+@itemize
+@item @code{imm0_1}, an integer literal in range 0 to 1;
+@item @code{imm0_3}, an integer literal in range 0 to 3;
+@item @code{imm0_7}, an integer literal in range 0 to 7;
+@item @code{imm0_15}, an integer literal in range 0 to 15;
+@item @code{imm0_31}, an integer literal in range 0 to 31;
+@item @code{imm0_63}, an integer literal in range 0 to 63;
+@item @code{imm0_127}, an integer literal in range 0 to 127;
+@item @code{imm0_255}, an integer literal in range 0 to 255;
+@item @code{imm_n16_15}, an integer literal in range -16 to 15;
+@item @code{imm_n128_127}, an integer literal in range -128 to 127;
+@item @code{imm_n256_255}, an integer literal in range -256 to 255;
+@item @code{imm_n512_511}, an integer literal in range -512 to 511;
+@item @code{imm_n1024_1023}, an integer literal in range -1024 to 1023;
+@item @code{imm_n2048_2047}, an integer literal in range -2048 to 2047.
+@end itemize
+
+For convenience, GCC defines functions @code{__lsx_vrepli_@{b/h/w/d@}} and
+@code{__lsx_b[n]z_@{v/b/h/w/d@}}, which are implemented as follows:
+
+@smallexample
+a. @code{__lsx_vrepli_@{b/h/w/d@}}: Implements the case where the highest
+   bit of the @code{vldi} instruction immediate @code{i13} is 0.
+
+ i13[12] == 1'b0
+ case i13[11:10] of :
+ 2'b00: __lsx_vrepli_b (imm_n512_511)
+ 2'b01: __lsx_vrepli_h (imm_n512_511)
+ 2'b10: __lsx_vrepli_w (imm_n512_511)
+ 2'b11: __lsx_vrepli_d (imm_n512_511)
+
+b. @code{__lsx_b[n]z_@{v/b/h/w/d@}}: Since the @code{vseteqz} class directive
+ cannot be used on its own, this function is defined.
+
+ _lsx_bz_v => vseteqz.v + bcnez
+ _lsx_bnz_v => vsetnez.v + bcnez
+ _lsx_bz_b => vsetanyeqz.b + bcnez
+ _lsx_bz_h => vsetanyeqz.h + bcnez
+ _lsx_bz_w => vsetanyeqz.w + bcnez
+ _lsx_bz_d => vsetanyeqz.d + bcnez
+ _lsx_bnz_b => vsetallnez.b + bcnez
+ _lsx_bnz_h => vsetallnez.h + bcnez
+ _lsx_bnz_w => vsetallnez.w + bcnez
+ _lsx_bnz_d => vsetallnez.d + bcnez
+@end smallexample
+
+@smallexample
+eg:
+ #include <lsxintrin.h>
+
+ extern __m128i @var{a};
+
+ void
+ test (void)
+ @{
+ if (__lsx_bz_v (@var{a}))
+ printf ("1\n");
+ else
+ printf ("2\n");
+ @}
+@end smallexample
+
+@emph{Note:} For instructions where the destination operand is also a source
+operand (only part of the bitfield of the destination register is modified),
+the first parameter of the built-in function is used as the destination operand.
+
+@smallexample
+eg:
+ #include <lsxintrin.h>
+
+ extern __m128i @var{dst};
+ extern int @var{src};
+
+ void
+ test (void)
+ @{
+ @var{dst} = __lsx_vinsgr2vr_b (@var{dst}, @var{src}, 3);
+ @}
+@end smallexample
+
+The intrinsics provided are listed below:
+@smallexample
+int __lsx_bnz_b (__m128i);
+int __lsx_bnz_d (__m128i);
+int __lsx_bnz_h (__m128i);
+int __lsx_bnz_v (__m128i);
+int __lsx_bnz_w (__m128i);
+int __lsx_bz_b (__m128i);
+int __lsx_bz_d (__m128i);
+int __lsx_bz_h (__m128i);
+int __lsx_bz_v (__m128i);
+int __lsx_bz_w (__m128i);
+__m128i __lsx_vabsd_b (__m128i, __m128i);
+__m128i __lsx_vabsd_bu (__m128i, __m128i);
+__m128i __lsx_vabsd_di (__m128i, __m128i);
+__m128i __lsx_vabsd_du (__m128i, __m128i);
+__m128i __lsx_vabsd_h (__m128i, __m128i);
+__m128i __lsx_vabsd_hu (__m128i, __m128i);
+__m128i __lsx_vabsd_w (__m128i, __m128i);
+__m128i __lsx_vabsd_wu (__m128i, __m128i);
+__m128i __lsx_vadda_b (__m128i, __m128i);
+__m128i __lsx_vadda_d (__m128i, __m128i);
+__m128i __lsx_vadda_h (__m128i, __m128i);
+__m128i __lsx_vadda_w (__m128i, __m128i);
+__m128i __lsx_vadd_b (__m128i, __m128i);
+__m128i __lsx_vadd_d (__m128i, __m128i);
+__m128i __lsx_vadd_h (__m128i, __m128i);
+__m128i __lsx_vaddi_bu (__m128i, imm0_31);
+__m128i __lsx_vaddi_du (__m128i, imm0_31);
+__m128i __lsx_vaddi_hu (__m128i, imm0_31);
+__m128i __lsx_vaddi_wu (__m128i, imm0_31);
+__m128i __lsx_vadd_q (__m128i, __m128i);
+__m128i __lsx_vadd_w (__m128i, __m128i);
+__m128i __lsx_vaddwev_d_w (__m128i, __m128i);
+__m128i __lsx_vaddwev_d_wu (__m128i, __m128i);
+__m128i __lsx_vaddwev_d_wu_w (__m128i, __m128i);
+__m128i __lsx_vaddwev_h_b (__m128i, __m128i);
+__m128i __lsx_vaddwev_h_bu (__m128i, __m128i);
+__m128i __lsx_vaddwev_h_bu_b (__m128i, __m128i);
+__m128i __lsx_vaddwev_q_d (__m128i, __m128i);
+__m128i __lsx_vaddwev_q_du (__m128i, __m128i);
+__m128i __lsx_vaddwev_q_du_d (__m128i, __m128i);
+__m128i __lsx_vaddwev_w_h (__m128i, __m128i);
+__m128i __lsx_vaddwev_w_hu (__m128i, __m128i);
+__m128i __lsx_vaddwev_w_hu_h (__m128i, __m128i);
+__m128i __lsx_vaddwod_d_w (__m128i, __m128i);
+__m128i __lsx_vaddwod_d_wu (__m128i, __m128i);
+__m128i __lsx_vaddwod_d_wu_w (__m128i, __m128i);
+__m128i __lsx_vaddwod_h_b (__m128i, __m128i);
+__m128i __lsx_vaddwod_h_bu (__m128i, __m128i);
+__m128i __lsx_vaddwod_h_bu_b (__m128i, __m128i);
+__m128i __lsx_vaddwod_q_d (__m128i, __m128i);
+__m128i __lsx_vaddwod_q_du (__m128i, __m128i);
+__m128i __lsx_vaddwod_q_du_d (__m128i, __m128i);
+__m128i __lsx_vaddwod_w_h (__m128i, __m128i);
+__m128i __lsx_vaddwod_w_hu (__m128i, __m128i);
+__m128i __lsx_vaddwod_w_hu_h (__m128i, __m128i);
+__m128i __lsx_vandi_b (__m128i, imm0_255);
+__m128i __lsx_vandn_v (__m128i, __m128i);
+__m128i __lsx_vand_v (__m128i, __m128i);
+__m128i __lsx_vavg_b (__m128i, __m128i);
+__m128i __lsx_vavg_bu (__m128i, __m128i);
+__m128i __lsx_vavg_d (__m128i, __m128i);
+__m128i __lsx_vavg_du (__m128i, __m128i);
+__m128i __lsx_vavg_h (__m128i, __m128i);
+__m128i __lsx_vavg_hu (__m128i, __m128i);
+__m128i __lsx_vavgr_b (__m128i, __m128i);
+__m128i __lsx_vavgr_bu (__m128i, __m128i);
+__m128i __lsx_vavgr_d (__m128i, __m128i);
+__m128i __lsx_vavgr_du (__m128i, __m128i);
+__m128i __lsx_vavgr_h (__m128i, __m128i);
+__m128i __lsx_vavgr_hu (__m128i, __m128i);
+__m128i __lsx_vavgr_w (__m128i, __m128i);
+__m128i __lsx_vavgr_wu (__m128i, __m128i);
+__m128i __lsx_vavg_w (__m128i, __m128i);
+__m128i __lsx_vavg_wu (__m128i, __m128i);
+__m128i __lsx_vbitclr_b (__m128i, __m128i);
+__m128i __lsx_vbitclr_d (__m128i, __m128i);
+__m128i __lsx_vbitclr_h (__m128i, __m128i);
+__m128i __lsx_vbitclri_b (__m128i, imm0_7);
+__m128i __lsx_vbitclri_d (__m128i, imm0_63);
+__m128i __lsx_vbitclri_h (__m128i, imm0_15);
+__m128i __lsx_vbitclri_w (__m128i, imm0_31);
+__m128i __lsx_vbitclr_w (__m128i, __m128i);
+__m128i __lsx_vbitrev_b (__m128i, __m128i);
+__m128i __lsx_vbitrev_d (__m128i, __m128i);
+__m128i __lsx_vbitrev_h (__m128i, __m128i);
+__m128i __lsx_vbitrevi_b (__m128i, imm0_7);
+__m128i __lsx_vbitrevi_d (__m128i, imm0_63);
+__m128i __lsx_vbitrevi_h (__m128i, imm0_15);
+__m128i __lsx_vbitrevi_w (__m128i, imm0_31);
+__m128i __lsx_vbitrev_w (__m128i, __m128i);
+__m128i __lsx_vbitseli_b (__m128i, __m128i, imm0_255);
+__m128i __lsx_vbitsel_v (__m128i, __m128i, __m128i);
+__m128i __lsx_vbitset_b (__m128i, __m128i);
+__m128i __lsx_vbitset_d (__m128i, __m128i);
+__m128i __lsx_vbitset_h (__m128i, __m128i);
+__m128i __lsx_vbitseti_b (__m128i, imm0_7);
+__m128i __lsx_vbitseti_d (__m128i, imm0_63);
+__m128i __lsx_vbitseti_h (__m128i, imm0_15);
+__m128i __lsx_vbitseti_w (__m128i, imm0_31);
+__m128i __lsx_vbitset_w (__m128i, __m128i);
+__m128i __lsx_vbsll_v (__m128i, imm0_31);
+__m128i __lsx_vbsrl_v (__m128i, imm0_31);
+__m128i __lsx_vclo_b (__m128i);
+__m128i __lsx_vclo_d (__m128i);
+__m128i __lsx_vclo_h (__m128i);
+__m128i __lsx_vclo_w (__m128i);
+__m128i __lsx_vclz_b (__m128i);
+__m128i __lsx_vclz_d (__m128i);
+__m128i __lsx_vclz_h (__m128i);
+__m128i __lsx_vclz_w (__m128i);
+__m128i __lsx_vdiv_b (__m128i, __m128i);
+__m128i __lsx_vdiv_bu (__m128i, __m128i);
+__m128i __lsx_vdiv_d (__m128i, __m128i);
+__m128i __lsx_vdiv_du (__m128i, __m128i);
+__m128i __lsx_vdiv_h (__m128i, __m128i);
+__m128i __lsx_vdiv_hu (__m128i, __m128i);
+__m128i __lsx_vdiv_w (__m128i, __m128i);
+__m128i __lsx_vdiv_wu (__m128i, __m128i);
+__m128i __lsx_vexth_du_wu (__m128i);
+__m128i __lsx_vexth_d_w (__m128i);
+__m128i __lsx_vexth_h_b (__m128i);
+__m128i __lsx_vexth_hu_bu (__m128i);
+__m128i __lsx_vexth_q_d (__m128i);
+__m128i __lsx_vexth_qu_du (__m128i);
+__m128i __lsx_vexth_w_h (__m128i);
+__m128i __lsx_vexth_wu_hu (__m128i);
+__m128i __lsx_vextl_q_d (__m128i);
+__m128i __lsx_vextl_qu_du (__m128i);
+__m128i __lsx_vextrins_b (__m128i, __m128i, imm0_255);
+__m128i __lsx_vextrins_d (__m128i, __m128i, imm0_255);
+__m128i __lsx_vextrins_h (__m128i, __m128i, imm0_255);
+__m128i __lsx_vextrins_w (__m128i, __m128i, imm0_255);
+__m128d __lsx_vfadd_d (__m128d, __m128d);
+__m128 __lsx_vfadd_s (__m128, __m128);
+__m128i __lsx_vfclass_d (__m128d);
+__m128i __lsx_vfclass_s (__m128);
+__m128i __lsx_vfcmp_caf_d (__m128d, __m128d);
+__m128i __lsx_vfcmp_caf_s (__m128, __m128);
+__m128i __lsx_vfcmp_ceq_d (__m128d, __m128d);
+__m128i __lsx_vfcmp_ceq_s (__m128, __m128);
+__m128i __lsx_vfcmp_cle_d (__m128d, __m128d);
+__m128i __lsx_vfcmp_cle_s (__m128, __m128);
+__m128i __lsx_vfcmp_clt_d (__m128d, __m128d);
+__m128i __lsx_vfcmp_clt_s (__m128, __m128);
+__m128i __lsx_vfcmp_cne_d (__m128d, __m128d);
+__m128i __lsx_vfcmp_cne_s (__m128, __m128);
+__m128i __lsx_vfcmp_cor_d (__m128d, __m128d);
+__m128i __lsx_vfcmp_cor_s (__m128, __m128);
+__m128i __lsx_vfcmp_cueq_d (__m128d, __m128d);
+__m128i __lsx_vfcmp_cueq_s (__m128, __m128);
+__m128i __lsx_vfcmp_cule_d (__m128d, __m128d);
+__m128i __lsx_vfcmp_cule_s (__m128, __m128);
+__m128i __lsx_vfcmp_cult_d (__m128d, __m128d);
+__m128i __lsx_vfcmp_cult_s (__m128, __m128);
+__m128i __lsx_vfcmp_cun_d (__m128d, __m128d);
+__m128i __lsx_vfcmp_cune_d (__m128d, __m128d);
+__m128i __lsx_vfcmp_cune_s (__m128, __m128);
+__m128i __lsx_vfcmp_cun_s (__m128, __m128);
+__m128i __lsx_vfcmp_saf_d (__m128d, __m128d);
+__m128i __lsx_vfcmp_saf_s (__m128, __m128);
+__m128i __lsx_vfcmp_seq_d (__m128d, __m128d);
+__m128i __lsx_vfcmp_seq_s (__m128, __m128);
+__m128i __lsx_vfcmp_sle_d (__m128d, __m128d);
+__m128i __lsx_vfcmp_sle_s (__m128, __m128);
+__m128i __lsx_vfcmp_slt_d (__m128d, __m128d);
+__m128i __lsx_vfcmp_slt_s (__m128, __m128);
+__m128i __lsx_vfcmp_sne_d (__m128d, __m128d);
+__m128i __lsx_vfcmp_sne_s (__m128, __m128);
+__m128i __lsx_vfcmp_sor_d (__m128d, __m128d);
+__m128i __lsx_vfcmp_sor_s (__m128, __m128);
+__m128i __lsx_vfcmp_sueq_d (__m128d, __m128d);
+__m128i __lsx_vfcmp_sueq_s (__m128, __m128);
+__m128i __lsx_vfcmp_sule_d (__m128d, __m128d);
+__m128i __lsx_vfcmp_sule_s (__m128, __m128);
+__m128i __lsx_vfcmp_sult_d (__m128d, __m128d);
+__m128i __lsx_vfcmp_sult_s (__m128, __m128);
+__m128i __lsx_vfcmp_sun_d (__m128d, __m128d);
+__m128i __lsx_vfcmp_sune_d (__m128d, __m128d);
+__m128i __lsx_vfcmp_sune_s (__m128, __m128);
+__m128i __lsx_vfcmp_sun_s (__m128, __m128);
+__m128d __lsx_vfcvth_d_s (__m128);
+__m128i __lsx_vfcvt_h_s (__m128, __m128);
+__m128 __lsx_vfcvth_s_h (__m128i);
+__m128d __lsx_vfcvtl_d_s (__m128);
+__m128 __lsx_vfcvtl_s_h (__m128i);
+__m128 __lsx_vfcvt_s_d (__m128d, __m128d);
+__m128d __lsx_vfdiv_d (__m128d, __m128d);
+__m128 __lsx_vfdiv_s (__m128, __m128);
+__m128d __lsx_vffint_d_l (__m128i);
+__m128d __lsx_vffint_d_lu (__m128i);
+__m128d __lsx_vffinth_d_w (__m128i);
+__m128d __lsx_vffintl_d_w (__m128i);
+__m128 __lsx_vffint_s_l (__m128i, __m128i);
+__m128 __lsx_vffint_s_w (__m128i);
+__m128 __lsx_vffint_s_wu (__m128i);
+__m128d __lsx_vflogb_d (__m128d);
+__m128 __lsx_vflogb_s (__m128);
+__m128d __lsx_vfmadd_d (__m128d, __m128d, __m128d);
+__m128 __lsx_vfmadd_s (__m128, __m128, __m128);
+__m128d __lsx_vfmaxa_d (__m128d, __m128d);
+__m128 __lsx_vfmaxa_s (__m128, __m128);
+__m128d __lsx_vfmax_d (__m128d, __m128d);
+__m128 __lsx_vfmax_s (__m128, __m128);
+__m128d __lsx_vfmina_d (__m128d, __m128d);
+__m128 __lsx_vfmina_s (__m128, __m128);
+__m128d __lsx_vfmin_d (__m128d, __m128d);
+__m128 __lsx_vfmin_s (__m128, __m128);
+__m128d __lsx_vfmsub_d (__m128d, __m128d, __m128d);
+__m128 __lsx_vfmsub_s (__m128, __m128, __m128);
+__m128d __lsx_vfmul_d (__m128d, __m128d);
+__m128 __lsx_vfmul_s (__m128, __m128);
+__m128d __lsx_vfnmadd_d (__m128d, __m128d, __m128d);
+__m128 __lsx_vfnmadd_s (__m128, __m128, __m128);
+__m128d __lsx_vfnmsub_d (__m128d, __m128d, __m128d);
+__m128 __lsx_vfnmsub_s (__m128, __m128, __m128);
+__m128d __lsx_vfrecip_d (__m128d);
+__m128 __lsx_vfrecip_s (__m128);
+__m128d __lsx_vfrint_d (__m128d);
+__m128i __lsx_vfrintrm_d (__m128d);
+__m128i __lsx_vfrintrm_s (__m128);
+__m128i __lsx_vfrintrne_d (__m128d);
+__m128i __lsx_vfrintrne_s (__m128);
+__m128i __lsx_vfrintrp_d (__m128d);
+__m128i __lsx_vfrintrp_s (__m128);
+__m128i __lsx_vfrintrz_d (__m128d);
+__m128i __lsx_vfrintrz_s (__m128);
+__m128 __lsx_vfrint_s (__m128);
+__m128d __lsx_vfrsqrt_d (__m128d);
+__m128 __lsx_vfrsqrt_s (__m128);
+__m128i __lsx_vfrstp_b (__m128i, __m128i, __m128i);
+__m128i __lsx_vfrstp_h (__m128i, __m128i, __m128i);
+__m128i __lsx_vfrstpi_b (__m128i, __m128i, imm0_31);
+__m128i __lsx_vfrstpi_h (__m128i, __m128i, imm0_31);
+__m128d __lsx_vfsqrt_d (__m128d);
+__m128 __lsx_vfsqrt_s (__m128);
+__m128d __lsx_vfsub_d (__m128d, __m128d);
+__m128 __lsx_vfsub_s (__m128, __m128);
+__m128i __lsx_vftinth_l_s (__m128);
+__m128i __lsx_vftint_l_d (__m128d);
+__m128i __lsx_vftintl_l_s (__m128);
+__m128i __lsx_vftint_lu_d (__m128d);
+__m128i __lsx_vftintrmh_l_s (__m128);
+__m128i __lsx_vftintrm_l_d (__m128d);
+__m128i __lsx_vftintrml_l_s (__m128);
+__m128i __lsx_vftintrm_w_d (__m128d, __m128d);
+__m128i __lsx_vftintrm_w_s (__m128);
+__m128i __lsx_vftintrneh_l_s (__m128);
+__m128i __lsx_vftintrne_l_d (__m128d);
+__m128i __lsx_vftintrnel_l_s (__m128);
+__m128i __lsx_vftintrne_w_d (__m128d, __m128d);
+__m128i __lsx_vftintrne_w_s (__m128);
+__m128i __lsx_vftintrph_l_s (__m128);
+__m128i __lsx_vftintrp_l_d (__m128d);
+__m128i __lsx_vftintrpl_l_s (__m128);
+__m128i __lsx_vftintrp_w_d (__m128d, __m128d);
+__m128i __lsx_vftintrp_w_s (__m128);
+__m128i __lsx_vftintrzh_l_s (__m128);
+__m128i __lsx_vftintrz_l_d (__m128d);
+__m128i __lsx_vftintrzl_l_s (__m128);
+__m128i __lsx_vftintrz_lu_d (__m128d);
+__m128i __lsx_vftintrz_w_d (__m128d, __m128d);
+__m128i __lsx_vftintrz_w_s (__m128);
+__m128i __lsx_vftintrz_wu_s (__m128);
+__m128i __lsx_vftint_w_d (__m128d, __m128d);
+__m128i __lsx_vftint_w_s (__m128);
+__m128i __lsx_vftint_wu_s (__m128);
+__m128i __lsx_vhaddw_du_wu (__m128i, __m128i);
+__m128i __lsx_vhaddw_d_w (__m128i, __m128i);
+__m128i __lsx_vhaddw_h_b (__m128i, __m128i);
+__m128i __lsx_vhaddw_hu_bu (__m128i, __m128i);
+__m128i __lsx_vhaddw_q_d (__m128i, __m128i);
+__m128i __lsx_vhaddw_qu_du (__m128i, __m128i);
+__m128i __lsx_vhaddw_w_h (__m128i, __m128i);
+__m128i __lsx_vhaddw_wu_hu (__m128i, __m128i);
+__m128i __lsx_vhsubw_du_wu (__m128i, __m128i);
+__m128i __lsx_vhsubw_d_w (__m128i, __m128i);
+__m128i __lsx_vhsubw_h_b (__m128i, __m128i);
+__m128i __lsx_vhsubw_hu_bu (__m128i, __m128i);
+__m128i __lsx_vhsubw_q_d (__m128i, __m128i);
+__m128i __lsx_vhsubw_qu_du (__m128i, __m128i);
+__m128i __lsx_vhsubw_w_h (__m128i, __m128i);
+__m128i __lsx_vhsubw_wu_hu (__m128i, __m128i);
+__m128i __lsx_vilvh_b (__m128i, __m128i);
+__m128i __lsx_vilvh_d (__m128i, __m128i);
+__m128i __lsx_vilvh_h (__m128i, __m128i);
+__m128i __lsx_vilvh_w (__m128i, __m128i);
+__m128i __lsx_vilvl_b (__m128i, __m128i);
+__m128i __lsx_vilvl_d (__m128i, __m128i);
+__m128i __lsx_vilvl_h (__m128i, __m128i);
+__m128i __lsx_vilvl_w (__m128i, __m128i);
+__m128i __lsx_vinsgr2vr_b (__m128i, int, imm0_15);
+__m128i __lsx_vinsgr2vr_d (__m128i, long int, imm0_1);
+__m128i __lsx_vinsgr2vr_h (__m128i, int, imm0_7);
+__m128i __lsx_vinsgr2vr_w (__m128i, int, imm0_3);
+__m128i __lsx_vld (void *, imm_n2048_2047);
+__m128i __lsx_vldi (imm_n1024_1023);
+__m128i __lsx_vldrepl_b (void *, imm_n2048_2047);
+__m128i __lsx_vldrepl_d (void *, imm_n256_255);
+__m128i __lsx_vldrepl_h (void *, imm_n1024_1023);
+__m128i __lsx_vldrepl_w (void *, imm_n512_511);
+__m128i __lsx_vldx (void *, long int);
+__m128i __lsx_vmadd_b (__m128i, __m128i, __m128i);
+__m128i __lsx_vmadd_d (__m128i, __m128i, __m128i);
+__m128i __lsx_vmadd_h (__m128i, __m128i, __m128i);
+__m128i __lsx_vmadd_w (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwev_d_w (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwev_d_wu (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwev_d_wu_w (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwev_h_b (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwev_h_bu (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwev_h_bu_b (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwev_q_d (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwev_q_du (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwev_q_du_d (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwev_w_h (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwev_w_hu (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwev_w_hu_h (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwod_d_w (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwod_d_wu (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwod_d_wu_w (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwod_h_b (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwod_h_bu (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwod_h_bu_b (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwod_q_d (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwod_q_du (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwod_q_du_d (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwod_w_h (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwod_w_hu (__m128i, __m128i, __m128i);
+__m128i __lsx_vmaddwod_w_hu_h (__m128i, __m128i, __m128i);
+__m128i __lsx_vmax_b (__m128i, __m128i);
+__m128i __lsx_vmax_bu (__m128i, __m128i);
+__m128i __lsx_vmax_d (__m128i, __m128i);
+__m128i __lsx_vmax_du (__m128i, __m128i);
+__m128i __lsx_vmax_h (__m128i, __m128i);
+__m128i __lsx_vmax_hu (__m128i, __m128i);
+__m128i __lsx_vmaxi_b (__m128i, imm_n16_15);
+__m128i __lsx_vmaxi_bu (__m128i, imm0_31);
+__m128i __lsx_vmaxi_d (__m128i, imm_n16_15);
+__m128i __lsx_vmaxi_du (__m128i, imm0_31);
+__m128i __lsx_vmaxi_h (__m128i, imm_n16_15);
+__m128i __lsx_vmaxi_hu (__m128i, imm0_31);
+__m128i __lsx_vmaxi_w (__m128i, imm_n16_15);
+__m128i __lsx_vmaxi_wu (__m128i, imm0_31);
+__m128i __lsx_vmax_w (__m128i, __m128i);
+__m128i __lsx_vmax_wu (__m128i, __m128i);
+__m128i __lsx_vmin_b (__m128i, __m128i);
+__m128i __lsx_vmin_bu (__m128i, __m128i);
+__m128i __lsx_vmin_d (__m128i, __m128i);
+__m128i __lsx_vmin_du (__m128i, __m128i);
+__m128i __lsx_vmin_h (__m128i, __m128i);
+__m128i __lsx_vmin_hu (__m128i, __m128i);
+__m128i __lsx_vmini_b (__m128i, imm_n16_15);
+__m128i __lsx_vmini_bu (__m128i, imm0_31);
+__m128i __lsx_vmini_d (__m128i, imm_n16_15);
+__m128i __lsx_vmini_du (__m128i, imm0_31);
+__m128i __lsx_vmini_h (__m128i, imm_n16_15);
+__m128i __lsx_vmini_hu (__m128i, imm0_31);
+__m128i __lsx_vmini_w (__m128i, imm_n16_15);
+__m128i __lsx_vmini_wu (__m128i, imm0_31);
+__m128i __lsx_vmin_w (__m128i, __m128i);
+__m128i __lsx_vmin_wu (__m128i, __m128i);
+__m128i __lsx_vmod_b (__m128i, __m128i);
+__m128i __lsx_vmod_bu (__m128i, __m128i);
+__m128i __lsx_vmod_d (__m128i, __m128i);
+__m128i __lsx_vmod_du (__m128i, __m128i);
+__m128i __lsx_vmod_h (__m128i, __m128i);
+__m128i __lsx_vmod_hu (__m128i, __m128i);
+__m128i __lsx_vmod_w (__m128i, __m128i);
+__m128i __lsx_vmod_wu (__m128i, __m128i);
+__m128i __lsx_vmskgez_b (__m128i);
+__m128i __lsx_vmskltz_b (__m128i);
+__m128i __lsx_vmskltz_d (__m128i);
+__m128i __lsx_vmskltz_h (__m128i);
+__m128i __lsx_vmskltz_w (__m128i);
+__m128i __lsx_vmsknz_b (__m128i);
+__m128i __lsx_vmsub_b (__m128i, __m128i, __m128i);
+__m128i __lsx_vmsub_d (__m128i, __m128i, __m128i);
+__m128i __lsx_vmsub_h (__m128i, __m128i, __m128i);
+__m128i __lsx_vmsub_w (__m128i, __m128i, __m128i);
+__m128i __lsx_vmuh_b (__m128i, __m128i);
+__m128i __lsx_vmuh_bu (__m128i, __m128i);
+__m128i __lsx_vmuh_d (__m128i, __m128i);
+__m128i __lsx_vmuh_du (__m128i, __m128i);
+__m128i __lsx_vmuh_h (__m128i, __m128i);
+__m128i __lsx_vmuh_hu (__m128i, __m128i);
+__m128i __lsx_vmuh_w (__m128i, __m128i);
+__m128i __lsx_vmuh_wu (__m128i, __m128i);
+__m128i __lsx_vmul_b (__m128i, __m128i);
+__m128i __lsx_vmul_d (__m128i, __m128i);
+__m128i __lsx_vmul_h (__m128i, __m128i);
+__m128i __lsx_vmul_w (__m128i, __m128i);
+__m128i __lsx_vmulwev_d_w (__m128i, __m128i);
+__m128i __lsx_vmulwev_d_wu (__m128i, __m128i);
+__m128i __lsx_vmulwev_d_wu_w (__m128i, __m128i);
+__m128i __lsx_vmulwev_h_b (__m128i, __m128i);
+__m128i __lsx_vmulwev_h_bu (__m128i, __m128i);
+__m128i __lsx_vmulwev_h_bu_b (__m128i, __m128i);
+__m128i __lsx_vmulwev_q_d (__m128i, __m128i);
+__m128i __lsx_vmulwev_q_du (__m128i, __m128i);
+__m128i __lsx_vmulwev_q_du_d (__m128i, __m128i);
+__m128i __lsx_vmulwev_w_h (__m128i, __m128i);
+__m128i __lsx_vmulwev_w_hu (__m128i, __m128i);
+__m128i __lsx_vmulwev_w_hu_h (__m128i, __m128i);
+__m128i __lsx_vmulwod_d_w (__m128i, __m128i);
+__m128i __lsx_vmulwod_d_wu (__m128i, __m128i);
+__m128i __lsx_vmulwod_d_wu_w (__m128i, __m128i);
+__m128i __lsx_vmulwod_h_b (__m128i, __m128i);
+__m128i __lsx_vmulwod_h_bu (__m128i, __m128i);
+__m128i __lsx_vmulwod_h_bu_b (__m128i, __m128i);
+__m128i __lsx_vmulwod_q_d (__m128i, __m128i);
+__m128i __lsx_vmulwod_q_du (__m128i, __m128i);
+__m128i __lsx_vmulwod_q_du_d (__m128i, __m128i);
+__m128i __lsx_vmulwod_w_h (__m128i, __m128i);
+__m128i __lsx_vmulwod_w_hu (__m128i, __m128i);
+__m128i __lsx_vmulwod_w_hu_h (__m128i, __m128i);
+__m128i __lsx_vneg_b (__m128i);
+__m128i __lsx_vneg_d (__m128i);
+__m128i __lsx_vneg_h (__m128i);
+__m128i __lsx_vneg_w (__m128i);
+__m128i __lsx_vnori_b (__m128i, imm0_255);
+__m128i __lsx_vnor_v (__m128i, __m128i);
+__m128i __lsx_vori_b (__m128i, imm0_255);
+__m128i __lsx_vorn_v (__m128i, __m128i);
+__m128i __lsx_vor_v (__m128i, __m128i);
+__m128i __lsx_vpackev_b (__m128i, __m128i);
+__m128i __lsx_vpackev_d (__m128i, __m128i);
+__m128i __lsx_vpackev_h (__m128i, __m128i);
+__m128i __lsx_vpackev_w (__m128i, __m128i);
+__m128i __lsx_vpackod_b (__m128i, __m128i);
+__m128i __lsx_vpackod_d (__m128i, __m128i);
+__m128i __lsx_vpackod_h (__m128i, __m128i);
+__m128i __lsx_vpackod_w (__m128i, __m128i);
+__m128i __lsx_vpcnt_b (__m128i);
+__m128i __lsx_vpcnt_d (__m128i);
+__m128i __lsx_vpcnt_h (__m128i);
+__m128i __lsx_vpcnt_w (__m128i);
+__m128i __lsx_vpermi_w (__m128i, __m128i, imm0_255);
+__m128i __lsx_vpickev_b (__m128i, __m128i);
+__m128i __lsx_vpickev_d (__m128i, __m128i);
+__m128i __lsx_vpickev_h (__m128i, __m128i);
+__m128i __lsx_vpickev_w (__m128i, __m128i);
+__m128i __lsx_vpickod_b (__m128i, __m128i);
+__m128i __lsx_vpickod_d (__m128i, __m128i);
+__m128i __lsx_vpickod_h (__m128i, __m128i);
+__m128i __lsx_vpickod_w (__m128i, __m128i);
+int __lsx_vpickve2gr_b (__m128i, imm0_15);
+unsigned int __lsx_vpickve2gr_bu (__m128i, imm0_15);
+long int __lsx_vpickve2gr_d (__m128i, imm0_1);
+unsigned long int __lsx_vpickve2gr_du (__m128i, imm0_1);
+int __lsx_vpickve2gr_h (__m128i, imm0_7);
+unsigned int __lsx_vpickve2gr_hu (__m128i, imm0_7);
+int __lsx_vpickve2gr_w (__m128i, imm0_3);
+unsigned int __lsx_vpickve2gr_wu (__m128i, imm0_3);
+__m128i __lsx_vreplgr2vr_b (int);
+__m128i __lsx_vreplgr2vr_d (long int);
+__m128i __lsx_vreplgr2vr_h (int);
+__m128i __lsx_vreplgr2vr_w (int);
+__m128i __lsx_vrepli_b (imm_n512_511);
+__m128i __lsx_vrepli_d (imm_n512_511);
+__m128i __lsx_vrepli_h (imm_n512_511);
+__m128i __lsx_vrepli_w (imm_n512_511);
+__m128i __lsx_vreplve_b (__m128i, int);
+__m128i __lsx_vreplve_d (__m128i, int);
+__m128i __lsx_vreplve_h (__m128i, int);
+__m128i __lsx_vreplvei_b (__m128i, imm0_15);
+__m128i __lsx_vreplvei_d (__m128i, imm0_1);
+__m128i __lsx_vreplvei_h (__m128i, imm0_7);
+__m128i __lsx_vreplvei_w (__m128i, imm0_3);
+__m128i __lsx_vreplve_w (__m128i, int);
+__m128i __lsx_vrotr_b (__m128i, __m128i);
+__m128i __lsx_vrotr_d (__m128i, __m128i);
+__m128i __lsx_vrotr_h (__m128i, __m128i);
+__m128i __lsx_vrotri_b (__m128i, imm0_7);
+__m128i __lsx_vrotri_d (__m128i, imm0_63);
+__m128i __lsx_vrotri_h (__m128i, imm0_15);
+__m128i __lsx_vrotri_w (__m128i, imm0_31);
+__m128i __lsx_vrotr_w (__m128i, __m128i);
+__m128i __lsx_vsadd_b (__m128i, __m128i);
+__m128i __lsx_vsadd_bu (__m128i, __m128i);
+__m128i __lsx_vsadd_d (__m128i, __m128i);
+__m128i __lsx_vsadd_du (__m128i, __m128i);
+__m128i __lsx_vsadd_h (__m128i, __m128i);
+__m128i __lsx_vsadd_hu (__m128i, __m128i);
+__m128i __lsx_vsadd_w (__m128i, __m128i);
+__m128i __lsx_vsadd_wu (__m128i, __m128i);
+__m128i __lsx_vsat_b (__m128i, imm0_7);
+__m128i __lsx_vsat_bu (__m128i, imm0_7);
+__m128i __lsx_vsat_d (__m128i, imm0_63);
+__m128i __lsx_vsat_du (__m128i, imm0_63);
+__m128i __lsx_vsat_h (__m128i, imm0_15);
+__m128i __lsx_vsat_hu (__m128i, imm0_15);
+__m128i __lsx_vsat_w (__m128i, imm0_31);
+__m128i __lsx_vsat_wu (__m128i, imm0_31);
+__m128i __lsx_vseq_b (__m128i, __m128i);
+__m128i __lsx_vseq_d (__m128i, __m128i);
+__m128i __lsx_vseq_h (__m128i, __m128i);
+__m128i __lsx_vseqi_b (__m128i, imm_n16_15);
+__m128i __lsx_vseqi_d (__m128i, imm_n16_15);
+__m128i __lsx_vseqi_h (__m128i, imm_n16_15);
+__m128i __lsx_vseqi_w (__m128i, imm_n16_15);
+__m128i __lsx_vseq_w (__m128i, __m128i);
+__m128i __lsx_vshuf4i_b (__m128i, imm0_255);
+__m128i __lsx_vshuf4i_d (__m128i, __m128i, imm0_255);
+__m128i __lsx_vshuf4i_h (__m128i, imm0_255);
+__m128i __lsx_vshuf4i_w (__m128i, imm0_255);
+__m128i __lsx_vshuf_b (__m128i, __m128i, __m128i);
+__m128i __lsx_vshuf_d (__m128i, __m128i, __m128i);
+__m128i __lsx_vshuf_h (__m128i, __m128i, __m128i);
+__m128i __lsx_vshuf_w (__m128i, __m128i, __m128i);
+__m128i __lsx_vsigncov_b (__m128i, __m128i);
+__m128i __lsx_vsigncov_d (__m128i, __m128i);
+__m128i __lsx_vsigncov_h (__m128i, __m128i);
+__m128i __lsx_vsigncov_w (__m128i, __m128i);
+__m128i __lsx_vsle_b (__m128i, __m128i);
+__m128i __lsx_vsle_bu (__m128i, __m128i);
+__m128i __lsx_vsle_d (__m128i, __m128i);
+__m128i __lsx_vsle_du (__m128i, __m128i);
+__m128i __lsx_vsle_h (__m128i, __m128i);
+__m128i __lsx_vsle_hu (__m128i, __m128i);
+__m128i __lsx_vslei_b (__m128i, imm_n16_15);
+__m128i __lsx_vslei_bu (__m128i, imm0_31);
+__m128i __lsx_vslei_d (__m128i, imm_n16_15);
+__m128i __lsx_vslei_du (__m128i, imm0_31);
+__m128i __lsx_vslei_h (__m128i, imm_n16_15);
+__m128i __lsx_vslei_hu (__m128i, imm0_31);
+__m128i __lsx_vslei_w (__m128i, imm_n16_15);
+__m128i __lsx_vslei_wu (__m128i, imm0_31);
+__m128i __lsx_vsle_w (__m128i, __m128i);
+__m128i __lsx_vsle_wu (__m128i, __m128i);
+__m128i __lsx_vsll_b (__m128i, __m128i);
+__m128i __lsx_vsll_d (__m128i, __m128i);
+__m128i __lsx_vsll_h (__m128i, __m128i);
+__m128i __lsx_vslli_b (__m128i, imm0_7);
+__m128i __lsx_vslli_d (__m128i, imm0_63);
+__m128i __lsx_vslli_h (__m128i, imm0_15);
+__m128i __lsx_vslli_w (__m128i, imm0_31);
+__m128i __lsx_vsll_w (__m128i, __m128i);
+__m128i __lsx_vsllwil_du_wu (__m128i, imm0_31);
+__m128i __lsx_vsllwil_d_w (__m128i, imm0_31);
+__m128i __lsx_vsllwil_h_b (__m128i, imm0_7);
+__m128i __lsx_vsllwil_hu_bu (__m128i, imm0_7);
+__m128i __lsx_vsllwil_w_h (__m128i, imm0_15);
+__m128i __lsx_vsllwil_wu_hu (__m128i, imm0_15);
+__m128i __lsx_vslt_b (__m128i, __m128i);
+__m128i __lsx_vslt_bu (__m128i, __m128i);
+__m128i __lsx_vslt_d (__m128i, __m128i);
+__m128i __lsx_vslt_du (__m128i, __m128i);
+__m128i __lsx_vslt_h (__m128i, __m128i);
+__m128i __lsx_vslt_hu (__m128i, __m128i);
+__m128i __lsx_vslti_b (__m128i, imm_n16_15);
+__m128i __lsx_vslti_bu (__m128i, imm0_31);
+__m128i __lsx_vslti_d (__m128i, imm_n16_15);
+__m128i __lsx_vslti_du (__m128i, imm0_31);
+__m128i __lsx_vslti_h (__m128i, imm_n16_15);
+__m128i __lsx_vslti_hu (__m128i, imm0_31);
+__m128i __lsx_vslti_w (__m128i, imm_n16_15);
+__m128i __lsx_vslti_wu (__m128i, imm0_31);
+__m128i __lsx_vslt_w (__m128i, __m128i);
+__m128i __lsx_vslt_wu (__m128i, __m128i);
+__m128i __lsx_vsra_b (__m128i, __m128i);
+__m128i __lsx_vsra_d (__m128i, __m128i);
+__m128i __lsx_vsra_h (__m128i, __m128i);
+__m128i __lsx_vsrai_b (__m128i, imm0_7);
+__m128i __lsx_vsrai_d (__m128i, imm0_63);
+__m128i __lsx_vsrai_h (__m128i, imm0_15);
+__m128i __lsx_vsrai_w (__m128i, imm0_31);
+__m128i __lsx_vsran_b_h (__m128i, __m128i);
+__m128i __lsx_vsran_h_w (__m128i, __m128i);
+__m128i __lsx_vsrani_b_h (__m128i, __m128i, imm0_15);
+__m128i __lsx_vsrani_d_q (__m128i, __m128i, imm0_127);
+__m128i __lsx_vsrani_h_w (__m128i, __m128i, imm0_31);
+__m128i __lsx_vsrani_w_d (__m128i, __m128i, imm0_63);
+__m128i __lsx_vsran_w_d (__m128i, __m128i);
+__m128i __lsx_vsrar_b (__m128i, __m128i);
+__m128i __lsx_vsrar_d (__m128i, __m128i);
+__m128i __lsx_vsrar_h (__m128i, __m128i);
+__m128i __lsx_vsrari_b (__m128i, imm0_7);
+__m128i __lsx_vsrari_d (__m128i, imm0_63);
+__m128i __lsx_vsrari_h (__m128i, imm0_15);
+__m128i __lsx_vsrari_w (__m128i, imm0_31);
+__m128i __lsx_vsrarn_b_h (__m128i, __m128i);
+__m128i __lsx_vsrarn_h_w (__m128i, __m128i);
+__m128i __lsx_vsrarni_b_h (__m128i, __m128i, imm0_15);
+__m128i __lsx_vsrarni_d_q (__m128i, __m128i, imm0_127);
+__m128i __lsx_vsrarni_h_w (__m128i, __m128i, imm0_31);
+__m128i __lsx_vsrarni_w_d (__m128i, __m128i, imm0_63);
+__m128i __lsx_vsrarn_w_d (__m128i, __m128i);
+__m128i __lsx_vsrar_w (__m128i, __m128i);
+__m128i __lsx_vsra_w (__m128i, __m128i);
+__m128i __lsx_vsrl_b (__m128i, __m128i);
+__m128i __lsx_vsrl_d (__m128i, __m128i);
+__m128i __lsx_vsrl_h (__m128i, __m128i);
+__m128i __lsx_vsrli_b (__m128i, imm0_7);
+__m128i __lsx_vsrli_d (__m128i, imm0_63);
+__m128i __lsx_vsrli_h (__m128i, imm0_15);
+__m128i __lsx_vsrli_w (__m128i, imm0_31);
+__m128i __lsx_vsrln_b_h (__m128i, __m128i);
+__m128i __lsx_vsrln_h_w (__m128i, __m128i);
+__m128i __lsx_vsrlni_b_h (__m128i, __m128i, imm0_15);
+__m128i __lsx_vsrlni_d_q (__m128i, __m128i, imm0_127);
+__m128i __lsx_vsrlni_h_w (__m128i, __m128i, imm0_31);
+__m128i __lsx_vsrlni_w_d (__m128i, __m128i, imm0_63);
+__m128i __lsx_vsrln_w_d (__m128i, __m128i);
+__m128i __lsx_vsrlr_b (__m128i, __m128i);
+__m128i __lsx_vsrlr_d (__m128i, __m128i);
+__m128i __lsx_vsrlr_h (__m128i, __m128i);
+__m128i __lsx_vsrlri_b (__m128i, imm0_7);
+__m128i __lsx_vsrlri_d (__m128i, imm0_63);
+__m128i __lsx_vsrlri_h (__m128i, imm0_15);
+__m128i __lsx_vsrlri_w (__m128i, imm0_31);
+__m128i __lsx_vsrlrn_b_h (__m128i, __m128i);
+__m128i __lsx_vsrlrn_h_w (__m128i, __m128i);
+__m128i __lsx_vsrlrni_b_h (__m128i, __m128i, imm0_15);
+__m128i __lsx_vsrlrni_d_q (__m128i, __m128i, imm0_127);
+__m128i __lsx_vsrlrni_h_w (__m128i, __m128i, imm0_31);
+__m128i __lsx_vsrlrni_w_d (__m128i, __m128i, imm0_63);
+__m128i __lsx_vsrlrn_w_d (__m128i, __m128i);
+__m128i __lsx_vsrlr_w (__m128i, __m128i);
+__m128i __lsx_vsrl_w (__m128i, __m128i);
+__m128i __lsx_vssran_b_h (__m128i, __m128i);
+__m128i __lsx_vssran_bu_h (__m128i, __m128i);
+__m128i __lsx_vssran_hu_w (__m128i, __m128i);
+__m128i __lsx_vssran_h_w (__m128i, __m128i);
+__m128i __lsx_vssrani_b_h (__m128i, __m128i, imm0_15);
+__m128i __lsx_vssrani_bu_h (__m128i, __m128i, imm0_15);
+__m128i __lsx_vssrani_d_q (__m128i, __m128i, imm0_127);
+__m128i __lsx_vssrani_du_q (__m128i, __m128i, imm0_127);
+__m128i __lsx_vssrani_hu_w (__m128i, __m128i, imm0_31);
+__m128i __lsx_vssrani_h_w (__m128i, __m128i, imm0_31);
+__m128i __lsx_vssrani_w_d (__m128i, __m128i, imm0_63);
+__m128i __lsx_vssrani_wu_d (__m128i, __m128i, imm0_63);
+__m128i __lsx_vssran_w_d (__m128i, __m128i);
+__m128i __lsx_vssran_wu_d (__m128i, __m128i);
+__m128i __lsx_vssrarn_b_h (__m128i, __m128i);
+__m128i __lsx_vssrarn_bu_h (__m128i, __m128i);
+__m128i __lsx_vssrarn_hu_w (__m128i, __m128i);
+__m128i __lsx_vssrarn_h_w (__m128i, __m128i);
+__m128i __lsx_vssrarni_b_h (__m128i, __m128i, imm0_15);
+__m128i __lsx_vssrarni_bu_h (__m128i, __m128i, imm0_15);
+__m128i __lsx_vssrarni_d_q (__m128i, __m128i, imm0_127);
+__m128i __lsx_vssrarni_du_q (__m128i, __m128i, imm0_127);
+__m128i __lsx_vssrarni_hu_w (__m128i, __m128i, imm0_31);
+__m128i __lsx_vssrarni_h_w (__m128i, __m128i, imm0_31);
+__m128i __lsx_vssrarni_w_d (__m128i, __m128i, imm0_63);
+__m128i __lsx_vssrarni_wu_d (__m128i, __m128i, imm0_63);
+__m128i __lsx_vssrarn_w_d (__m128i, __m128i);
+__m128i __lsx_vssrarn_wu_d (__m128i, __m128i);
+__m128i __lsx_vssrln_b_h (__m128i, __m128i);
+__m128i __lsx_vssrln_bu_h (__m128i, __m128i);
+__m128i __lsx_vssrln_hu_w (__m128i, __m128i);
+__m128i __lsx_vssrln_h_w (__m128i, __m128i);
+__m128i __lsx_vssrlni_b_h (__m128i, __m128i, imm0_15);
+__m128i __lsx_vssrlni_bu_h (__m128i, __m128i, imm0_15);
+__m128i __lsx_vssrlni_d_q (__m128i, __m128i, imm0_127);
+__m128i __lsx_vssrlni_du_q (__m128i, __m128i, imm0_127);
+__m128i __lsx_vssrlni_hu_w (__m128i, __m128i, imm0_31);
+__m128i __lsx_vssrlni_h_w (__m128i, __m128i, imm0_31);
+__m128i __lsx_vssrlni_w_d (__m128i, __m128i, imm0_63);
+__m128i __lsx_vssrlni_wu_d (__m128i, __m128i, imm0_63);
+__m128i __lsx_vssrln_w_d (__m128i, __m128i);
+__m128i __lsx_vssrln_wu_d (__m128i, __m128i);
+__m128i __lsx_vssrlrn_b_h (__m128i, __m128i);
+__m128i __lsx_vssrlrn_bu_h (__m128i, __m128i);
+__m128i __lsx_vssrlrn_hu_w (__m128i, __m128i);
+__m128i __lsx_vssrlrn_h_w (__m128i, __m128i);
+__m128i __lsx_vssrlrni_b_h (__m128i, __m128i, imm0_15);
+__m128i __lsx_vssrlrni_bu_h (__m128i, __m128i, imm0_15);
+__m128i __lsx_vssrlrni_d_q (__m128i, __m128i, imm0_127);
+__m128i __lsx_vssrlrni_du_q (__m128i, __m128i, imm0_127);
+__m128i __lsx_vssrlrni_hu_w (__m128i, __m128i, imm0_31);
+__m128i __lsx_vssrlrni_h_w (__m128i, __m128i, imm0_31);
+__m128i __lsx_vssrlrni_w_d (__m128i, __m128i, imm0_63);
+__m128i __lsx_vssrlrni_wu_d (__m128i, __m128i, imm0_63);
+__m128i __lsx_vssrlrn_w_d (__m128i, __m128i);
+__m128i __lsx_vssrlrn_wu_d (__m128i, __m128i);
+__m128i __lsx_vssub_b (__m128i, __m128i);
+__m128i __lsx_vssub_bu (__m128i, __m128i);
+__m128i __lsx_vssub_d (__m128i, __m128i);
+__m128i __lsx_vssub_du (__m128i, __m128i);
+__m128i __lsx_vssub_h (__m128i, __m128i);
+__m128i __lsx_vssub_hu (__m128i, __m128i);
+__m128i __lsx_vssub_w (__m128i, __m128i);
+__m128i __lsx_vssub_wu (__m128i, __m128i);
+void __lsx_vst (__m128i, void *, imm_n2048_2047);
+void __lsx_vstelm_b (__m128i, void *, imm_n128_127, idx);
+void __lsx_vstelm_d (__m128i, void *, imm_n128_127, idx);
+void __lsx_vstelm_h (__m128i, void *, imm_n128_127, idx);
+void __lsx_vstelm_w (__m128i, void *, imm_n128_127, idx);
+void __lsx_vstx (__m128i, void *, long int);
+__m128i __lsx_vsub_b (__m128i, __m128i);
+__m128i __lsx_vsub_d (__m128i, __m128i);
+__m128i __lsx_vsub_h (__m128i, __m128i);
+__m128i __lsx_vsubi_bu (__m128i, imm0_31);
+__m128i __lsx_vsubi_du (__m128i, imm0_31);
+__m128i __lsx_vsubi_hu (__m128i, imm0_31);
+__m128i __lsx_vsubi_wu (__m128i, imm0_31);
+__m128i __lsx_vsub_q (__m128i, __m128i);
+__m128i __lsx_vsub_w (__m128i, __m128i);
+__m128i __lsx_vsubwev_d_w (__m128i, __m128i);
+__m128i __lsx_vsubwev_d_wu (__m128i, __m128i);
+__m128i __lsx_vsubwev_h_b (__m128i, __m128i);
+__m128i __lsx_vsubwev_h_bu (__m128i, __m128i);
+__m128i __lsx_vsubwev_q_d (__m128i, __m128i);
+__m128i __lsx_vsubwev_q_du (__m128i, __m128i);
+__m128i __lsx_vsubwev_w_h (__m128i, __m128i);
+__m128i __lsx_vsubwev_w_hu (__m128i, __m128i);
+__m128i __lsx_vsubwod_d_w (__m128i, __m128i);
+__m128i __lsx_vsubwod_d_wu (__m128i, __m128i);
+__m128i __lsx_vsubwod_h_b (__m128i, __m128i);
+__m128i __lsx_vsubwod_h_bu (__m128i, __m128i);
+__m128i __lsx_vsubwod_q_d (__m128i, __m128i);
+__m128i __lsx_vsubwod_q_du (__m128i, __m128i);
+__m128i __lsx_vsubwod_w_h (__m128i, __m128i);
+__m128i __lsx_vsubwod_w_hu (__m128i, __m128i);
+__m128i __lsx_vxori_b (__m128i, imm0_255);
+__m128i __lsx_vxor_v (__m128i, __m128i);
+@end smallexample
+
+@node LoongArch ASX Vector Intrinsics
+@subsection LoongArch ASX Vector Intrinsics
+
+GCC provides intrinsics to access the LASX (Loongson Advanced SIMD Extension)
+instructions. The interface is made available by including @code{<lasxintrin.h>}
+and using @option{-mlasx}.
+
+The following vector typedefs are included in @code{lasxintrin.h}:
+
+@itemize
+@item @code{__m256i}, a 256-bit vector of fixed point;
+@item @code{__m256}, a 256-bit vector of single precision floating point;
+@item @code{__m256d}, a 256-bit vector of double precision floating point.
+@end itemize
+
+Instructions and corresponding built-ins may have additional restrictions and/or
+input/output values manipulated:
+
+@itemize
+@item @code{imm0_1}, an integer literal in range 0 to 1.
+@item @code{imm0_3}, an integer literal in range 0 to 3.
+@item @code{imm0_7}, an integer literal in range 0 to 7.
+@item @code{imm0_15}, an integer literal in range 0 to 15.
+@item @code{imm0_31}, an integer literal in range 0 to 31.
+@item @code{imm0_63}, an integer literal in range 0 to 63.
+@item @code{imm0_127}, an integer literal in range 0 to 127.
+@item @code{imm0_255}, an integer literal in range 0 to 255.
+@item @code{imm_n16_15}, an integer literal in range -16 to 15.
+@item @code{imm_n128_127}, an integer literal in range -128 to 127.
+@item @code{imm_n256_255}, an integer literal in range -256 to 255.
+@item @code{imm_n512_511}, an integer literal in range -512 to 511.
+@item @code{imm_n1024_1023}, an integer literal in range -1024 to 1023.
+@item @code{imm_n2048_2047}, an integer literal in range -2048 to 2047.
+@end itemize
+
+For convenience, GCC defines functions @code{__lasx_xvrepli_@{b/h/w/d@}} and
+@code{__lasx_xb[n]z_@{v/b/h/w/d@}}, which are implemented as follows:
+
+@smallexample
+a. @code{__lasx_xvrepli_@{b/h/w/d@}}: Implements the case where the highest
+   bit of the @code{xvldi} instruction immediate @code{i13} is 0.
+
+ i13[12] == 1'b0
+ case i13[11:10] of :
+ 2'b00: __lasx_xvrepli_b (imm_n512_511)
+ 2'b01: __lasx_xvrepli_h (imm_n512_511)
+ 2'b10: __lasx_xvrepli_w (imm_n512_511)
+ 2'b11: __lasx_xvrepli_d (imm_n512_511)
+
+b. @code{__lasx_xb[n]z_@{v/b/h/w/d@}}: Since the @code{xvseteqz} class of
+   instructions cannot be used on its own, these functions are defined.
+
+ __lasx_xbz_v => xvseteqz.v + bcnez
+ __lasx_xbnz_v => xvsetnez.v + bcnez
+ __lasx_xbz_b => xvsetanyeqz.b + bcnez
+ __lasx_xbz_h => xvsetanyeqz.h + bcnez
+ __lasx_xbz_w => xvsetanyeqz.w + bcnez
+ __lasx_xbz_d => xvsetanyeqz.d + bcnez
+ __lasx_xbnz_b => xvsetallnez.b + bcnez
+ __lasx_xbnz_h => xvsetallnez.h + bcnez
+ __lasx_xbnz_w => xvsetallnez.w + bcnez
+ __lasx_xbnz_d => xvsetallnez.d + bcnez
+@end smallexample
+
+@smallexample
+e.g.:
+ #include <lasxintrin.h>
+
+ extern __m256i @var{a};
+
+ void
+ test (void)
+ @{
+ if (__lasx_xbz_v (@var{a}))
+ printf ("1\n");
+ else
+ printf ("2\n");
+ @}
+@end smallexample
+
+@emph{Note:} For instructions whose destination operand is also a source
+operand (i.e.@: only part of the destination register's bitfield is modified),
+the first argument of the built-in function is used as the destination operand.
+
+@smallexample
+e.g.:
+ #include <lasxintrin.h>
+ extern __m256i @var{dst};
+ int @var{src};
+
+ void
+ test (void)
+ @{
+ @var{dst} = __lasx_xvinsgr2vr_w (@var{dst}, @var{src}, 3);
+ @}
+@end smallexample
+
+
+The intrinsics provided are listed below:
+
+@smallexample
+__m256i __lasx_vext2xv_d_b (__m256i);
+__m256i __lasx_vext2xv_d_h (__m256i);
+__m256i __lasx_vext2xv_du_bu (__m256i);
+__m256i __lasx_vext2xv_du_hu (__m256i);
+__m256i __lasx_vext2xv_du_wu (__m256i);
+__m256i __lasx_vext2xv_d_w (__m256i);
+__m256i __lasx_vext2xv_h_b (__m256i);
+__m256i __lasx_vext2xv_hu_bu (__m256i);
+__m256i __lasx_vext2xv_w_b (__m256i);
+__m256i __lasx_vext2xv_w_h (__m256i);
+__m256i __lasx_vext2xv_wu_bu (__m256i);
+__m256i __lasx_vext2xv_wu_hu (__m256i);
+int __lasx_xbnz_b (__m256i);
+int __lasx_xbnz_d (__m256i);
+int __lasx_xbnz_h (__m256i);
+int __lasx_xbnz_v (__m256i);
+int __lasx_xbnz_w (__m256i);
+int __lasx_xbz_b (__m256i);
+int __lasx_xbz_d (__m256i);
+int __lasx_xbz_h (__m256i);
+int __lasx_xbz_v (__m256i);
+int __lasx_xbz_w (__m256i);
+__m256i __lasx_xvabsd_b (__m256i, __m256i);
+__m256i __lasx_xvabsd_bu (__m256i, __m256i);
+__m256i __lasx_xvabsd_d (__m256i, __m256i);
+__m256i __lasx_xvabsd_du (__m256i, __m256i);
+__m256i __lasx_xvabsd_h (__m256i, __m256i);
+__m256i __lasx_xvabsd_hu (__m256i, __m256i);
+__m256i __lasx_xvabsd_w (__m256i, __m256i);
+__m256i __lasx_xvabsd_wu (__m256i, __m256i);
+__m256i __lasx_xvadda_b (__m256i, __m256i);
+__m256i __lasx_xvadda_d (__m256i, __m256i);
+__m256i __lasx_xvadda_h (__m256i, __m256i);
+__m256i __lasx_xvadda_w (__m256i, __m256i);
+__m256i __lasx_xvadd_b (__m256i, __m256i);
+__m256i __lasx_xvadd_d (__m256i, __m256i);
+__m256i __lasx_xvadd_h (__m256i, __m256i);
+__m256i __lasx_xvaddi_bu (__m256i, imm0_31);
+__m256i __lasx_xvaddi_du (__m256i, imm0_31);
+__m256i __lasx_xvaddi_hu (__m256i, imm0_31);
+__m256i __lasx_xvaddi_wu (__m256i, imm0_31);
+__m256i __lasx_xvadd_q (__m256i, __m256i);
+__m256i __lasx_xvadd_w (__m256i, __m256i);
+__m256i __lasx_xvaddwev_d_w (__m256i, __m256i);
+__m256i __lasx_xvaddwev_d_wu (__m256i, __m256i);
+__m256i __lasx_xvaddwev_d_wu_w (__m256i, __m256i);
+__m256i __lasx_xvaddwev_h_b (__m256i, __m256i);
+__m256i __lasx_xvaddwev_h_bu (__m256i, __m256i);
+__m256i __lasx_xvaddwev_h_bu_b (__m256i, __m256i);
+__m256i __lasx_xvaddwev_q_d (__m256i, __m256i);
+__m256i __lasx_xvaddwev_q_du (__m256i, __m256i);
+__m256i __lasx_xvaddwev_q_du_d (__m256i, __m256i);
+__m256i __lasx_xvaddwev_w_h (__m256i, __m256i);
+__m256i __lasx_xvaddwev_w_hu (__m256i, __m256i);
+__m256i __lasx_xvaddwev_w_hu_h (__m256i, __m256i);
+__m256i __lasx_xvaddwod_d_w (__m256i, __m256i);
+__m256i __lasx_xvaddwod_d_wu (__m256i, __m256i);
+__m256i __lasx_xvaddwod_d_wu_w (__m256i, __m256i);
+__m256i __lasx_xvaddwod_h_b (__m256i, __m256i);
+__m256i __lasx_xvaddwod_h_bu (__m256i, __m256i);
+__m256i __lasx_xvaddwod_h_bu_b (__m256i, __m256i);
+__m256i __lasx_xvaddwod_q_d (__m256i, __m256i);
+__m256i __lasx_xvaddwod_q_du (__m256i, __m256i);
+__m256i __lasx_xvaddwod_q_du_d (__m256i, __m256i);
+__m256i __lasx_xvaddwod_w_h (__m256i, __m256i);
+__m256i __lasx_xvaddwod_w_hu (__m256i, __m256i);
+__m256i __lasx_xvaddwod_w_hu_h (__m256i, __m256i);
+__m256i __lasx_xvandi_b (__m256i, imm0_255);
+__m256i __lasx_xvandn_v (__m256i, __m256i);
+__m256i __lasx_xvand_v (__m256i, __m256i);
+__m256i __lasx_xvavg_b (__m256i, __m256i);
+__m256i __lasx_xvavg_bu (__m256i, __m256i);
+__m256i __lasx_xvavg_d (__m256i, __m256i);
+__m256i __lasx_xvavg_du (__m256i, __m256i);
+__m256i __lasx_xvavg_h (__m256i, __m256i);
+__m256i __lasx_xvavg_hu (__m256i, __m256i);
+__m256i __lasx_xvavgr_b (__m256i, __m256i);
+__m256i __lasx_xvavgr_bu (__m256i, __m256i);
+__m256i __lasx_xvavgr_d (__m256i, __m256i);
+__m256i __lasx_xvavgr_du (__m256i, __m256i);
+__m256i __lasx_xvavgr_h (__m256i, __m256i);
+__m256i __lasx_xvavgr_hu (__m256i, __m256i);
+__m256i __lasx_xvavgr_w (__m256i, __m256i);
+__m256i __lasx_xvavgr_wu (__m256i, __m256i);
+__m256i __lasx_xvavg_w (__m256i, __m256i);
+__m256i __lasx_xvavg_wu (__m256i, __m256i);
+__m256i __lasx_xvbitclr_b (__m256i, __m256i);
+__m256i __lasx_xvbitclr_d (__m256i, __m256i);
+__m256i __lasx_xvbitclr_h (__m256i, __m256i);
+__m256i __lasx_xvbitclri_b (__m256i, imm0_7);
+__m256i __lasx_xvbitclri_d (__m256i, imm0_63);
+__m256i __lasx_xvbitclri_h (__m256i, imm0_15);
+__m256i __lasx_xvbitclri_w (__m256i, imm0_31);
+__m256i __lasx_xvbitclr_w (__m256i, __m256i);
+__m256i __lasx_xvbitrev_b (__m256i, __m256i);
+__m256i __lasx_xvbitrev_d (__m256i, __m256i);
+__m256i __lasx_xvbitrev_h (__m256i, __m256i);
+__m256i __lasx_xvbitrevi_b (__m256i, imm0_7);
+__m256i __lasx_xvbitrevi_d (__m256i, imm0_63);
+__m256i __lasx_xvbitrevi_h (__m256i, imm0_15);
+__m256i __lasx_xvbitrevi_w (__m256i, imm0_31);
+__m256i __lasx_xvbitrev_w (__m256i, __m256i);
+__m256i __lasx_xvbitseli_b (__m256i, __m256i, imm0_255);
+__m256i __lasx_xvbitsel_v (__m256i, __m256i, __m256i);
+__m256i __lasx_xvbitset_b (__m256i, __m256i);
+__m256i __lasx_xvbitset_d (__m256i, __m256i);
+__m256i __lasx_xvbitset_h (__m256i, __m256i);
+__m256i __lasx_xvbitseti_b (__m256i, imm0_7);
+__m256i __lasx_xvbitseti_d (__m256i, imm0_63);
+__m256i __lasx_xvbitseti_h (__m256i, imm0_15);
+__m256i __lasx_xvbitseti_w (__m256i, imm0_31);
+__m256i __lasx_xvbitset_w (__m256i, __m256i);
+__m256i __lasx_xvbsll_v (__m256i, imm0_31);
+__m256i __lasx_xvbsrl_v (__m256i, imm0_31);
+__m256i __lasx_xvclo_b (__m256i);
+__m256i __lasx_xvclo_d (__m256i);
+__m256i __lasx_xvclo_h (__m256i);
+__m256i __lasx_xvclo_w (__m256i);
+__m256i __lasx_xvclz_b (__m256i);
+__m256i __lasx_xvclz_d (__m256i);
+__m256i __lasx_xvclz_h (__m256i);
+__m256i __lasx_xvclz_w (__m256i);
+__m256i __lasx_xvdiv_b (__m256i, __m256i);
+__m256i __lasx_xvdiv_bu (__m256i, __m256i);
+__m256i __lasx_xvdiv_d (__m256i, __m256i);
+__m256i __lasx_xvdiv_du (__m256i, __m256i);
+__m256i __lasx_xvdiv_h (__m256i, __m256i);
+__m256i __lasx_xvdiv_hu (__m256i, __m256i);
+__m256i __lasx_xvdiv_w (__m256i, __m256i);
+__m256i __lasx_xvdiv_wu (__m256i, __m256i);
+__m256i __lasx_xvexth_du_wu (__m256i);
+__m256i __lasx_xvexth_d_w (__m256i);
+__m256i __lasx_xvexth_h_b (__m256i);
+__m256i __lasx_xvexth_hu_bu (__m256i);
+__m256i __lasx_xvexth_q_d (__m256i);
+__m256i __lasx_xvexth_qu_du (__m256i);
+__m256i __lasx_xvexth_w_h (__m256i);
+__m256i __lasx_xvexth_wu_hu (__m256i);
+__m256i __lasx_xvextl_q_d (__m256i);
+__m256i __lasx_xvextl_qu_du (__m256i);
+__m256i __lasx_xvextrins_b (__m256i, __m256i, imm0_255);
+__m256i __lasx_xvextrins_d (__m256i, __m256i, imm0_255);
+__m256i __lasx_xvextrins_h (__m256i, __m256i, imm0_255);
+__m256i __lasx_xvextrins_w (__m256i, __m256i, imm0_255);
+__m256d __lasx_xvfadd_d (__m256d, __m256d);
+__m256 __lasx_xvfadd_s (__m256, __m256);
+__m256i __lasx_xvfclass_d (__m256d);
+__m256i __lasx_xvfclass_s (__m256);
+__m256i __lasx_xvfcmp_caf_d (__m256d, __m256d);
+__m256i __lasx_xvfcmp_caf_s (__m256, __m256);
+__m256i __lasx_xvfcmp_ceq_d (__m256d, __m256d);
+__m256i __lasx_xvfcmp_ceq_s (__m256, __m256);
+__m256i __lasx_xvfcmp_cle_d (__m256d, __m256d);
+__m256i __lasx_xvfcmp_cle_s (__m256, __m256);
+__m256i __lasx_xvfcmp_clt_d (__m256d, __m256d);
+__m256i __lasx_xvfcmp_clt_s (__m256, __m256);
+__m256i __lasx_xvfcmp_cne_d (__m256d, __m256d);
+__m256i __lasx_xvfcmp_cne_s (__m256, __m256);
+__m256i __lasx_xvfcmp_cor_d (__m256d, __m256d);
+__m256i __lasx_xvfcmp_cor_s (__m256, __m256);
+__m256i __lasx_xvfcmp_cueq_d (__m256d, __m256d);
+__m256i __lasx_xvfcmp_cueq_s (__m256, __m256);
+__m256i __lasx_xvfcmp_cule_d (__m256d, __m256d);
+__m256i __lasx_xvfcmp_cule_s (__m256, __m256);
+__m256i __lasx_xvfcmp_cult_d (__m256d, __m256d);
+__m256i __lasx_xvfcmp_cult_s (__m256, __m256);
+__m256i __lasx_xvfcmp_cun_d (__m256d, __m256d);
+__m256i __lasx_xvfcmp_cune_d (__m256d, __m256d);
+__m256i __lasx_xvfcmp_cune_s (__m256, __m256);
+__m256i __lasx_xvfcmp_cun_s (__m256, __m256);
+__m256i __lasx_xvfcmp_saf_d (__m256d, __m256d);
+__m256i __lasx_xvfcmp_saf_s (__m256, __m256);
+__m256i __lasx_xvfcmp_seq_d (__m256d, __m256d);
+__m256i __lasx_xvfcmp_seq_s (__m256, __m256);
+__m256i __lasx_xvfcmp_sle_d (__m256d, __m256d);
+__m256i __lasx_xvfcmp_sle_s (__m256, __m256);
+__m256i __lasx_xvfcmp_slt_d (__m256d, __m256d);
+__m256i __lasx_xvfcmp_slt_s (__m256, __m256);
+__m256i __lasx_xvfcmp_sne_d (__m256d, __m256d);
+__m256i __lasx_xvfcmp_sne_s (__m256, __m256);
+__m256i __lasx_xvfcmp_sor_d (__m256d, __m256d);
+__m256i __lasx_xvfcmp_sor_s (__m256, __m256);
+__m256i __lasx_xvfcmp_sueq_d (__m256d, __m256d);
+__m256i __lasx_xvfcmp_sueq_s (__m256, __m256);
+__m256i __lasx_xvfcmp_sule_d (__m256d, __m256d);
+__m256i __lasx_xvfcmp_sule_s (__m256, __m256);
+__m256i __lasx_xvfcmp_sult_d (__m256d, __m256d);
+__m256i __lasx_xvfcmp_sult_s (__m256, __m256);
+__m256i __lasx_xvfcmp_sun_d (__m256d, __m256d);
+__m256i __lasx_xvfcmp_sune_d (__m256d, __m256d);
+__m256i __lasx_xvfcmp_sune_s (__m256, __m256);
+__m256i __lasx_xvfcmp_sun_s (__m256, __m256);
+__m256d __lasx_xvfcvth_d_s (__m256);
+__m256i __lasx_xvfcvt_h_s (__m256, __m256);
+__m256 __lasx_xvfcvth_s_h (__m256i);
+__m256d __lasx_xvfcvtl_d_s (__m256);
+__m256 __lasx_xvfcvtl_s_h (__m256i);
+__m256 __lasx_xvfcvt_s_d (__m256d, __m256d);
+__m256d __lasx_xvfdiv_d (__m256d, __m256d);
+__m256 __lasx_xvfdiv_s (__m256, __m256);
+__m256d __lasx_xvffint_d_l (__m256i);
+__m256d __lasx_xvffint_d_lu (__m256i);
+__m256d __lasx_xvffinth_d_w (__m256i);
+__m256d __lasx_xvffintl_d_w (__m256i);
+__m256 __lasx_xvffint_s_l (__m256i, __m256i);
+__m256 __lasx_xvffint_s_w (__m256i);
+__m256 __lasx_xvffint_s_wu (__m256i);
+__m256d __lasx_xvflogb_d (__m256d);
+__m256 __lasx_xvflogb_s (__m256);
+__m256d __lasx_xvfmadd_d (__m256d, __m256d, __m256d);
+__m256 __lasx_xvfmadd_s (__m256, __m256, __m256);
+__m256d __lasx_xvfmaxa_d (__m256d, __m256d);
+__m256 __lasx_xvfmaxa_s (__m256, __m256);
+__m256d __lasx_xvfmax_d (__m256d, __m256d);
+__m256 __lasx_xvfmax_s (__m256, __m256);
+__m256d __lasx_xvfmina_d (__m256d, __m256d);
+__m256 __lasx_xvfmina_s (__m256, __m256);
+__m256d __lasx_xvfmin_d (__m256d, __m256d);
+__m256 __lasx_xvfmin_s (__m256, __m256);
+__m256d __lasx_xvfmsub_d (__m256d, __m256d, __m256d);
+__m256 __lasx_xvfmsub_s (__m256, __m256, __m256);
+__m256d __lasx_xvfmul_d (__m256d, __m256d);
+__m256 __lasx_xvfmul_s (__m256, __m256);
+__m256d __lasx_xvfnmadd_d (__m256d, __m256d, __m256d);
+__m256 __lasx_xvfnmadd_s (__m256, __m256, __m256);
+__m256d __lasx_xvfnmsub_d (__m256d, __m256d, __m256d);
+__m256 __lasx_xvfnmsub_s (__m256, __m256, __m256);
+__m256d __lasx_xvfrecip_d (__m256d);
+__m256 __lasx_xvfrecip_s (__m256);
+__m256d __lasx_xvfrint_d (__m256d);
+__m256i __lasx_xvfrintrm_d (__m256d);
+__m256i __lasx_xvfrintrm_s (__m256);
+__m256i __lasx_xvfrintrne_d (__m256d);
+__m256i __lasx_xvfrintrne_s (__m256);
+__m256i __lasx_xvfrintrp_d (__m256d);
+__m256i __lasx_xvfrintrp_s (__m256);
+__m256i __lasx_xvfrintrz_d (__m256d);
+__m256i __lasx_xvfrintrz_s (__m256);
+__m256 __lasx_xvfrint_s (__m256);
+__m256d __lasx_xvfrsqrt_d (__m256d);
+__m256 __lasx_xvfrsqrt_s (__m256);
+__m256i __lasx_xvfrstp_b (__m256i, __m256i, __m256i);
+__m256i __lasx_xvfrstp_h (__m256i, __m256i, __m256i);
+__m256i __lasx_xvfrstpi_b (__m256i, __m256i, imm0_31);
+__m256i __lasx_xvfrstpi_h (__m256i, __m256i, imm0_31);
+__m256d __lasx_xvfsqrt_d (__m256d);
+__m256 __lasx_xvfsqrt_s (__m256);
+__m256d __lasx_xvfsub_d (__m256d, __m256d);
+__m256 __lasx_xvfsub_s (__m256, __m256);
+__m256i __lasx_xvftinth_l_s (__m256);
+__m256i __lasx_xvftint_l_d (__m256d);
+__m256i __lasx_xvftintl_l_s (__m256);
+__m256i __lasx_xvftint_lu_d (__m256d);
+__m256i __lasx_xvftintrmh_l_s (__m256);
+__m256i __lasx_xvftintrm_l_d (__m256d);
+__m256i __lasx_xvftintrml_l_s (__m256);
+__m256i __lasx_xvftintrm_w_d (__m256d, __m256d);
+__m256i __lasx_xvftintrm_w_s (__m256);
+__m256i __lasx_xvftintrneh_l_s (__m256);
+__m256i __lasx_xvftintrne_l_d (__m256d);
+__m256i __lasx_xvftintrnel_l_s (__m256);
+__m256i __lasx_xvftintrne_w_d (__m256d, __m256d);
+__m256i __lasx_xvftintrne_w_s (__m256);
+__m256i __lasx_xvftintrph_l_s (__m256);
+__m256i __lasx_xvftintrp_l_d (__m256d);
+__m256i __lasx_xvftintrpl_l_s (__m256);
+__m256i __lasx_xvftintrp_w_d (__m256d, __m256d);
+__m256i __lasx_xvftintrp_w_s (__m256);
+__m256i __lasx_xvftintrzh_l_s (__m256);
+__m256i __lasx_xvftintrz_l_d (__m256d);
+__m256i __lasx_xvftintrzl_l_s (__m256);
+__m256i __lasx_xvftintrz_lu_d (__m256d);
+__m256i __lasx_xvftintrz_w_d (__m256d, __m256d);
+__m256i __lasx_xvftintrz_w_s (__m256);
+__m256i __lasx_xvftintrz_wu_s (__m256);
+__m256i __lasx_xvftint_w_d (__m256d, __m256d);
+__m256i __lasx_xvftint_w_s (__m256);
+__m256i __lasx_xvftint_wu_s (__m256);
+__m256i __lasx_xvhaddw_du_wu (__m256i, __m256i);
+__m256i __lasx_xvhaddw_d_w (__m256i, __m256i);
+__m256i __lasx_xvhaddw_h_b (__m256i, __m256i);
+__m256i __lasx_xvhaddw_hu_bu (__m256i, __m256i);
+__m256i __lasx_xvhaddw_q_d (__m256i, __m256i);
+__m256i __lasx_xvhaddw_qu_du (__m256i, __m256i);
+__m256i __lasx_xvhaddw_w_h (__m256i, __m256i);
+__m256i __lasx_xvhaddw_wu_hu (__m256i, __m256i);
+__m256i __lasx_xvhsubw_du_wu (__m256i, __m256i);
+__m256i __lasx_xvhsubw_d_w (__m256i, __m256i);
+__m256i __lasx_xvhsubw_h_b (__m256i, __m256i);
+__m256i __lasx_xvhsubw_hu_bu (__m256i, __m256i);
+__m256i __lasx_xvhsubw_q_d (__m256i, __m256i);
+__m256i __lasx_xvhsubw_qu_du (__m256i, __m256i);
+__m256i __lasx_xvhsubw_w_h (__m256i, __m256i);
+__m256i __lasx_xvhsubw_wu_hu (__m256i, __m256i);
+__m256i __lasx_xvilvh_b (__m256i, __m256i);
+__m256i __lasx_xvilvh_d (__m256i, __m256i);
+__m256i __lasx_xvilvh_h (__m256i, __m256i);
+__m256i __lasx_xvilvh_w (__m256i, __m256i);
+__m256i __lasx_xvilvl_b (__m256i, __m256i);
+__m256i __lasx_xvilvl_d (__m256i, __m256i);
+__m256i __lasx_xvilvl_h (__m256i, __m256i);
+__m256i __lasx_xvilvl_w (__m256i, __m256i);
+__m256i __lasx_xvinsgr2vr_d (__m256i, long int, imm0_3);
+__m256i __lasx_xvinsgr2vr_w (__m256i, int, imm0_7);
+__m256i __lasx_xvinsve0_d (__m256i, __m256i, imm0_3);
+__m256i __lasx_xvinsve0_w (__m256i, __m256i, imm0_7);
+__m256i __lasx_xvld (void *, imm_n2048_2047);
+__m256i __lasx_xvldi (imm_n1024_1023);
+__m256i __lasx_xvldrepl_b (void *, imm_n2048_2047);
+__m256i __lasx_xvldrepl_d (void *, imm_n256_255);
+__m256i __lasx_xvldrepl_h (void *, imm_n1024_1023);
+__m256i __lasx_xvldrepl_w (void *, imm_n512_511);
+__m256i __lasx_xvldx (void *, long int);
+__m256i __lasx_xvmadd_b (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmadd_d (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmadd_h (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmadd_w (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwev_d_w (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwev_d_wu (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwev_d_wu_w (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwev_h_b (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwev_h_bu (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwev_h_bu_b (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwev_q_d (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwev_q_du (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwev_q_du_d (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwev_w_h (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwev_w_hu (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwev_w_hu_h (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwod_d_w (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwod_d_wu (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwod_d_wu_w (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwod_h_b (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwod_h_bu (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwod_h_bu_b (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwod_q_d (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwod_q_du (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwod_q_du_d (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwod_w_h (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwod_w_hu (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmaddwod_w_hu_h (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmax_b (__m256i, __m256i);
+__m256i __lasx_xvmax_bu (__m256i, __m256i);
+__m256i __lasx_xvmax_d (__m256i, __m256i);
+__m256i __lasx_xvmax_du (__m256i, __m256i);
+__m256i __lasx_xvmax_h (__m256i, __m256i);
+__m256i __lasx_xvmax_hu (__m256i, __m256i);
+__m256i __lasx_xvmaxi_b (__m256i, imm_n16_15);
+__m256i __lasx_xvmaxi_bu (__m256i, imm0_31);
+__m256i __lasx_xvmaxi_d (__m256i, imm_n16_15);
+__m256i __lasx_xvmaxi_du (__m256i, imm0_31);
+__m256i __lasx_xvmaxi_h (__m256i, imm_n16_15);
+__m256i __lasx_xvmaxi_hu (__m256i, imm0_31);
+__m256i __lasx_xvmaxi_w (__m256i, imm_n16_15);
+__m256i __lasx_xvmaxi_wu (__m256i, imm0_31);
+__m256i __lasx_xvmax_w (__m256i, __m256i);
+__m256i __lasx_xvmax_wu (__m256i, __m256i);
+__m256i __lasx_xvmin_b (__m256i, __m256i);
+__m256i __lasx_xvmin_bu (__m256i, __m256i);
+__m256i __lasx_xvmin_d (__m256i, __m256i);
+__m256i __lasx_xvmin_du (__m256i, __m256i);
+__m256i __lasx_xvmin_h (__m256i, __m256i);
+__m256i __lasx_xvmin_hu (__m256i, __m256i);
+__m256i __lasx_xvmini_b (__m256i, imm_n16_15);
+__m256i __lasx_xvmini_bu (__m256i, imm0_31);
+__m256i __lasx_xvmini_d (__m256i, imm_n16_15);
+__m256i __lasx_xvmini_du (__m256i, imm0_31);
+__m256i __lasx_xvmini_h (__m256i, imm_n16_15);
+__m256i __lasx_xvmini_hu (__m256i, imm0_31);
+__m256i __lasx_xvmini_w (__m256i, imm_n16_15);
+__m256i __lasx_xvmini_wu (__m256i, imm0_31);
+__m256i __lasx_xvmin_w (__m256i, __m256i);
+__m256i __lasx_xvmin_wu (__m256i, __m256i);
+__m256i __lasx_xvmod_b (__m256i, __m256i);
+__m256i __lasx_xvmod_bu (__m256i, __m256i);
+__m256i __lasx_xvmod_d (__m256i, __m256i);
+__m256i __lasx_xvmod_du (__m256i, __m256i);
+__m256i __lasx_xvmod_h (__m256i, __m256i);
+__m256i __lasx_xvmod_hu (__m256i, __m256i);
+__m256i __lasx_xvmod_w (__m256i, __m256i);
+__m256i __lasx_xvmod_wu (__m256i, __m256i);
+__m256i __lasx_xvmskgez_b (__m256i);
+__m256i __lasx_xvmskltz_b (__m256i);
+__m256i __lasx_xvmskltz_d (__m256i);
+__m256i __lasx_xvmskltz_h (__m256i);
+__m256i __lasx_xvmskltz_w (__m256i);
+__m256i __lasx_xvmsknz_b (__m256i);
+__m256i __lasx_xvmsub_b (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmsub_d (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmsub_h (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmsub_w (__m256i, __m256i, __m256i);
+__m256i __lasx_xvmuh_b (__m256i, __m256i);
+__m256i __lasx_xvmuh_bu (__m256i, __m256i);
+__m256i __lasx_xvmuh_d (__m256i, __m256i);
+__m256i __lasx_xvmuh_du (__m256i, __m256i);
+__m256i __lasx_xvmuh_h (__m256i, __m256i);
+__m256i __lasx_xvmuh_hu (__m256i, __m256i);
+__m256i __lasx_xvmuh_w (__m256i, __m256i);
+__m256i __lasx_xvmuh_wu (__m256i, __m256i);
+__m256i __lasx_xvmul_b (__m256i, __m256i);
+__m256i __lasx_xvmul_d (__m256i, __m256i);
+__m256i __lasx_xvmul_h (__m256i, __m256i);
+__m256i __lasx_xvmul_w (__m256i, __m256i);
+__m256i __lasx_xvmulwev_d_w (__m256i, __m256i);
+__m256i __lasx_xvmulwev_d_wu (__m256i, __m256i);
+__m256i __lasx_xvmulwev_d_wu_w (__m256i, __m256i);
+__m256i __lasx_xvmulwev_h_b (__m256i, __m256i);
+__m256i __lasx_xvmulwev_h_bu (__m256i, __m256i);
+__m256i __lasx_xvmulwev_h_bu_b (__m256i, __m256i);
+__m256i __lasx_xvmulwev_q_d (__m256i, __m256i);
+__m256i __lasx_xvmulwev_q_du (__m256i, __m256i);
+__m256i __lasx_xvmulwev_q_du_d (__m256i, __m256i);
+__m256i __lasx_xvmulwev_w_h (__m256i, __m256i);
+__m256i __lasx_xvmulwev_w_hu (__m256i, __m256i);
+__m256i __lasx_xvmulwev_w_hu_h (__m256i, __m256i);
+__m256i __lasx_xvmulwod_d_w (__m256i, __m256i);
+__m256i __lasx_xvmulwod_d_wu (__m256i, __m256i);
+__m256i __lasx_xvmulwod_d_wu_w (__m256i, __m256i);
+__m256i __lasx_xvmulwod_h_b (__m256i, __m256i);
+__m256i __lasx_xvmulwod_h_bu (__m256i, __m256i);
+__m256i __lasx_xvmulwod_h_bu_b (__m256i, __m256i);
+__m256i __lasx_xvmulwod_q_d (__m256i, __m256i);
+__m256i __lasx_xvmulwod_q_du (__m256i, __m256i);
+__m256i __lasx_xvmulwod_q_du_d (__m256i, __m256i);
+__m256i __lasx_xvmulwod_w_h (__m256i, __m256i);
+__m256i __lasx_xvmulwod_w_hu (__m256i, __m256i);
+__m256i __lasx_xvmulwod_w_hu_h (__m256i, __m256i);
+__m256i __lasx_xvneg_b (__m256i);
+__m256i __lasx_xvneg_d (__m256i);
+__m256i __lasx_xvneg_h (__m256i);
+__m256i __lasx_xvneg_w (__m256i);
+__m256i __lasx_xvnori_b (__m256i, imm0_255);
+__m256i __lasx_xvnor_v (__m256i, __m256i);
+__m256i __lasx_xvori_b (__m256i, imm0_255);
+__m256i __lasx_xvorn_v (__m256i, __m256i);
+__m256i __lasx_xvor_v (__m256i, __m256i);
+__m256i __lasx_xvpackev_b (__m256i, __m256i);
+__m256i __lasx_xvpackev_d (__m256i, __m256i);
+__m256i __lasx_xvpackev_h (__m256i, __m256i);
+__m256i __lasx_xvpackev_w (__m256i, __m256i);
+__m256i __lasx_xvpackod_b (__m256i, __m256i);
+__m256i __lasx_xvpackod_d (__m256i, __m256i);
+__m256i __lasx_xvpackod_h (__m256i, __m256i);
+__m256i __lasx_xvpackod_w (__m256i, __m256i);
+__m256i __lasx_xvpcnt_b (__m256i);
+__m256i __lasx_xvpcnt_d (__m256i);
+__m256i __lasx_xvpcnt_h (__m256i);
+__m256i __lasx_xvpcnt_w (__m256i);
+__m256i __lasx_xvpermi_d (__m256i, imm0_255);
+__m256i __lasx_xvpermi_q (__m256i, __m256i, imm0_255);
+__m256i __lasx_xvpermi_w (__m256i, __m256i, imm0_255);
+__m256i __lasx_xvperm_w (__m256i, __m256i);
+__m256i __lasx_xvpickev_b (__m256i, __m256i);
+__m256i __lasx_xvpickev_d (__m256i, __m256i);
+__m256i __lasx_xvpickev_h (__m256i, __m256i);
+__m256i __lasx_xvpickev_w (__m256i, __m256i);
+__m256i __lasx_xvpickod_b (__m256i, __m256i);
+__m256i __lasx_xvpickod_d (__m256i, __m256i);
+__m256i __lasx_xvpickod_h (__m256i, __m256i);
+__m256i __lasx_xvpickod_w (__m256i, __m256i);
+long int __lasx_xvpickve2gr_d (__m256i, imm0_3);
+unsigned long int __lasx_xvpickve2gr_du (__m256i, imm0_3);
+int __lasx_xvpickve2gr_w (__m256i, imm0_7);
+unsigned int __lasx_xvpickve2gr_wu (__m256i, imm0_7);
+__m256i __lasx_xvpickve_d (__m256i, imm0_3);
+__m256d __lasx_xvpickve_d_f (__m256d, imm0_3);
+__m256i __lasx_xvpickve_w (__m256i, imm0_7);
+__m256 __lasx_xvpickve_w_f (__m256, imm0_7);
+__m256i __lasx_xvrepl128vei_b (__m256i, imm0_15);
+__m256i __lasx_xvrepl128vei_d (__m256i, imm0_1);
+__m256i __lasx_xvrepl128vei_h (__m256i, imm0_7);
+__m256i __lasx_xvrepl128vei_w (__m256i, imm0_3);
+__m256i __lasx_xvreplgr2vr_b (int);
+__m256i __lasx_xvreplgr2vr_d (long int);
+__m256i __lasx_xvreplgr2vr_h (int);
+__m256i __lasx_xvreplgr2vr_w (int);
+__m256i __lasx_xvrepli_b (imm_n512_511);
+__m256i __lasx_xvrepli_d (imm_n512_511);
+__m256i __lasx_xvrepli_h (imm_n512_511);
+__m256i __lasx_xvrepli_w (imm_n512_511);
+__m256i __lasx_xvreplve0_b (__m256i);
+__m256i __lasx_xvreplve0_d (__m256i);
+__m256i __lasx_xvreplve0_h (__m256i);
+__m256i __lasx_xvreplve0_q (__m256i);
+__m256i __lasx_xvreplve0_w (__m256i);
+__m256i __lasx_xvreplve_b (__m256i, int);
+__m256i __lasx_xvreplve_d (__m256i, int);
+__m256i __lasx_xvreplve_h (__m256i, int);
+__m256i __lasx_xvreplve_w (__m256i, int);
+__m256i __lasx_xvrotr_b (__m256i, __m256i);
+__m256i __lasx_xvrotr_d (__m256i, __m256i);
+__m256i __lasx_xvrotr_h (__m256i, __m256i);
+__m256i __lasx_xvrotri_b (__m256i, imm0_7);
+__m256i __lasx_xvrotri_d (__m256i, imm0_63);
+__m256i __lasx_xvrotri_h (__m256i, imm0_15);
+__m256i __lasx_xvrotri_w (__m256i, imm0_31);
+__m256i __lasx_xvrotr_w (__m256i, __m256i);
+__m256i __lasx_xvsadd_b (__m256i, __m256i);
+__m256i __lasx_xvsadd_bu (__m256i, __m256i);
+__m256i __lasx_xvsadd_d (__m256i, __m256i);
+__m256i __lasx_xvsadd_du (__m256i, __m256i);
+__m256i __lasx_xvsadd_h (__m256i, __m256i);
+__m256i __lasx_xvsadd_hu (__m256i, __m256i);
+__m256i __lasx_xvsadd_w (__m256i, __m256i);
+__m256i __lasx_xvsadd_wu (__m256i, __m256i);
+__m256i __lasx_xvsat_b (__m256i, imm0_7);
+__m256i __lasx_xvsat_bu (__m256i, imm0_7);
+__m256i __lasx_xvsat_d (__m256i, imm0_63);
+__m256i __lasx_xvsat_du (__m256i, imm0_63);
+__m256i __lasx_xvsat_h (__m256i, imm0_15);
+__m256i __lasx_xvsat_hu (__m256i, imm0_15);
+__m256i __lasx_xvsat_w (__m256i, imm0_31);
+__m256i __lasx_xvsat_wu (__m256i, imm0_31);
+__m256i __lasx_xvseq_b (__m256i, __m256i);
+__m256i __lasx_xvseq_d (__m256i, __m256i);
+__m256i __lasx_xvseq_h (__m256i, __m256i);
+__m256i __lasx_xvseqi_b (__m256i, imm_n16_15);
+__m256i __lasx_xvseqi_d (__m256i, imm_n16_15);
+__m256i __lasx_xvseqi_h (__m256i, imm_n16_15);
+__m256i __lasx_xvseqi_w (__m256i, imm_n16_15);
+__m256i __lasx_xvseq_w (__m256i, __m256i);
+__m256i __lasx_xvshuf4i_b (__m256i, imm0_255);
+__m256i __lasx_xvshuf4i_d (__m256i, __m256i, imm0_255);
+__m256i __lasx_xvshuf4i_h (__m256i, imm0_255);
+__m256i __lasx_xvshuf4i_w (__m256i, imm0_255);
+__m256i __lasx_xvshuf_b (__m256i, __m256i, __m256i);
+__m256i __lasx_xvshuf_d (__m256i, __m256i, __m256i);
+__m256i __lasx_xvshuf_h (__m256i, __m256i, __m256i);
+__m256i __lasx_xvshuf_w (__m256i, __m256i, __m256i);
+__m256i __lasx_xvsigncov_b (__m256i, __m256i);
+__m256i __lasx_xvsigncov_d (__m256i, __m256i);
+__m256i __lasx_xvsigncov_h (__m256i, __m256i);
+__m256i __lasx_xvsigncov_w (__m256i, __m256i);
+__m256i __lasx_xvsle_b (__m256i, __m256i);
+__m256i __lasx_xvsle_bu (__m256i, __m256i);
+__m256i __lasx_xvsle_d (__m256i, __m256i);
+__m256i __lasx_xvsle_du (__m256i, __m256i);
+__m256i __lasx_xvsle_h (__m256i, __m256i);
+__m256i __lasx_xvsle_hu (__m256i, __m256i);
+__m256i __lasx_xvslei_b (__m256i, imm_n16_15);
+__m256i __lasx_xvslei_bu (__m256i, imm0_31);
+__m256i __lasx_xvslei_d (__m256i, imm_n16_15);
+__m256i __lasx_xvslei_du (__m256i, imm0_31);
+__m256i __lasx_xvslei_h (__m256i, imm_n16_15);
+__m256i __lasx_xvslei_hu (__m256i, imm0_31);
+__m256i __lasx_xvslei_w (__m256i, imm_n16_15);
+__m256i __lasx_xvslei_wu (__m256i, imm0_31);
+__m256i __lasx_xvsle_w (__m256i, __m256i);
+__m256i __lasx_xvsle_wu (__m256i, __m256i);
+__m256i __lasx_xvsll_b (__m256i, __m256i);
+__m256i __lasx_xvsll_d (__m256i, __m256i);
+__m256i __lasx_xvsll_h (__m256i, __m256i);
+__m256i __lasx_xvslli_b (__m256i, imm0_7);
+__m256i __lasx_xvslli_d (__m256i, imm0_63);
+__m256i __lasx_xvslli_h (__m256i, imm0_15);
+__m256i __lasx_xvslli_w (__m256i, imm0_31);
+__m256i __lasx_xvsll_w (__m256i, __m256i);
+__m256i __lasx_xvsllwil_du_wu (__m256i, imm0_31);
+__m256i __lasx_xvsllwil_d_w (__m256i, imm0_31);
+__m256i __lasx_xvsllwil_h_b (__m256i, imm0_7);
+__m256i __lasx_xvsllwil_hu_bu (__m256i, imm0_7);
+__m256i __lasx_xvsllwil_w_h (__m256i, imm0_15);
+__m256i __lasx_xvsllwil_wu_hu (__m256i, imm0_15);
+__m256i __lasx_xvslt_b (__m256i, __m256i);
+__m256i __lasx_xvslt_bu (__m256i, __m256i);
+__m256i __lasx_xvslt_d (__m256i, __m256i);
+__m256i __lasx_xvslt_du (__m256i, __m256i);
+__m256i __lasx_xvslt_h (__m256i, __m256i);
+__m256i __lasx_xvslt_hu (__m256i, __m256i);
+__m256i __lasx_xvslti_b (__m256i, imm_n16_15);
+__m256i __lasx_xvslti_bu (__m256i, imm0_31);
+__m256i __lasx_xvslti_d (__m256i, imm_n16_15);
+__m256i __lasx_xvslti_du (__m256i, imm0_31);
+__m256i __lasx_xvslti_h (__m256i, imm_n16_15);
+__m256i __lasx_xvslti_hu (__m256i, imm0_31);
+__m256i __lasx_xvslti_w (__m256i, imm_n16_15);
+__m256i __lasx_xvslti_wu (__m256i, imm0_31);
+__m256i __lasx_xvslt_w (__m256i, __m256i);
+__m256i __lasx_xvslt_wu (__m256i, __m256i);
+__m256i __lasx_xvsra_b (__m256i, __m256i);
+__m256i __lasx_xvsra_d (__m256i, __m256i);
+__m256i __lasx_xvsra_h (__m256i, __m256i);
+__m256i __lasx_xvsrai_b (__m256i, imm0_7);
+__m256i __lasx_xvsrai_d (__m256i, imm0_63);
+__m256i __lasx_xvsrai_h (__m256i, imm0_15);
+__m256i __lasx_xvsrai_w (__m256i, imm0_31);
+__m256i __lasx_xvsran_b_h (__m256i, __m256i);
+__m256i __lasx_xvsran_h_w (__m256i, __m256i);
+__m256i __lasx_xvsrani_b_h (__m256i, __m256i, imm0_15);
+__m256i __lasx_xvsrani_d_q (__m256i, __m256i, imm0_127);
+__m256i __lasx_xvsrani_h_w (__m256i, __m256i, imm0_31);
+__m256i __lasx_xvsrani_w_d (__m256i, __m256i, imm0_63);
+__m256i __lasx_xvsran_w_d (__m256i, __m256i);
+__m256i __lasx_xvsrar_b (__m256i, __m256i);
+__m256i __lasx_xvsrar_d (__m256i, __m256i);
+__m256i __lasx_xvsrar_h (__m256i, __m256i);
+__m256i __lasx_xvsrari_b (__m256i, imm0_7);
+__m256i __lasx_xvsrari_d (__m256i, imm0_63);
+__m256i __lasx_xvsrari_h (__m256i, imm0_15);
+__m256i __lasx_xvsrari_w (__m256i, imm0_31);
+__m256i __lasx_xvsrarn_b_h (__m256i, __m256i);
+__m256i __lasx_xvsrarn_h_w (__m256i, __m256i);
+__m256i __lasx_xvsrarni_b_h (__m256i, __m256i, imm0_15);
+__m256i __lasx_xvsrarni_d_q (__m256i, __m256i, imm0_127);
+__m256i __lasx_xvsrarni_h_w (__m256i, __m256i, imm0_31);
+__m256i __lasx_xvsrarni_w_d (__m256i, __m256i, imm0_63);
+__m256i __lasx_xvsrarn_w_d (__m256i, __m256i);
+__m256i __lasx_xvsrar_w (__m256i, __m256i);
+__m256i __lasx_xvsra_w (__m256i, __m256i);
+__m256i __lasx_xvsrl_b (__m256i, __m256i);
+__m256i __lasx_xvsrl_d (__m256i, __m256i);
+__m256i __lasx_xvsrl_h (__m256i, __m256i);
+__m256i __lasx_xvsrli_b (__m256i, imm0_7);
+__m256i __lasx_xvsrli_d (__m256i, imm0_63);
+__m256i __lasx_xvsrli_h (__m256i, imm0_15);
+__m256i __lasx_xvsrli_w (__m256i, imm0_31);
+__m256i __lasx_xvsrln_b_h (__m256i, __m256i);
+__m256i __lasx_xvsrln_h_w (__m256i, __m256i);
+__m256i __lasx_xvsrlni_b_h (__m256i, __m256i, imm0_15);
+__m256i __lasx_xvsrlni_d_q (__m256i, __m256i, imm0_127);
+__m256i __lasx_xvsrlni_h_w (__m256i, __m256i, imm0_31);
+__m256i __lasx_xvsrlni_w_d (__m256i, __m256i, imm0_63);
+__m256i __lasx_xvsrln_w_d (__m256i, __m256i);
+__m256i __lasx_xvsrlr_b (__m256i, __m256i);
+__m256i __lasx_xvsrlr_d (__m256i, __m256i);
+__m256i __lasx_xvsrlr_h (__m256i, __m256i);
+__m256i __lasx_xvsrlri_b (__m256i, imm0_7);
+__m256i __lasx_xvsrlri_d (__m256i, imm0_63);
+__m256i __lasx_xvsrlri_h (__m256i, imm0_15);
+__m256i __lasx_xvsrlri_w (__m256i, imm0_31);
+__m256i __lasx_xvsrlrn_b_h (__m256i, __m256i);
+__m256i __lasx_xvsrlrn_h_w (__m256i, __m256i);
+__m256i __lasx_xvsrlrni_b_h (__m256i, __m256i, imm0_15);
+__m256i __lasx_xvsrlrni_d_q (__m256i, __m256i, imm0_127);
+__m256i __lasx_xvsrlrni_h_w (__m256i, __m256i, imm0_31);
+__m256i __lasx_xvsrlrni_w_d (__m256i, __m256i, imm0_63);
+__m256i __lasx_xvsrlrn_w_d (__m256i, __m256i);
+__m256i __lasx_xvsrlr_w (__m256i, __m256i);
+__m256i __lasx_xvsrl_w (__m256i, __m256i);
+__m256i __lasx_xvssran_b_h (__m256i, __m256i);
+__m256i __lasx_xvssran_bu_h (__m256i, __m256i);
+__m256i __lasx_xvssran_hu_w (__m256i, __m256i);
+__m256i __lasx_xvssran_h_w (__m256i, __m256i);
+__m256i __lasx_xvssrani_b_h (__m256i, __m256i, imm0_15);
+__m256i __lasx_xvssrani_bu_h (__m256i, __m256i, imm0_15);
+__m256i __lasx_xvssrani_d_q (__m256i, __m256i, imm0_127);
+__m256i __lasx_xvssrani_du_q (__m256i, __m256i, imm0_127);
+__m256i __lasx_xvssrani_hu_w (__m256i, __m256i, imm0_31);
+__m256i __lasx_xvssrani_h_w (__m256i, __m256i, imm0_31);
+__m256i __lasx_xvssrani_w_d (__m256i, __m256i, imm0_63);
+__m256i __lasx_xvssrani_wu_d (__m256i, __m256i, imm0_63);
+__m256i __lasx_xvssran_w_d (__m256i, __m256i);
+__m256i __lasx_xvssran_wu_d (__m256i, __m256i);
+__m256i __lasx_xvssrarn_b_h (__m256i, __m256i);
+__m256i __lasx_xvssrarn_bu_h (__m256i, __m256i);
+__m256i __lasx_xvssrarn_hu_w (__m256i, __m256i);
+__m256i __lasx_xvssrarn_h_w (__m256i, __m256i);
+__m256i __lasx_xvssrarni_b_h (__m256i, __m256i, imm0_15);
+__m256i __lasx_xvssrarni_bu_h (__m256i, __m256i, imm0_15);
+__m256i __lasx_xvssrarni_d_q (__m256i, __m256i, imm0_127);
+__m256i __lasx_xvssrarni_du_q (__m256i, __m256i, imm0_127);
+__m256i __lasx_xvssrarni_hu_w (__m256i, __m256i, imm0_31);
+__m256i __lasx_xvssrarni_h_w (__m256i, __m256i, imm0_31);
+__m256i __lasx_xvssrarni_w_d (__m256i, __m256i, imm0_63);
+__m256i __lasx_xvssrarni_wu_d (__m256i, __m256i, imm0_63);
+__m256i __lasx_xvssrarn_w_d (__m256i, __m256i);
+__m256i __lasx_xvssrarn_wu_d (__m256i, __m256i);
+__m256i __lasx_xvssrln_b_h (__m256i, __m256i);
+__m256i __lasx_xvssrln_bu_h (__m256i, __m256i);
+__m256i __lasx_xvssrln_hu_w (__m256i, __m256i);
+__m256i __lasx_xvssrln_h_w (__m256i, __m256i);
+__m256i __lasx_xvssrlni_b_h (__m256i, __m256i, imm0_15);
+__m256i __lasx_xvssrlni_bu_h (__m256i, __m256i, imm0_15);
+__m256i __lasx_xvssrlni_d_q (__m256i, __m256i, imm0_127);
+__m256i __lasx_xvssrlni_du_q (__m256i, __m256i, imm0_127);
+__m256i __lasx_xvssrlni_hu_w (__m256i, __m256i, imm0_31);
+__m256i __lasx_xvssrlni_h_w (__m256i, __m256i, imm0_31);
+__m256i __lasx_xvssrlni_w_d (__m256i, __m256i, imm0_63);
+__m256i __lasx_xvssrlni_wu_d (__m256i, __m256i, imm0_63);
+__m256i __lasx_xvssrln_w_d (__m256i, __m256i);
+__m256i __lasx_xvssrln_wu_d (__m256i, __m256i);
+__m256i __lasx_xvssrlrn_b_h (__m256i, __m256i);
+__m256i __lasx_xvssrlrn_bu_h (__m256i, __m256i);
+__m256i __lasx_xvssrlrn_hu_w (__m256i, __m256i);
+__m256i __lasx_xvssrlrn_h_w (__m256i, __m256i);
+__m256i __lasx_xvssrlrni_b_h (__m256i, __m256i, imm0_15);
+__m256i __lasx_xvssrlrni_bu_h (__m256i, __m256i, imm0_15);
+__m256i __lasx_xvssrlrni_d_q (__m256i, __m256i, imm0_127);
+__m256i __lasx_xvssrlrni_du_q (__m256i, __m256i, imm0_127);
+__m256i __lasx_xvssrlrni_hu_w (__m256i, __m256i, imm0_31);
+__m256i __lasx_xvssrlrni_h_w (__m256i, __m256i, imm0_31);
+__m256i __lasx_xvssrlrni_w_d (__m256i, __m256i, imm0_63);
+__m256i __lasx_xvssrlrni_wu_d (__m256i, __m256i, imm0_63);
+__m256i __lasx_xvssrlrn_w_d (__m256i, __m256i);
+__m256i __lasx_xvssrlrn_wu_d (__m256i, __m256i);
+__m256i __lasx_xvssub_b (__m256i, __m256i);
+__m256i __lasx_xvssub_bu (__m256i, __m256i);
+__m256i __lasx_xvssub_d (__m256i, __m256i);
+__m256i __lasx_xvssub_du (__m256i, __m256i);
+__m256i __lasx_xvssub_h (__m256i, __m256i);
+__m256i __lasx_xvssub_hu (__m256i, __m256i);
+__m256i __lasx_xvssub_w (__m256i, __m256i);
+__m256i __lasx_xvssub_wu (__m256i, __m256i);
+void __lasx_xvst (__m256i, void *, imm_n2048_2047);
+void __lasx_xvstelm_b (__m256i, void *, imm_n128_127, idx);
+void __lasx_xvstelm_d (__m256i, void *, imm_n128_127, idx);
+void __lasx_xvstelm_h (__m256i, void *, imm_n128_127, idx);
+void __lasx_xvstelm_w (__m256i, void *, imm_n128_127, idx);
+void __lasx_xvstx (__m256i, void *, long int);
+__m256i __lasx_xvsub_b (__m256i, __m256i);
+__m256i __lasx_xvsub_d (__m256i, __m256i);
+__m256i __lasx_xvsub_h (__m256i, __m256i);
+__m256i __lasx_xvsubi_bu (__m256i, imm0_31);
+__m256i __lasx_xvsubi_du (__m256i, imm0_31);
+__m256i __lasx_xvsubi_hu (__m256i, imm0_31);
+__m256i __lasx_xvsubi_wu (__m256i, imm0_31);
+__m256i __lasx_xvsub_q (__m256i, __m256i);
+__m256i __lasx_xvsub_w (__m256i, __m256i);
+__m256i __lasx_xvsubwev_d_w (__m256i, __m256i);
+__m256i __lasx_xvsubwev_d_wu (__m256i, __m256i);
+__m256i __lasx_xvsubwev_h_b (__m256i, __m256i);
+__m256i __lasx_xvsubwev_h_bu (__m256i, __m256i);
+__m256i __lasx_xvsubwev_q_d (__m256i, __m256i);
+__m256i __lasx_xvsubwev_q_du (__m256i, __m256i);
+__m256i __lasx_xvsubwev_w_h (__m256i, __m256i);
+__m256i __lasx_xvsubwev_w_hu (__m256i, __m256i);
+__m256i __lasx_xvsubwod_d_w (__m256i, __m256i);
+__m256i __lasx_xvsubwod_d_wu (__m256i, __m256i);
+__m256i __lasx_xvsubwod_h_b (__m256i, __m256i);
+__m256i __lasx_xvsubwod_h_bu (__m256i, __m256i);
+__m256i __lasx_xvsubwod_q_d (__m256i, __m256i);
+__m256i __lasx_xvsubwod_q_du (__m256i, __m256i);
+__m256i __lasx_xvsubwod_w_h (__m256i, __m256i);
+__m256i __lasx_xvsubwod_w_hu (__m256i, __m256i);
+__m256i __lasx_xvxori_b (__m256i, imm0_255);
+__m256i __lasx_xvxor_v (__m256i, __m256i);
+@end smallexample
+
@node MIPS DSP Built-in Functions
@subsection MIPS DSP Built-in Functions
@@ -18666,11 +20704,11 @@ Disable global interrupt.
These built-in functions are available for the Nvidia PTX target:
-@defbuiltin{unsigned int __builtin_nvptx_brev (unsigned int @var{x})}
+@defbuiltin{{unsigned int} __builtin_nvptx_brev (unsigned int @var{x})}
Reverse the bit order of a 32-bit unsigned integer.
@enddefbuiltin
-@defbuiltin{unsigned long long __builtin_nvptx_brevll (unsigned long long @var{x})}
+@defbuiltin{{unsigned long long} __builtin_nvptx_brevll (unsigned long long @var{x})}
Reverse the bit order of a 64-bit unsigned integer.
@enddefbuiltin
@@ -19167,8 +21205,8 @@ round to odd as the rounding mode.
The following additional built-in functions are also available for the
PowerPC family of processors, starting with ISA 3.0 or later:
-@defbuiltin{long long __builtin_darn (void)}
-@defbuiltinx{long long __builtin_darn_raw (void)}
+@defbuiltin{{long long} __builtin_darn (void)}
+@defbuiltinx{{long long} __builtin_darn_raw (void)}
@defbuiltinx{int __builtin_darn_32 (void)}
The @code{__builtin_darn} and @code{__builtin_darn_raw}
functions require a
@@ -22252,12 +24290,12 @@ multiplying the bottom 16 bits of the two arguments into the
accumulator.
@enddefbuiltin
-@defbuiltin{int __builtin_rx_mvfachi (void)}
+@defbuiltin{int __builtin_rx_mvfachi (void)}
Generates the @code{mvfachi} machine instruction to read the top
32 bits of the accumulator.
@enddefbuiltin
-@defbuiltin{int __builtin_rx_mvfacmi (void)}
+@defbuiltin{int __builtin_rx_mvfacmi (void)}
Generates the @code{mvfacmi} machine instruction to read the middle
32 bits of the accumulator.
@enddefbuiltin
@@ -23626,7 +25664,7 @@ Generates the @code{extractps} machine instruction.
Generates the @code{pextrd} machine instruction.
@enddefbuiltin
-@defbuiltin{long long __builtin_ia32_vec_ext_v2di (v2di, const int)}
+@defbuiltin{{long long} __builtin_ia32_vec_ext_v2di (v2di, const int)}
Generates the @code{pextrq} machine instruction in 64bit mode.
@enddefbuiltin
@@ -23655,19 +25693,19 @@ v2di __builtin_ia32_pcmpgtq (v2di, v2di);
The following built-in functions are available when @option{-msse4.2} is
used.
-@defbuiltin{unsigned int __builtin_ia32_crc32qi (unsigned int, unsigned char)}
+@defbuiltin{{unsigned int} __builtin_ia32_crc32qi (unsigned int, unsigned char)}
Generates the @code{crc32b} machine instruction.
@enddefbuiltin
-@defbuiltin{unsigned int __builtin_ia32_crc32hi (unsigned int, unsigned short)}
+@defbuiltin{{unsigned int} __builtin_ia32_crc32hi (unsigned int, unsigned short)}
Generates the @code{crc32w} machine instruction.
@enddefbuiltin
-@defbuiltin{unsigned int __builtin_ia32_crc32si (unsigned int, unsigned int)}
+@defbuiltin{{unsigned int} __builtin_ia32_crc32si (unsigned int, unsigned int)}
Generates the @code{crc32l} machine instruction.
@enddefbuiltin
-@defbuiltin{unsigned long long __builtin_ia32_crc32di (unsigned long long, unsigned long long)}
+@defbuiltin{{unsigned long long} __builtin_ia32_crc32di (unsigned long long, unsigned long long)}
Generates the @code{crc32q} machine instruction.
@enddefbuiltin
diff --git a/gcc/doc/install.texi b/gcc/doc/install.texi
index c1ccb8b..c1128d9 100644
--- a/gcc/doc/install.texi
+++ b/gcc/doc/install.texi
@@ -697,9 +697,8 @@ phases.
First, we @strong{highly} recommend that GCC be built into a
separate directory from the sources which does @strong{not} reside
within the source tree. This is how we generally build GCC; building
-where @var{srcdir} == @var{objdir} should still work, but doesn't
-get extensive testing; building where @var{objdir} is a subdirectory
-of @var{srcdir} is unsupported.
+where @var{objdir} is a subdirectory of @var{srcdir} should work as well;
+building where @var{objdir} == @var{srcdir} is unsupported.
If you have previously built GCC in the same directory for a
different target machine, do @samp{make distclean} to delete all files
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index 2e6bac3..32f535e 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -219,6 +219,7 @@ in the following sections.
-fno-elide-constructors
-fno-enforce-eh-specs
-fno-gnu-keywords
+-fno-immediate-escalation
-fno-implicit-templates
-fno-implicit-inline-templates
-fno-implement-inlines
@@ -505,7 +506,8 @@ Objective-C and Objective-C++ Dialects}.
@item C and Objective-C-only Warning Options
@gccoptlist{-Wbad-function-cast -Wmissing-declarations
--Wmissing-parameter-type -Wmissing-prototypes -Wmissing-variable-declarations
+-Wmissing-parameter-type -Wdeclaration-missing-parameter-type
+-Wmissing-prototypes -Wmissing-variable-declarations
-Wnested-externs -Wold-style-declaration -Wold-style-definition
-Wstrict-prototypes -Wtraditional -Wtraditional-conversion
-Wdeclaration-after-statement -Wpointer-sign}
@@ -560,6 +562,7 @@ Objective-C and Objective-C++ Dialects}.
-fgcse -fgcse-after-reload -fgcse-las -fgcse-lm -fgraphite-identity
-fgcse-sm -fhoist-adjacent-loads -fif-conversion
-fif-conversion2 -findirect-inlining
+-finline-stringops[=@var{fn}]
-finline-functions -finline-functions-called-once -finline-limit=@var{n}
-finline-small-functions -fipa-modref -fipa-cp -fipa-cp-clone
-fipa-bit-cp -fipa-vrp -fipa-pta -fipa-profile -fipa-pure-const
@@ -652,6 +655,8 @@ Objective-C and Objective-C++ Dialects}.
-fstack-protector-explicit -fstack-check
-fstack-limit-register=@var{reg} -fstack-limit-symbol=@var{sym}
-fno-stack-limit -fsplit-stack
+-fstrub=disable -fstrub=strict -fstrub=relaxed
+-fstrub=all -fstrub=at-calls -fstrub=internal
-fvtable-verify=@r{[}std@r{|}preinit@r{|}none@r{]}
-fvtv-counts -fvtv-debug
-finstrument-functions -finstrument-functions-once
@@ -3384,6 +3389,39 @@ word as an identifier. You can use the keyword @code{__typeof__} instead.
This option is implied by the strict ISO C++ dialects: @option{-ansi},
@option{-std=c++98}, @option{-std=c++11}, etc.
+@opindex fno-immediate-escalation
+@opindex fimmediate-escalation
+@item -fno-immediate-escalation
+Do not enable immediate function escalation whereby certain functions
+can be promoted to consteval, as specified in P2564R3. For example:
+
+@example
+consteval int id(int i) @{ return i; @}
+
+constexpr int f(auto t)
+@{
+ return t + id(t); // id causes f<int> to be promoted to consteval
+@}
+
+void g(int i)
+@{
+ f (3);
+@}
+@end example
+
+compiles in C++20: @code{f} is an immediate-escalating function (due to
+the @code{auto} it is a function template and is declared @code{constexpr})
+and @code{id(t)} is an immediate-escalating expression, so @code{f} is
+promoted to @code{consteval}. Consequently, the call to @code{id(t)}
+is in an immediate context, so doesn't have to produce a constant (that
+is the mechanism allowing consteval function composition). However,
+with @option{-fno-immediate-escalation}, @code{f} is not promoted to
+@code{consteval}, and since the call to consteval function @code{id(t)}
+is not a constant expression, the compiler rejects the code.
+
+This option is turned on by default; it is only effective in C++20 mode
+or later.
+
@opindex fimplicit-constexpr
@item -fimplicit-constexpr
Make inline functions implicitly constexpr, if they satisfy the
@@ -6183,7 +6221,13 @@ only by this flag, but it also downgrades some C and C++ diagnostics
that have their own flag:
@gccoptlist{
+-Wdeclaration-missing-parameter-type @r{(C and Objective-C only)}
+-Wimplicit-function-declaration @r{(C and Objective-C only)}
+-Wimplicit-int @r{(C and Objective-C only)}
+-Wincompatible-pointer-types @r{(C and Objective-C only)}
+-Wint-conversion @r{(C and Objective-C only)}
-Wnarrowing @r{(C++)}
+-Wreturn-mismatch @r{(C and Objective-C only)}
}
The @option{-fpermissive} option is the default for historic C language
@@ -6853,8 +6897,10 @@ This warning is enabled by @option{-Wall} in C++.
@opindex Wno-implicit-int
@item -Wno-implicit-int @r{(C and Objective-C only)}
This option controls warnings when a declaration does not specify a type.
-This warning is enabled by default in C99 and later dialects of C,
-and also by @option{-Wall}.
+This warning is enabled by default, as an error, in C99 and later
+dialects of C, and also by @option{-Wall}. The error can be downgraded
+to a warning using @option{-fpermissive} (along with certain other
+errors), or for this error alone, with @option{-Wno-error=implicit-int}.
This warning is upgraded to an error by @option{-pedantic-errors}.
@@ -6862,8 +6908,11 @@ This warning is upgraded to an error by @option{-pedantic-errors}.
@opindex Wno-implicit-function-declaration
@item -Wno-implicit-function-declaration @r{(C and Objective-C only)}
This option controls warnings when a function is used before being declared.
-This warning is enabled by default in C99 and later dialects of C,
-and also by @option{-Wall}.
+This warning is enabled by default, as an error, in C99 and later
+dialects of C, and also by @option{-Wall}. The error can be downgraded
+to a warning using @option{-fpermissive} (along with certain other
+errors), or for this error alone, with
+@option{-Wno-error=implicit-function-declaration}.
This warning is upgraded to an error by @option{-pedantic-errors}.
@@ -7381,7 +7430,10 @@ Attempting to use the return value of a non-@code{void} function other
than @code{main} that flows off the end by reaching the closing curly
brace that terminates the function is undefined.
-This warning is specific to C and enabled by default.
+This warning is specific to C and enabled by default. In C99 and later
+language dialects, it is treated as an error. It can be downgraded
+to a warning using @option{-fpermissive} (along with other warnings),
+or for just this warning, with @option{-Wno-error=return-mismatch}.
@opindex Wreturn-type
@opindex Wno-return-type
@@ -8553,6 +8605,11 @@ types. This warning is for cases not covered by @option{-Wno-pointer-sign},
which warns for pointer argument passing or assignment with different
signedness.
+By default, in C99 and later dialects of C, GCC treats this issue as an
+error. The error can be downgraded to a warning using
+@option{-fpermissive} (along with certain other errors), or for this
+error alone, with @option{-Wno-error=incompatible-pointer-types}.
+
This warning is upgraded to an error by @option{-pedantic-errors}.
@opindex Wno-int-conversion
@@ -8563,6 +8620,11 @@ conversions. This warning is about implicit conversions; for explicit
conversions the warnings @option{-Wno-int-to-pointer-cast} and
@option{-Wno-pointer-to-int-cast} may be used.
+By default, in C99 and later dialects of C, GCC treats this issue as an
+error. The error can be downgraded to a warning using
+@option{-fpermissive} (along with certain other errors), or for this
+error alone, with @option{-Wno-error=int-conversion}.
+
This warning is upgraded to an error by @option{-pedantic-errors}.
@opindex Wzero-length-bounds
@@ -9734,6 +9796,20 @@ void foo(bar) @{ @}
This warning is also enabled by @option{-Wextra}.
+@opindex Wno-declaration-missing-parameter-type
+@opindex Wdeclaration-missing-parameter-type
+@item -Wno-declaration-missing-parameter-type @r{(C and Objective-C only)}
+Do not warn if a function declaration contains a parameter name without
+a type. Such function declarations do not provide a function prototype
+and prevent most type checking in function calls.
+
+This warning is enabled by default. In C99 and later dialects of C, it
+is treated as an error. The error can be downgraded to a warning using
+@option{-fpermissive} (along with certain other errors), or for this
+error alone, with @option{-Wno-error=declaration-missing-parameter-type}.
+
+This warning is upgraded to an error by @option{-pedantic-errors}.
+
@opindex Wmissing-prototypes
@opindex Wno-missing-prototypes
@item -Wmissing-prototypes @r{(C and Objective-C only)}
@@ -12373,6 +12449,20 @@ their @code{_FORTIFY_SOURCE} counterparts into faster alternatives.
Enabled at levels @option{-O2}, @option{-O3}.
+@opindex finline-stringops
+@item -finline-stringops[=@var{fn}]
+Expand memory and string operations (for now, only @code{memset})
+inline, even when the length is variable or big enough as to require
+looping. This is most useful along with @option{-ffreestanding} and
+@option{-fno-builtin}.
+
+In some circumstances, it enables the compiler to generate code that
+takes advantage of known alignment and length multipliers, but even then
+it may be less efficient than optimized runtime implementations, and
+grow code size so much that even a less performant but shared
+implementation runs faster due to better use of code caches. This
+option is disabled by default.
+
@opindex fno-inline
@opindex finline
@item -fno-inline
@@ -13729,6 +13819,27 @@ The values used for pattern initialization might be changed in the future.
The default is @samp{uninitialized}.
+Note that the initializer values, whether @samp{zero} or @samp{pattern},
+refer to data representation (in memory or machine registers), rather
+than to their interpretation as numerical values. This distinction may
+be important in languages that support types with biases or implicit
+multipliers, and with such extensions as @samp{hardbool} (@pxref{Type
+Attributes}). For example, a variable that uses 8 bits to represent
+(biased) quantities in the @code{range 160..400} will be initialized
+with the bit patterns @code{0x00} or @code{0xFE}, depending on
+@var{choice}, whether or not these representations stand for values in
+that range, and even if they do, the interpretation of the value held by
+the variable will depend on the bias. A @samp{hardbool} variable that
+uses say @code{0x5A} and @code{0xA5} for @code{false} and @code{true},
+respectively, will trap with either @samp{choice} of trivial
+initializer, i.e., @samp{zero} initialization will not convert to the
+representation for @code{false}, even if it would for a @code{static}
+variable of the same type. This means the initializer pattern doesn't
+generally depend on the type of the initialized variable. One notable
+exception is that (non-hardened) boolean variables that fit in registers
+are initialized with @code{false} (zero), even when @samp{pattern} is
+requested.
+
You can control this behavior for a specific variable by using the variable
attribute @code{uninitialized} (@pxref{Variable Attributes}).
@@ -17815,6 +17926,56 @@ without @option{-fsplit-stack} always has a large stack. Support for
this is implemented in the gold linker in GNU binutils release 2.21
and later.
+@opindex fstrub=disable
+@item -fstrub=disable
+Disable stack scrubbing entirely, ignoring any @code{strub} attributes.
+@xref{Common Type Attributes}.
+
+@opindex fstrub=strict
+@item -fstrub=strict
+Functions default to @code{strub} mode @code{disabled}, and apply
+@option{strict}ly the restriction that only functions associated with
+@code{strub}-@code{callable} modes (@code{at-calls}, @code{callable} and
+@code{always_inline} @code{internal}) are @code{callable} by functions
+with @code{strub}-enabled modes (@code{at-calls} and @code{internal}).
+
+@opindex fstrub=relaxed
+@item -fstrub=relaxed
+Restore the default stack scrub (@code{strub}) setting, namely,
+@code{strub} is only enabled as required by @code{strub} attributes
+associated with function and data types. @code{Relaxed} means that
+strub contexts are only prevented from calling functions explicitly
+associated with @code{strub} mode @code{disabled}. This option is only
+useful to override other @option{-fstrub=*} options that precede it in
+the command line.
+
+@opindex fstrub=at-calls
+@item -fstrub=at-calls
+Enable @code{at-calls} @code{strub} mode where viable. The primary use
+of this option is for testing. It exercises the @code{strub} machinery
+in scenarios strictly local to a translation unit. This @code{strub}
+mode modifies function interfaces, so any function that is visible to
+other translation units, or that has its address taken, will @emph{not}
+be affected by this option. Optimization options may also affect
+viability. See the @code{strub} attribute documentation for details on
+viability and eligibility requirements.
+
+@opindex fstrub=internal
+@item -fstrub=internal
+Enable @code{internal} @code{strub} mode where viable. The primary use
+of this option is for testing. This option is intended to exercise
+thoroughly parts of the @code{strub} machinery that implement the less
+efficient, but interface-preserving @code{strub} mode. Functions that
+would not be affected by this option are quite uncommon.
+
+@opindex fstrub=all
+@item -fstrub=all
+Enable some @code{strub} mode where viable. When both strub modes are
+viable, @code{at-calls} is preferred. @option{-fdump-ipa-strubm} adds
+function attributes that tell which mode was selected for each function.
+The primary use of this option is for testing, to exercise thoroughly
+the @code{strub} machinery.
+
@opindex fvtable-verify
@item -fvtable-verify=@r{[}std@r{|}preinit@r{|}none@r{]}
This option is only available when compiling C++ code.
@@ -19734,6 +19895,14 @@ and inlining decisions.
@item inline
Dump after function inlining.
+@item strubm
+Dump after selecting @code{strub} modes, and recording the selections as
+function attributes.
+
+@item strub
+Dump @code{strub} transformations: interface changes, function wrapping,
+and insertion of builtin calls for stack scrubbing and watermarking.
+
@end table
Additionally, the options @option{-optimized}, @option{-missed},
@@ -20908,7 +21077,7 @@ performance of the code. Permissible values for this option are:
@samp{cortex-r82}, @samp{cortex-x1}, @samp{cortex-x1c}, @samp{cortex-x2},
@samp{cortex-x3}, @samp{cortex-x4}, @samp{cortex-a510}, @samp{cortex-a520},
@samp{cortex-a710}, @samp{cortex-a715}, @samp{cortex-a720}, @samp{ampere1},
-@samp{ampere1a}, and @samp{native}.
+@samp{ampere1a}, @samp{ampere1b}, and @samp{native}.
The values @samp{cortex-a57.cortex-a53}, @samp{cortex-a72.cortex-a53},
@samp{cortex-a73.cortex-a35}, @samp{cortex-a73.cortex-a53},
@@ -21164,7 +21333,14 @@ Enable the Flag Manipulation instructions Extension.
Enable the Pointer Authentication Extension.
@item cssc
Enable the Common Short Sequence Compression instructions.
-
+@item sme
+Enable the Scalable Matrix Extension.
+@item sme-i16i64
+Enable the FEAT_SME_I16I64 extension to SME.
+@item sme-f64f64
+Enable the FEAT_SME_F64F64 extension to SME.
+@item sme2
+Enable the Scalable Matrix Extension 2. This also enables SME instructions.
@end table
Feature @option{crypto} implies @option{aes}, @option{sha2}, and @option{simd},
@@ -29701,7 +29877,7 @@ by particular CPU name.
Permissible values for this option are: @samp{sifive-e20}, @samp{sifive-e21},
@samp{sifive-e24}, @samp{sifive-e31}, @samp{sifive-e34}, @samp{sifive-e76},
@samp{sifive-s21}, @samp{sifive-s51}, @samp{sifive-s54}, @samp{sifive-s76},
-@samp{sifive-u54}, and @samp{sifive-u74}.
+@samp{sifive-u54}, @samp{sifive-u74}, and @samp{sifive-x280}.
@opindex mtune
@item -mtune=@var{processor-string}
@@ -29771,6 +29947,10 @@ Inlining will only be done if the strings are properly aligned
and instructions for accelerated processing are available.
The default is to not inline strcmp calls.
+The @option{--param riscv-strcmp-inline-limit=@var{n}} parameter controls
+the maximum number of bytes compared by the inlined code.
+The default value is 64.
+
@opindex minline-strncmp
@item -minline-strncmp
@itemx -mno-inline-strncmp
@@ -29779,6 +29959,10 @@ Inlining will only be done if the strings are properly aligned
and instructions for accelerated processing are available.
The default is to not inline strncmp calls.
+The @option{--param riscv-strcmp-inline-limit=@var{n}} parameter controls
+the maximum number of bytes compared by the inlined code.
+The default value is 64.
+
@opindex mshorten-memrefs
@item -mshorten-memrefs
@itemx -mno-shorten-memrefs
diff --git a/gcc/doc/sourcebuild.texi b/gcc/doc/sourcebuild.texi
index e27e0fa..c990902 100644
--- a/gcc/doc/sourcebuild.texi
+++ b/gcc/doc/sourcebuild.texi
@@ -2316,6 +2316,10 @@ AArch64 target which generates instruction sequences for big endian.
@item aarch64_small_fpic
Binutils installed on test system supports relocation types required by -fpic
for AArch64 small memory model.
+@item aarch64_sme
+AArch64 target that generates instructions for SME.
+@item aarch64_sme2
+AArch64 target that generates instructions for SME2.
@item aarch64_sve_hw
AArch64 target that is able to generate and execute SVE code (regardless of
whether it does so by default).
@@ -2671,6 +2675,9 @@ The language for the compiler under test is C++.
@item c99_runtime
Target provides a full C99 runtime.
+@item cfi
+Target supports DWARF CFI directives.
+
@item correct_iso_cpp_string_wchar_protos
Target @code{string.h} and @code{wchar.h} headers provide C++ required
overloads for @code{strchr} etc. functions.
@@ -3440,8 +3447,8 @@ stands for zero or more unmatched lines; the whitespace after
@subsubsection Scan optimization dump files
These commands are available for @var{kind} of @code{tree}, @code{ltrans-tree},
-@code{offload-tree}, @code{rtl}, @code{offload-rtl}, @code{ipa}, and
-@code{wpa-ipa}.
+@code{offload-tree}, @code{rtl}, @code{offload-rtl}, @code{ipa},
+@code{offload-ipa}, and @code{wpa-ipa}.
@table @code
@item scan-@var{kind}-dump @var{regex} @var{suffix} [@{ target/xfail @var{selector} @}]
@@ -3476,6 +3483,39 @@ occurrences of the string ``code has been optimized'', use:
/* @{ dg-final @{ scan-tree-dump "code has been optimized" "mypass\[1-3\]" @} @} */
@end smallexample
+The @code{offload-@dots{}} ones by default separately scan the dump
+file of each enabled offload target.
+You may use the @code{only_for_offload_target} wrapper to restrict the
+scanning to one specific offload target:
+
+@smallexample
+/* @{ dg-do link @{ target offload_target_amdgcn @} @} */
+/* @{ dg-additional-options -foffload-options=-fdump-ipa-simdclone-details @} */
+/* @{ dg-final @{ only_for_offload_target amdgcn-amdhsa scan-offload-ipa-dump @var{regex_amdgcn} simdclone @} @} */
+@end smallexample
+
+This test case is active if GCN offload compilation is enabled (but
+potentially also additional offload targets).
+The @code{simdclone} IPA dump file is (potentially) produced for all
+offload targets, but only the GCN offload one is scanned.
+
+If a test case doesn't have a @samp{@{ target @var{selector} @}}, and
+you need to scan, for example, for different @var{regex}es for each of
+host and potentially several offload targets, use a pattern like this:
+
+@smallexample
+/* @{ dg-final @{ scan-tree-dump @var{regex_host} optimized @} @}
+ @{ dg-final @{ only_for_offload_target amdgcn-amdhsa scan-offload-tree-dump @var{regex_amdgcn} optimized @{ target offload_target_amdgcn @} @} @}
+ @{ dg-final @{ only_for_offload_target nvptx-none scan-offload-tree-dump @var{regex_nvptx} optimized @{ target offload_target_nvptx @} @} @} */
+@end smallexample
+
+Here, unconditionally @var{regex_host} is scanned for in the host dump
+file.
+If GCN offloading compilation is actually enabled, @var{regex_amdgcn}
+is scanned for in the GCN offload compilation dump file.
+If nvptx offloading compilation is actually enabled, @var{regex_nvptx}
+is scanned for in the nvptx offload compilation dump file.
+
@subsubsection Check for output files
diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
index c0f949b..89a1735 100644
--- a/gcc/doc/tm.texi
+++ b/gcc/doc/tm.texi
@@ -3450,6 +3450,25 @@ in DWARF 2 debug information. The default is zero. A different value
may reduce the size of debug information on some ports.
@end defmac
+@defmac TARGET_STRUB_USE_DYNAMIC_ARRAY
+If defined to nonzero, @code{__strub_leave} will allocate a dynamic
+array covering the stack range that needs scrubbing before clearing it.
+Allocating the array tends to make scrubbing slower, but it enables the
+scrubbing to be safely implemented with a @code{memset} call, which
+could make up for the difference.
+@end defmac
+
+@defmac TARGET_STRUB_MAY_USE_MEMSET
+If defined to nonzero, enable @code{__strub_leave} to be optimized so as
+to call @code{memset} for stack scrubbing. This is only enabled by
+default if @code{TARGET_STRUB_USE_DYNAMIC_ARRAY} is enabled; it's not
+advisable to enable it otherwise, since @code{memset} would then likely
+overwrite its own stack frame, but it might work if the target ABI
+enables @code{memset} to not use the stack at all, not even for
+arguments or its return address, and its implementation is trivial
+enough that it doesn't use a stack frame.
+@end defmac
+
@node Exception Handling
@subsection Exception Handling Support
@cindex exception handling
@@ -3507,6 +3526,18 @@ If you want to support call frame exception handling, you must
define either this macro or the @code{eh_return} instruction pattern.
@end defmac
+@defmac EH_RETURN_TAKEN_RTX
+A C expression whose value is RTL representing a location in which
+to store if the EH return path was taken instead of a normal return.
+This macro allows conditionally executing different code in the
+epilogue for the EH and normal return cases.
+
+When this macro is defined, the macros @code{EH_RETURN_STACKADJ_RTX}
+and @code{EH_RETURN_HANDLER_RTX} are only meaningful in the epilogue
+when 1 is stored to the specified location. The value 0 means normal
+return.
+@end defmac
+
@defmac RETURN_ADDR_OFFSET
If defined, an integer-valued C expression for which rtl will be generated
to add it to the exception handler address before it is searched in the
@@ -5491,26 +5522,59 @@ except the last are treated as named.
You need not define this hook if it always returns @code{false}.
@end deftypefn
-@deftypefn {Target Hook} void TARGET_CALL_ARGS (rtx, @var{tree})
+@deftypefn {Target Hook} void TARGET_START_CALL_ARGS (cumulative_args_t @var{complete_args})
+This target hook is invoked while generating RTL for a function call,
+after the argument values have been computed, and after stack arguments
+have been initialized, but before register arguments have been moved into
+their ABI-defined hard register locations. It precedes calls to the related
+hooks @code{TARGET_CALL_ARGS} and @code{TARGET_END_CALL_ARGS}.
+The significance of this position in the call expansion is that:
+
+@itemize @bullet
+@item
+No argument registers are live.
+@item
+Although a call sequence can in general involve subcalls (such as using
+@code{memcpy} to copy large arguments), no such subcall will occur between
+the call to this hook and the generation of the main call instruction.
+@end itemize
+
+The single argument @var{complete_args} is the state of the target
+function's cumulative argument information after the final call to
+@code{TARGET_FUNCTION_ARG}.
+
+The hook can be used for things like switching processor mode, in cases
+where different calls need different processor modes. Most ports do not
+need to implement anything for this hook.
+@end deftypefn
+
+@deftypefn {Target Hook} void TARGET_CALL_ARGS (cumulative_args_t @var{complete_args}, rtx @var{loc}, tree @var{type})
While generating RTL for a function call, this target hook is invoked once
for each argument passed to the function, either a register returned by
@code{TARGET_FUNCTION_ARG} or a memory location. It is called just
-before the point where argument registers are stored. The type of the
-function to be called is also passed as the second argument; it is
-@code{NULL_TREE} for libcalls. The @code{TARGET_END_CALL_ARGS} hook is
-invoked just after the code to copy the return reg has been emitted.
-This functionality can be used to perform special setup of call argument
-registers if a target needs it.
+before the point where argument registers are stored.
+
+@var{complete_args} is the state of the target function's cumulative
+argument information after the final call to @code{TARGET_FUNCTION_ARG}.
+@var{loc} is the location of the argument. @var{type} is the type of
+the function being called, or @code{NULL_TREE} for libcalls.
+
For functions without arguments, the hook is called once with @code{pc_rtx}
passed instead of an argument register.
-Most ports do not need to implement anything for this hook.
+
+This functionality can be used to perform special setup of call argument
+registers, if a target needs it. Most ports do not need to implement
+anything for this hook.
@end deftypefn
-@deftypefn {Target Hook} void TARGET_END_CALL_ARGS (void)
+@deftypefn {Target Hook} void TARGET_END_CALL_ARGS (cumulative_args_t @var{complete_args})
This target hook is invoked while generating RTL for a function call,
just after the point where the return reg is copied into a pseudo. It
signals that all the call argument and return registers for the just
-emitted call are now no longer in use.
+emitted call are now no longer in use. @var{complete_args} is the
+state of the target function's cumulative argument information after
+the final call to @code{TARGET_FUNCTION_ARG}.
+
Most ports do not need to implement anything for this hook.
@end deftypefn
@@ -10534,12 +10598,33 @@ Target-specific attributes may be defined for functions, data and types.
These are described using the following target hooks; they also need to
be documented in @file{extend.texi}.
-@deftypevr {Target Hook} {const struct attribute_spec *} TARGET_ATTRIBUTE_TABLE
-If defined, this target hook points to an array of @samp{struct
-attribute_spec} (defined in @file{tree-core.h}) specifying the machine
-specific attributes for this target and some of the restrictions on the
-entities to which these attributes are applied and the arguments they
-take.
+@deftypevr {Target Hook} {array_slice<const struct scoped_attribute_specs *const>} TARGET_ATTRIBUTE_TABLE
+If defined, this target hook provides an array of
+@samp{scoped_attribute_spec}s (defined in @file{attribs.h}) that specify the
+machine-specific attributes for this target. The information includes some
+of the restrictions on the entities to which these attributes are applied
+and the arguments that the attributes take.
+
+In C and C++, these attributes are associated with two syntaxes:
+the traditional GNU @code{__attribute__} syntax and the standard
+@samp{[[]]} syntax. Attributes that support the GNU syntax must be
+placed in the @code{gnu} namespace. Such attributes can then also be
+written @samp{[[gnu::@dots{}]]}. Attributes that use only the standard
+syntax should be placed in whichever namespace the attribute specification
+requires. For example, a target might choose to support vendor-specific
+@samp{[[]]} attributes that the vendor places in their own namespace.
+
+Targets that only define attributes in the @code{gnu} namespace
+can use the following shorthand to define the table:
+
+@smallexample
+TARGET_GNU_ATTRIBUTES (@var{cpu_attribute_table}, @{
+ @{ "@var{attribute1}", @dots{} @},
+ @{ "@var{attribute2}", @dots{} @},
+ @dots{},
+ @{ "@var{attributen}", @dots{} @},
+@});
+@end smallexample
@end deftypevr
@deftypefn {Target Hook} bool TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P (const_tree @var{name})
@@ -11755,10 +11840,11 @@ from shared libraries (DLLs).
You need not define this macro if it would always evaluate to zero.
@end defmac
-@deftypefn {Target Hook} {rtx_insn *} TARGET_MD_ASM_ADJUST (vec<rtx>& @var{outputs}, vec<rtx>& @var{inputs}, vec<machine_mode>& @var{input_modes}, vec<const char *>& @var{constraints}, vec<rtx>& @var{clobbers}, HARD_REG_SET& @var{clobbered_regs}, location_t @var{loc})
+@deftypefn {Target Hook} {rtx_insn *} TARGET_MD_ASM_ADJUST (vec<rtx>& @var{outputs}, vec<rtx>& @var{inputs}, vec<machine_mode>& @var{input_modes}, vec<const char *>& @var{constraints}, vec<rtx>& @var{uses}, vec<rtx>& @var{clobbers}, HARD_REG_SET& @var{clobbered_regs}, location_t @var{loc})
This target hook may add @dfn{clobbers} to @var{clobbers} and
@var{clobbered_regs} for any hard regs the port wishes to automatically
-clobber for an asm. The @var{outputs} and @var{inputs} may be inspected
+clobber for an asm. It can also add hard registers that are used by the
+asm to @var{uses}. The @var{outputs} and @var{inputs} may be inspected
to avoid clobbering a register that is already used by the asm. @var{loc}
is the source location of the asm.
@@ -11846,6 +11932,33 @@ of the if-block in the @code{struct ce_if_block} structure that is pointed
to by @var{ce_info}.
@end defmac
+@deftypefn {Target Hook} bool TARGET_USE_LATE_PROLOGUE_EPILOGUE ()
+Return true if the current function's prologue and epilogue should
+be emitted late in the pass pipeline, instead of at the usual point.
+
+Normally, the prologue and epilogue sequences are introduced soon after
+register allocation is complete. The advantage of this approach is that
+it allows the prologue and epilogue instructions to be optimized and
+scheduled with other code in the function. However, some targets
+require the prologue and epilogue to be the first and last sequences
+executed by the function, with no variation allowed. This hook should
+return true on such targets.
+
+The default implementation returns false, which is correct for most
+targets. The hook should only return true if there is a specific
+target limitation that cannot be described in RTL. For example,
+the hook might return true if the prologue and epilogue need to switch
+between instruction sets.
+@end deftypefn
+
+@deftypefn {Target Hook} void TARGET_EMIT_EPILOGUE_FOR_SIBCALL (rtx_call_insn *@var{call})
+If defined, this hook emits an epilogue sequence for sibling (tail)
+call instruction @var{call}. Another way of providing epilogues
+for sibling calls is to define the @code{sibcall_epilogue} instruction
+pattern; the main advantage of this hook over the pattern is that it
+has access to the call instruction.
+@end deftypefn
+
@deftypefn {Target Hook} void TARGET_MACHINE_DEPENDENT_REORG (void)
If non-null, this hook performs a target-specific pass over the
instruction stream. The compiler will run it at all optimization levels,
diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
index ef04c89..ebc1d3d 100644
--- a/gcc/doc/tm.texi.in
+++ b/gcc/doc/tm.texi.in
@@ -2686,6 +2686,25 @@ in DWARF 2 debug information. The default is zero. A different value
may reduce the size of debug information on some ports.
@end defmac
+@defmac TARGET_STRUB_USE_DYNAMIC_ARRAY
+If defined to nonzero, @code{__strub_leave} will allocate a dynamic
+array covering the stack range that needs scrubbing before clearing it.
+Allocating the array tends to make scrubbing slower, but it enables the
+scrubbing to be safely implemented with a @code{memset} call, which
+could make up for the difference.
+@end defmac
+
+@defmac TARGET_STRUB_MAY_USE_MEMSET
+If defined to nonzero, enable @code{__strub_leave} to be optimized so as
+to call @code{memset} for stack scrubbing. This is only enabled by
+default if @code{TARGET_STRUB_USE_DYNAMIC_ARRAY} is enabled; it's not
+advisable to enable it otherwise, since @code{memset} would then likely
+overwrite its own stack frame, but it might work if the target ABI
+enables @code{memset} to not use the stack at all, not even for
+arguments or its return address, and its implementation is trivial
+enough that it doesn't use a stack frame.
+@end defmac
+
@node Exception Handling
@subsection Exception Handling Support
@cindex exception handling
@@ -2743,6 +2762,18 @@ If you want to support call frame exception handling, you must
define either this macro or the @code{eh_return} instruction pattern.
@end defmac
+@defmac EH_RETURN_TAKEN_RTX
+A C expression whose value is RTL representing a location in which
+to store if the EH return path was taken instead of a normal return.
+This macro allows conditionally executing different code in the
+epilogue for the EH and normal return cases.
+
+When this macro is defined, the macros @code{EH_RETURN_STACKADJ_RTX}
+and @code{EH_RETURN_HANDLER_RTX} are only meaningful in the epilogue
+when 1 is stored to the specified location. The value 0 means normal
+return.
+@end defmac
+
@defmac RETURN_ADDR_OFFSET
If defined, an integer-valued C expression for which rtl will be generated
to add it to the exception handler address before it is searched in the
@@ -3825,6 +3856,8 @@ These machine description macros help implement varargs:
@hook TARGET_STRICT_ARGUMENT_NAMING
+@hook TARGET_START_CALL_ARGS
+
@hook TARGET_CALL_ARGS
@hook TARGET_END_CALL_ARGS
@@ -7772,6 +7805,10 @@ of the if-block in the @code{struct ce_if_block} structure that is pointed
to by @var{ce_info}.
@end defmac
+@hook TARGET_USE_LATE_PROLOGUE_EPILOGUE
+
+@hook TARGET_EMIT_EPILOGUE_FOR_SIBCALL
+
@hook TARGET_MACHINE_DEPENDENT_REORG
@hook TARGET_INIT_BUILTINS
diff --git a/gcc/except.cc b/gcc/except.cc
index e728aa4..508f8bb 100644
--- a/gcc/except.cc
+++ b/gcc/except.cc
@@ -2281,6 +2281,10 @@ expand_eh_return (void)
emit_move_insn (EH_RETURN_STACKADJ_RTX, const0_rtx);
#endif
+#ifdef EH_RETURN_TAKEN_RTX
+ emit_move_insn (EH_RETURN_TAKEN_RTX, const0_rtx);
+#endif
+
around_label = gen_label_rtx ();
emit_jump (around_label);
@@ -2291,6 +2295,10 @@ expand_eh_return (void)
emit_move_insn (EH_RETURN_STACKADJ_RTX, crtl->eh.ehr_stackadj);
#endif
+#ifdef EH_RETURN_TAKEN_RTX
+ emit_move_insn (EH_RETURN_TAKEN_RTX, const1_rtx);
+#endif
+
if (targetm.have_eh_return ())
emit_insn (targetm.gen_eh_return (crtl->eh.ehr_handler));
else
@@ -2301,7 +2309,19 @@ expand_eh_return (void)
error ("%<__builtin_eh_return%> not supported on this target");
}
+#ifdef EH_RETURN_TAKEN_RTX
+ rtx_code_label *eh_done_label = gen_label_rtx ();
+ emit_jump (eh_done_label);
+#endif
+
emit_label (around_label);
+
+#ifdef EH_RETURN_TAKEN_RTX
+ for (rtx tmp : { EH_RETURN_STACKADJ_RTX, EH_RETURN_HANDLER_RTX })
+ if (tmp && REG_P (tmp))
+ emit_clobber (tmp);
+ emit_label (eh_done_label);
+#endif
}
/* Convert a ptr_mode address ADDR_TREE to a Pmode address controlled by
diff --git a/gcc/expr.cc b/gcc/expr.cc
index c432170..fea7190 100644
--- a/gcc/expr.cc
+++ b/gcc/expr.cc
@@ -80,7 +80,11 @@ static bool emit_block_move_via_pattern (rtx, rtx, rtx, unsigned, unsigned,
HOST_WIDE_INT, unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT, bool);
-static void emit_block_move_via_loop (rtx, rtx, rtx, unsigned);
+static void emit_block_move_via_loop (rtx, rtx, rtx, unsigned, int);
+static void emit_block_move_via_sized_loop (rtx, rtx, rtx, unsigned, unsigned);
+static void emit_block_move_via_oriented_loop (rtx, rtx, rtx, unsigned, unsigned);
+static rtx emit_block_cmp_via_loop (rtx, rtx, rtx, tree, rtx, bool,
+ unsigned, unsigned);
static void clear_by_pieces (rtx, unsigned HOST_WIDE_INT, unsigned int);
static rtx_insn *compress_float_constant (rtx, rtx);
static rtx get_subtarget (rtx);
@@ -1982,6 +1986,8 @@ compare_by_pieces (rtx arg0, rtx arg1, unsigned HOST_WIDE_INT len,
MIN_SIZE is the minimal size of block to move
MAX_SIZE is the maximal size of block to move, if it cannot be represented
in unsigned HOST_WIDE_INT, than it is mask of all ones.
+ CTZ_SIZE is the trailing-zeros count of SIZE; even a nonconstant SIZE is
+ known to be a multiple of 1<<CTZ_SIZE.
Return the address of the new block, if memcpy is called and returns it,
0 otherwise. */
@@ -1993,7 +1999,7 @@ emit_block_move_hints (rtx x, rtx y, rtx size, enum block_op_methods method,
unsigned HOST_WIDE_INT max_size,
unsigned HOST_WIDE_INT probable_max_size,
bool bail_out_libcall, bool *is_move_done,
- bool might_overlap)
+ bool might_overlap, unsigned ctz_size)
{
int may_use_call;
rtx retval = 0;
@@ -2079,6 +2085,14 @@ emit_block_move_hints (rtx x, rtx y, rtx size, enum block_op_methods method,
}
}
+ bool dynamic_direction = false;
+ if (!pattern_ok && !pieces_ok && may_use_call
+ && (flag_inline_stringops & (might_overlap ? ILSOP_MEMMOVE : ILSOP_MEMCPY)))
+ {
+ may_use_call = 0;
+ dynamic_direction = might_overlap;
+ }
+
if (pattern_ok)
;
else if (pieces_ok)
@@ -2100,10 +2114,12 @@ emit_block_move_hints (rtx x, rtx y, rtx size, enum block_op_methods method,
retval = emit_block_copy_via_libcall (x, y, size,
method == BLOCK_OP_TAILCALL);
}
+ else if (dynamic_direction)
+ emit_block_move_via_oriented_loop (x, y, size, align, ctz_size);
else if (might_overlap)
*is_move_done = false;
else
- emit_block_move_via_loop (x, y, size, align);
+ emit_block_move_via_sized_loop (x, y, size, align, ctz_size);
if (method == BLOCK_OP_CALL_PARM)
OK_DEFER_POP;
@@ -2112,7 +2128,8 @@ emit_block_move_hints (rtx x, rtx y, rtx size, enum block_op_methods method,
}
rtx
-emit_block_move (rtx x, rtx y, rtx size, enum block_op_methods method)
+emit_block_move (rtx x, rtx y, rtx size, enum block_op_methods method,
+ unsigned int ctz_size)
{
unsigned HOST_WIDE_INT max, min = 0;
if (GET_CODE (size) == CONST_INT)
@@ -2120,7 +2137,8 @@ emit_block_move (rtx x, rtx y, rtx size, enum block_op_methods method)
else
max = GET_MODE_MASK (GET_MODE (size));
return emit_block_move_hints (x, y, size, method, 0, -1,
- min, max, max);
+ min, max, max,
+ false, NULL, false, ctz_size);
}
/* A subroutine of emit_block_move. Returns true if calling the
@@ -2282,13 +2300,117 @@ emit_block_move_via_pattern (rtx x, rtx y, rtx size, unsigned int align,
return false;
}
+/* Like emit_block_move_via_loop, but choose a suitable INCR based on
+ ALIGN and CTZ_SIZE. */
+
+static void
+emit_block_move_via_sized_loop (rtx x, rtx y, rtx size,
+ unsigned int align,
+ unsigned int ctz_size)
+{
+ int incr = align / BITS_PER_UNIT;
+
+ if (CONST_INT_P (size))
+ ctz_size = MAX (ctz_size, (unsigned) wi::ctz (UINTVAL (size)));
+
+ if (HOST_WIDE_INT_1U << ctz_size < (unsigned HOST_WIDE_INT) incr)
+ incr = HOST_WIDE_INT_1U << ctz_size;
+
+ while (incr > 1 && !can_move_by_pieces (incr, align))
+ incr >>= 1;
+
+ gcc_checking_assert (incr);
+
+ return emit_block_move_via_loop (x, y, size, align, incr);
+}
+
+/* Like emit_block_move_via_sized_loop, but besides choosing INCR so
+ as to ensure safe moves even in case of overlap, output dynamic
+ tests to choose between two loops, one moving downwards, another
+ moving upwards. */
+
+static void
+emit_block_move_via_oriented_loop (rtx x, rtx y, rtx size,
+ unsigned int align,
+ unsigned int ctz_size)
+{
+ int incr = align / BITS_PER_UNIT;
+
+ if (CONST_INT_P (size))
+ ctz_size = MAX (ctz_size, (unsigned) wi::ctz (UINTVAL (size)));
+
+ if (HOST_WIDE_INT_1U << ctz_size < (unsigned HOST_WIDE_INT) incr)
+ incr = HOST_WIDE_INT_1U << ctz_size;
+
+ while (incr > 1 && !int_mode_for_size (incr, 0).exists ())
+ incr >>= 1;
+
+ gcc_checking_assert (incr);
+
+ rtx_code_label *upw_label, *end_label;
+ upw_label = gen_label_rtx ();
+ end_label = gen_label_rtx ();
+
+ rtx x_addr = force_operand (XEXP (x, 0), NULL_RTX);
+ rtx y_addr = force_operand (XEXP (y, 0), NULL_RTX);
+ do_pending_stack_adjust ();
+
+ machine_mode mode = GET_MODE (x_addr);
+ if (mode != GET_MODE (y_addr))
+ {
+ scalar_int_mode xmode
+ = smallest_int_mode_for_size (GET_MODE_BITSIZE (mode));
+ scalar_int_mode ymode
+ = smallest_int_mode_for_size (GET_MODE_BITSIZE
+ (GET_MODE (y_addr)));
+ if (GET_MODE_BITSIZE (xmode) < GET_MODE_BITSIZE (ymode))
+ mode = ymode;
+ else
+ mode = xmode;
+
+#ifndef POINTERS_EXTEND_UNSIGNED
+ const int POINTERS_EXTEND_UNSIGNED = 1;
+#endif
+ x_addr = convert_modes (mode, GET_MODE (x_addr), x_addr,
+ POINTERS_EXTEND_UNSIGNED);
+ y_addr = convert_modes (mode, GET_MODE (y_addr), y_addr,
+ POINTERS_EXTEND_UNSIGNED);
+ }
+
+ /* Test for overlap: if (x >= y || x + size <= y) goto upw_label. */
+ emit_cmp_and_jump_insns (x_addr, y_addr, GEU, NULL_RTX, mode,
+ true, upw_label,
+ profile_probability::guessed_always ()
+ .apply_scale (5, 10));
+ rtx tmp = convert_modes (GET_MODE (x_addr), GET_MODE (size), size, true);
+ tmp = simplify_gen_binary (PLUS, GET_MODE (x_addr), x_addr, tmp);
+
+ emit_cmp_and_jump_insns (tmp, y_addr, LEU, NULL_RTX, mode,
+ true, upw_label,
+ profile_probability::guessed_always ()
+ .apply_scale (8, 10));
+
+ emit_block_move_via_loop (x, y, size, align, -incr);
+
+ emit_jump (end_label);
+ emit_label (upw_label);
+
+ emit_block_move_via_loop (x, y, size, align, incr);
+
+ emit_label (end_label);
+}
+
/* A subroutine of emit_block_move. Copy the data via an explicit
- loop. This is used only when libcalls are forbidden. */
-/* ??? It'd be nice to copy in hunks larger than QImode. */
+ loop. This is used only when libcalls are forbidden, or when
+ inlining is required. INCR is the block size to be copied in each
+ loop iteration. If it is negative, the absolute value is used, and
+ the block is copied backwards. INCR must be a power of two, an
+ exact divisor for SIZE and ALIGN, and imply a mode that can be
+ safely copied per iteration assuming no overlap. */
static void
emit_block_move_via_loop (rtx x, rtx y, rtx size,
- unsigned int align ATTRIBUTE_UNUSED)
+ unsigned int align, int incr)
{
rtx_code_label *cmp_label, *top_label;
rtx iter, x_addr, y_addr, tmp;
@@ -2304,7 +2426,38 @@ emit_block_move_via_loop (rtx x, rtx y, rtx size,
cmp_label = gen_label_rtx ();
iter = gen_reg_rtx (iter_mode);
- emit_move_insn (iter, const0_rtx);
+ bool downwards = incr < 0;
+ rtx iter_init;
+ rtx_code iter_cond;
+ rtx iter_limit;
+ rtx iter_incr;
+ machine_mode move_mode;
+ if (downwards)
+ {
+ incr = -incr;
+ iter_init = size;
+ iter_cond = GEU;
+ iter_limit = const0_rtx;
+ iter_incr = GEN_INT (incr);
+ }
+ else
+ {
+ iter_init = const0_rtx;
+ iter_cond = LTU;
+ iter_limit = size;
+ iter_incr = GEN_INT (incr);
+ }
+ emit_move_insn (iter, iter_init);
+
+ scalar_int_mode int_move_mode
+ = smallest_int_mode_for_size (incr * BITS_PER_UNIT);
+ if (GET_MODE_BITSIZE (int_move_mode) != incr * BITS_PER_UNIT)
+ {
+ move_mode = BLKmode;
+ gcc_checking_assert (can_move_by_pieces (incr, align));
+ }
+ else
+ move_mode = int_move_mode;
x_addr = force_operand (XEXP (x, 0), NULL_RTX);
y_addr = force_operand (XEXP (y, 0), NULL_RTX);
@@ -2320,19 +2473,32 @@ emit_block_move_via_loop (rtx x, rtx y, rtx size,
tmp = convert_modes (y_addr_mode, iter_mode, iter, true);
y_addr = simplify_gen_binary (PLUS, y_addr_mode, y_addr, tmp);
- x = change_address (x, QImode, x_addr);
- y = change_address (y, QImode, y_addr);
+ x = change_address (x, move_mode, x_addr);
+ y = change_address (y, move_mode, y_addr);
+
+ if (move_mode == BLKmode)
+ {
+ bool done;
+ emit_block_move_hints (x, y, iter_incr, BLOCK_OP_NO_LIBCALL,
+ align, incr, incr, incr, incr,
+ false, &done, false);
+ gcc_checking_assert (done);
+ }
+ else
+ emit_move_insn (x, y);
- emit_move_insn (x, y);
+ if (downwards)
+ emit_label (cmp_label);
- tmp = expand_simple_binop (iter_mode, PLUS, iter, const1_rtx, iter,
+ tmp = expand_simple_binop (iter_mode, PLUS, iter, iter_incr, iter,
true, OPTAB_LIB_WIDEN);
if (tmp != iter)
emit_move_insn (iter, tmp);
- emit_label (cmp_label);
+ if (!downwards)
+ emit_label (cmp_label);
- emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
+ emit_cmp_and_jump_insns (iter, iter_limit, iter_cond, NULL_RTX, iter_mode,
true, top_label,
profile_probability::guessed_always ()
.apply_scale (9, 10));
@@ -2432,7 +2598,8 @@ emit_block_cmp_via_cmpmem (rtx x, rtx y, rtx len, tree len_type, rtx target,
Both X and Y must be MEM rtx's. LEN is an rtx that says how long
they are. LEN_TYPE is the type of the expression that was used to
- calculate it.
+ calculate it, and CTZ_LEN is the known trailing-zeros count of LEN,
+ so LEN must be a multiple of 1<<CTZ_LEN even if it's not constant.
If EQUALITY_ONLY is true, it means we don't have to return the tri-state
value of a normal memcmp call, instead we can just compare for equality.
@@ -2448,7 +2615,7 @@ emit_block_cmp_via_cmpmem (rtx x, rtx y, rtx len, tree len_type, rtx target,
rtx
emit_block_cmp_hints (rtx x, rtx y, rtx len, tree len_type, rtx target,
bool equality_only, by_pieces_constfn y_cfn,
- void *y_cfndata)
+ void *y_cfndata, unsigned ctz_len)
{
rtx result = 0;
@@ -2470,8 +2637,203 @@ emit_block_cmp_hints (rtx x, rtx y, rtx len, tree len_type, rtx target,
else
result = emit_block_cmp_via_cmpmem (x, y, len, len_type, target, align);
+ if (!result && (flag_inline_stringops & ILSOP_MEMCMP))
+ result = emit_block_cmp_via_loop (x, y, len, len_type,
+ target, equality_only,
+ align, ctz_len);
+
return result;
}
+
+/* Like emit_block_cmp_hints, but with known alignment and no support
+ for constats. Always expand to a loop with iterations that compare
+ blocks of the largest compare-by-pieces size that divides both len
+ and align, and then, if !EQUALITY_ONLY, identify the word and then
+ the unit that first differs to return the result. */
+
+rtx
+emit_block_cmp_via_loop (rtx x, rtx y, rtx len, tree len_type, rtx target,
+ bool equality_only, unsigned align, unsigned ctz_len)
+{
+ unsigned incr = align / BITS_PER_UNIT;
+
+ if (CONST_INT_P (len))
+ ctz_len = MAX (ctz_len, (unsigned) wi::ctz (UINTVAL (len)));
+
+ if (HOST_WIDE_INT_1U << ctz_len < (unsigned HOST_WIDE_INT) incr)
+ incr = HOST_WIDE_INT_1U << ctz_len;
+
+ while (incr > 1
+ && !can_do_by_pieces (incr, align, COMPARE_BY_PIECES))
+ incr >>= 1;
+
+ rtx_code_label *cmp_label, *top_label, *ne_label, *res_label;
+ rtx iter, x_addr, y_addr, tmp;
+ machine_mode x_addr_mode = get_address_mode (x);
+ machine_mode y_addr_mode = get_address_mode (y);
+ machine_mode iter_mode;
+
+ iter_mode = GET_MODE (len);
+ if (iter_mode == VOIDmode)
+ iter_mode = word_mode;
+
+ rtx iter_init = const0_rtx;
+ rtx_code iter_cond = LTU;
+ rtx_code entry_cond = GEU;
+ rtx iter_limit = len;
+ rtx iter_incr = GEN_INT (incr);
+ machine_mode cmp_mode;
+
+ /* We can drop the loop back edge if we know there's exactly one
+ iteration. */
+ top_label = (!rtx_equal_p (len, iter_incr)
+ ? gen_label_rtx ()
+ : NULL);
+ /* We need not test before entering the loop if len is known
+ nonzero. ??? This could be even stricter, testing whether a
+ nonconstant LEN could possibly be zero. */
+ cmp_label = (!CONSTANT_P (len) || rtx_equal_p (len, iter_init)
+ ? gen_label_rtx ()
+ : NULL);
+ ne_label = gen_label_rtx ();
+ res_label = gen_label_rtx ();
+
+ iter = gen_reg_rtx (iter_mode);
+ emit_move_insn (iter, iter_init);
+
+ scalar_int_mode int_cmp_mode
+ = smallest_int_mode_for_size (incr * BITS_PER_UNIT);
+ if (GET_MODE_BITSIZE (int_cmp_mode) != incr * BITS_PER_UNIT
+ || !can_compare_p (NE, int_cmp_mode, ccp_jump))
+ {
+ cmp_mode = BLKmode;
+ gcc_checking_assert (incr != 1);
+ }
+ else
+ cmp_mode = int_cmp_mode;
+
+ /* Save the base addresses. */
+ x_addr = force_operand (XEXP (x, 0), NULL_RTX);
+ y_addr = force_operand (XEXP (y, 0), NULL_RTX);
+ do_pending_stack_adjust ();
+
+ if (cmp_label)
+ {
+ if (top_label)
+ emit_jump (cmp_label);
+ else
+ emit_cmp_and_jump_insns (iter, iter_limit, entry_cond,
+ NULL_RTX, iter_mode,
+ true, cmp_label,
+ profile_probability::guessed_always ()
+ .apply_scale (1, 10));
+ }
+ if (top_label)
+ emit_label (top_label);
+
+ /* Offset the base addresses by ITER. */
+ tmp = convert_modes (x_addr_mode, iter_mode, iter, true);
+ x_addr = simplify_gen_binary (PLUS, x_addr_mode, x_addr, tmp);
+
+ if (x_addr_mode != y_addr_mode)
+ tmp = convert_modes (y_addr_mode, iter_mode, iter, true);
+ y_addr = simplify_gen_binary (PLUS, y_addr_mode, y_addr, tmp);
+
+ x = change_address (x, cmp_mode, x_addr);
+ y = change_address (y, cmp_mode, y_addr);
+
+ /* Compare one block. */
+ rtx part_res;
+ if (cmp_mode == BLKmode)
+ part_res = compare_by_pieces (x, y, incr, target, align, 0, 0);
+ else
+ part_res = expand_binop (cmp_mode, sub_optab, x, y, NULL_RTX,
+ true, OPTAB_LIB_WIDEN);
+
+ /* Stop if we found a difference. */
+ emit_cmp_and_jump_insns (part_res, GEN_INT (0), NE, NULL_RTX,
+ GET_MODE (part_res), true, ne_label,
+ profile_probability::guessed_always ()
+ .apply_scale (1, 10));
+
+ /* Increment ITER. */
+ tmp = expand_simple_binop (iter_mode, PLUS, iter, iter_incr, iter,
+ true, OPTAB_LIB_WIDEN);
+ if (tmp != iter)
+ emit_move_insn (iter, tmp);
+
+ if (cmp_label)
+ emit_label (cmp_label);
+ /* Loop until we reach the limit. */
+
+ if (top_label)
+ emit_cmp_and_jump_insns (iter, iter_limit, iter_cond, NULL_RTX, iter_mode,
+ true, top_label,
+ profile_probability::guessed_always ()
+ .apply_scale (9, 10));
+
+ /* We got to the end without differences, so the result is zero. */
+ if (target == NULL_RTX
+ || !REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
+ target = gen_reg_rtx (TYPE_MODE (integer_type_node));
+
+ emit_move_insn (target, const0_rtx);
+ emit_jump (res_label);
+
+ emit_label (ne_label);
+
+ /* Return nonzero, or pinpoint the difference to return the expected
+ result for non-equality tests. */
+ if (equality_only)
+ emit_move_insn (target, const1_rtx);
+ else
+ {
+ if (incr > UNITS_PER_WORD)
+ /* ??? Re-compare the block found to be different one word at a
+ time. */
+ part_res = emit_block_cmp_via_loop (x, y, GEN_INT (incr), len_type,
+ target, equality_only,
+ BITS_PER_WORD, 0);
+ else if (incr > 1)
+ /* ??? Re-compare the block found to be different one byte at a
+ time. We could do better using part_res, and being careful
+ about endianness. */
+ part_res = emit_block_cmp_via_loop (x, y, GEN_INT (incr), len_type,
+ target, equality_only,
+ BITS_PER_UNIT, 0);
+ else if (known_gt (GET_MODE_BITSIZE (GET_MODE (target)),
+ GET_MODE_BITSIZE (cmp_mode)))
+ part_res = expand_binop (GET_MODE (target), sub_optab, x, y, target,
+ true, OPTAB_LIB_WIDEN);
+ else
+ {
+ /* In the odd chance target is QImode, we can't count on
+ widening subtract to capture the result of the unsigned
+ compares. */
+ rtx_code_label *ltu_label;
+ ltu_label = gen_label_rtx ();
+ emit_cmp_and_jump_insns (x, y, LTU, NULL_RTX,
+ cmp_mode, true, ltu_label,
+ profile_probability::guessed_always ()
+ .apply_scale (5, 10));
+
+ emit_move_insn (target, const1_rtx);
+ emit_jump (res_label);
+
+ emit_label (ltu_label);
+ emit_move_insn (target, constm1_rtx);
+ part_res = target;
+ }
+
+ if (target != part_res)
+ convert_move (target, part_res, false);
+ }
+
+ emit_label (res_label);
+
+ return target;
+}
+
/* Copy all or part of a value X into registers starting at REGNO.
The number of registers to be filled is NREGS. */
diff --git a/gcc/expr.h b/gcc/expr.h
index 2a17286..431616d 100644
--- a/gcc/expr.h
+++ b/gcc/expr.h
@@ -126,7 +126,8 @@ struct by_pieces_prev
fixed_size_mode mode;
};
-extern rtx emit_block_move (rtx, rtx, rtx, enum block_op_methods);
+extern rtx emit_block_move (rtx, rtx, rtx, enum block_op_methods,
+ unsigned ctz_size = 0);
extern rtx emit_block_move_hints (rtx, rtx, rtx, enum block_op_methods,
unsigned int, HOST_WIDE_INT,
unsigned HOST_WIDE_INT,
@@ -134,9 +135,11 @@ extern rtx emit_block_move_hints (rtx, rtx, rtx, enum block_op_methods,
unsigned HOST_WIDE_INT,
bool bail_out_libcall = false,
bool *is_move_done = NULL,
- bool might_overlap = false);
+ bool might_overlap = false,
+ unsigned ctz_size = 0);
extern rtx emit_block_cmp_hints (rtx, rtx, rtx, tree, rtx, bool,
- by_pieces_constfn, void *);
+ by_pieces_constfn, void *,
+ unsigned ctz_len = 0);
extern bool emit_storent_insn (rtx to, rtx from);
/* Copy all or part of a value X into registers starting at REGNO.
diff --git a/gcc/flag-types.h b/gcc/flag-types.h
index c1852cd..650ae08 100644
--- a/gcc/flag-types.h
+++ b/gcc/flag-types.h
@@ -447,6 +447,17 @@ enum gfc_convert
};
+/* Inline String Operations functions. */
+enum ilsop_fn
+{
+ ILSOP_NONE = 0,
+ ILSOP_MEMSET = 1 << 0,
+ ILSOP_MEMCPY = 1 << 1,
+ ILSOP_MEMMOVE = 1 << 2,
+ ILSOP_MEMCMP = 1 << 3,
+ ILSOP_ALL = -1
+};
+
/* Control-Flow Protection values. */
enum cf_protection_level
{
diff --git a/gcc/fold-const.cc b/gcc/fold-const.cc
index 332bc8a..2692b98 100644
--- a/gcc/fold-const.cc
+++ b/gcc/fold-const.cc
@@ -10803,27 +10803,38 @@ fold_vec_perm_cst (tree type, tree arg0, tree arg1, const vec_perm_indices &sel,
unsigned res_npatterns, res_nelts_per_pattern;
unsigned HOST_WIDE_INT res_nelts;
- /* (1) If SEL is a suitable mask as determined by
- valid_mask_for_fold_vec_perm_cst_p, then:
- res_npatterns = max of npatterns between ARG0, ARG1, and SEL
- res_nelts_per_pattern = max of nelts_per_pattern between
- ARG0, ARG1 and SEL.
- (2) If SEL is not a suitable mask, and TYPE is VLS then:
- res_npatterns = nelts in result vector.
- res_nelts_per_pattern = 1.
- This exception is made so that VLS ARG0, ARG1 and SEL work as before. */
- if (valid_mask_for_fold_vec_perm_cst_p (arg0, arg1, sel, reason))
- {
- res_npatterns
- = std::max (VECTOR_CST_NPATTERNS (arg0),
- std::max (VECTOR_CST_NPATTERNS (arg1),
- sel.encoding ().npatterns ()));
+ /* First try to implement the fold in a VLA-friendly way.
+
+ (1) If the selector is simply a duplication of N elements, the
+ result is likewise a duplication of N elements.
+
+ (2) If the selector is N elements followed by a duplication
+ of N elements, the result is too.
+
+ (3) If the selector is N elements followed by an interleaving
+ of N linear series, the situation is more complex.
+
+ valid_mask_for_fold_vec_perm_cst_p detects whether we
+ can handle this case. If we can, then each of the N linear
+ series either (a) selects the same element each time or
+ (b) selects a linear series from one of the input patterns.
- res_nelts_per_pattern
- = std::max (VECTOR_CST_NELTS_PER_PATTERN (arg0),
- std::max (VECTOR_CST_NELTS_PER_PATTERN (arg1),
- sel.encoding ().nelts_per_pattern ()));
+ If (b) holds for one of the linear series, the result
+ will contain a linear series, and so the result will have
+ the same shape as the selector. If (a) holds for all of
+ the linear series, the result will be the same as (2) above.
+ (b) can only hold if one of the input patterns has a
+ stepped encoding. */
+
+ if (valid_mask_for_fold_vec_perm_cst_p (arg0, arg1, sel, reason))
+ {
+ res_npatterns = sel.encoding ().npatterns ();
+ res_nelts_per_pattern = sel.encoding ().nelts_per_pattern ();
+ if (res_nelts_per_pattern == 3
+ && VECTOR_CST_NELTS_PER_PATTERN (arg0) < 3
+ && VECTOR_CST_NELTS_PER_PATTERN (arg1) < 3)
+ res_nelts_per_pattern = 2;
res_nelts = res_npatterns * res_nelts_per_pattern;
}
else if (TYPE_VECTOR_SUBPARTS (type).is_constant (&res_nelts))
@@ -14552,7 +14563,7 @@ multiple_of_p (tree type, const_tree top, const_tree bottom, bool nowrap)
&& TREE_CODE (op2) == INTEGER_CST
&& integer_pow2p (bottom)
&& wi::multiple_of_p (wi::to_widest (op2),
- wi::to_widest (bottom), UNSIGNED))
+ wi::to_widest (bottom), SIGNED))
return true;
op1 = gimple_assign_rhs1 (stmt);
@@ -17622,6 +17633,29 @@ test_nunits_min_2 (machine_mode vmode)
tree expected_res[] = { ARG0(0), ARG1(0), ARG1(1) };
validate_res (1, 3, res, expected_res);
}
+
+ /* Case 8: Same as aarch64/sve/slp_3.c:
+ arg0, arg1 are dup vectors.
+ sel = { 0, len, 1, len+1, 2, len+2, ... } // (2, 3)
+ So res = { arg0[0], arg1[0], ... } // (2, 1)
+
+ In this case, since the input vectors are dup, only the first two
+ elements per pattern in sel are considered significant. */
+ {
+ tree arg0 = build_vec_cst_rand (vmode, 1, 1);
+ tree arg1 = build_vec_cst_rand (vmode, 1, 1);
+ poly_uint64 len = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
+
+ vec_perm_builder builder (len, 2, 3);
+ poly_uint64 mask_elems[] = { 0, len, 1, len + 1, 2, len + 2 };
+ builder_push_elems (builder, mask_elems);
+
+ vec_perm_indices sel (builder, 2, len);
+ tree res = fold_vec_perm_cst (TREE_TYPE (arg0), arg0, arg1, sel);
+
+ tree expected_res[] = { ARG0(0), ARG1(0) };
+ validate_res (2, 1, res, expected_res);
+ }
}
}
@@ -17790,6 +17824,44 @@ test_nunits_min_4 (machine_mode vmode)
ASSERT_TRUE (res == NULL_TREE);
ASSERT_TRUE (!strcmp (reason, "step is not multiple of npatterns"));
}
+
+ /* Case 8: PR111754: When input vector is not a stepped sequence,
+ check that the result is not a stepped sequence either, even
+ if sel has a stepped sequence. */
+ {
+ tree arg0 = build_vec_cst_rand (vmode, 1, 2);
+ poly_uint64 len = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
+
+ vec_perm_builder builder (len, 1, 3);
+ poly_uint64 mask_elems[] = { 0, 1, 2 };
+ builder_push_elems (builder, mask_elems);
+
+ vec_perm_indices sel (builder, 1, len);
+ tree res = fold_vec_perm_cst (TREE_TYPE (arg0), arg0, arg0, sel);
+
+ tree expected_res[] = { ARG0(0), ARG0(1) };
+ validate_res (sel.encoding ().npatterns (), 2, res, expected_res);
+ }
+
+ /* Case 9: If sel doesn't contain a stepped sequence,
+ check that the result has same encoding as sel, irrespective
+ of shape of input vectors. */
+ {
+ tree arg0 = build_vec_cst_rand (vmode, 1, 3, 1);
+ tree arg1 = build_vec_cst_rand (vmode, 1, 3, 1);
+ poly_uint64 len = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
+
+ vec_perm_builder builder (len, 1, 2);
+ poly_uint64 mask_elems[] = { 0, len };
+ builder_push_elems (builder, mask_elems);
+
+ vec_perm_indices sel (builder, 2, len);
+ tree res = fold_vec_perm_cst (TREE_TYPE (arg0), arg0, arg1, sel);
+
+ tree expected_res[] = { ARG0(0), ARG1(0) };
+ validate_res (sel.encoding ().npatterns (),
+ sel.encoding ().nelts_per_pattern (), res, expected_res);
+ }
}
}
diff --git a/gcc/fold-mem-offsets.cc b/gcc/fold-mem-offsets.cc
index 6263fc7..7ba5600 100644
--- a/gcc/fold-mem-offsets.cc
+++ b/gcc/fold-mem-offsets.cc
@@ -154,7 +154,7 @@ static int stats_fold_count;
The definition is desired for REG used in INSN.
Return the definition insn or NULL if there's no definition with
the desired criteria. */
-static rtx_insn*
+static rtx_insn *
get_single_def_in_bb (rtx_insn *insn, rtx reg)
{
df_ref use;
@@ -205,11 +205,10 @@ get_single_def_in_bb (rtx_insn *insn, rtx reg)
/* Get all uses of REG which is set in INSN. Return the use list or NULL if a
use is missing / irregular. If SUCCESS is not NULL then set it to false if
there are missing / irregular uses and true otherwise. */
-static struct df_link*
+static df_link *
get_uses (rtx_insn *insn, rtx reg, bool *success)
{
df_ref def;
- struct df_link *ref_chain, *ref_link;
if (success)
*success = false;
@@ -221,18 +220,30 @@ get_uses (rtx_insn *insn, rtx reg, bool *success)
if (!def)
return NULL;
- ref_chain = DF_REF_CHAIN (def);
+ df_link *ref_chain = DF_REF_CHAIN (def);
+ int insn_luid = DF_INSN_LUID (insn);
+ basic_block insn_bb = BLOCK_FOR_INSN (insn);
- for (ref_link = ref_chain; ref_link; ref_link = ref_link->next)
+ for (df_link *ref_link = ref_chain; ref_link; ref_link = ref_link->next)
{
/* Problem getting a use for this instruction. */
if (ref_link->ref == NULL)
return NULL;
if (DF_REF_CLASS (ref_link->ref) != DF_REF_REGULAR)
return NULL;
+
+ rtx_insn *use = DF_REF_INSN (ref_link->ref);
+ if (DEBUG_INSN_P (use))
+ continue;
+
/* We do not handle REG_EQUIV/REG_EQ notes for now. */
if (DF_REF_FLAGS (ref_link->ref) & DF_REF_IN_NOTE)
return NULL;
+ if (BLOCK_FOR_INSN (use) != insn_bb)
+ return NULL;
+ /* Punt if use appears before def in the basic block. See PR111601. */
+ if (DF_INSN_LUID (use) < insn_luid)
+ return NULL;
}
if (success)
@@ -255,8 +266,7 @@ fold_offsets (rtx_insn *insn, rtx reg, bool analyze, bitmap foldable_insns);
If DO_RECURSION is true and ANALYZE is false then offset that would result
from folding is computed and is returned through the pointer OFFSET_OUT.
- The instructions that can be folded are recorded in FOLDABLE_INSNS.
-*/
+ The instructions that can be folded are recorded in FOLDABLE_INSNS. */
static bool
fold_offsets_1 (rtx_insn *insn, bool analyze, bool do_recursion,
HOST_WIDE_INT *offset_out, bitmap foldable_insns)
@@ -846,8 +856,8 @@ pass_fold_mem_offsets::execute (function *fn)
FOR_ALL_BB_FN (bb, fn)
{
/* There is a conflict between this pass and RISCV's shorten-memrefs
- pass. For now disable folding if optimizing for size because
- otherwise this cancels the effects of shorten-memrefs. */
+ pass. For now disable folding if optimizing for size because
+ otherwise this cancels the effects of shorten-memrefs. */
if (optimize_bb_for_size_p (bb))
continue;
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index 21c5f2a..be4fe9a 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,61 @@
+2023-12-05 Harald Anlauf <anlauf@gmx.de>
+ Tobias Burnus <tobias@codesourcery.com>
+
+ PR fortran/100988
+ * gfortran.h (IS_PROC_POINTER): New macro.
+ * trans-types.cc (gfc_sym_type): Use macro in determination if the
+ restrict qualifier can be used for a dummy variable. Fix logic to
+ allow the restrict qualifier also for optional arguments, and to
+ not apply it to pointer or proc_pointer arguments.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * f95-lang.cc (gfc_gnu_attribute_table): Add extra braces to work
+ around PR 16333 in older compilers.
+
+2023-12-02 Harald Anlauf <anlauf@gmx.de>
+
+ PR fortran/93762
+ PR fortran/100651
+ * trans-array.cc (gfc_trans_deferred_array): Add presence check
+ for optional deferred-length character dummy arguments.
+ * trans-expr.cc (gfc_conv_missing_dummy): The character length for
+ deferred-length dummy arguments is passed by reference, so that
+ its value can be returned. Adjust handling for optional dummies.
+
+2023-12-02 Richard Sandiford <richard.sandiford@arm.com>
+
+ * f95-lang.cc: Include attribs.h.
+ (gfc_attribute_table): Change to an array of scoped_attribute_specs
+ pointers, using...
+ (gfc_gnu_attributes, gfc_gnu_attribute_table): ...these new globals.
+
+2023-12-01 Harald Anlauf <anlauf@gmx.de>
+
+ PR fortran/112772
+ * trans-expr.cc (gfc_conv_class_to_class): Make copy-out conditional
+ on the presence of an OPTIONAL CLASS argument passed to an OPTIONAL
+ CLASS dummy.
+
+2023-11-30 Harald Anlauf <anlauf@gmx.de>
+
+ PR fortran/112764
+ * primary.cc (gfc_variable_attr): Set TARGET attribute of associating
+ entity dependent on TARGET or POINTER attribute of selector.
+
+2023-11-28 Andrew Jenner <andrew@codesourcery.com>
+ Tobias Burnus <tobias@codesourcery.com>
+
+ PR fortran/110415
+ * trans-expr.cc (trans_class_vptr_len_assignment): Add
+ from_vptrp parameter. Populate it. Don't check for DECL_P
+ when deciding whether to create temporary.
+ (trans_class_pointer_fcn, gfc_trans_pointer_assignment): Add
+ NULL argument to trans_class_vptr_len_assignment calls.
+ (trans_class_assignment): Get rhs_vptr from
+ trans_class_vptr_len_assignment and use it for determining size
+ for allocation/reallocation. Use return value from realloc.
+
2023-11-26 Harald Anlauf <anlauf@gmx.de>
PR fortran/111880
diff --git a/gcc/fortran/f95-lang.cc b/gcc/fortran/f95-lang.cc
index 350e6e3..32fddcd 100644
--- a/gcc/fortran/f95-lang.cc
+++ b/gcc/fortran/f95-lang.cc
@@ -39,6 +39,7 @@ along with GCC; see the file COPYING3. If not see
#include "cpp.h"
#include "trans-types.h"
#include "trans-const.h"
+#include "attribs.h"
/* Language-dependent contents of an identifier. */
@@ -87,7 +88,7 @@ gfc_handle_omp_declare_target_attribute (tree *, tree, tree, int, bool *)
}
/* Table of valid Fortran attributes. */
-static const struct attribute_spec gfc_attribute_table[] =
+static const attribute_spec gfc_gnu_attributes[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
@@ -97,7 +98,16 @@ static const struct attribute_spec gfc_attribute_table[] =
gfc_handle_omp_declare_target_attribute, NULL },
{ "oacc function", 0, -1, true, false, false, false,
gfc_handle_omp_declare_target_attribute, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
+};
+
+static const scoped_attribute_specs gfc_gnu_attribute_table =
+{
+ "gnu", { gfc_gnu_attributes }
+};
+
+static const scoped_attribute_specs *const gfc_attribute_table[] =
+{
+ &gfc_gnu_attribute_table
};
/* Get a value for the SARIF v2.1.0 "artifact.sourceLanguage" property,
diff --git a/gcc/fortran/gfortran.h b/gcc/fortran/gfortran.h
index aa3f6cb..a77441f 100644
--- a/gcc/fortran/gfortran.h
+++ b/gcc/fortran/gfortran.h
@@ -4008,6 +4008,9 @@ bool gfc_may_be_finalized (gfc_typespec);
#define IS_POINTER(sym) \
(sym->ts.type == BT_CLASS && sym->attr.class_ok && CLASS_DATA (sym) \
? CLASS_DATA (sym)->attr.class_pointer : sym->attr.pointer)
+#define IS_PROC_POINTER(sym) \
+ (sym->ts.type == BT_CLASS && sym->attr.class_ok && CLASS_DATA (sym) \
+ ? CLASS_DATA (sym)->attr.proc_pointer : sym->attr.proc_pointer)
/* frontend-passes.cc */
diff --git a/gcc/fortran/primary.cc b/gcc/fortran/primary.cc
index d3aeeb8..7278932 100644
--- a/gcc/fortran/primary.cc
+++ b/gcc/fortran/primary.cc
@@ -2653,6 +2653,22 @@ gfc_variable_attr (gfc_expr *expr, gfc_typespec *ts)
if (pointer || attr.proc_pointer)
target = 1;
+ /* F2018:11.1.3.3: Other attributes of associate names
+ "The associating entity does not have the ALLOCATABLE or POINTER
+ attributes; it has the TARGET attribute if and only if the selector is
+ a variable and has either the TARGET or POINTER attribute." */
+ if (sym->attr.associate_var && sym->assoc && sym->assoc->target)
+ {
+ if (sym->assoc->target->expr_type == EXPR_VARIABLE)
+ {
+ symbol_attribute tgt_attr;
+ tgt_attr = gfc_expr_attr (sym->assoc->target);
+ target = (tgt_attr.pointer || tgt_attr.target);
+ }
+ else
+ target = 0;
+ }
+
if (ts != NULL && expr->ts.type == BT_UNKNOWN)
*ts = sym->ts;
diff --git a/gcc/fortran/trans-array.cc b/gcc/fortran/trans-array.cc
index bbb81f4..82f60a6 100644
--- a/gcc/fortran/trans-array.cc
+++ b/gcc/fortran/trans-array.cc
@@ -11430,6 +11430,15 @@ gfc_trans_deferred_array (gfc_symbol * sym, gfc_wrapped_block * block)
{
gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
gfc_trans_vla_type_sizes (sym, &init);
+
+ /* Presence check of optional deferred-length character dummy. */
+ if (sym->ts.deferred && sym->attr.dummy && sym->attr.optional)
+ {
+ tmp = gfc_finish_block (&init);
+ tmp = build3_v (COND_EXPR, gfc_conv_expr_present (sym),
+ tmp, build_empty_stmt (input_location));
+ gfc_add_expr_to_block (&init, tmp);
+ }
}
/* Dummy, use associated and result variables don't need anything special. */
diff --git a/gcc/fortran/trans-expr.cc b/gcc/fortran/trans-expr.cc
index 50c4604..ea08729 100644
--- a/gcc/fortran/trans-expr.cc
+++ b/gcc/fortran/trans-expr.cc
@@ -1365,6 +1365,15 @@ gfc_conv_class_to_class (gfc_se *parmse, gfc_expr *e, gfc_typespec class_ts,
tmp = build3_loc (input_location, COND_EXPR, void_type_node,
cond, tmp, tmp2);
gfc_add_expr_to_block (&parmse->pre, tmp);
+
+ if (!elemental && full_array && copyback)
+ {
+ tmp2 = build_empty_stmt (input_location);
+ tmp = gfc_finish_block (&parmse->post);
+ tmp = build3_loc (input_location, COND_EXPR, void_type_node,
+ cond, tmp, tmp2);
+ gfc_add_expr_to_block (&parmse->post, tmp);
+ }
}
else
gfc_add_block_to_block (&parmse->pre, &block);
@@ -2116,10 +2125,24 @@ gfc_conv_missing_dummy (gfc_se * se, gfc_expr * arg, gfc_typespec ts, int kind)
if (ts.type == BT_CHARACTER)
{
- tmp = build_int_cst (gfc_charlen_type_node, 0);
- tmp = fold_build3_loc (input_location, COND_EXPR, gfc_charlen_type_node,
- present, se->string_length, tmp);
- tmp = gfc_evaluate_now (tmp, &se->pre);
+ /* Handle deferred-length dummies that pass the character length by
+ reference so that the value can be returned. */
+ if (ts.deferred && INDIRECT_REF_P (se->string_length))
+ {
+ tmp = gfc_build_addr_expr (NULL_TREE, se->string_length);
+ tmp = fold_build3_loc (input_location, COND_EXPR, TREE_TYPE (tmp),
+ present, tmp, null_pointer_node);
+ tmp = gfc_evaluate_now (tmp, &se->pre);
+ tmp = build_fold_indirect_ref_loc (input_location, tmp);
+ }
+ else
+ {
+ tmp = build_int_cst (gfc_charlen_type_node, 0);
+ tmp = fold_build3_loc (input_location, COND_EXPR,
+ gfc_charlen_type_node,
+ present, se->string_length, tmp);
+ tmp = gfc_evaluate_now (tmp, &se->pre);
+ }
se->string_length = tmp;
}
return;
@@ -9936,7 +9959,8 @@ trans_get_upoly_len (stmtblock_t *block, gfc_expr *expr)
static tree
trans_class_vptr_len_assignment (stmtblock_t *block, gfc_expr * le,
gfc_expr * re, gfc_se *rse,
- tree * to_lenp, tree * from_lenp)
+ tree * to_lenp, tree * from_lenp,
+ tree * from_vptrp)
{
gfc_se se;
gfc_expr * vptr_expr;
@@ -9944,10 +9968,11 @@ trans_class_vptr_len_assignment (stmtblock_t *block, gfc_expr * le,
bool set_vptr = false, temp_rhs = false;
stmtblock_t *pre = block;
tree class_expr = NULL_TREE;
+ tree from_vptr = NULL_TREE;
/* Create a temporary for complicated expressions. */
if (re->expr_type != EXPR_VARIABLE && re->expr_type != EXPR_NULL
- && rse->expr != NULL_TREE && !DECL_P (rse->expr))
+ && rse->expr != NULL_TREE)
{
if (re->ts.type == BT_CLASS && !GFC_CLASS_TYPE_P (TREE_TYPE (rse->expr)))
class_expr = gfc_get_class_from_expr (rse->expr);
@@ -10044,6 +10069,7 @@ trans_class_vptr_len_assignment (stmtblock_t *block, gfc_expr * le,
tmp = rse->expr;
se.expr = gfc_class_vptr_get (tmp);
+ from_vptr = se.expr;
if (UNLIMITED_POLY (re))
from_len = gfc_class_len_get (tmp);
@@ -10065,6 +10091,7 @@ trans_class_vptr_len_assignment (stmtblock_t *block, gfc_expr * le,
gfc_free_expr (vptr_expr);
gfc_add_block_to_block (block, &se.pre);
gcc_assert (se.post.head == NULL_TREE);
+ from_vptr = se.expr;
}
gfc_add_modify (pre, lhs_vptr, fold_convert (TREE_TYPE (lhs_vptr),
se.expr));
@@ -10093,11 +10120,13 @@ trans_class_vptr_len_assignment (stmtblock_t *block, gfc_expr * le,
}
}
- /* Return the _len trees only, when requested. */
+ /* Return the _len and _vptr trees only, when requested. */
if (to_lenp)
*to_lenp = to_len;
if (from_lenp)
*from_lenp = from_len;
+ if (from_vptrp)
+ *from_vptrp = from_vptr;
return lhs_vptr;
}
@@ -10166,7 +10195,7 @@ trans_class_pointer_fcn (stmtblock_t *block, gfc_se *lse, gfc_se *rse,
{
expr1_vptr = trans_class_vptr_len_assignment (block, expr1,
expr2, rse,
- NULL, NULL);
+ NULL, NULL, NULL);
gfc_add_block_to_block (block, &rse->pre);
tmp = gfc_create_var (TREE_TYPE (rse->expr), "ptrtemp");
gfc_add_modify (&lse->pre, tmp, rse->expr);
@@ -10242,7 +10271,7 @@ gfc_trans_pointer_assignment (gfc_expr * expr1, gfc_expr * expr2)
if (non_proc_ptr_assign && expr1->ts.type == BT_CLASS)
{
trans_class_vptr_len_assignment (&block, expr1, expr2, &rse, NULL,
- NULL);
+ NULL, NULL);
lse.expr = gfc_class_data_get (lse.expr);
}
@@ -10371,7 +10400,8 @@ gfc_trans_pointer_assignment (gfc_expr * expr1, gfc_expr * expr2)
if (expr1->ts.type == BT_CLASS)
expr1_vptr = trans_class_vptr_len_assignment (&block, expr1,
expr2, &rse,
- NULL, NULL);
+ NULL, NULL,
+ NULL);
}
}
else if (expr2->expr_type == EXPR_VARIABLE)
@@ -10388,7 +10418,7 @@ gfc_trans_pointer_assignment (gfc_expr * expr1, gfc_expr * expr2)
rse.expr = NULL_TREE;
rse.string_length = strlen_rhs;
trans_class_vptr_len_assignment (&block, expr1, expr2, &rse,
- NULL, NULL);
+ NULL, NULL, NULL);
}
if (remap == NULL)
@@ -10421,7 +10451,7 @@ gfc_trans_pointer_assignment (gfc_expr * expr1, gfc_expr * expr2)
{
expr1_vptr = trans_class_vptr_len_assignment (&block, expr1,
expr2, &rse, NULL,
- NULL);
+ NULL, NULL);
gfc_add_block_to_block (&block, &rse.pre);
tmp = gfc_create_var (TREE_TYPE (rse.expr), "ptrtemp");
gfc_add_modify (&lse.pre, tmp, rse.expr);
@@ -11819,7 +11849,7 @@ trans_class_assignment (stmtblock_t *block, gfc_expr *lhs, gfc_expr *rhs,
gfc_se *lse, gfc_se *rse, bool use_vptr_copy,
bool class_realloc)
{
- tree tmp, fcn, stdcopy, to_len, from_len, vptr, old_vptr;
+ tree tmp, fcn, stdcopy, to_len, from_len, vptr, old_vptr, rhs_vptr;
vec<tree, va_gc> *args = NULL;
bool final_expr;
@@ -11843,7 +11873,9 @@ trans_class_assignment (stmtblock_t *block, gfc_expr *lhs, gfc_expr *rhs,
}
vptr = trans_class_vptr_len_assignment (block, lhs, rhs, rse, &to_len,
- &from_len);
+ &from_len, &rhs_vptr);
+ if (rhs_vptr == NULL_TREE)
+ rhs_vptr = vptr;
/* Generate (re)allocation of the lhs. */
if (class_realloc)
@@ -11856,7 +11888,7 @@ trans_class_assignment (stmtblock_t *block, gfc_expr *lhs, gfc_expr *rhs,
else
old_vptr = build_int_cst (TREE_TYPE (vptr), 0);
- size = gfc_vptr_size_get (vptr);
+ size = gfc_vptr_size_get (rhs_vptr);
tmp = lse->expr;
class_han = GFC_CLASS_TYPE_P (TREE_TYPE (tmp))
? gfc_class_data_get (tmp) : tmp;
@@ -11870,12 +11902,14 @@ trans_class_assignment (stmtblock_t *block, gfc_expr *lhs, gfc_expr *rhs,
/* Reallocate if dynamic types are different. */
gfc_init_block (&re_alloc);
+ tmp = fold_convert (pvoid_type_node, class_han);
re = build_call_expr_loc (input_location,
builtin_decl_explicit (BUILT_IN_REALLOC), 2,
- fold_convert (pvoid_type_node, class_han),
- size);
+ tmp, size);
+ re = fold_build2_loc (input_location, MODIFY_EXPR, TREE_TYPE (tmp), tmp,
+ re);
tmp = fold_build2_loc (input_location, NE_EXPR,
- logical_type_node, vptr, old_vptr);
+ logical_type_node, rhs_vptr, old_vptr);
re = fold_build3_loc (input_location, COND_EXPR, void_type_node,
tmp, re, build_empty_stmt (input_location));
gfc_add_expr_to_block (&re_alloc, re);
diff --git a/gcc/fortran/trans-types.cc b/gcc/fortran/trans-types.cc
index 084b8c3..5b11ffc 100644
--- a/gcc/fortran/trans-types.cc
+++ b/gcc/fortran/trans-types.cc
@@ -2327,8 +2327,8 @@ gfc_sym_type (gfc_symbol * sym, bool is_bind_c)
else
byref = 0;
- restricted = !sym->attr.target && !sym->attr.pointer
- && !sym->attr.proc_pointer && !sym->attr.cray_pointee;
+ restricted = (!sym->attr.target && !IS_POINTER (sym)
+ && !IS_PROC_POINTER (sym) && !sym->attr.cray_pointee);
if (!restricted)
type = gfc_nonrestricted_type (type);
@@ -2384,11 +2384,10 @@ gfc_sym_type (gfc_symbol * sym, bool is_bind_c)
|| (sym->ns->proc_name && sym->ns->proc_name->attr.entry_master))
type = build_pointer_type (type);
else
- {
- type = build_reference_type (type);
- if (restricted)
- type = build_qualified_type (type, TYPE_QUAL_RESTRICT);
- }
+ type = build_reference_type (type);
+
+ if (restricted)
+ type = build_qualified_type (type, TYPE_QUAL_RESTRICT);
}
return (type);
diff --git a/gcc/function.cc b/gcc/function.cc
index 527ea48..8984178 100644
--- a/gcc/function.cc
+++ b/gcc/function.cc
@@ -84,6 +84,7 @@ along with GCC; see the file COPYING3. If not see
#include "function-abi.h"
#include "value-range.h"
#include "gimple-range.h"
+#include "insn-attr.h"
/* So we can assign to cfun in this file. */
#undef cfun
@@ -6207,7 +6208,17 @@ thread_prologue_and_epilogue_insns (void)
if (!(CALL_P (insn) && SIBLING_CALL_P (insn)))
continue;
- if (rtx_insn *ep_seq = targetm.gen_sibcall_epilogue ())
+ rtx_insn *ep_seq;
+ if (targetm.emit_epilogue_for_sibcall)
+ {
+ start_sequence ();
+ targetm.emit_epilogue_for_sibcall (as_a<rtx_call_insn *> (insn));
+ ep_seq = get_insns ();
+ end_sequence ();
+ }
+ else
+ ep_seq = targetm.gen_sibcall_epilogue ();
+ if (ep_seq)
{
start_sequence ();
emit_note (NOTE_INSN_EPILOGUE_BEG);
@@ -6267,7 +6278,8 @@ reposition_prologue_and_epilogue_notes (void)
{
if (!targetm.have_prologue ()
&& !targetm.have_epilogue ()
- && !targetm.have_sibcall_epilogue ())
+ && !targetm.have_sibcall_epilogue ()
+ && !targetm.emit_epilogue_for_sibcall)
return;
/* Since the hash table is created on demand, the fact that it is
@@ -6629,6 +6641,11 @@ public:
{}
/* opt_pass methods: */
+ bool gate (function *) final override
+ {
+ return !targetm.use_late_prologue_epilogue ();
+ }
+
unsigned int execute (function * fun) final override
{
rest_of_handle_thread_prologue_and_epilogue (fun);
@@ -6637,6 +6654,44 @@ public:
}; // class pass_thread_prologue_and_epilogue
+const pass_data pass_data_late_thread_prologue_and_epilogue =
+{
+ RTL_PASS, /* type */
+ "late_pro_and_epilogue", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_THREAD_PROLOGUE_AND_EPILOGUE, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
+};
+
+class pass_late_thread_prologue_and_epilogue : public rtl_opt_pass
+{
+public:
+ pass_late_thread_prologue_and_epilogue (gcc::context *ctxt)
+ : rtl_opt_pass (pass_data_late_thread_prologue_and_epilogue, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ bool gate (function *) final override
+ {
+ return targetm.use_late_prologue_epilogue ();
+ }
+
+ unsigned int execute (function *fn) final override
+ {
+ /* It's not currently possible to have both delay slots and
+ late prologue/epilogue, since the latter has to run before
+ the former, and the former won't honor whatever restrictions
+ the latter is trying to enforce. */
+ gcc_assert (!DELAY_SLOTS);
+ rest_of_handle_thread_prologue_and_epilogue (fn);
+ return 0;
+ }
+}; // class pass_late_thread_prologue_and_epilogue
+
} // anon namespace
rtl_opt_pass *
@@ -6645,6 +6700,12 @@ make_pass_thread_prologue_and_epilogue (gcc::context *ctxt)
return new pass_thread_prologue_and_epilogue (ctxt);
}
+rtl_opt_pass *
+make_pass_late_thread_prologue_and_epilogue (gcc::context *ctxt)
+{
+ return new pass_late_thread_prologue_and_epilogue (ctxt);
+}
+
namespace {
const pass_data pass_data_zero_call_used_regs =
diff --git a/gcc/function.h b/gcc/function.h
index 2984656..833c35e 100644
--- a/gcc/function.h
+++ b/gcc/function.h
@@ -518,6 +518,17 @@ set_loops_for_fn (struct function *fn, struct loops *loops)
fn->x_current_loops = loops;
}
+/* Get a new unique dependence clique or zero if none is left. */
+
+inline unsigned short
+get_new_clique (function *fn)
+{
+ unsigned short clique = fn->last_clique + 1;
+ if (clique != 0)
+ fn->last_clique = clique;
+ return clique;
+}
+
/* For backward compatibility... eventually these should all go away. */
#define current_function_funcdef_no (cfun->funcdef_no)
diff --git a/gcc/gcc.cc b/gcc/gcc.cc
index 9f21ad9..03ec6e1 100644
--- a/gcc/gcc.cc
+++ b/gcc/gcc.cc
@@ -4617,6 +4617,7 @@ driver_handle_option (struct gcc_options *opts,
/* -pie is turned on by default. */
validated = true;
#endif
+ /* FALLTHROUGH */
case OPT_r:
case OPT_shared:
case OPT_no_pie:
diff --git a/gcc/gengtype-lex.l b/gcc/gengtype-lex.l
index 34837d9..a7bb44c 100644
--- a/gcc/gengtype-lex.l
+++ b/gcc/gengtype-lex.l
@@ -165,6 +165,9 @@ CXX_KEYWORD inline|public:|private:|protected:|template|operator|friend|static|m
[(){},*:<>;=%/|+\!\?\.-] { return yytext[0]; }
/* ignore pp-directives */
+^{HWS}"#"{HWS}[a-z_]+([^\n]*"\\"\n)+[^\n]*\n {
+ update_lineno (yytext, yyleng);
+}
^{HWS}"#"{HWS}[a-z_]+[^\n]*\n {lexer_line.line++;}
. {
diff --git a/gcc/genhooks.cc b/gcc/genhooks.cc
index 49414ec..135c523 100644
--- a/gcc/genhooks.cc
+++ b/gcc/genhooks.cc
@@ -304,7 +304,12 @@ emit_init_macros (const char *docname)
name, name, hook_array[i].init);
}
if (nest == print_nest)
- printf (" %s, \\\n", name);
+ {
+ if (strcmp (name, "TARGET_ATTRIBUTE_TABLE") == 0)
+ printf (" { %s }, \\\n", name);
+ else
+ printf (" %s, \\\n", name);
+ }
}
}
}
diff --git a/gcc/gimple-lower-bitint.cc b/gcc/gimple-lower-bitint.cc
index 5024815..d2026f6 100644
--- a/gcc/gimple-lower-bitint.cc
+++ b/gcc/gimple-lower-bitint.cc
@@ -1963,7 +1963,7 @@ range_to_prec (tree op, gimple *stmt)
if (TYPE_UNSIGNED (type))
return prec;
else
- return -prec;
+ return MIN ((int) -prec, -2);
}
if (!TYPE_UNSIGNED (TREE_TYPE (op)))
@@ -2179,6 +2179,8 @@ bitint_large_huge::handle_operand_addr (tree op, gimple *stmt,
*prec = MIN ((int) -min_prec, -2);
}
mp = CEIL (min_prec, limb_prec) * limb_prec;
+ if (mp == 0)
+ mp = 1;
if (mp >= (unsigned) TYPE_PRECISION (TREE_TYPE (op)))
type = TREE_TYPE (op);
else
@@ -2622,7 +2624,7 @@ bitint_large_huge::lower_mergeable_stmt (gimple *stmt, tree_code &cmp_code,
{
if (kind == bitint_prec_large || (i == 0 && bo_bit != 0))
idx = size_int (start + i);
- else if (i == cnt - 1)
+ else if (i == cnt - 1 && (rem != 0))
idx = size_int (end);
else if (i == (bo_bit != 0))
idx = create_loop (size_int (start + i), &idx_next);
@@ -3682,6 +3684,8 @@ bitint_large_huge::finish_arith_overflow (tree var, tree obj, tree type,
else
g = gimple_build_assign (lhs2, NOP_EXPR, ovf);
gsi_replace (&gsi, g, true);
+ if (gsi_stmt (m_gsi) == use_stmt)
+ m_gsi = gsi_for_stmt (g);
break;
}
}
@@ -3790,11 +3794,45 @@ bitint_large_huge::lower_addsub_overflow (tree obj, gimple *stmt)
int prec = TYPE_PRECISION (type);
int prec0 = range_to_prec (arg0, stmt);
int prec1 = range_to_prec (arg1, stmt);
- int prec2 = ((prec0 < 0) == (prec1 < 0)
- ? MAX (prec0 < 0 ? -prec0 : prec0,
- prec1 < 0 ? -prec1 : prec1) + 1
- : MAX (prec0 < 0 ? -prec0 : prec0 + 1,
- prec1 < 0 ? -prec1 : prec1 + 1) + 1);
+ /* If PREC0 >= 0 && PREC1 >= 0 and CODE is not MINUS_EXPR, PREC2 is
+ the be minimum unsigned precision of any possible operation's
+ result, otherwise it is minimum signed precision.
+ Some examples:
+ If PREC0 or PREC1 is 8, it means that argument is [0, 0xff],
+ if PREC0 or PREC1 is 10, it means that argument is [0, 0x3ff],
+ if PREC0 or PREC1 is -8, it means that argument is [-0x80, 0x7f],
+ if PREC0 or PREC1 is -10, it means that argument is [-0x200, 0x1ff].
+ PREC0 CODE PREC1 RESULT PREC2 SIGNED vs. UNSIGNED
+ 8 + 8 [0, 0x1fe] 9 UNSIGNED
+ 8 + 10 [0, 0x4fe] 11 UNSIGNED
+ -8 + -8 [-0x100, 0xfe] 9 SIGNED
+ -8 + -10 [-0x280, 0x27e] 11 SIGNED
+ 8 + -8 [-0x80, 0x17e] 10 SIGNED
+ 8 + -10 [-0x200, 0x2fe] 11 SIGNED
+ 10 + -8 [-0x80, 0x47e] 12 SIGNED
+ 8 - 8 [-0xff, 0xff] 9 SIGNED
+ 8 - 10 [-0x3ff, 0xff] 11 SIGNED
+ 10 - 8 [-0xff, 0x3ff] 11 SIGNED
+ -8 - -8 [-0xff, 0xff] 9 SIGNED
+ -8 - -10 [-0x27f, 0x27f] 11 SIGNED
+ -10 - -8 [-0x27f, 0x27f] 11 SIGNED
+ 8 - -8 [-0x7f, 0x17f] 10 SIGNED
+ 8 - -10 [-0x1ff, 0x2ff] 11 SIGNED
+ 10 - -8 [-0x7f, 0x47f] 12 SIGNED
+ -8 - 8 [-0x17f, 0x7f] 10 SIGNED
+ -8 - 10 [-0x47f, 0x7f] 12 SIGNED
+ -10 - 8 [-0x2ff, 0x1ff] 11 SIGNED */
+ int prec2 = MAX (prec0 < 0 ? -prec0 : prec0,
+ prec1 < 0 ? -prec1 : prec1);
+ /* If operands are either both signed or both unsigned,
+ we need just one additional bit. */
+ prec2 = (((prec0 < 0) == (prec1 < 0)
+ /* If one operand is signed and one unsigned and
+ the signed one has larger precision, we need
+ just one extra bit, otherwise two. */
+ || (prec0 < 0 ? (prec2 == -prec0 && prec2 != prec1)
+ : (prec2 == -prec1 && prec2 != prec0)))
+ ? prec2 + 1 : prec2 + 2);
int prec3 = MAX (prec0 < 0 ? -prec0 : prec0,
prec1 < 0 ? -prec1 : prec1);
prec3 = MAX (prec3, prec);
@@ -3873,15 +3911,18 @@ bitint_large_huge::lower_addsub_overflow (tree obj, gimple *stmt)
tree type0 = TREE_TYPE (arg0);
tree type1 = TREE_TYPE (arg1);
- if (TYPE_PRECISION (type0) < prec3)
+ int prec5 = prec3;
+ if (bitint_precision_kind (prec5) < bitint_prec_large)
+ prec5 = MAX (TYPE_PRECISION (type0), TYPE_PRECISION (type1));
+ if (TYPE_PRECISION (type0) < prec5)
{
- type0 = build_bitint_type (prec3, TYPE_UNSIGNED (type0));
+ type0 = build_bitint_type (prec5, TYPE_UNSIGNED (type0));
if (TREE_CODE (arg0) == INTEGER_CST)
arg0 = fold_convert (type0, arg0);
}
- if (TYPE_PRECISION (type1) < prec3)
+ if (TYPE_PRECISION (type1) < prec5)
{
- type1 = build_bitint_type (prec3, TYPE_UNSIGNED (type1));
+ type1 = build_bitint_type (prec5, TYPE_UNSIGNED (type1));
if (TREE_CODE (arg1) == INTEGER_CST)
arg1 = fold_convert (type1, arg1);
}
@@ -4028,11 +4069,11 @@ bitint_large_huge::lower_addsub_overflow (tree obj, gimple *stmt)
edge edge_true_true, edge_true_false, edge_false;
gimple *g2 = NULL;
if (!single_comparison)
- g2 = gimple_build_cond (EQ_EXPR, idx,
+ g2 = gimple_build_cond (NE_EXPR, idx,
size_int (startlimb), NULL_TREE,
NULL_TREE);
if_then_if_then_else (g, g2, profile_probability::likely (),
- profile_probability::unlikely (),
+ profile_probability::likely (),
edge_true_true, edge_true_false,
edge_false);
unsigned tidx = startlimb + (cmp_code == GT_EXPR);
@@ -4199,8 +4240,9 @@ bitint_large_huge::lower_mul_overflow (tree obj, gimple *stmt)
arg0 = handle_operand_addr (arg0, stmt, NULL, &prec0);
arg1 = handle_operand_addr (arg1, stmt, NULL, &prec1);
int prec2 = ((prec0 < 0 ? -prec0 : prec0)
- + (prec1 < 0 ? -prec1 : prec1)
- + ((prec0 < 0) != (prec1 < 0)));
+ + (prec1 < 0 ? -prec1 : prec1));
+ if (prec0 == 1 || prec1 == 1)
+ --prec2;
tree var = NULL_TREE;
tree orig_obj = obj;
bool force_var = false;
@@ -6287,21 +6329,9 @@ gimple_lower_bitint (void)
tree type = NULL_TREE;
/* Middle _BitInt(N) is rewritten to casts to INTEGER_TYPEs
with the same precision and back. */
- if (tree lhs = gimple_get_lhs (stmt))
- if (TREE_CODE (TREE_TYPE (lhs)) == BITINT_TYPE
- && (bitint_precision_kind (TREE_TYPE (lhs))
- == bitint_prec_middle))
- {
- int prec = TYPE_PRECISION (TREE_TYPE (lhs));
- int uns = TYPE_UNSIGNED (TREE_TYPE (lhs));
- type = build_nonstandard_integer_type (prec, uns);
- tree lhs2 = make_ssa_name (type);
- gimple *g = gimple_build_assign (lhs, NOP_EXPR, lhs2);
- gsi_insert_after (&gsi, g, GSI_SAME_STMT);
- gimple_set_lhs (stmt, lhs2);
- }
unsigned int nops = gimple_num_ops (stmt);
- for (unsigned int i = 0; i < nops; ++i)
+ for (unsigned int i = is_gimple_assign (stmt) ? 1 : 0;
+ i < nops; ++i)
if (tree op = gimple_op (stmt, i))
{
tree nop = maybe_cast_middle_bitint (&gsi, op, type);
@@ -6328,6 +6358,25 @@ gimple_lower_bitint (void)
type);
}
}
+ if (tree lhs = gimple_get_lhs (stmt))
+ if (TREE_CODE (TREE_TYPE (lhs)) == BITINT_TYPE
+ && (bitint_precision_kind (TREE_TYPE (lhs))
+ == bitint_prec_middle))
+ {
+ int prec = TYPE_PRECISION (TREE_TYPE (lhs));
+ int uns = TYPE_UNSIGNED (TREE_TYPE (lhs));
+ type = build_nonstandard_integer_type (prec, uns);
+ tree lhs2 = make_ssa_name (type);
+ gimple_set_lhs (stmt, lhs2);
+ gimple *g = gimple_build_assign (lhs, NOP_EXPR, lhs2);
+ if (stmt_ends_bb_p (stmt))
+ {
+ edge e = find_fallthru_edge (gsi_bb (gsi)->succs);
+ gsi_insert_on_edge_immediate (e, g);
+ }
+ else
+ gsi_insert_after (&gsi, g, GSI_SAME_STMT);
+ }
update_stmt (stmt);
continue;
}
diff --git a/gcc/gimple-match-exports.cc b/gcc/gimple-match-exports.cc
index d6dac08..3a054b6 100644
--- a/gcc/gimple-match-exports.cc
+++ b/gcc/gimple-match-exports.cc
@@ -236,7 +236,30 @@ build_call_internal (internal_fn fn, gimple_match_op *res_op)
tree_pair types = direct_internal_fn_types (fn, res_op->type,
res_op->ops);
if (!direct_internal_fn_supported_p (fn, types, OPTIMIZE_FOR_BOTH))
- return NULL;
+ {
+ switch (fn)
+ {
+ case IFN_CLZ:
+ case IFN_CTZ:
+ case IFN_CLRSB:
+ case IFN_FFS:
+ case IFN_POPCOUNT:
+ case IFN_PARITY:
+ /* For these 6 builtins large/huge _BitInt operand is ok
+ before bitint lowering pass. */
+ if (res_op->num_ops >= 1
+ && TREE_CODE (TREE_TYPE (res_op->ops[0])) == BITINT_TYPE
+ && (TYPE_PRECISION (TREE_TYPE (res_op->ops[0]))
+ > MAX_FIXED_MODE_SIZE)
+ && cfun
+ && (cfun->curr_properties & PROP_gimple_lbitint) == 0)
+ break;
+ return NULL;
+
+ default:
+ return NULL;
+ }
+ }
}
return gimple_build_call_internal (fn, res_op->num_ops,
res_op->op_or_null (0),
diff --git a/gcc/gimple-predicate-analysis.cc b/gcc/gimple-predicate-analysis.cc
index ad2c355..5e231a6 100644
--- a/gcc/gimple-predicate-analysis.cc
+++ b/gcc/gimple-predicate-analysis.cc
@@ -244,21 +244,18 @@ find_matching_predicate_in_rest_chains (const pred_info &pred,
of that's the form "FLAG_VAR CMP FLAG_VAR" with value range info.
PHI is the phi node whose incoming (interesting) paths need to be
examined. On success, return the comparison code, set defintion
- gimple of FLAG_DEF and BOUNDARY_CST. Otherwise return ERROR_MARK. */
+ gimple of FLAG_DEF and BOUNDARY_CST. Otherwise return ERROR_MARK.
+ I is the running iterator so the function can be called repeatedly
+ to gather all candidates. */
static tree_code
find_var_cmp_const (pred_chain_union preds, gphi *phi, gimple **flag_def,
- tree *boundary_cst)
+ tree *boundary_cst, unsigned &i)
{
- tree_code vrinfo_code = ERROR_MARK;
- gimple *vrinfo_def = NULL;
- tree vrinfo_cst = NULL;
-
gcc_assert (preds.length () > 0);
pred_chain chain = preds[0];
- for (unsigned i = 0; i < chain.length (); i++)
+ for (; i < chain.length (); i++)
{
- bool use_vrinfo_p = false;
const pred_info &pred = chain[i];
tree cond_lhs = pred.pred_lhs;
tree cond_rhs = pred.pred_rhs;
@@ -282,8 +279,7 @@ find_var_cmp_const (pred_chain_union preds, gphi *phi, gimple **flag_def,
}
/* Check if we can take advantage of FLAG_VAR COMP FLAG_VAR predicate
with value range info. Note only first of such case is handled. */
- else if (vrinfo_code == ERROR_MARK
- && TREE_CODE (cond_lhs) == SSA_NAME
+ else if (TREE_CODE (cond_lhs) == SSA_NAME
&& TREE_CODE (cond_rhs) == SSA_NAME)
{
gimple* lhs_def = SSA_NAME_DEF_STMT (cond_lhs);
@@ -331,8 +327,6 @@ find_var_cmp_const (pred_chain_union preds, gphi *phi, gimple **flag_def,
cond_rhs = wide_int_to_tree (type, min);
else
continue;
-
- use_vrinfo_p = true;
}
else
continue;
@@ -345,27 +339,13 @@ find_var_cmp_const (pred_chain_union preds, gphi *phi, gimple **flag_def,
|| !find_matching_predicate_in_rest_chains (pred, preds))
continue;
- /* Return if any "flag_var comp const" predicate is found. */
- if (!use_vrinfo_p)
- {
- *boundary_cst = cond_rhs;
- return code;
- }
- /* Record if any "flag_var comp flag_var[vinfo]" predicate is found. */
- else if (vrinfo_code == ERROR_MARK)
- {
- vrinfo_code = code;
- vrinfo_def = *flag_def;
- vrinfo_cst = cond_rhs;
- }
- }
- /* Return the "flag_var cmp flag_var[vinfo]" predicate we found. */
- if (vrinfo_code != ERROR_MARK)
- {
- *flag_def = vrinfo_def;
- *boundary_cst = vrinfo_cst;
+ /* Return predicate found. */
+ *boundary_cst = cond_rhs;
+ ++i;
+ return code;
}
- return vrinfo_code;
+
+ return ERROR_MARK;
}
/* Return true if all interesting opnds are pruned, false otherwise.
@@ -641,27 +621,29 @@ uninit_analysis::overlap (gphi *phi, unsigned opnds, hash_set<gphi *> *visited,
{
gimple *flag_def = NULL;
tree boundary_cst = NULL_TREE;
- bitmap visited_flag_phis = NULL;
/* Find within the common prefix of multiple predicate chains
a predicate that is a comparison of a flag variable against
a constant. */
- tree_code cmp_code = find_var_cmp_const (use_preds.chain (), phi, &flag_def,
- &boundary_cst);
- if (cmp_code == ERROR_MARK)
- return true;
-
- /* Now check all the uninit incoming edges have a constant flag
- value that is in conflict with the use guard/predicate. */
- gphi *phi_def = as_a<gphi *> (flag_def);
- bool all_pruned = prune_phi_opnds (phi, opnds, phi_def, boundary_cst,
- cmp_code, visited,
- &visited_flag_phis);
-
- if (visited_flag_phis)
- BITMAP_FREE (visited_flag_phis);
+ unsigned i = 0;
+ tree_code cmp_code;
+ while ((cmp_code = find_var_cmp_const (use_preds.chain (), phi, &flag_def,
+ &boundary_cst, i)) != ERROR_MARK)
+ {
+ /* Now check all the uninit incoming edges have a constant flag
+ value that is in conflict with the use guard/predicate. */
+ bitmap visited_flag_phis = NULL;
+ gphi *phi_def = as_a<gphi *> (flag_def);
+ bool all_pruned = prune_phi_opnds (phi, opnds, phi_def, boundary_cst,
+ cmp_code, visited,
+ &visited_flag_phis);
+ if (visited_flag_phis)
+ BITMAP_FREE (visited_flag_phis);
+ if (all_pruned)
+ return false;
+ }
- return !all_pruned;
+ return true;
}
/* Return true if two predicates PRED1 and X2 are equivalent. Assume
diff --git a/gcc/gimple-range-fold.h b/gcc/gimple-range-fold.h
index fcbe162..0094b4e3 100644
--- a/gcc/gimple-range-fold.h
+++ b/gcc/gimple-range-fold.h
@@ -89,18 +89,6 @@ gimple_range_ssa_p (tree exp)
return NULL_TREE;
}
-// Return true if TYPE1 and TYPE2 are compatible range types.
-
-inline bool
-range_compatible_p (tree type1, tree type2)
-{
- // types_compatible_p requires conversion in both directions to be useless.
- // GIMPLE only requires a cast one way in order to be compatible.
- // Ranges really only need the sign and precision to be the same.
- return (TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
- && TYPE_SIGN (type1) == TYPE_SIGN (type2));
-}
-
// Source of all operands for fold_using_range and gori_compute.
// It abstracts out the source of an operand so it can come from a stmt or
// and edge or anywhere a derived class of fur_source wants.
diff --git a/gcc/gimple-range.cc b/gcc/gimple-range.cc
index 5e9bb39..84d2c75 100644
--- a/gcc/gimple-range.cc
+++ b/gcc/gimple-range.cc
@@ -544,40 +544,6 @@ gimple_ranger::register_transitive_inferred_ranges (basic_block bb)
}
}
-// When a statement S has changed since the result was cached, re-evaluate
-// and update the global cache.
-
-void
-gimple_ranger::update_stmt (gimple *s)
-{
- tree lhs = gimple_get_lhs (s);
- if (!lhs || !gimple_range_ssa_p (lhs))
- return;
- Value_Range r (TREE_TYPE (lhs));
- // Only update if it already had a value.
- if (m_cache.get_global_range (r, lhs))
- {
- // Re-calculate a new value using just cache values.
- Value_Range tmp (TREE_TYPE (lhs));
- fold_using_range f;
- fur_stmt src (s, &m_cache);
- f.fold_stmt (tmp, s, src, lhs);
-
- // Combine the new value with the old value to check for a change.
- if (r.intersect (tmp))
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- print_generic_expr (dump_file, lhs, TDF_SLIM);
- fprintf (dump_file, " : global value re-evaluated to ");
- r.dump (dump_file);
- fputc ('\n', dump_file);
- }
- m_cache.set_global_range (lhs, r);
- }
- }
-}
-
// This routine will export whatever global ranges are known to GCC
// SSA_RANGE_NAME_INFO and SSA_NAME_PTR_INFO fields.
diff --git a/gcc/gimple-range.h b/gcc/gimple-range.h
index 5807a2b..6b0835c 100644
--- a/gcc/gimple-range.h
+++ b/gcc/gimple-range.h
@@ -52,7 +52,6 @@ public:
virtual bool range_of_stmt (vrange &r, gimple *, tree name = NULL) override;
virtual bool range_of_expr (vrange &r, tree name, gimple * = NULL) override;
virtual bool range_on_edge (vrange &r, edge e, tree name) override;
- virtual void update_stmt (gimple *) override;
void range_on_entry (vrange &r, basic_block bb, tree name);
void range_on_exit (vrange &r, basic_block bb, tree name);
void export_global_ranges ();
diff --git a/gcc/gimplify.cc b/gcc/gimplify.cc
index 02f85e7..342e43a 100644
--- a/gcc/gimplify.cc
+++ b/gcc/gimplify.cc
@@ -4887,6 +4887,8 @@ gimplify_modify_expr_to_memcpy (tree *expr_p, tree size, bool want_value,
to = TREE_OPERAND (*expr_p, 0);
from = TREE_OPERAND (*expr_p, 1);
+ gcc_assert (ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (TREE_TYPE (to)))
+ && ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (TREE_TYPE (from))));
/* Mark the RHS addressable. Beware that it may not be possible to do so
directly if a temporary has been created by the gimplification. */
@@ -4945,6 +4947,7 @@ gimplify_modify_expr_to_memset (tree *expr_p, tree size, bool want_value,
/* Now proceed. */
to = TREE_OPERAND (*expr_p, 0);
+ gcc_assert (ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (TREE_TYPE (to))));
to_ptr = build_fold_addr_expr_loc (loc, to);
gimplify_arg (&to_ptr, seq_p, loc);
@@ -6466,8 +6469,9 @@ gimplify_modify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
if (TREE_CODE (from) == CONSTRUCTOR)
return gimplify_modify_expr_to_memset (expr_p, size, want_value, pre_p);
-
- if (is_gimple_addressable (from))
+ else if (is_gimple_addressable (from)
+ && ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (TREE_TYPE (*to_p)))
+ && ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (TREE_TYPE (from))))
{
*from_p = from;
return gimplify_modify_expr_to_memcpy (expr_p, size, want_value,
diff --git a/gcc/go/gofrontend/MERGE b/gcc/go/gofrontend/MERGE
index aff74bd..5e8677b 100644
--- a/gcc/go/gofrontend/MERGE
+++ b/gcc/go/gofrontend/MERGE
@@ -1,4 +1,4 @@
-e997b0201512110e9c20b1fdfd40014830031047
+f5d708fd905d3f91d848a0ea25c77119f8af0c36
The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
diff --git a/gcc/hooks.cc b/gcc/hooks.cc
index e83add4..fe59bbd 100644
--- a/gcc/hooks.cc
+++ b/gcc/hooks.cc
@@ -281,11 +281,6 @@ hook_void_FILEptr_tree (FILE *, tree)
}
void
-hook_void_rtx_tree (rtx, tree)
-{
-}
-
-void
hook_void_constcharptr (const char *)
{
}
diff --git a/gcc/hooks.h b/gcc/hooks.h
index 6aa01fc..3a02b6c 100644
--- a/gcc/hooks.h
+++ b/gcc/hooks.h
@@ -83,7 +83,6 @@ extern void hook_void_FILEptr_constcharptr (FILE *, const char *);
extern void hook_void_FILEptr_constcharptr_const_tree (FILE *, const char *,
const_tree);
extern bool hook_bool_FILEptr_rtx_false (FILE *, rtx);
-extern void hook_void_rtx_tree (rtx, tree);
extern void hook_void_FILEptr_tree (FILE *, tree);
extern void hook_void_tree (tree);
extern void hook_void_tree_treeptr (tree, tree *);
diff --git a/gcc/internal-fn.cc b/gcc/internal-fn.cc
index cce73b3..cb4ef44 100644
--- a/gcc/internal-fn.cc
+++ b/gcc/internal-fn.cc
@@ -2976,6 +2976,10 @@ expand_partial_load_optab_fn (internal_fn ifn, gcall *stmt, convert_optab optab)
mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
gcc_assert (MEM_P (mem));
+ /* The built MEM_REF does not accurately reflect that the load
+ is only partial. Clear it. */
+ set_mem_expr (mem, NULL_TREE);
+ clear_mem_offset (mem);
target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
create_output_operand (&ops[i++], target, TYPE_MODE (type));
create_fixed_operand (&ops[i++], mem);
@@ -3019,6 +3023,10 @@ expand_partial_store_optab_fn (internal_fn ifn, gcall *stmt, convert_optab optab
mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
gcc_assert (MEM_P (mem));
+ /* The built MEM_REF does not accurately reflect that the store
+ is only partial. Clear it. */
+ set_mem_expr (mem, NULL_TREE);
+ clear_mem_offset (mem);
reg = expand_normal (rhs);
create_fixed_operand (&ops[i++], mem);
create_input_operand (&ops[i++], reg, TYPE_MODE (type));
diff --git a/gcc/ipa-cp.cc b/gcc/ipa-cp.cc
index 34fae06..649ad53 100644
--- a/gcc/ipa-cp.cc
+++ b/gcc/ipa-cp.cc
@@ -1926,7 +1926,8 @@ ipa_vr_operation_and_type_effects (vrange &dst_vr,
Value_Range varying (dst_type);
varying.set_varying (dst_type);
- return (handler.fold_range (dst_vr, dst_type, src_vr, varying)
+ return (handler.operand_check_p (dst_type, src_type, dst_type)
+ && handler.fold_range (dst_vr, dst_type, src_vr, varying)
&& !dst_vr.varying_p ()
&& !dst_vr.undefined_p ());
}
diff --git a/gcc/ipa-icf.cc b/gcc/ipa-icf.cc
index c72c9d5..81232d5 100644
--- a/gcc/ipa-icf.cc
+++ b/gcc/ipa-icf.cc
@@ -1666,6 +1666,10 @@ sem_variable::equals_wpa (sem_item *item,
if (DECL_IN_TEXT_SECTION (decl) != DECL_IN_TEXT_SECTION (item->decl))
return return_false_with_msg ("text section");
+ if (TYPE_ADDR_SPACE (TREE_TYPE (decl))
+ != TYPE_ADDR_SPACE (TREE_TYPE (item->decl)))
+ return return_false_with_msg ("address-space");
+
ipa_ref *ref = NULL, *ref2 = NULL;
for (unsigned i = 0; node->iterate_reference (i, ref); i++)
{
diff --git a/gcc/ipa-inline.cc b/gcc/ipa-inline.cc
index dc120e6..dbc3c7e 100644
--- a/gcc/ipa-inline.cc
+++ b/gcc/ipa-inline.cc
@@ -119,6 +119,7 @@ along with GCC; see the file COPYING3. If not see
#include "stringpool.h"
#include "attribs.h"
#include "asan.h"
+#include "ipa-strub.h"
/* Inliner uses greedy algorithm to inline calls in a priority order.
Badness is used as the key in a Fibonacci heap which roughly corresponds
@@ -443,6 +444,11 @@ can_inline_edge_p (struct cgraph_edge *e, bool report,
inlinable = false;
}
+ if (inlinable && !strub_inlinable_to_p (callee, caller))
+ {
+ e->inline_failed = CIF_UNSPECIFIED;
+ inlinable = false;
+ }
if (!inlinable && report)
report_inline_failed_reason (e);
return inlinable;
diff --git a/gcc/ipa-split.cc b/gcc/ipa-split.cc
index 6730f4f..1a7285f 100644
--- a/gcc/ipa-split.cc
+++ b/gcc/ipa-split.cc
@@ -104,6 +104,7 @@ along with GCC; see the file COPYING3. If not see
#include "ipa-fnsummary.h"
#include "cfgloop.h"
#include "attribs.h"
+#include "ipa-strub.h"
/* Per basic block info. */
@@ -1811,6 +1812,12 @@ execute_split_functions (void)
"section.\n");
return 0;
}
+ if (!strub_splittable_p (node))
+ {
+ if (dump_file)
+ fprintf (dump_file, "Not splitting: function is a strub context.\n");
+ return 0;
+ }
/* We enforce splitting after loop headers when profile info is not
available. */
diff --git a/gcc/ipa-strub.cc b/gcc/ipa-strub.cc
new file mode 100644
index 0000000..293bec1
--- /dev/null
+++ b/gcc/ipa-strub.cc
@@ -0,0 +1,3573 @@
+/* strub (stack scrubbing) support.
+ Copyright (C) 2021-2023 Free Software Foundation, Inc.
+ Contributed by Alexandre Oliva <oliva@adacore.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+#include "tree.h"
+#include "gimple.h"
+#include "gimplify.h"
+#include "tree-pass.h"
+#include "ssa.h"
+#include "gimple-iterator.h"
+#include "gimplify-me.h"
+#include "tree-into-ssa.h"
+#include "tree-ssa.h"
+#include "tree-cfg.h"
+#include "cfghooks.h"
+#include "cfgloop.h"
+#include "cfgcleanup.h"
+#include "tree-eh.h"
+#include "except.h"
+#include "builtins.h"
+#include "attribs.h"
+#include "tree-inline.h"
+#include "cgraph.h"
+#include "alloc-pool.h"
+#include "symbol-summary.h"
+#include "ipa-prop.h"
+#include "ipa-fnsummary.h"
+#include "gimple-fold.h"
+#include "fold-const.h"
+#include "gimple-walk.h"
+#include "tree-dfa.h"
+#include "langhooks.h"
+#include "calls.h"
+#include "vec.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "alias.h"
+#include "diagnostic.h"
+#include "intl.h"
+#include "ipa-strub.h"
+#include "symtab-thunks.h"
+#include "attr-fnspec.h"
+
+/* This file introduces two passes that, together, implement
+ machine-independent stack scrubbing, strub for short. It arranges
+ for stack frames that have strub enabled to be zeroed-out after
+ relinquishing control to a caller, whether by returning or by
+ propagating an exception. This admittedly unusual design decision
+ was driven by exception support (one needs a stack frame to be
+ active to propagate exceptions out of it), and it enabled an
+ implementation that is entirely machine-independent (no custom
+ epilogue code is required).
+
+ Strub modes can be selected for stack frames by attaching attribute
+ strub to functions or to variables (to their types, actually).
+ Different strub modes, with different implementation details, are
+ available, and they can be selected by an argument to the strub
+ attribute. When enabled by strub-enabled variables, whether by
+ accessing (as in reading from) statically-allocated ones, or by
+ introducing (as in declaring) automatically-allocated ones, a
+ suitable mode is selected automatically.
+
+ At-calls mode modifies the interface of a function, adding a stack
+ watermark argument, that callers use to clean up the stack frame of
+ the called function. Because of the interface change, it can only
+ be used when explicitly selected, or when a function is internal to
+ a translation unit. Strub-at-calls function types are distinct
+ from their original types (they're not modified in-place), and they
+ are not interchangeable with other function types.
+
+ Internal mode, in turn, does not modify the type or the interface
+ of a function. It is currently implemented by turning the function
+ into a wrapper, moving the function body to a separate wrapped
+ function, and scrubbing the wrapped body's stack in the wrapper.
+ Internal-strub function types are mostly interface-compatible with
+ other strub modes, namely callable (from strub functions, though
+ not strub-enabled) and disabled (not callable from strub
+ functions).
+
+ Always_inline functions can be strub functions, but they can only
+ be called from other strub functions, because strub functions must
+ never be inlined into non-strub functions. Internal and at-calls
+ modes are indistinguishable when it comes to always_inline
+ functions: they will necessarily be inlined into another strub
+ function, and will thus be integrated into the caller's stack
+ frame, whatever the mode. (Contrast with non-always_inline strub
+ functions: an at-calls function can be called from other strub
+ functions, ensuring no discontinuity in stack erasing, whereas an
+ internal-strub function can only be called from other strub
+ functions if it happens to be inlined, or if -fstrub=relaxed mode
+ is in effect (that's the default). In -fstrub=strict mode,
+ internal-strub functions are not callable from strub functions,
+ because the wrapper itself is not strubbed.
+
+ The implementation involves two simple-IPA passes. The earliest
+ one, strub-mode, assigns strub modes to functions. It needs to run
+ before any inlining, so that we can prevent inlining of strub
+ functions into non-strub functions. It notes explicit strub mode
+ requests, enables strub in response to strub variables and testing
+ options, and flags unsatisfiable requests.
+
+ Three possibilities of unsatisfiable requests come to mind: (a)
+ when a strub mode is explicitly selected, but the function uses
+ features that make it ineligible for that mode (e.g. at-calls rules
+ out calling __builtin_apply_args, because of the interface changes,
+ and internal mode rules out noclone or otherwise non-versionable
+ functions, non-default varargs, non-local or forced labels, and
+ functions with far too many arguments); (b) when some strub mode
+ must be enabled because of a strub variable, but the function is
+ not eligible or not viable for any mode; and (c) when
+ -fstrub=strict is enabled, and calls are found in strub functions
+ to functions that are not callable from strub contexts.
+ compute_strub_mode implements (a) and (b), and verify_strub
+ implements (c).
+
+ The second IPA pass modifies interfaces of at-calls-strub functions
+ and types, introduces strub calls in and around them. and splits
+ internal-strub functions. It is placed after early inlining, so
+ that even internal-strub functions get a chance of being inlined
+ into other strub functions, but before non-early inlining, so that
+ internal-strub wrapper functions still get a chance of inlining
+ after splitting.
+
+ Wrappers avoid duplicating the copying of large arguments again by
+ passing them by reference to the wrapped bodies. This involves
+ occasional SSA rewriting of address computations, because of the
+ additional indirection. Besides these changes, and the
+ introduction of the stack watermark parameter, wrappers and wrapped
+ functions cooperate to handle variable argument lists (performing
+ va_start in the wrapper, passing the list as an argument, and
+ replacing va_start calls in the wrapped body with va_copy), and
+ __builtin_apply_args (also called in the wrapper and passed to the
+ wrapped body as an argument).
+
+ Strub bodies (both internal-mode wrapped bodies, and at-calls
+ functions) always start by adjusting the watermark parameter, by
+ calling __builtin___strub_update. The compiler inserts them in the
+ main strub pass. Allocations of additional stack space for the
+ frame (__builtin_alloca) are also followed by watermark updates.
+ Stack space temporarily allocated to pass arguments to other
+ functions, released right after the call, is not regarded as part
+ of the frame. Around calls to them, i.e., in internal-mode
+ wrappers and at-calls callers (even calls through pointers), calls
+ to __builtin___strub_enter and __builtin___strub_leave are
+ inserted, the latter as a __finally block, so that it runs at
+ regular and exceptional exit paths. strub_enter only initializes
+ the stack watermark, and strub_leave is where the scrubbing takes
+ place, overwriting with zeros the stack space from the top of the
+ stack to the watermark.
+
+ These calls can be optimized in various cases. In
+ pass_ipa_strub::adjust_at_calls_call, for example, we enable
+ tail-calling and other optimized calls from one strub body to
+ another by passing on the watermark parameter. The builtins
+ themselves may undergo inline substitution during expansion,
+ dependign on optimization levels. This involves dealing with stack
+ red zones (when the builtins are called out-of-line, the red zone
+ cannot be used) and other ugly details related with inlining strub
+ bodies into other strub bodies (see expand_builtin_strub_update).
+ expand_builtin_strub_leave may even perform partial inline
+ substitution. */
+
+/* Const and pure functions that gain a watermark parameter for strub purposes
+ are still regarded as such, which may cause the inline expansions of the
+ __strub builtins to malfunction. Ideally, attribute "fn spec" would enable
+ us to inform the backend about requirements and side effects of the call, but
+ call_fusage building in calls.c:expand_call does not even look at
+ attr_fnspec, so we resort to asm loads and updates to attain an equivalent
+ effect. Once expand_call gains the ability to issue extra memory uses and
+ clobbers based on pure/const function's fnspec, we can define this to 1. */
+#define ATTR_FNSPEC_DECONST_WATERMARK 0
+
+enum strub_mode {
+ /* This mode denotes a regular function, that does not require stack
+ scrubbing (strubbing). It may call any other functions, but if
+ it calls AT_CALLS (or WRAPPED) ones, strubbing logic is
+ automatically introduced around those calls (the latter, by
+ inlining INTERNAL wrappers). */
+ STRUB_DISABLED = 0,
+
+ /* This denotes a function whose signature is (to be) modified to
+ take an extra parameter, for stack use annotation, and its
+ callers must initialize and pass that argument, and perform the
+ strubbing. Functions that are explicitly marked with attribute
+ strub must have the mark visible wherever the function is,
+ including aliases, and overriders and overriding methods.
+ Functions that are implicitly marked for strubbing, for accessing
+ variables explicitly marked as such, will only select this
+ strubbing method if they are internal to a translation unit. It
+ can only be inlined into other strubbing functions, i.e.,
+ STRUB_AT_CALLS or STRUB_WRAPPED. */
+ STRUB_AT_CALLS = 1,
+
+ /* This denotes a function that is to perform strubbing internally,
+ without any changes to its interface (the function is turned into
+ a strubbing wrapper, and its original body is moved to a separate
+ STRUB_WRAPPED function, with a modified interface). Functions
+ may be explicitly marked with attribute strub(2), and the
+ attribute must be visible at the point of definition. Functions
+ that are explicitly marked for strubbing, for accessing variables
+ explicitly marked as such, may select this strubbing mode if
+ their interface cannot change, e.g. because its interface is
+ visible to other translation units, directly, by indirection
+ (having its address taken), inheritance, etc. Functions that use
+ this method must not have the noclone attribute, nor the noipa
+ one. Functions marked as always_inline may select this mode, but
+ they are NOT wrapped, they remain unchanged, and are only inlined
+ into strubbed contexts. Once non-always_inline functions are
+ wrapped, the wrapper becomes STRUB_WRAPPER, and the wrapped becomes
+ STRUB_WRAPPED. */
+ STRUB_INTERNAL = 2,
+
+ /* This denotes a function whose stack is not strubbed, but that is
+ nevertheless explicitly or implicitly marked as callable from strubbing
+ functions. Normally, only STRUB_AT_CALLS (and STRUB_INTERNAL ->
+ STRUB_WRAPPED) functions can be called from strubbing contexts (bodies of
+ STRUB_AT_CALLS, STRUB_INTERNAL and STRUB_WRAPPED functions), but attribute
+ strub(3) enables other functions to be (indirectly) called from these
+ contexts. Some builtins and internal functions may be implicitly marked as
+ STRUB_CALLABLE. */
+ STRUB_CALLABLE = 3,
+
+ /* This denotes the function that took over the body of a
+ STRUB_INTERNAL function. At first, it's only called by its
+ wrapper, but the wrapper may be inlined. The wrapped function,
+ in turn, can only be inlined into other functions whose stack
+ frames are strubbed, i.e., that are STRUB_WRAPPED or
+ STRUB_AT_CALLS. */
+ STRUB_WRAPPED = -1,
+
+ /* This denotes the wrapper function that replaced the STRUB_INTERNAL
+ function. This mode overrides the STRUB_INTERNAL mode at the time the
+ internal to-be-wrapped function becomes a wrapper, so that inlining logic
+ can tell one from the other. */
+ STRUB_WRAPPER = -2,
+
+ /* This denotes an always_inline function that requires strubbing. It can
+ only be called from, and inlined into, other strubbing contexts. */
+ STRUB_INLINABLE = -3,
+
+ /* This denotes a function that accesses strub variables, so it would call for
+ internal strubbing (whether or not it's eligible for that), but since
+ at-calls strubbing is viable, that's selected as an optimization. This
+ mode addresses the inconvenience that such functions may have different
+ modes selected depending on optimization flags, and get a different
+ callable status depending on that choice: if we assigned them
+ STRUB_AT_CALLS mode, they would be callable when optimizing, whereas
+ STRUB_INTERNAL would not be callable. */
+ STRUB_AT_CALLS_OPT = -4,
+
+};
+
+/* Look up a strub attribute in TYPE, and return it. */
+
+static tree
+get_strub_attr_from_type (tree type)
+{
+ return lookup_attribute ("strub", TYPE_ATTRIBUTES (type));
+}
+
+/* Look up a strub attribute in DECL or in its type, and return it. */
+
+static tree
+get_strub_attr_from_decl (tree decl)
+{
+ tree ret = lookup_attribute ("strub", DECL_ATTRIBUTES (decl));
+ if (ret)
+ return ret;
+ return get_strub_attr_from_type (TREE_TYPE (decl));
+}
+
+#define STRUB_ID_COUNT 8
+#define STRUB_IDENT_COUNT 3
+#define STRUB_TYPE_COUNT 5
+
+#define STRUB_ID_BASE 0
+#define STRUB_IDENT_BASE (STRUB_ID_BASE + STRUB_ID_COUNT)
+#define STRUB_TYPE_BASE (STRUB_IDENT_BASE + STRUB_IDENT_COUNT)
+#define STRUB_CACHE_SIZE (STRUB_TYPE_BASE + STRUB_TYPE_COUNT)
+
+/* Keep the strub mode and temp identifiers and types from being GC'd. */
+static GTY((deletable)) tree strub_cache[STRUB_CACHE_SIZE];
+
+/* Define a function to cache identifier ID, to be used as a strub attribute
+ parameter for a strub mode named after NAME. */
+#define DEF_STRUB_IDS(IDX, NAME, ID) \
+static inline tree get_strub_mode_id_ ## NAME () { \
+ int idx = STRUB_ID_BASE + IDX; \
+ tree identifier = strub_cache[idx]; \
+ if (!identifier) \
+ strub_cache[idx] = identifier = get_identifier (ID); \
+ return identifier; \
+}
+/* Same as DEF_STRUB_IDS, but use the string expansion of NAME as ID. */
+#define DEF_STRUB_ID(IDX, NAME) \
+ DEF_STRUB_IDS (IDX, NAME, #NAME)
+
+/* Define functions for each of the strub mode identifiers.
+ Expose dashes rather than underscores. */
+DEF_STRUB_ID (0, disabled)
+DEF_STRUB_IDS (1, at_calls, "at-calls")
+DEF_STRUB_ID (2, internal)
+DEF_STRUB_ID (3, callable)
+DEF_STRUB_ID (4, wrapped)
+DEF_STRUB_ID (5, wrapper)
+DEF_STRUB_ID (6, inlinable)
+DEF_STRUB_IDS (7, at_calls_opt, "at-calls-opt")
+
+/* Release the temporary macro names. */
+#undef DEF_STRUB_IDS
+#undef DEF_STRUB_ID
+
+/* Return the identifier corresponding to strub MODE, for use as the
+ parameter of a strub attribute. */
+
+static tree
+get_strub_mode_attr_parm (enum strub_mode mode)
+{
+ switch (mode)
+ {
+ case STRUB_DISABLED:
+ return get_strub_mode_id_disabled ();
+
+ case STRUB_AT_CALLS:
+ return get_strub_mode_id_at_calls ();
+
+ case STRUB_INTERNAL:
+ return get_strub_mode_id_internal ();
+
+ case STRUB_CALLABLE:
+ return get_strub_mode_id_callable ();
+
+ case STRUB_WRAPPED:
+ return get_strub_mode_id_wrapped ();
+
+ case STRUB_WRAPPER:
+ return get_strub_mode_id_wrapper ();
+
+ case STRUB_INLINABLE:
+ return get_strub_mode_id_inlinable ();
+
+ case STRUB_AT_CALLS_OPT:
+ return get_strub_mode_id_at_calls_opt ();
+
+ default:
+ /* Every valid strub mode has an identifier defined above. */
+ gcc_unreachable ();
+ }
+}
+
+/* Return the parameters (TREE_VALUE) for a strub attribute of MODE.
+ We know we use a single parameter, so we bypass the creation of a
+ tree list. */
+
+static tree
+get_strub_mode_attr_value (enum strub_mode mode)
+{
+ return get_strub_mode_attr_parm (mode);
+}
+
+/* Determine whether ID is a well-formed strub mode-specifying attribute
+ parameter for a function (type). Only user-visible modes are accepted, and
+ ID must be non-NULL.
+
+ For unacceptable parms, return 0, otherwise a nonzero value as below.
+
+ If the parm enables strub, return positive, otherwise negative.
+
+ If the affected type must be a distinct, incompatible type, return an integer
+ of absolute value 2, otherwise 1. */
+
+int
+strub_validate_fn_attr_parm (tree id)
+{
+ int ret;
+ const char *s = NULL;
+ size_t len = 0;
+
+ /* do NOT test for NULL. This is only to be called with non-NULL arguments.
+ We assume that the strub parameter applies to a function, because only
+ functions accept an explicit argument. If we accepted NULL, and we
+ happened to be called to verify the argument for a variable, our return
+ values would be wrong. */
+ if (TREE_CODE (id) == STRING_CST)
+ {
+ s = TREE_STRING_POINTER (id);
+ len = TREE_STRING_LENGTH (id) - 1;
+ }
+ else if (TREE_CODE (id) == IDENTIFIER_NODE)
+ {
+ s = IDENTIFIER_POINTER (id);
+ len = IDENTIFIER_LENGTH (id);
+ }
+ else
+ return 0;
+
+ enum strub_mode mode;
+
+ /* All four user-visible mode names -- "disabled", "at-calls", "internal"
+ and "callable" -- are 8 characters long, and start with distinct
+ letters, so the first character disambiguates below. */
+ if (len != 8)
+ return 0;
+
+ switch (s[0])
+ {
+ case 'd':
+ mode = STRUB_DISABLED;
+ ret = -1;
+ break;
+
+ case 'a':
+ mode = STRUB_AT_CALLS;
+ ret = 2;
+ break;
+
+ case 'i':
+ mode = STRUB_INTERNAL;
+ ret = 1;
+ break;
+
+ case 'c':
+ mode = STRUB_CALLABLE;
+ ret = -2;
+ break;
+
+ default:
+ /* Other parms are for internal use only. */
+ return 0;
+ }
+
+ /* Verify the full spelling, not just the first character. */
+ tree mode_id = get_strub_mode_attr_parm (mode);
+
+ if (TREE_CODE (id) == IDENTIFIER_NODE
+ ? id != mode_id
+ : strncmp (s, IDENTIFIER_POINTER (mode_id), len) != 0)
+ return 0;
+
+ return ret;
+}
+
+/* Return the strub mode from STRUB_ATTR. VAR_P should be TRUE if the attribute
+ is taken from a variable, rather than from a function, or a type thereof. */
+
+static enum strub_mode
+get_strub_mode_from_attr (tree strub_attr, bool var_p = false)
+{
+ enum strub_mode mode = STRUB_DISABLED;
+
+ if (strub_attr)
+ {
+ /* An argument-less strub attribute means at-calls for functions,
+ internal for variables. */
+ if (!TREE_VALUE (strub_attr))
+ mode = !var_p ? STRUB_AT_CALLS : STRUB_INTERNAL;
+ else
+ {
+ gcc_checking_assert (!var_p);
+ tree id = TREE_VALUE (strub_attr);
+ if (TREE_CODE (id) == TREE_LIST)
+ id = TREE_VALUE (id);
+ const char *s = (TREE_CODE (id) == STRING_CST
+ ? TREE_STRING_POINTER (id)
+ : IDENTIFIER_POINTER (id));
+ size_t len = (TREE_CODE (id) == STRING_CST
+ ? TREE_STRING_LENGTH (id) - 1
+ : IDENTIFIER_LENGTH (id));
+
+ /* Decode the mode name by length, disambiguating same-length
+ names by a distinguishing character; the checking assert
+ below confirms the full spelling matched. */
+ switch (len)
+ {
+ case 7:
+ /* "wrapper" vs "wrapped": they differ at the 7th char. */
+ switch (s[6])
+ {
+ case 'r':
+ mode = STRUB_WRAPPER;
+ break;
+
+ case 'd':
+ mode = STRUB_WRAPPED;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case 8:
+ /* "disabled", "at-calls", "internal", "callable": distinct
+ initials. */
+ switch (s[0])
+ {
+ case 'd':
+ mode = STRUB_DISABLED;
+ break;
+
+ case 'a':
+ mode = STRUB_AT_CALLS;
+ break;
+
+ case 'i':
+ mode = STRUB_INTERNAL;
+ break;
+
+ case 'c':
+ mode = STRUB_CALLABLE;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case 9:
+ /* "inlinable". */
+ mode = STRUB_INLINABLE;
+ break;
+
+ case 12:
+ /* "at-calls-opt". */
+ mode = STRUB_AT_CALLS_OPT;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ gcc_checking_assert (TREE_CODE (id) == IDENTIFIER_NODE
+ ? id == get_strub_mode_attr_parm (mode)
+ : strncmp (IDENTIFIER_POINTER
+ (get_strub_mode_attr_parm (mode)),
+ s, len) == 0);
+ }
+ }
+
+ return mode;
+}
+
+/* Look up, decode and return the strub mode associated with FNDECL,
+ checking its decl attributes first, then its type's. */
+
+static enum strub_mode
+get_strub_mode_from_fndecl (tree fndecl)
+{
+ return get_strub_mode_from_attr (get_strub_attr_from_decl (fndecl));
+}
+
+/* Look up, decode and return the strub mode associated with cgraph NODE,
+ i.e. with its function decl. */
+
+static enum strub_mode
+get_strub_mode (cgraph_node *node)
+{
+ return get_strub_mode_from_fndecl (node->decl);
+}
+
+/* Look up, decode and return the strub mode associated with TYPE. */
+
+static enum strub_mode
+get_strub_mode_from_type (tree type)
+{
+ bool var_p = !FUNC_OR_METHOD_TYPE_P (type);
+ tree attr = get_strub_attr_from_type (type);
+
+ if (attr)
+ return get_strub_mode_from_attr (attr, var_p);
+
+ /* Without an explicit attribute, function types default to callable,
+ unless strub is in strict mode (flag_strub == -2). */
+ if (flag_strub >= -1 && !var_p)
+ return STRUB_CALLABLE;
+
+ return STRUB_DISABLED;
+}
+
+
+/* Return TRUE iff NODE calls builtin va_start. */
+
+static bool
+calls_builtin_va_start_p (cgraph_node *node)
+{
+ /* RESULT remains false; we return true directly on the first match. */
+ bool result = false;
+
+ for (cgraph_edge *e = node->callees; e; e = e->next_callee)
+ {
+ tree cdecl = e->callee->decl;
+ if (fndecl_built_in_p (cdecl, BUILT_IN_VA_START))
+ return true;
+ }
+
+ return result;
+}
+
+/* Return TRUE iff NODE calls builtin apply_args, and optionally REPORT it.
+ When reporting, every offending call site gets its own diagnostic;
+ otherwise we stop at the first match. */
+
+static bool
+calls_builtin_apply_args_p (cgraph_node *node, bool report = false)
+{
+ bool result = false;
+
+ for (cgraph_edge *e = node->callees; e; e = e->next_callee)
+ {
+ tree cdecl = e->callee->decl;
+ if (!fndecl_built_in_p (cdecl, BUILT_IN_APPLY_ARGS))
+ continue;
+
+ result = true;
+
+ if (!report)
+ break;
+
+ /* Prefer the call site's location; fall back to the function's. */
+ sorry_at (e->call_stmt
+ ? gimple_location (e->call_stmt)
+ : DECL_SOURCE_LOCATION (node->decl),
+ "at-calls %<strub%> does not support call to %qD",
+ cdecl);
+ }
+
+ return result;
+}
+
+/* Return TRUE iff NODE carries the always_inline attribute
+ (on its decl; the type is not consulted). */
+
+static inline bool
+strub_always_inline_p (cgraph_node *node)
+{
+ return lookup_attribute ("always_inline", DECL_ATTRIBUTES (node->decl));
+}
+
+/* Return TRUE iff NODE is potentially eligible for any strub-enabled mode, and
+ optionally REPORT the reasons for ineligibility. When reporting, we keep
+ going after a failed check so that all reasons are diagnosed at once. */
+
+static inline bool
+can_strub_p (cgraph_node *node, bool report = false)
+{
+ bool result = true;
+
+ /* Always-inline functions are left alone, so the checks below do not
+ apply to them (unless we are here to report). */
+ if (!report && strub_always_inline_p (node))
+ return result;
+
+ if (lookup_attribute ("noipa", DECL_ATTRIBUTES (node->decl)))
+ {
+ result = false;
+
+ if (!report)
+ return result;
+
+ sorry_at (DECL_SOURCE_LOCATION (node->decl),
+ "%qD is not eligible for %<strub%>"
+ " because of attribute %<noipa%>",
+ node->decl);
+ }
+
+ /* We can't, and don't want to vectorize the watermark and other
+ strub-introduced parms. */
+ if (lookup_attribute ("simd", DECL_ATTRIBUTES (node->decl)))
+ {
+ result = false;
+
+ if (!report)
+ return result;
+
+ sorry_at (DECL_SOURCE_LOCATION (node->decl),
+ "%qD is not eligible for %<strub%>"
+ " because of attribute %<simd%>",
+ node->decl);
+ }
+
+ return result;
+}
+
+/* Return TRUE iff NODE is eligible for at-calls strub, and optionally REPORT
+ the reasons for ineligibility. Besides general non-eligibility for
+ strub-enabled modes, at-calls rules out calling builtin apply_args. */
+
+static bool
+can_strub_at_calls_p (cgraph_node *node, bool report = false)
+{
+ /* When not reporting, general eligibility (can_strub_p) is not rechecked
+ here; callers such as compute_strub_mode test it separately. */
+ bool result = !report || can_strub_p (node, report);
+
+ if (!result && !report)
+ return result;
+
+ return !calls_builtin_apply_args_p (node, report);
+}
+
+/* Return TRUE iff the called function (pointer or, if available,
+ decl) undergoes a significant type conversion for the call. Strub
+ mode changes between function types, and other non-useless type
+ conversions, are regarded as significant. When the function type
+ is overridden, the effective strub mode for the call is that of the
+ call fntype, rather than that of the pointer or of the decl.
+ Functions called with type overrides cannot undergo type changes;
+ it's as if their address was taken, so they're considered
+ non-viable for implicit at-calls strub mode. */
+
+static inline bool
+strub_call_fntype_override_p (const gcall *gs)
+{
+ if (gimple_call_internal_p (gs))
+ return false;
+ /* Start from the type of the called pointer, but prefer the decl's
+ type when a decl is available. */
+ tree fn_type = TREE_TYPE (TREE_TYPE (gimple_call_fn (gs)));
+ if (tree decl = gimple_call_fndecl (gs))
+ fn_type = TREE_TYPE (decl);
+
+ /* We do NOT want to take the mode from the decl here. This
+ function is used to tell whether we can change the strub mode of
+ a function, and whether the effective mode for the call is to be
+ taken from the decl or from an overrider type. When the strub
+ mode is explicitly declared, or overridden with a type cast, the
+ difference will be noticed in function types. However, if the
+ strub mode is implicit due to e.g. strub variables or -fstrub=*
+ command-line flags, we will adjust call types along with function
+ types. In either case, the presence of type or strub mode
+ overriders in calls will prevent a function from having its strub
+ modes changed in ways that would imply type changes, but taking
+ strub modes from decls would defeat this, since we set strub
+ modes and then call this function to tell whether the original
+ type was overridden to decide whether to adjust the call. We
+ need the answer to be about the type, not the decl. */
+ enum strub_mode mode = get_strub_mode_from_type (fn_type);
+ return (get_strub_mode_from_type (gs->u.fntype) != mode
+ || !useless_type_conversion_p (gs->u.fntype, fn_type));
+}
+
+/* Return TRUE iff NODE is called directly with a type override.
+ The unnamed pointer parameter makes the signature fit
+ call_for_symbol_thunks_and_aliases; it is unused. */
+
+static bool
+called_directly_with_type_override_p (cgraph_node *node, void *)
+{
+ for (cgraph_edge *e = node->callers; e; e = e->next_caller)
+ if (e->call_stmt && strub_call_fntype_override_p (e->call_stmt))
+ return true;
+
+ return false;
+}
+
+/* Return TRUE iff NODE or any other nodes aliased to it are called
+ with type overrides. We can't safely change the type of such
+ functions. Thunks and aliases are walked as well. */
+
+static bool
+called_with_type_override_p (cgraph_node *node)
+{
+ return (node->call_for_symbol_thunks_and_aliases
+ (called_directly_with_type_override_p, NULL, true, true));
+}
+
+/* Symbolic macro for the max number of arguments that internal strub may add to
+ a function. Used below to make sure the added arguments fit in the
+ IPA parameter index encoding. */
+
+#define STRUB_INTERNAL_MAX_EXTRA_ARGS 3
+
+/* We can't perform internal strubbing if the function body involves certain
+ features:
+
+ - a non-default __builtin_va_start (e.g. x86's __builtin_ms_va_start) is
+ currently unsupported because we can't discover the corresponding va_copy and
+ va_end decls in the wrapper, and we don't convey the alternate variable
+ arguments ABI to the modified wrapped function. The default
+ __builtin_va_start is supported by calling va_start/va_end at the wrapper,
+ that takes variable arguments, passing a pointer to the va_list object to the
+ wrapped function, that runs va_copy from it where the original function ran
+ va_start.
+
+ __builtin_next_arg is currently unsupported because the wrapped function
+ won't be a variable argument function. We could process it in the wrapper,
+ that remains a variable argument function, and replace calls in the wrapped
+ body, but we currently don't.
+
+ __builtin_return_address is rejected because it's generally used when the
+ actual caller matters, and introducing a wrapper breaks such uses as those in
+ the unwinder. */
+
+static bool
+can_strub_internally_p (cgraph_node *node, bool report = false)
+{
+ /* As in can_strub_at_calls_p, general eligibility is only rechecked
+ here when reporting; otherwise the caller is expected to test it. */
+ bool result = !report || can_strub_p (node, report);
+
+ if (!result && !report)
+ return result;
+
+ if (!report && strub_always_inline_p (node))
+ return result;
+
+ /* Since we're not changing the function identity proper, just
+ moving its full implementation, we *could* disable
+ fun->cannot_be_copied_reason and/or temporarily drop a noclone
+ attribute, but we'd have to prevent remapping of the labels. */
+ if (lookup_attribute ("noclone", DECL_ATTRIBUTES (node->decl)))
+ {
+ result = false;
+
+ if (!report)
+ return result;
+
+ sorry_at (DECL_SOURCE_LOCATION (node->decl),
+ "%qD is not eligible for internal %<strub%>"
+ " because of attribute %<noclone%>",
+ node->decl);
+ }
+
+ if (node->has_gimple_body_p ())
+ {
+ /* Scan the callees for the unsupported builtins documented above. */
+ for (cgraph_edge *e = node->callees; e; e = e->next_callee)
+ {
+ tree cdecl = e->callee->decl;
+ if (!((fndecl_built_in_p (cdecl, BUILT_IN_VA_START)
+ && cdecl != builtin_decl_explicit (BUILT_IN_VA_START))
+ || fndecl_built_in_p (cdecl, BUILT_IN_NEXT_ARG)
+ || fndecl_built_in_p (cdecl, BUILT_IN_RETURN_ADDRESS)))
+ continue;
+
+ result = false;
+
+ if (!report)
+ return result;
+
+ sorry_at (e->call_stmt
+ ? gimple_location (e->call_stmt)
+ : DECL_SOURCE_LOCATION (node->decl),
+ "%qD is not eligible for internal %<strub%> "
+ "because it calls %qD",
+ node->decl, cdecl);
+ }
+
+ struct function *fun = DECL_STRUCT_FUNCTION (node->decl);
+ if (fun->has_nonlocal_label)
+ {
+ result = false;
+
+ if (!report)
+ return result;
+
+ sorry_at (DECL_SOURCE_LOCATION (node->decl),
+ "%qD is not eligible for internal %<strub%> "
+ "because it contains a non-local goto target",
+ node->decl);
+ }
+
+ if (fun->has_forced_label_in_static)
+ {
+ result = false;
+
+ if (!report)
+ return result;
+
+ sorry_at (DECL_SOURCE_LOCATION (node->decl),
+ "%qD is not eligible for internal %<strub%> "
+ "because the address of a local label escapes",
+ node->decl);
+ }
+
+ /* Catch any other case that would prevent versioning/cloning
+ so as to also have it covered above. */
+ gcc_checking_assert (!result /* || !node->has_gimple_body_p () */
+ || tree_versionable_function_p (node->decl));
+
+
+ /* Label values references are not preserved when copying. If referenced
+ in nested functions, as in 920415-1.c and 920721-4.c their decls get
+ remapped independently. The exclusion below might be too broad, in
+ that we might be able to support correctly cases in which the labels
+ are only used internally in a function, but disconnecting forced labels
+ from their original declarations is undesirable in general. */
+ basic_block bb;
+ FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
+ for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
+ !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
+ tree target;
+
+ /* Labels can only appear at the start of a block, so stop
+ scanning this block at the first non-label statement. */
+ if (!label_stmt)
+ break;
+
+ target = gimple_label_label (label_stmt);
+
+ if (!FORCED_LABEL (target))
+ continue;
+
+ result = false;
+
+ if (!report)
+ return result;
+
+ sorry_at (gimple_location (label_stmt),
+ "internal %<strub%> does not support forced labels");
+ }
+ }
+
+ /* Make sure the extra arguments added by internal strub still fit in
+ the IPA parameter index encoding. */
+ if (list_length (TYPE_ARG_TYPES (TREE_TYPE (node->decl)))
+ >= (((HOST_WIDE_INT) 1 << IPA_PARAM_MAX_INDEX_BITS)
+ - STRUB_INTERNAL_MAX_EXTRA_ARGS))
+ {
+ result = false;
+
+ if (!report)
+ return result;
+
+ sorry_at (DECL_SOURCE_LOCATION (node->decl),
+ "%qD has too many arguments for internal %<strub%>",
+ node->decl);
+ }
+
+ return result;
+}
+
+/* Return TRUE iff NODE has any strub-requiring local variable, or accesses (as
+ in reading) any variable through a strub-requiring type. */
+
+static bool
+strub_from_body_p (cgraph_node *node)
+{
+ if (!node->has_gimple_body_p ())
+ return false;
+
+ /* If any local variable is marked for strub... */
+ unsigned i;
+ tree var;
+ FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (node->decl),
+ i, var)
+ if (get_strub_mode_from_type (TREE_TYPE (var))
+ != STRUB_DISABLED)
+ return true;
+
+ /* Now scan the body for loads with strub-requiring types.
+ ??? Compound types don't propagate the strub requirement to
+ component types. */
+ basic_block bb;
+ FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
+ for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
+ !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple *stmt = gsi_stmt (gsi);
+
+ /* Only loads are of interest; writes do not expose data. */
+ if (!gimple_assign_load_p (stmt))
+ continue;
+
+ tree rhs = gimple_assign_rhs1 (stmt);
+ if (get_strub_mode_from_type (TREE_TYPE (rhs))
+ != STRUB_DISABLED)
+ return true;
+ }
+
+ return false;
+}
+
+/* Return TRUE iff node is associated with a builtin that should be callable
+ from strub contexts. Only BUILT_IN_NORMAL builtins qualify; of those,
+ all but apply and apply_args are considered callable. */
+
+static inline bool
+strub_callable_builtin_p (cgraph_node *node)
+{
+ if (DECL_BUILT_IN_CLASS (node->decl) != BUILT_IN_NORMAL)
+ return false;
+
+ enum built_in_function fcode = DECL_FUNCTION_CODE (node->decl);
+
+ switch (fcode)
+ {
+ case BUILT_IN_NONE:
+ gcc_unreachable ();
+
+ /* This temporarily allocates stack for the call, and we can't reasonably
+ update the watermark for that. Besides, we don't check the actual call
+ target, nor its signature, and it seems to be overkill to as much as
+ try to do so. */
+ case BUILT_IN_APPLY:
+ return false;
+
+ /* Conversely, this shouldn't be called from within strub contexts, since
+ the caller may have had its signature modified. STRUB_INTERNAL is ok,
+ the call will remain in the STRUB_WRAPPER, and removed from the
+ STRUB_WRAPPED clone. */
+ case BUILT_IN_APPLY_ARGS:
+ return false;
+
+ /* ??? Make all other builtins callable. We wish to make any builtin call
+ the compiler might introduce on its own callable. Anything that is
+ predictable enough as to be known not to allow stack data that should
+ be strubbed to unintentionally escape to non-strub contexts can be
+ allowed, and pretty much every builtin appears to fit this description.
+ The exceptions to this rule seem to be rare, and only available as
+ explicit __builtin calls, so let's keep it simple and allow all of
+ them... */
+ default:
+ return true;
+ }
+}
+
+/* Compute the strub mode to be used for NODE. STRUB_ATTR should be the strub
+ attribute, found for NODE, if any. */
+
+static enum strub_mode
+compute_strub_mode (cgraph_node *node, tree strub_attr)
+{
+ enum strub_mode req_mode = get_strub_mode_from_attr (strub_attr);
+
+ gcc_checking_assert (flag_strub >= -2 && flag_strub <= 3);
+
+ /* Symbolic encodings of the -fstrub-* flags. */
+ /* Enable strub when explicitly requested through attributes to functions or
+ variables, reporting errors if the requests cannot be satisfied. */
+ const bool strub_flag_auto = flag_strub < 0;
+ /* strub_flag_auto with strub call verification; without this, functions are
+ implicitly callable. */
+ const bool strub_flag_strict = flag_strub < -1;
+ /* Disable strub altogether, ignore attributes entirely. */
+ const bool strub_flag_disabled = flag_strub == 0;
+ /* On top of _auto, also enable strub implicitly for functions that can
+ safely undergo at-calls strubbing. Internal mode will still be used in
+ functions that request it explicitly with attribute strub(2), or when the
+ function body requires strubbing and at-calls strubbing is not viable. */
+ const bool strub_flag_at_calls = flag_strub == 1;
+ /* On top of default, also enable strub implicitly for functions that can
+ safely undergo internal strubbing. At-calls mode will still be used in
+ functions that request it explicitly with attribute strub() or strub(1),
+ or when the function body requires strubbing and internal strubbing is not
+ viable. */
+ const bool strub_flag_internal = flag_strub == 2;
+ /* On top of default, also enable strub implicitly for functions that can
+ safely undergo strubbing in either mode. When both modes are viable,
+ at-calls is preferred. */
+ const bool strub_flag_either = flag_strub == 3;
+ /* Besides the default behavior, enable strub implicitly for all viable
+ functions. */
+ const bool strub_flag_viable = flag_strub > 0;
+
+ /* The consider_* variables should be TRUE if selecting the corresponding
+ strub modes would be consistent with requests from attributes and command
+ line flags. Attributes associated with functions pretty much mandate a
+ selection, and should report an error if not satisfied; strub_flag_auto
+ implicitly enables some viable strub mode if that's required by references
+ to variables marked for strub; strub_flag_viable enables strub if viable
+ (even when favoring one mode, body-requested strub can still be satisfied
+ by either mode), and falls back to callable, silently unless variables
+ require strubbing. */
+
+ const bool consider_at_calls
+ = (!strub_flag_disabled
+ && (strub_attr
+ ? req_mode == STRUB_AT_CALLS
+ : true));
+ const bool consider_internal
+ = (!strub_flag_disabled
+ && (strub_attr
+ ? req_mode == STRUB_INTERNAL
+ : true));
+
+ const bool consider_callable
+ = (!strub_flag_disabled
+ && (strub_attr
+ ? req_mode == STRUB_CALLABLE
+ : (!strub_flag_strict
+ || strub_callable_builtin_p (node))));
+
+ /* This is a shorthand for either strub-enabled mode. */
+ const bool consider_strub
+ = (consider_at_calls || consider_internal);
+
+ /* We can cope with always_inline functions even with noipa and noclone,
+ because we just leave them alone. */
+ const bool is_always_inline
+ = strub_always_inline_p (node);
+
+ /* Strubbing in general, and each specific strub mode, may have its own set of
+ requirements. We require noipa for strubbing, either because of cloning
+ required for internal strub, or because of caller enumeration required for
+ at-calls strub. We don't consider the at-calls mode eligible if it's not
+ even considered, it has no further requirements. Internal mode requires
+ cloning and the absence of certain features in the body and, like at-calls,
+ it's not eligible if it's not even under consideration.
+
+ ??? Do we need target hooks for further constraints? E.g., x86's
+ "interrupt" attribute breaks internal strubbing because the wrapped clone
+ carries the attribute and thus isn't callable; in this case, we could use a
+ target hook to adjust the clone instead. */
+ const bool strub_eligible
+ = (consider_strub
+ && (is_always_inline || can_strub_p (node)));
+ const bool at_calls_eligible
+ = (consider_at_calls && strub_eligible
+ && can_strub_at_calls_p (node));
+ const bool internal_eligible
+ = (consider_internal && strub_eligible
+ && (is_always_inline
+ || can_strub_internally_p (node)));
+
+ /* In addition to the strict eligibility requirements, some additional
+ constraints are placed on implicit selection of certain modes. These do
+ not prevent the selection of a mode if explicitly specified as part of a
+ function interface (the strub attribute), but they may prevent modes from
+ being selected by the command line or by function bodies. The only actual
+ constraint is on at-calls mode: since we change the function's exposed
+ signature, we won't do it implicitly if the function can possibly be used
+ in ways that do not expect the signature change, e.g., if the function is
+ available to or interposable by other units, if its address is taken,
+ etc. */
+ const bool at_calls_viable
+ = (at_calls_eligible
+ && (strub_attr
+ || (node->has_gimple_body_p ()
+ && (!node->externally_visible
+ || (node->binds_to_current_def_p ()
+ && node->can_be_local_p ()))
+ && node->only_called_directly_p ()
+ && !called_with_type_override_p (node))));
+ const bool internal_viable
+ = (internal_eligible);
+
+ /* Shorthand. */
+ const bool strub_viable
+ = (at_calls_viable || internal_viable);
+
+ /* We wish to analyze the body, to look for implicit requests for strub, both
+ to implicitly enable it when the body calls for it, and to report errors if
+ the body calls for it but neither mode is viable (even if that follows from
+ non-eligibility because of the explicit specification of some non-strubbing
+ mode). We can refrain from scanning the body only in rare circumstances:
+ when strub is enabled by a function attribute (scanning might be redundant
+ in telling us to also enable it), and when we are enabling strub implicitly
+ but there are non-viable modes: we want to know whether strubbing is
+ required, to fallback to another mode, even if we're only enabling a
+ certain mode, or, when either mode would do, to report an error if neither
+ happens to be viable. */
+ const bool analyze_body
+ = (strub_attr
+ ? !consider_strub
+ : (strub_flag_auto
+ || (strub_flag_viable && (!at_calls_viable && !internal_viable))
+ || (strub_flag_either && !strub_viable)));
+
+ /* Cases in which strubbing is enabled or disabled by strub_flag_auto.
+ Unsatisfiable requests ought to be reported. */
+ const bool strub_required
+ = ((strub_attr && consider_strub)
+ || (analyze_body && strub_from_body_p (node)));
+
+ /* Besides the required cases, we want to abide by the requests to enabling on
+ an if-viable basis. */
+ const bool strub_enable
+ = (strub_required
+ || (strub_flag_at_calls && at_calls_viable)
+ || (strub_flag_internal && internal_viable)
+ || (strub_flag_either && strub_viable));
+
+ /* And now we're finally ready to select a mode that abides by the viability
+ and eligibility constraints, and that satisfies the strubbing requirements
+ and requests, subject to the constraints. If both modes are viable and
+ strub is to be enabled, pick STRUB_AT_CALLS unless STRUB_INTERNAL was named
+ as preferred. */
+ const enum strub_mode mode
+ = ((strub_enable && is_always_inline)
+ ? (strub_required ? STRUB_INLINABLE : STRUB_CALLABLE)
+ : (strub_enable && internal_viable
+ && (strub_flag_internal || !at_calls_viable))
+ ? STRUB_INTERNAL
+ : (strub_enable && at_calls_viable)
+ ? (strub_required && !strub_attr
+ ? STRUB_AT_CALLS_OPT
+ : STRUB_AT_CALLS)
+ : consider_callable
+ ? STRUB_CALLABLE
+ : STRUB_DISABLED);
+
+ /* Post-selection checks: report requirements the selected mode cannot
+ satisfy. */
+ switch (mode)
+ {
+ case STRUB_CALLABLE:
+ if (is_always_inline)
+ break;
+ /* Fall through. */
+
+ case STRUB_DISABLED:
+ if (strub_enable && !strub_attr)
+ {
+ gcc_checking_assert (analyze_body);
+ error_at (DECL_SOURCE_LOCATION (node->decl),
+ "%qD requires %<strub%>,"
+ " but no viable %<strub%> mode was found",
+ node->decl);
+ break;
+ }
+ /* Fall through. */
+
+ case STRUB_AT_CALLS:
+ case STRUB_INTERNAL:
+ case STRUB_INLINABLE:
+ /* Differences from a mode requested through a function attribute are
+ reported in set_strub_mode_to. */
+ break;
+
+ case STRUB_AT_CALLS_OPT:
+ /* Functions that select this mode do so because of references to strub
+ variables. Even if we choose at-calls as an optimization, the
+ requirements for internal strub must still be satisfied. Optimization
+ options may render implicit at-calls strub not viable (-O0 sets
+ force_output for static non-inline functions), and it would not be good
+ if changing optimization options turned a well-formed into an
+ ill-formed one. */
+ if (!internal_viable)
+ can_strub_internally_p (node, true);
+ break;
+
+ case STRUB_WRAPPED:
+ case STRUB_WRAPPER:
+ default:
+ gcc_unreachable ();
+ }
+
+ return mode;
+}
+
+/* Set FNDT's strub mode to MODE; FNDT may be a function decl or
+ function type. If OVERRIDE, do not check whether a mode is already
+ set. The new attribute is prepended to the existing attribute
+ chain, so it takes precedence over any previous strub attribute. */
+
+static void
+strub_set_fndt_mode_to (tree fndt, enum strub_mode mode, bool override)
+{
+ gcc_checking_assert (override
+ || !(DECL_P (fndt)
+ ? get_strub_attr_from_decl (fndt)
+ : get_strub_attr_from_type (fndt)));
+
+ tree attr = tree_cons (get_identifier ("strub"),
+ get_strub_mode_attr_value (mode),
+ NULL_TREE);
+ tree *attrp = NULL;
+ if (DECL_P (fndt))
+ {
+ gcc_checking_assert (FUNC_OR_METHOD_TYPE_P (TREE_TYPE (fndt)));
+ attrp = &DECL_ATTRIBUTES (fndt);
+ }
+ else if (FUNC_OR_METHOD_TYPE_P (fndt))
+ attrp = &TYPE_ATTRIBUTES (fndt);
+ else
+ gcc_unreachable ();
+
+ TREE_CHAIN (attr) = *attrp;
+ *attrp = attr;
+}
+
+/* Set FNDT's strub mode to callable.
+ FNDT may be a function decl or a function type.
+ Asserts that no strub mode was already set. */
+
+void
+strub_make_callable (tree fndt)
+{
+ strub_set_fndt_mode_to (fndt, STRUB_CALLABLE, false);
+}
+
+/* Set NODE to strub MODE. Report incompatibilities between MODE and the mode
+ requested through explicit attributes, and cases of non-eligibility. */
+
+static void
+set_strub_mode_to (cgraph_node *node, enum strub_mode mode)
+{
+ tree attr = get_strub_attr_from_decl (node->decl);
+ enum strub_mode req_mode = get_strub_mode_from_attr (attr);
+
+ if (attr)
+ {
+ /* Check for and report incompatible mode changes. Changing an
+ explicitly requested internal mode to wrapped/wrapper, or any of
+ internal/at-calls/callable to inlinable, is expected and silent. */
+ if (mode != req_mode
+ && !(req_mode == STRUB_INTERNAL
+ && (mode == STRUB_WRAPPED
+ || mode == STRUB_WRAPPER))
+ && !((req_mode == STRUB_INTERNAL
+ || req_mode == STRUB_AT_CALLS
+ || req_mode == STRUB_CALLABLE)
+ && mode == STRUB_INLINABLE))
+ {
+ error_at (DECL_SOURCE_LOCATION (node->decl),
+ "%<strub%> mode %qE selected for %qD, when %qE was requested",
+ get_strub_mode_attr_parm (mode),
+ node->decl,
+ get_strub_mode_attr_parm (req_mode));
+ if (node->alias)
+ {
+ cgraph_node *target = node->ultimate_alias_target ();
+ if (target != node)
+ error_at (DECL_SOURCE_LOCATION (target->decl),
+ "the incompatible selection was determined"
+ " by ultimate alias target %qD",
+ target->decl);
+ }
+
+ /* Report any incompatibilities with explicitly-requested strub. */
+ switch (req_mode)
+ {
+ case STRUB_AT_CALLS:
+ can_strub_at_calls_p (node, true);
+ break;
+
+ case STRUB_INTERNAL:
+ can_strub_internally_p (node, true);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ /* Drop any incompatible strub attributes leading the decl attribute
+ chain. Return if we find one with the mode we need. */
+ for (;;)
+ {
+ if (mode == req_mode)
+ return;
+
+ if (DECL_ATTRIBUTES (node->decl) != attr)
+ break;
+
+ DECL_ATTRIBUTES (node->decl) = TREE_CHAIN (attr);
+ attr = get_strub_attr_from_decl (node->decl);
+ if (!attr)
+ break;
+
+ req_mode = get_strub_mode_from_attr (attr);
+ }
+ }
+ else if (mode == req_mode)
+ return;
+
+ /* ATTR doubles as the OVERRIDE flag: it is nonnull iff an incompatible
+ strub attribute remains that the new one must take precedence over. */
+ strub_set_fndt_mode_to (node->decl, mode, attr);
+}
+
+/* Compute and set NODE's strub mode. No-op if a pass-internal mode was
+ already assigned; aliases take the mode of their ultimate target. */
+
+static void
+set_strub_mode (cgraph_node *node)
+{
+ tree attr = get_strub_attr_from_decl (node->decl);
+
+ if (attr)
+ switch (get_strub_mode_from_attr (attr))
+ {
+ /* These can't have been requested through user attributes, so we must
+ have already gone through them. */
+ case STRUB_WRAPPER:
+ case STRUB_WRAPPED:
+ case STRUB_INLINABLE:
+ case STRUB_AT_CALLS_OPT:
+ return;
+
+ case STRUB_DISABLED:
+ case STRUB_AT_CALLS:
+ case STRUB_INTERNAL:
+ case STRUB_CALLABLE:
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ cgraph_node *xnode = node;
+ if (node->alias)
+ xnode = node->ultimate_alias_target ();
+ /* Weakrefs may remain unresolved (the above will return node) if
+ their targets are not defined, so make sure we compute a strub
+ mode for them, instead of defaulting to STRUB_DISABLED and
+ rendering them uncallable. */
+ enum strub_mode mode = (xnode != node && !xnode->alias
+ ? get_strub_mode (xnode)
+ : compute_strub_mode (node, attr));
+
+ set_strub_mode_to (node, mode);
+}
+
+
+/* Non-strub functions shouldn't be called from within strub contexts,
+ except through callable ones. Always inline strub functions can
+ only be called from strub functions. Return TRUE iff a function
+ with CALLEE_MODE may be called from one with CALLER_MODE. */
+
+static bool
+strub_callable_from_p (strub_mode caller_mode, strub_mode callee_mode)
+{
+ /* First dispatch on the caller: non-strub callers may call anything
+ but inlinable strub functions. */
+ switch (caller_mode)
+ {
+ case STRUB_WRAPPED:
+ case STRUB_AT_CALLS_OPT:
+ case STRUB_AT_CALLS:
+ case STRUB_INTERNAL:
+ case STRUB_INLINABLE:
+ break;
+
+ case STRUB_WRAPPER:
+ case STRUB_DISABLED:
+ case STRUB_CALLABLE:
+ return callee_mode != STRUB_INLINABLE;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /* The caller is a strub context; check what it may call. */
+ switch (callee_mode)
+ {
+ case STRUB_WRAPPED:
+ case STRUB_AT_CALLS:
+ case STRUB_INLINABLE:
+ break;
+
+ case STRUB_AT_CALLS_OPT:
+ case STRUB_INTERNAL:
+ case STRUB_WRAPPER:
+ /* Callable from strub contexts unless in strict mode
+ (flag_strub == -2). */
+ return (flag_strub >= -1);
+
+ case STRUB_DISABLED:
+ return false;
+
+ case STRUB_CALLABLE:
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ return true;
+}
+
+/* Return TRUE iff CALLEE can be inlined into CALLER. We wish to avoid inlining
+ WRAPPED functions back into their WRAPPERs. More generally, we wish to avoid
+ inlining strubbed functions into non-strubbed ones. CALLER doesn't have to
+ be an immediate caller of CALLEE: the immediate caller may have already been
+ cloned for inlining, and then CALLER may be further up the original call
+ chain. ??? It would be nice if our own caller would retry inlining callee
+ if caller gets inlined. */
+
+bool
+strub_inlinable_to_p (cgraph_node *callee, cgraph_node *caller)
+{
+ strub_mode callee_mode = get_strub_mode (callee);
+
+ /* A non-strub callee can always be inlined; a strub callee only into
+ a strub caller, checked below. */
+ switch (callee_mode)
+ {
+ case STRUB_WRAPPED:
+ case STRUB_AT_CALLS:
+ case STRUB_INTERNAL:
+ case STRUB_INLINABLE:
+ case STRUB_AT_CALLS_OPT:
+ break;
+
+ case STRUB_WRAPPER:
+ case STRUB_DISABLED:
+ case STRUB_CALLABLE:
+ /* When we consider inlining, we've already verified callability, so we
+ can even inline callable and then disabled into a strub context. That
+ will get strubbed along with the context, so it's hopefully not a
+ problem. */
+ return true;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ strub_mode caller_mode = get_strub_mode (caller);
+
+ switch (caller_mode)
+ {
+ case STRUB_WRAPPED:
+ case STRUB_AT_CALLS:
+ case STRUB_INTERNAL:
+ case STRUB_INLINABLE:
+ case STRUB_AT_CALLS_OPT:
+ return true;
+
+ case STRUB_WRAPPER:
+ case STRUB_DISABLED:
+ case STRUB_CALLABLE:
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ return false;
+}
+
+/* Check that types T1 and T2 are strub-compatible. Return 1 if the strub modes
+ are the same, 2 if they are interchangeable, and 0 otherwise. */
+
+int
+strub_comptypes (tree t1, tree t2)
+{
+ if (TREE_CODE (t1) != TREE_CODE (t2))
+ return 0;
+
+ enum strub_mode m1 = get_strub_mode_from_type (t1);
+ enum strub_mode m2 = get_strub_mode_from_type (t2);
+
+ if (m1 == m2)
+ return 1;
+
+ /* We're dealing with types, so only strub modes that can be selected by
+ attributes in the front end matter. If either mode is at-calls (for
+ functions) or internal (for variables), the conversion is not
+ compatible. */
+ bool var_p = !FUNC_OR_METHOD_TYPE_P (t1);
+ enum strub_mode mr = var_p ? STRUB_INTERNAL : STRUB_AT_CALLS;
+ if (m1 == mr || m2 == mr)
+ return 0;
+
+ return 2;
+}
+
+/* Return the effective strub mode used for CALL, and set *TYPEP to
+ the effective type used for the call. The effective type and mode
+ are those of the callee, unless the call involves a typecast. */
+
+static enum strub_mode
+effective_strub_mode_for_call (gcall *call, tree *typep)
+{
+ tree type;
+ enum strub_mode mode;
+
+ if (strub_call_fntype_override_p (call))
+ {
+ type = gimple_call_fntype (call);
+ mode = get_strub_mode_from_type (type);
+ }
+ else
+ {
+ type = TREE_TYPE (TREE_TYPE (gimple_call_fn (call)));
+ tree decl = gimple_call_fndecl (call);
+ if (decl)
+ mode = get_strub_mode_from_fndecl (decl);
+ else
+ mode = get_strub_mode_from_type (type);
+ }
+
+ if (typep)
+ *typep = type;
+
+ return mode;
+}
+
+/* Create a distinct copy of the type of NODE's function, and change
+ the fntype of all calls to it with the same main type to the new
+ type. */
+
+static void
+distinctify_node_type (cgraph_node *node)
+{
+ tree old_type = TREE_TYPE (node->decl);
+ tree new_type = build_distinct_type_copy (old_type);
+ tree new_ptr_type = NULL_TREE;
+
+ /* Remap any calls to node->decl that use old_type, or a variant
+ thereof, to new_type as well. We don't look for aliases, their
+ declarations will have their types changed independently, and
+ we'll adjust their fntypes then. */
+ for (cgraph_edge *e = node->callers; e; e = e->next_caller)
+ {
+ if (!e->call_stmt)
+ continue;
+ tree fnaddr = gimple_call_fn (e->call_stmt);
+ gcc_checking_assert (TREE_CODE (fnaddr) == ADDR_EXPR
+ && TREE_OPERAND (fnaddr, 0) == node->decl);
+ if (strub_call_fntype_override_p (e->call_stmt))
+ continue;
+ if (!new_ptr_type)
+ new_ptr_type = build_pointer_type (new_type);
+ TREE_TYPE (fnaddr) = new_ptr_type;
+ gimple_call_set_fntype (e->call_stmt, new_type);
+ }
+
+ TREE_TYPE (node->decl) = new_type;
+}
+
+/* Return TRUE iff TYPE and any variants have the same strub mode. */
+
+static bool
+same_strub_mode_in_variants_p (tree type)
+{
+ enum strub_mode mode = get_strub_mode_from_type (type);
+
+ for (tree other = TYPE_MAIN_VARIANT (type);
+ other != NULL_TREE; other = TYPE_NEXT_VARIANT (other))
+ if (type != other && mode != get_strub_mode_from_type (other))
+ return false;
+
+ /* Check that the canonical type, if set, either is in the same
+ variant chain, or has the same strub mode as type. Also check
+ the variants of the canonical type. */
+ if (TYPE_CANONICAL (type)
+ && (TYPE_MAIN_VARIANT (TYPE_CANONICAL (type))
+ != TYPE_MAIN_VARIANT (type)))
+ {
+ if (mode != get_strub_mode_from_type (TYPE_CANONICAL (type)))
+ return false;
+ else
+ return same_strub_mode_in_variants_p (TYPE_CANONICAL (type));
+ }
+
+ return true;
+}
+
+/* Check that strub functions don't call non-strub functions, and that
+ always_inline strub functions are only called by strub
+ functions. */
+
+static void
+verify_strub ()
+{
+ cgraph_node *node;
+
+ /* It's expected that check strub-wise pointer type compatibility of variables
+ and of functions is already taken care of by front-ends, on account of the
+ attribute's being marked as affecting type identity and of the creation of
+ distinct types. */
+
+ /* Check that call targets in strub contexts have strub-callable types. */
+
+ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
+ {
+ enum strub_mode caller_mode = get_strub_mode (node);
+
+ for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee)
+ {
+ gcc_checking_assert (e->indirect_unknown_callee);
+
+ if (!e->call_stmt)
+ continue;
+
+ enum strub_mode callee_mode
+ = effective_strub_mode_for_call (e->call_stmt, NULL);
+
+ if (!strub_callable_from_p (caller_mode, callee_mode))
+ error_at (gimple_location (e->call_stmt),
+ "indirect non-%<strub%> call in %<strub%> context %qD",
+ node->decl);
+ }
+
+ for (cgraph_edge *e = node->callees; e; e = e->next_callee)
+ {
+ gcc_checking_assert (!e->indirect_unknown_callee);
+
+ if (!e->call_stmt)
+ continue;
+
+ tree callee_fntype;
+ enum strub_mode callee_mode
+ = effective_strub_mode_for_call (e->call_stmt, &callee_fntype);
+
+ if (!strub_callable_from_p (caller_mode, callee_mode))
+ {
+ if (callee_mode == STRUB_INLINABLE)
+ error_at (gimple_location (e->call_stmt),
+ "calling %<always_inline%> %<strub%> %qD"
+ " in non-%<strub%> context %qD",
+ e->callee->decl, node->decl);
+ else if (fndecl_built_in_p (e->callee->decl, BUILT_IN_APPLY_ARGS)
+ && caller_mode == STRUB_INTERNAL)
+ /* This is ok, it will be kept in the STRUB_WRAPPER, and removed
+ from the STRUB_WRAPPED's strub context. */
+ continue;
+ else if (!strub_call_fntype_override_p (e->call_stmt))
+ error_at (gimple_location (e->call_stmt),
+ "calling non-%<strub%> %qD in %<strub%> context %qD",
+ e->callee->decl, node->decl);
+ else
+ error_at (gimple_location (e->call_stmt),
+ "calling %qD using non-%<strub%> type %qT"
+ " in %<strub%> context %qD",
+ e->callee->decl, callee_fntype, node->decl);
+ }
+ }
+ }
+}
+
+namespace {
+
+/* Define a pass to compute strub modes. */
+const pass_data pass_data_ipa_strub_mode = {
+ SIMPLE_IPA_PASS,
+ "strubm",
+ OPTGROUP_NONE,
+ TV_NONE,
+ PROP_cfg, // properties_required
+ 0, // properties_provided
+ 0, // properties_destroyed
+ 0, // properties_start
+ 0, // properties_finish
+};
+
+class pass_ipa_strub_mode : public simple_ipa_opt_pass
+{
+public:
+ pass_ipa_strub_mode (gcc::context *ctxt)
+ : simple_ipa_opt_pass (pass_data_ipa_strub_mode, ctxt)
+ {}
+ opt_pass *clone () { return new pass_ipa_strub_mode (m_ctxt); }
+ virtual bool gate (function *) {
+ /* In relaxed (-3) and strict (-4) settings, that only enable strub at a
+ function or variable attribute's request, the attribute handler changes
+ flag_strub to -1 or -2, respectively, if any strub-enabling occurence of
+ the attribute is found. Therefore, if it remains at -3 or -4, nothing
+ that would enable strub was found, so we can disable it and avoid the
+ overhead. */
+ if (flag_strub < -2)
+ flag_strub = 0;
+ return flag_strub;
+ }
+ virtual unsigned int execute (function *);
+};
+
+/* Define a pass to introduce strub transformations. */
+const pass_data pass_data_ipa_strub = {
+ SIMPLE_IPA_PASS,
+ "strub",
+ OPTGROUP_NONE,
+ TV_NONE,
+ PROP_cfg | PROP_ssa, // properties_required
+ 0, // properties_provided
+ 0, // properties_destroyed
+ 0, // properties_start
+ TODO_update_ssa
+ | TODO_cleanup_cfg
+ | TODO_rebuild_cgraph_edges
+ | TODO_verify_il, // properties_finish
+};
+
+class pass_ipa_strub : public simple_ipa_opt_pass
+{
+public:
+ pass_ipa_strub (gcc::context *ctxt)
+ : simple_ipa_opt_pass (pass_data_ipa_strub, ctxt)
+ {}
+ opt_pass *clone () { return new pass_ipa_strub (m_ctxt); }
+ virtual bool gate (function *) { return flag_strub && !seen_error (); }
+ virtual unsigned int execute (function *);
+
+ /* Define on demand and cache some types we use often. */
+#define DEF_TYPE(IDX, NAME, INIT) \
+ static inline tree get_ ## NAME () { \
+ int idx = STRUB_TYPE_BASE + IDX; \
+ static tree type = strub_cache[idx]; \
+ if (!type) \
+ strub_cache[idx] = type = (INIT); \
+ return type; \
+ }
+
+ /* Use a distinct ptr_type_node to denote the watermark, so that we can
+ recognize it in arg lists and avoid modifying types twice. */
+ DEF_TYPE (0, wmt, build_variant_type_copy (ptr_type_node))
+
+ DEF_TYPE (1, pwmt, build_reference_type (get_wmt ()))
+
+ DEF_TYPE (2, qpwmt,
+ build_qualified_type (get_pwmt (),
+ TYPE_QUAL_RESTRICT
+ /* | TYPE_QUAL_CONST */))
+
+ DEF_TYPE (3, qptr,
+ build_qualified_type (ptr_type_node,
+ TYPE_QUAL_RESTRICT
+ | TYPE_QUAL_CONST))
+
+ DEF_TYPE (4, qpvalst,
+ build_qualified_type (build_reference_type
+ (va_list_type_node),
+ TYPE_QUAL_RESTRICT
+ /* | TYPE_QUAL_CONST */))
+
+#undef DEF_TYPE
+
+ /* Define non-strub builtins on demand. */
+#define DEF_NM_BUILTIN(NAME, CODE, FNTYPELIST) \
+ static tree get_ ## NAME () { \
+ tree decl = builtin_decl_explicit (CODE); \
+ if (!decl) \
+ { \
+ tree type = build_function_type_list FNTYPELIST; \
+ decl = add_builtin_function \
+ ("__builtin_" #NAME, \
+ type, CODE, BUILT_IN_NORMAL, \
+ NULL, NULL); \
+ TREE_NOTHROW (decl) = true; \
+ set_builtin_decl ((CODE), decl, true); \
+ } \
+ return decl; \
+ }
+
+ DEF_NM_BUILTIN (stack_address,
+ BUILT_IN_STACK_ADDRESS,
+ (ptr_type_node, NULL))
+
+#undef DEF_NM_BUILTIN
+
+ /* Define strub builtins on demand. */
+#define DEF_SS_BUILTIN(NAME, FNSPEC, CODE, FNTYPELIST) \
+ static tree get_ ## NAME () { \
+ tree decl = builtin_decl_explicit (CODE); \
+ if (!decl) \
+ { \
+ tree type = build_function_type_list FNTYPELIST; \
+ tree attrs = NULL; \
+ if (FNSPEC) \
+ attrs = tree_cons (get_identifier ("fn spec"), \
+ build_tree_list \
+ (NULL_TREE, \
+ build_string (strlen (FNSPEC), \
+ (FNSPEC))), \
+ attrs); \
+ decl = add_builtin_function_ext_scope \
+ ("__builtin___strub_" #NAME, \
+ type, CODE, BUILT_IN_NORMAL, \
+ "__strub_" #NAME, attrs); \
+ TREE_NOTHROW (decl) = true; \
+ set_builtin_decl ((CODE), decl, true); \
+ } \
+ return decl; \
+ }
+
+ DEF_SS_BUILTIN (enter, ". Ot",
+ BUILT_IN___STRUB_ENTER,
+ (void_type_node, get_qpwmt (), NULL))
+ DEF_SS_BUILTIN (update, ". Wt",
+ BUILT_IN___STRUB_UPDATE,
+ (void_type_node, get_qpwmt (), NULL))
+ DEF_SS_BUILTIN (leave, ". w ",
+ BUILT_IN___STRUB_LEAVE,
+ (void_type_node, get_qpwmt (), NULL))
+
+#undef DEF_SS_BUILTIN
+
+ /* Define strub identifiers on demand. */
+#define DEF_IDENT(IDX, NAME) \
+ static inline tree get_ ## NAME () { \
+ int idx = STRUB_IDENT_BASE + IDX; \
+ tree identifier = strub_cache[idx]; \
+ if (!identifier) \
+ strub_cache[idx] = identifier = get_identifier (".strub." #NAME); \
+ return identifier; \
+ }
+
+ DEF_IDENT (0, watermark_ptr)
+ DEF_IDENT (1, va_list_ptr)
+ DEF_IDENT (2, apply_args)
+
+#undef DEF_IDENT
+
+ static inline int adjust_at_calls_type (tree);
+ static inline void adjust_at_calls_call (cgraph_edge *, int, tree);
+ static inline void adjust_at_calls_calls (cgraph_node *);
+
+ /* Add to SEQ a call to the strub watermark update builtin, taking NODE's
+ location if given. Optionally add the corresponding edge from NODE, with
+ execution frequency COUNT. Return the modified SEQ. */
+
+ static inline gimple_seq
+ call_update_watermark (tree wmptr, cgraph_node *node, profile_count count,
+ gimple_seq seq = NULL)
+ {
+ tree uwm = get_update ();
+ gcall *update = gimple_build_call (uwm, 1, wmptr);
+ if (node)
+ gimple_set_location (update, DECL_SOURCE_LOCATION (node->decl));
+ gimple_seq_add_stmt (&seq, update);
+ if (node)
+ node->create_edge (cgraph_node::get_create (uwm), update, count, false);
+ return seq;
+ }
+
+};
+
+} // anon namespace
+
+/* Gather with this type a collection of parameters that we're turning into
+ explicit references. */
+
+typedef hash_set<tree> indirect_parms_t;
+
+/* Dereference OP's incoming turned-into-reference parm if it's an
+ INDIRECT_PARMS or an ADDR_EXPR thereof. Set *REC and return according to
+ gimple-walking expectations. */
+
+static tree
+maybe_make_indirect (indirect_parms_t &indirect_parms, tree op, int *rec)
+{
+ if (DECL_P (op))
+ {
+ *rec = 0;
+ if (indirect_parms.contains (op))
+ {
+ tree ret = gimple_fold_indirect_ref (op);
+ if (!ret)
+ ret = build2 (MEM_REF,
+ TREE_TYPE (TREE_TYPE (op)),
+ op,
+ build_int_cst (TREE_TYPE (op), 0));
+ return ret;
+ }
+ }
+ else if (TREE_CODE (op) == ADDR_EXPR
+ && DECL_P (TREE_OPERAND (op, 0)))
+ {
+ *rec = 0;
+ if (indirect_parms.contains (TREE_OPERAND (op, 0)))
+ {
+ op = TREE_OPERAND (op, 0);
+ return op;
+ }
+ }
+
+ return NULL_TREE;
+}
+
+/* A gimple-walking function that adds dereferencing to indirect parms. */
+
+static tree
+walk_make_indirect (tree *op, int *rec, void *arg)
+{
+ walk_stmt_info *wi = (walk_stmt_info *)arg;
+ indirect_parms_t &indirect_parms = *(indirect_parms_t *)wi->info;
+
+ if (!*op || TYPE_P (*op))
+ {
+ *rec = 0;
+ return NULL_TREE;
+ }
+
+ if (tree repl = maybe_make_indirect (indirect_parms, *op, rec))
+ {
+ *op = repl;
+ wi->changed = true;
+ }
+
+ return NULL_TREE;
+}
+
+/* A gimple-walking function that turns any non-gimple-val ADDR_EXPRs into a
+ separate SSA. Though addresses of e.g. parameters, and of members thereof,
+ are gimple vals, turning parameters into references, with an extra layer of
+ indirection and thus explicit dereferencing, need to be regimplified. */
+
+static tree
+walk_regimplify_addr_expr (tree *op, int *rec, void *arg)
+{
+ walk_stmt_info *wi = (walk_stmt_info *)arg;
+ gimple_stmt_iterator &gsi = *(gimple_stmt_iterator *)wi->info;
+
+ *rec = 0;
+
+ if (!*op || TREE_CODE (*op) != ADDR_EXPR)
+ return NULL_TREE;
+
+ if (!is_gimple_val (*op))
+ {
+ tree ret = force_gimple_operand_gsi (&gsi, *op, true,
+ NULL_TREE, true, GSI_SAME_STMT);
+ gcc_assert (ret != *op);
+ *op = ret;
+ wi->changed = true;
+ }
+
+ return NULL_TREE;
+}
+
+/* Turn STMT's PHI arg defs into separate SSA defs if they've become
+ non-gimple_val. Return TRUE if any edge insertions need to be committed. */
+
+static bool
+walk_regimplify_phi (gphi *stmt)
+{
+ bool needs_commit = false;
+
+ for (unsigned i = 0, n = gimple_phi_num_args (stmt); i < n; i++)
+ {
+ tree op = gimple_phi_arg_def (stmt, i);
+ if ((TREE_CODE (op) == ADDR_EXPR
+ && !is_gimple_val (op))
+ /* ??? A PARM_DECL that was addressable in the original function and
+ had its address in PHI nodes, but that became a reference in the
+ wrapped clone would NOT be updated by update_ssa in PHI nodes.
+ Alas, if we were to create a default def for it now, update_ssa
+ would complain that the symbol that needed rewriting already has
+ SSA names associated with it. OTOH, leaving the PARM_DECL alone,
+ it eventually causes errors because it remains unchanged in PHI
+ nodes, but it gets rewritten as expected if it appears in other
+ stmts. So we cheat a little here, and force the PARM_DECL out of
+ the PHI node and into an assignment. It's a little expensive,
+ because we insert it at the edge, which introduces a basic block
+ that's entirely unnecessary, but it works, and the block will be
+ removed as the default def gets propagated back into the PHI node,
+ so the final optimized code looks just as expected. */
+ || (TREE_CODE (op) == PARM_DECL
+ && !TREE_ADDRESSABLE (op)))
+ {
+ tree temp = make_ssa_name (TREE_TYPE (op), stmt);
+ if (TREE_CODE (op) == PARM_DECL)
+ SET_SSA_NAME_VAR_OR_IDENTIFIER (temp, DECL_NAME (op));
+ SET_PHI_ARG_DEF (stmt, i, temp);
+
+ gimple *assign = gimple_build_assign (temp, op);
+ if (gimple_phi_arg_has_location (stmt, i))
+ gimple_set_location (assign, gimple_phi_arg_location (stmt, i));
+ gsi_insert_on_edge (gimple_phi_arg_edge (stmt, i), assign);
+ needs_commit = true;
+ }
+ }
+
+ return needs_commit;
+}
+
+/* Create a reference type to use for PARM when turning it into a reference.
+ NONALIASED causes the reference type to gain its own separate alias set, so
+ that accessing the indirectly-passed parm won'will not add aliasing
+ noise. */
+
+static tree
+build_ref_type_for (tree parm, bool nonaliased = true)
+{
+ gcc_checking_assert (TREE_CODE (parm) == PARM_DECL);
+
+ tree ref_type = build_reference_type (TREE_TYPE (parm));
+
+ if (!nonaliased)
+ return ref_type;
+
+ /* Each PARM turned indirect still points to the distinct memory area at the
+ wrapper, and the reference in unchanging, so we might qualify it, but...
+ const is not really important, since we're only using default defs for the
+ reference parm anyway, and not introducing any defs, and restrict seems to
+ cause trouble. E.g., libgnat/s-concat3.adb:str_concat_3 has memmoves that,
+ if it's wrapped, the memmoves are deleted in dse1. Using a distinct alias
+ set seems to not run afoul of this problem, and it hopefully enables the
+ compiler to tell the pointers do point to objects that are not otherwise
+ aliased. */
+ tree qref_type = build_variant_type_copy (ref_type);
+
+ TYPE_ALIAS_SET (qref_type) = new_alias_set ();
+ record_alias_subset (TYPE_ALIAS_SET (qref_type), get_alias_set (ref_type));
+
+ return qref_type;
+}
+
+/* Add cgraph edges from current_function_decl to callees in SEQ with frequency
+ COUNT, assuming all calls in SEQ are direct. */
+
+static void
+add_call_edges_for_seq (gimple_seq seq, profile_count count)
+{
+ cgraph_node *node = cgraph_node::get_create (current_function_decl);
+
+ for (gimple_stmt_iterator gsi = gsi_start (seq);
+ !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple *stmt = gsi_stmt (gsi);
+
+ gcall *call = dyn_cast <gcall *> (stmt);
+ if (!call)
+ continue;
+
+ tree callee = gimple_call_fndecl (call);
+ gcc_checking_assert (callee);
+ node->create_edge (cgraph_node::get_create (callee), call, count, false);
+ }
+}
+
+/* Insert SEQ after the call at GSI, as if the call was in a try block with SEQ
+ as finally, i.e., SEQ will run after the call whether it returns or
+ propagates an exception. This handles block splitting, EH edge and block
+ creation, noreturn and nothrow optimizations, and even throwing calls without
+ preexisting local handlers. */
+
+static void
+gsi_insert_finally_seq_after_call (gimple_stmt_iterator gsi, gimple_seq seq)
+{
+ if (!seq)
+ return;
+
+ gimple *stmt = gsi_stmt (gsi);
+
+ if (gimple_has_location (stmt))
+ annotate_all_with_location (seq, gimple_location (stmt));
+
+ gcall *call = dyn_cast <gcall *> (stmt);
+ bool noreturn_p = call && gimple_call_noreturn_p (call);
+ int eh_lp = lookup_stmt_eh_lp (stmt);
+ bool must_not_throw_p = eh_lp < 0;
+ bool nothrow_p = (must_not_throw_p
+ || (call && gimple_call_nothrow_p (call))
+ || (eh_lp <= 0
+ && (TREE_NOTHROW (cfun->decl)
+ || !flag_exceptions)));
+
+ if (noreturn_p && nothrow_p)
+ return;
+
+ /* Don't expect an EH edge if we're not to throw, or if we're not in an EH
+ region yet. */
+ bool no_eh_edge_p = (nothrow_p || !eh_lp);
+ bool must_end_bb = stmt_ends_bb_p (stmt);
+
+ edge eft = NULL, eeh = NULL;
+ if (must_end_bb && !(noreturn_p && no_eh_edge_p))
+ {
+ gcc_checking_assert (gsi_one_before_end_p (gsi));
+
+ edge e;
+ edge_iterator ei;
+ FOR_EACH_EDGE (e, ei, gsi_bb (gsi)->succs)
+ {
+ if ((e->flags & EDGE_EH))
+ {
+ gcc_checking_assert (!eeh);
+ eeh = e;
+#if !CHECKING_P
+ if (eft || noreturn_p)
+ break;
+#endif
+ }
+ if ((e->flags & EDGE_FALLTHRU))
+ {
+ gcc_checking_assert (!eft);
+ eft = e;
+#if !CHECKING_P
+ if (eeh || no_eh_edge_p)
+ break;
+#endif
+ }
+ }
+
+ gcc_checking_assert (!(eft && (eft->flags & EDGE_FALLTHRU))
+ == noreturn_p);
+ gcc_checking_assert (!(eeh && (eeh->flags & EDGE_EH))
+ == no_eh_edge_p);
+ gcc_checking_assert (eft != eeh);
+ }
+
+ if (!noreturn_p)
+ {
+ gimple_seq nseq = nothrow_p ? seq : gimple_seq_copy (seq);
+
+ if (must_end_bb)
+ {
+ gcc_checking_assert (gsi_one_before_end_p (gsi));
+ add_call_edges_for_seq (nseq, eft->count ());
+ gsi_insert_seq_on_edge_immediate (eft, nseq);
+ }
+ else
+ {
+ add_call_edges_for_seq (nseq, gsi_bb (gsi)->count);
+ gsi_insert_seq_after (&gsi, nseq, GSI_SAME_STMT);
+ }
+ }
+
+ if (nothrow_p)
+ return;
+
+ if (eh_lp)
+ {
+ add_call_edges_for_seq (seq, eeh->count ());
+ gsi_insert_seq_on_edge_immediate (eeh, seq);
+ return;
+ }
+
+ /* A throwing call may appear within a basic block in a function that doesn't
+ have any EH regions. We're going to add a cleanup if so, therefore the
+ block will have to be split. */
+ basic_block bb = gsi_bb (gsi);
+ if (!gsi_one_before_end_p (gsi))
+ split_block (bb, stmt);
+
+ /* Create a new block for the EH cleanup. */
+ basic_block bb_eh_cleanup = create_empty_bb (bb);
+ if (dom_info_available_p (CDI_DOMINATORS))
+ set_immediate_dominator (CDI_DOMINATORS, bb_eh_cleanup, bb);
+ if (current_loops)
+ add_bb_to_loop (bb_eh_cleanup, current_loops->tree_root);
+
+ /* Make the new block an EH cleanup for the call. */
+ eh_region new_r = gen_eh_region_cleanup (NULL);
+ eh_landing_pad lp = gen_eh_landing_pad (new_r);
+ tree label = gimple_block_label (bb_eh_cleanup);
+ lp->post_landing_pad = label;
+ EH_LANDING_PAD_NR (label) = lp->index;
+ add_stmt_to_eh_lp (stmt, lp->index);
+
+ /* Add the cleanup code to the EH cleanup block. */
+ gsi = gsi_after_labels (bb_eh_cleanup);
+ gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
+
+ /* And then propagate the exception further. */
+ gresx *resx = gimple_build_resx (new_r->index);
+ if (gimple_has_location (stmt))
+ gimple_set_location (resx, gimple_location (stmt));
+ gsi_insert_before (&gsi, resx, GSI_SAME_STMT);
+
+ /* Finally, wire the EH cleanup block into the CFG. */
+ edge neeh = make_eh_edge (stmt);
+ neeh->probability = profile_probability::never ();
+ gcc_checking_assert (neeh->dest == bb_eh_cleanup);
+ gcc_checking_assert (!neeh->dest->count.initialized_p ());
+ neeh->dest->count = neeh->count ();
+ add_call_edges_for_seq (seq, neeh->dest->count);
+}
+
+/* Copy the attribute list at *ATTRS, minus any NAME attributes, leaving
+ shareable trailing nodes alone. */
+
+static inline void
+remove_named_attribute_unsharing (const char *name, tree *attrs)
+{
+ while (tree found = lookup_attribute (name, *attrs))
+ {
+ /* Copy nodes up to the next NAME attribute. */
+ while (*attrs != found)
+ {
+ *attrs = tree_cons (TREE_PURPOSE (*attrs),
+ TREE_VALUE (*attrs),
+ TREE_CHAIN (*attrs));
+ attrs = &TREE_CHAIN (*attrs);
+ }
+ /* Then drop it. */
+ gcc_checking_assert (*attrs == found);
+ *attrs = TREE_CHAIN (*attrs);
+ }
+}
+
+/* Record the order of the last cgraph entry whose mode we've already set, so
+ that we can perform mode setting incrementally without duplication. */
+static int last_cgraph_order;
+
+/* Set strub modes for functions introduced since the last call. */
+
+static void
+ipa_strub_set_mode_for_new_functions ()
+{
+ if (symtab->order == last_cgraph_order)
+ return;
+
+ cgraph_node *node;
+
+ /* Go through the functions twice, once over non-aliases, and then over
+ aliases, so that aliases can reuse the mode computation of their ultimate
+ targets. */
+ for (int aliases = 0; aliases <= 1; aliases++)
+ FOR_EACH_FUNCTION (node)
+ {
+ if (!node->alias != !aliases)
+ continue;
+
+ /* Already done. */
+ if (node->order < last_cgraph_order)
+ continue;
+
+ set_strub_mode (node);
+ }
+
+ last_cgraph_order = symtab->order;
+}
+
+/* Return FALSE if NODE is a strub context, and TRUE otherwise. */
+
+bool
+strub_splittable_p (cgraph_node *node)
+{
+ switch (get_strub_mode (node))
+ {
+ case STRUB_WRAPPED:
+ case STRUB_AT_CALLS:
+ case STRUB_AT_CALLS_OPT:
+ case STRUB_INLINABLE:
+ case STRUB_INTERNAL:
+ case STRUB_WRAPPER:
+ return false;
+
+ case STRUB_CALLABLE:
+ case STRUB_DISABLED:
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ return true;
+}
+
+/* Return the PARM_DECL of the incoming watermark pointer, if there is one. */
+
+tree
+strub_watermark_parm (tree fndecl)
+{
+ switch (get_strub_mode_from_fndecl (fndecl))
+ {
+ case STRUB_WRAPPED:
+ case STRUB_AT_CALLS:
+ case STRUB_AT_CALLS_OPT:
+ break;
+
+ case STRUB_INTERNAL:
+ case STRUB_WRAPPER:
+ case STRUB_CALLABLE:
+ case STRUB_DISABLED:
+ case STRUB_INLINABLE:
+ return NULL_TREE;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ for (tree parm = DECL_ARGUMENTS (fndecl); parm; parm = DECL_CHAIN (parm))
+ /* The type (variant) compare finds the parameter even in a just-created
+ clone, before we set its name, but the type-based compare doesn't work
+ during builtin expansion within the lto compiler, because we'll have
+ created a separate variant in that run. */
+ if (TREE_TYPE (parm) == pass_ipa_strub::get_qpwmt ()
+ || DECL_NAME (parm) == pass_ipa_strub::get_watermark_ptr ())
+ return parm;
+
+ gcc_unreachable ();
+}
+
+/* Adjust a STRUB_AT_CALLS function TYPE, adding a watermark pointer if it
+ hasn't been added yet. Return the named argument count. */
+
+int
+pass_ipa_strub::adjust_at_calls_type (tree type)
+{
+ int named_args = 0;
+
+ gcc_checking_assert (same_strub_mode_in_variants_p (type));
+
+ if (!TYPE_ARG_TYPES (type))
+ return named_args;
+
+ tree *tlist = &TYPE_ARG_TYPES (type);
+ tree qpwmptrt = get_qpwmt ();
+ while (*tlist && TREE_VALUE (*tlist) != void_type_node)
+ {
+ /* The type has already been adjusted. */
+ if (TREE_VALUE (*tlist) == qpwmptrt)
+ return named_args;
+ named_args++;
+ *tlist = tree_cons (TREE_PURPOSE (*tlist),
+ TREE_VALUE (*tlist),
+ TREE_CHAIN (*tlist));
+ tlist = &TREE_CHAIN (*tlist);
+ }
+
+ /* Add the new argument after all named arguments, so as to not mess with
+ attributes that reference parameters. */
+ *tlist = tree_cons (NULL_TREE, get_qpwmt (), *tlist);
+
+#if ATTR_FNSPEC_DECONST_WATERMARK
+ if (!type_already_adjusted)
+ {
+ int flags = flags_from_decl_or_type (type);
+ tree fnspec = lookup_attribute ("fn spec", type);
+
+ if ((flags & (ECF_CONST | ECF_PURE | ECF_NOVOPS)) || fnspec)
+ {
+ size_t xargs = 1;
+ size_t curlen = 0, tgtlen = 2 + 2 * (named_args + xargs);
+ auto_vec<char> nspecv (tgtlen);
+ char *nspec = &nspecv[0]; /* It will *not* be NUL-terminated! */
+ if (fnspec)
+ {
+ tree fnspecstr = TREE_VALUE (TREE_VALUE (fnspec));
+ curlen = TREE_STRING_LENGTH (fnspecstr);
+ memcpy (nspec, TREE_STRING_POINTER (fnspecstr), curlen);
+ }
+ if (!curlen)
+ {
+ nspec[curlen++] = '.';
+ nspec[curlen++] = ((flags & ECF_CONST)
+ ? 'c'
+ : (flags & ECF_PURE)
+ ? 'p'
+ : ' ');
+ }
+ while (curlen < tgtlen - 2 * xargs)
+ {
+ nspec[curlen++] = '.';
+ nspec[curlen++] = ' ';
+ }
+ nspec[curlen++] = 'W';
+ nspec[curlen++] = 't';
+
+ /* The type has already been copied, if needed, before adding
+ parameters. */
+ TYPE_ATTRIBUTES (type)
+ = tree_cons (get_identifier ("fn spec"),
+ build_tree_list (NULL_TREE,
+ build_string (tgtlen, nspec)),
+ TYPE_ATTRIBUTES (type));
+ }
+ }
+#endif
+
+ return named_args;
+}
+
+/* Adjust a call to an at-calls call target. Create a watermark local variable
+ if needed, initialize it before, pass it to the callee according to the
+ modified at-calls interface, and release the callee's stack space after the
+ call, if not deferred. If the call is const or pure, arrange for the
+ watermark to not be assumed unused or unchanged. */
+
+void
+pass_ipa_strub::adjust_at_calls_call (cgraph_edge *e, int named_args,
+ tree callee_fntype)
+{
+ gcc_checking_assert (e->call_stmt);
+ gcall *ocall = e->call_stmt;
+ gimple_stmt_iterator gsi = gsi_for_stmt (ocall);
+
+ /* Make sure we haven't modified this call yet. */
+ gcc_checking_assert (!(int (gimple_call_num_args (ocall)) > named_args
+ && (TREE_TYPE (gimple_call_arg (ocall, named_args))
+ == get_pwmt ())));
+
+ /* If we're already within a strub context, pass on the incoming watermark
+ pointer, and omit the enter and leave calls around the modified call, as an
+ optimization, or as a means to satisfy a tail-call requirement. */
+ tree swmp = ((optimize_size || optimize > 2
+ || gimple_call_must_tail_p (ocall)
+ || (optimize == 2 && gimple_call_tail_p (ocall)))
+ ? strub_watermark_parm (e->caller->decl)
+ : NULL_TREE);
+ bool omit_own_watermark = swmp;
+ tree swm = NULL_TREE;
+ if (!omit_own_watermark)
+ {
+ swm = create_tmp_var (get_wmt (), ".strub.watermark");
+ TREE_ADDRESSABLE (swm) = true;
+ swmp = build1 (ADDR_EXPR, get_pwmt (), swm);
+
+ /* Initialize the watermark before the call. */
+ tree enter = get_enter ();
+ gcall *stptr = gimple_build_call (enter, 1,
+ unshare_expr (swmp));
+ if (gimple_has_location (ocall))
+ gimple_set_location (stptr, gimple_location (ocall));
+ gsi_insert_before (&gsi, stptr, GSI_SAME_STMT);
+ e->caller->create_edge (cgraph_node::get_create (enter),
+ stptr, gsi_bb (gsi)->count, false);
+ }
+
+
+ /* Replace the call with one that passes the swmp argument first. */
+ gcall *wrcall;
+ { gcall *stmt = ocall;
+ // Mostly copied from gimple_call_copy_skip_args.
+ int i = 0;
+ int nargs = gimple_call_num_args (stmt);
+ auto_vec<tree> vargs (MAX (nargs, named_args) + 1);
+ gcall *new_stmt;
+
+ /* pr71109.c calls a prototypeless function, then defines it with
+ additional arguments. It's ill-formed, but after it's inlined,
+ it somehow works out. */
+ for (; i < named_args && i < nargs; i++)
+ vargs.quick_push (gimple_call_arg (stmt, i));
+ for (; i < named_args; i++)
+ vargs.quick_push (null_pointer_node);
+
+ vargs.quick_push (unshare_expr (swmp));
+
+ for (; i < nargs; i++)
+ vargs.quick_push (gimple_call_arg (stmt, i));
+
+ if (gimple_call_internal_p (stmt))
+ gcc_unreachable ();
+ else
+ new_stmt = gimple_build_call_vec (gimple_call_fn (stmt), vargs);
+ gimple_call_set_fntype (new_stmt, callee_fntype);
+
+ if (gimple_call_lhs (stmt))
+ gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
+
+ gimple_move_vops (new_stmt, stmt);
+
+ if (gimple_has_location (stmt))
+ gimple_set_location (new_stmt, gimple_location (stmt));
+ gimple_call_copy_flags (new_stmt, stmt);
+ gimple_call_set_chain (new_stmt, gimple_call_chain (stmt));
+
+ gimple_set_modified (new_stmt, true);
+
+ wrcall = new_stmt;
+ }
+
+ update_stmt (wrcall);
+ gsi_replace (&gsi, wrcall, true);
+ cgraph_edge::set_call_stmt (e, wrcall, false);
+
+ /* Insert the strub code after the call. */
+ gimple_seq seq = NULL;
+
+#if !ATTR_FNSPEC_DECONST_WATERMARK
+ /* If the call will be assumed to not modify or even read the
+ watermark, make it read and modified ourselves. */
+ if ((gimple_call_flags (wrcall)
+ & (ECF_CONST | ECF_PURE | ECF_NOVOPS)))
+ {
+ if (!swm)
+ swm = build2 (MEM_REF,
+ TREE_TYPE (TREE_TYPE (swmp)),
+ swmp,
+ build_int_cst (TREE_TYPE (swmp), 0));
+
+ vec<tree, va_gc> *inputs = NULL;
+ vec<tree, va_gc> *outputs = NULL;
+ vec_safe_push (outputs,
+ build_tree_list
+ (build_tree_list
+ (NULL_TREE, build_string (2, "=m")),
+ unshare_expr (swm)));
+ vec_safe_push (inputs,
+ build_tree_list
+ (build_tree_list
+ (NULL_TREE, build_string (1, "m")),
+ unshare_expr (swm)));
+ gasm *forcemod = gimple_build_asm_vec ("", inputs, outputs,
+ NULL, NULL);
+ gimple_seq_add_stmt (&seq, forcemod);
+
+ /* If the call will be assumed to not even read the watermark,
+ make sure it is already in memory before the call. */
+ if ((gimple_call_flags (wrcall) & ECF_CONST))
+ {
+ vec<tree, va_gc> *inputs = NULL;
+ vec_safe_push (inputs,
+ build_tree_list
+ (build_tree_list
+ (NULL_TREE, build_string (1, "m")),
+ unshare_expr (swm)));
+ gasm *force_store = gimple_build_asm_vec ("", inputs, NULL,
+ NULL, NULL);
+ if (gimple_has_location (wrcall))
+ gimple_set_location (force_store, gimple_location (wrcall));
+ gsi_insert_before (&gsi, force_store, GSI_SAME_STMT);
+ }
+ }
+#endif
+
+ if (!omit_own_watermark)
+ {
+ gcall *sleave = gimple_build_call (get_leave (), 1,
+ unshare_expr (swmp));
+ gimple_seq_add_stmt (&seq, sleave);
+
+ gassign *clobber = gimple_build_assign (swm,
+ build_clobber
+ (TREE_TYPE (swm)));
+ gimple_seq_add_stmt (&seq, clobber);
+ }
+
+ gsi_insert_finally_seq_after_call (gsi, seq);
+}
+
+/* Adjust all at-calls calls in NODE. */
+
+void
+pass_ipa_strub::adjust_at_calls_calls (cgraph_node *node)
+{
+ /* Adjust unknown-callee indirect calls with STRUB_AT_CALLS types within
+ onode. */
+ if (node->indirect_calls)
+ {
+ push_cfun (DECL_STRUCT_FUNCTION (node->decl));
+ for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee)
+ {
+ gcc_checking_assert (e->indirect_unknown_callee);
+
+ if (!e->call_stmt)
+ continue;
+
+ tree callee_fntype;
+ enum strub_mode callee_mode
+ = effective_strub_mode_for_call (e->call_stmt, &callee_fntype);
+
+ if (callee_mode != STRUB_AT_CALLS
+ && callee_mode != STRUB_AT_CALLS_OPT)
+ continue;
+
+ int named_args = adjust_at_calls_type (callee_fntype);
+
+ adjust_at_calls_call (e, named_args, callee_fntype);
+ }
+ pop_cfun ();
+ }
+
+ if (node->callees)
+ {
+ push_cfun (DECL_STRUCT_FUNCTION (node->decl));
+ for (cgraph_edge *e = node->callees; e; e = e->next_callee)
+ {
+ gcc_checking_assert (!e->indirect_unknown_callee);
+
+ if (!e->call_stmt)
+ continue;
+
+ tree callee_fntype;
+ enum strub_mode callee_mode
+ = effective_strub_mode_for_call (e->call_stmt, &callee_fntype);
+
+ if (callee_mode != STRUB_AT_CALLS
+ && callee_mode != STRUB_AT_CALLS_OPT)
+ continue;
+
+ int named_args = adjust_at_calls_type (callee_fntype);
+
+ adjust_at_calls_call (e, named_args, callee_fntype);
+ }
+ pop_cfun ();
+ }
+}
+
+/* The strubm (strub mode) pass computes a strub mode for each function in the
+ call graph, and checks, before any inlining, that strub callability
+ requirements in effect are satisfied. */
+
+unsigned int
+pass_ipa_strub_mode::execute (function *)
+{
+ last_cgraph_order = 0;
+ ipa_strub_set_mode_for_new_functions ();
+
+ /* Verify before any inlining or other transformations. */
+ verify_strub ();
+
+ return 0;
+}
+
+/* Create a strub mode pass. */
+
+simple_ipa_opt_pass *
+make_pass_ipa_strub_mode (gcc::context *ctxt)
+{
+ return new pass_ipa_strub_mode (ctxt);
+}
+
+/* The strub pass proper adjusts types, signatures, and at-calls calls, and
+ splits internal-strub functions. */
+
+unsigned int
+pass_ipa_strub::execute (function *)
+{
+ cgraph_node *onode;
+
+ ipa_strub_set_mode_for_new_functions ();
+
+ /* First, adjust the signature of at-calls functions. We adjust types of
+ at-calls functions first, so that we don't modify types in place unless
+ strub is explicitly requested. */
+ FOR_EACH_FUNCTION (onode)
+ {
+ enum strub_mode mode = get_strub_mode (onode);
+
+ if (mode == STRUB_AT_CALLS
+ || mode == STRUB_AT_CALLS_OPT)
+ {
+ /* Create a type variant if strubbing was not explicitly requested in
+ the function type. */
+ if (get_strub_mode_from_type (TREE_TYPE (onode->decl)) != mode)
+ distinctify_node_type (onode);
+
+ int named_args = adjust_at_calls_type (TREE_TYPE (onode->decl));
+
+ /* An external function explicitly declared with strub won't have a
+ body. Even with implicit at-calls strub, a function may have had its
+ body removed after we selected the mode, and then we have nothing
+ further to do. */
+ if (!onode->has_gimple_body_p ())
+ continue;
+
+ tree *pargs = &DECL_ARGUMENTS (onode->decl);
+
+ /* A noninterposable_alias reuses the same parm decl chain, don't add
+ the parm twice. */
+ bool aliased_parms = (onode->alias && *pargs
+ && DECL_CONTEXT (*pargs) != onode->decl);
+
+ if (aliased_parms)
+ continue;
+
+ for (int i = 0; i < named_args; i++)
+ pargs = &DECL_CHAIN (*pargs);
+
+ tree wmptr = build_decl (DECL_SOURCE_LOCATION (onode->decl),
+ PARM_DECL,
+ get_watermark_ptr (),
+ get_qpwmt ());
+ DECL_ARTIFICIAL (wmptr) = 1;
+ DECL_ARG_TYPE (wmptr) = get_qpwmt ();
+ DECL_CONTEXT (wmptr) = onode->decl;
+ TREE_USED (wmptr) = 1;
+ DECL_CHAIN (wmptr) = *pargs;
+ *pargs = wmptr;
+
+ if (onode->alias)
+ continue;
+
+ cgraph_node *nnode = onode;
+ push_cfun (DECL_STRUCT_FUNCTION (nnode->decl));
+
+ {
+ edge e = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun));
+ gimple_seq seq = call_update_watermark (wmptr, nnode, e->src->count);
+ gsi_insert_seq_on_edge_immediate (e, seq);
+ }
+
+ if (DECL_STRUCT_FUNCTION (nnode->decl)->calls_alloca)
+ {
+ basic_block bb;
+ FOR_EACH_BB_FN (bb, cfun)
+ for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
+ !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple *stmt = gsi_stmt (gsi);
+
+ gcall *call = dyn_cast <gcall *> (stmt);
+
+ if (!call)
+ continue;
+
+ if (gimple_alloca_call_p (call))
+ {
+ /* Capture stack growth. */
+ gimple_seq seq = call_update_watermark (wmptr, NULL,
+ gsi_bb (gsi)
+ ->count);
+ gsi_insert_finally_seq_after_call (gsi, seq);
+ }
+ }
+ }
+
+ pop_cfun ();
+ }
+ }
+
+ FOR_EACH_FUNCTION (onode)
+ {
+ if (!onode->has_gimple_body_p ())
+ continue;
+
+ enum strub_mode mode = get_strub_mode (onode);
+
+ if (mode != STRUB_INTERNAL)
+ {
+ adjust_at_calls_calls (onode);
+ continue;
+ }
+
+ bool is_stdarg = calls_builtin_va_start_p (onode);;
+ bool apply_args = calls_builtin_apply_args_p (onode);
+
+ vec<ipa_adjusted_param, va_gc> *nparms = NULL;
+ unsigned j = 0;
+ {
+ // The following loop copied from ipa-split.c:split_function.
+ for (tree parm = DECL_ARGUMENTS (onode->decl);
+ parm; parm = DECL_CHAIN (parm), j++)
+ {
+ ipa_adjusted_param adj = {};
+ adj.op = IPA_PARAM_OP_COPY;
+ adj.base_index = j;
+ adj.prev_clone_index = j;
+ vec_safe_push (nparms, adj);
+ }
+
+ if (apply_args)
+ {
+ ipa_adjusted_param aaadj = {};
+ aaadj.op = IPA_PARAM_OP_NEW;
+ aaadj.type = get_qptr ();
+ vec_safe_push (nparms, aaadj);
+ }
+
+ if (is_stdarg)
+ {
+ ipa_adjusted_param vladj = {};
+ vladj.op = IPA_PARAM_OP_NEW;
+ vladj.type = get_qpvalst ();
+ vec_safe_push (nparms, vladj);
+ }
+
+ ipa_adjusted_param wmadj = {};
+ wmadj.op = IPA_PARAM_OP_NEW;
+ wmadj.type = get_qpwmt ();
+ vec_safe_push (nparms, wmadj);
+ }
+ ipa_param_adjustments adj (nparms, -1, false);
+
+ cgraph_node *nnode = onode->create_version_clone_with_body
+ (auto_vec<cgraph_edge *> (0),
+ NULL, &adj, NULL, NULL, "strub", NULL);
+
+ if (!nnode)
+ {
+ error_at (DECL_SOURCE_LOCATION (onode->decl),
+ "failed to split %qD for %<strub%>",
+ onode->decl);
+ continue;
+ }
+
+ onode->split_part = true;
+ if (onode->calls_comdat_local)
+ nnode->add_to_same_comdat_group (onode);
+
+ set_strub_mode_to (onode, STRUB_WRAPPER);
+ set_strub_mode_to (nnode, STRUB_WRAPPED);
+
+ adjust_at_calls_calls (nnode);
+
+ /* Decide which of the wrapped function's parms we want to turn into
+ references to the argument passed to the wrapper. In general, we want to
+ copy small arguments, and avoid copying large ones. Variable-sized array
+ lengths given by other arguments, as in 20020210-1.c, would lead to
+ problems if passed by value, after resetting the original function and
+ dropping the length computation; passing them by reference works.
+ DECL_BY_REFERENCE is *not* a substitute for this: it involves copying
+ anyway, but performed at the caller. */
+ indirect_parms_t indirect_nparms (3, false);
+ unsigned adjust_ftype = 0;
+ unsigned named_args = 0;
+ for (tree parm = DECL_ARGUMENTS (onode->decl),
+ nparm = DECL_ARGUMENTS (nnode->decl),
+ nparmt = TYPE_ARG_TYPES (TREE_TYPE (nnode->decl));
+ parm;
+ named_args++,
+ parm = DECL_CHAIN (parm),
+ nparm = DECL_CHAIN (nparm),
+ nparmt = nparmt ? TREE_CHAIN (nparmt) : NULL_TREE)
+ if (!(0 /* DECL_BY_REFERENCE (narg) */
+ || is_gimple_reg_type (TREE_TYPE (nparm))
+ || VECTOR_TYPE_P (TREE_TYPE (nparm))
+ || TREE_CODE (TREE_TYPE (nparm)) == COMPLEX_TYPE
+ || (tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (nparm)))
+ && (tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (nparm)))
+ <= 4 * UNITS_PER_WORD))))
+ {
+ indirect_nparms.add (nparm);
+
+ /* ??? Is there any case in which it is not safe to suggest the parms
+ turned indirect don't alias anything else? They are distinct,
+ unaliased memory in the wrapper, and the wrapped can't possibly
+ take pointers into them because none of the pointers passed to the
+ wrapper can alias other incoming parameters passed by value, even
+ if with transparent reference, and the wrapper doesn't take any
+ extra parms that could point into wrapper's parms. So we can
+ probably drop the TREE_ADDRESSABLE and keep the TRUE. */
+ tree ref_type = build_ref_type_for (nparm,
+ true
+ || !TREE_ADDRESSABLE (parm));
+
+ DECL_ARG_TYPE (nparm) = TREE_TYPE (nparm) = ref_type;
+ relayout_decl (nparm);
+ TREE_ADDRESSABLE (nparm) = 0;
+ DECL_BY_REFERENCE (nparm) = 0;
+ DECL_NOT_GIMPLE_REG_P (nparm) = 0;
+ /* ??? This avoids mismatches in debug info bind stmts in
+ e.g. a-chahan . */
+ DECL_ABSTRACT_ORIGIN (nparm) = NULL;
+
+ if (nparmt)
+ adjust_ftype++;
+ }
+
+ /* Also adjust the wrapped function type, if needed. */
+ if (adjust_ftype)
+ {
+ tree nftype = TREE_TYPE (nnode->decl);
+
+ /* We always add at least one argument at the end of the signature, when
+ cloning the function, so we don't expect to need to duplicate the
+ type here. */
+ gcc_checking_assert (TYPE_ARG_TYPES (nftype)
+ != TYPE_ARG_TYPES (TREE_TYPE (onode->decl)));
+
+ /* Check that fnspec still works for the modified function signature,
+ and drop it otherwise. */
+ bool drop_fnspec = false;
+ tree fnspec = lookup_attribute ("fn spec", TYPE_ATTRIBUTES (nftype));
+ attr_fnspec spec = fnspec ? attr_fnspec (fnspec) : attr_fnspec ("");
+
+ unsigned retcopy;
+ if (!(fnspec && spec.returns_arg (&retcopy)))
+ retcopy = (unsigned) -1;
+
+ unsigned i = 0;
+ for (tree nparm = DECL_ARGUMENTS (nnode->decl),
+ nparmt = TYPE_ARG_TYPES (nftype);
+ adjust_ftype > 0;
+ i++, nparm = DECL_CHAIN (nparm), nparmt = TREE_CHAIN (nparmt))
+ if (indirect_nparms.contains (nparm))
+ {
+ TREE_VALUE (nparmt) = TREE_TYPE (nparm);
+ adjust_ftype--;
+
+ if (fnspec && !drop_fnspec)
+ {
+ if (i == retcopy)
+ drop_fnspec = true;
+ else if (spec.arg_specified_p (i))
+ {
+ /* Properties that apply to pointers only must not be
+ present, because we don't make pointers further
+ indirect. */
+ gcc_checking_assert
+ (!spec.arg_max_access_size_given_by_arg_p (i, NULL));
+ gcc_checking_assert (!spec.arg_copied_to_arg_p (i, NULL));
+
+ /* Any claim of direct access only is invalidated by
+ adding an indirection level. */
+ if (spec.arg_direct_p (i))
+ drop_fnspec = true;
+
+ /* If there's a claim the argument is not read from, the
+ added indirection invalidates it: if the argument is
+ used at all, then the pointer will necessarily be
+ read. */
+ if (!spec.arg_maybe_read_p (i)
+ && spec.arg_used_p (i))
+ drop_fnspec = true;
+ }
+ }
+ }
+
+ /* ??? Maybe we could adjust it instead. */
+ if (drop_fnspec)
+ remove_named_attribute_unsharing ("fn spec",
+ &TYPE_ATTRIBUTES (nftype));
+
+ TREE_TYPE (nnode->decl) = nftype;
+ }
+
+#if ATTR_FNSPEC_DECONST_WATERMARK
+ {
+ int flags = flags_from_decl_or_type (nnode->decl);
+ tree fnspec = lookup_attribute ("fn spec", TREE_TYPE (nnode->decl));
+
+ if ((flags & (ECF_CONST | ECF_PURE | ECF_NOVOPS)) || fnspec)
+ {
+ size_t xargs = 1 + int (is_stdarg) + int (apply_args);
+ size_t curlen = 0, tgtlen = 2 + 2 * (named_args + xargs);
+ auto_vec<char> nspecv (tgtlen);
+ char *nspec = &nspecv[0]; /* It will *not* be NUL-terminated! */
+ bool no_writes_p = true;
+ if (fnspec)
+ {
+ tree fnspecstr = TREE_VALUE (TREE_VALUE (fnspec));
+ curlen = TREE_STRING_LENGTH (fnspecstr);
+ memcpy (nspec, TREE_STRING_POINTER (fnspecstr), curlen);
+ if (!(flags & (ECF_CONST | ECF_PURE | ECF_NOVOPS))
+ && curlen >= 2
+ && nspec[1] != 'c' && nspec[1] != 'C'
+ && nspec[1] != 'p' && nspec[1] != 'P')
+ no_writes_p = false;
+ }
+ if (!curlen)
+ {
+ nspec[curlen++] = '.';
+ nspec[curlen++] = ((flags & ECF_CONST)
+ ? 'c'
+ : (flags & ECF_PURE)
+ ? 'p'
+ : ' ');
+ }
+ while (curlen < tgtlen - 2 * xargs)
+ {
+ nspec[curlen++] = '.';
+ nspec[curlen++] = ' ';
+ }
+
+ /* These extra args are unlikely to be present in const or pure
+ functions. It's conceivable that a function that takes variable
+ arguments, or that passes its arguments on to another function,
+ could be const or pure, but it would not modify the arguments, and,
+ being pure or const, it couldn't possibly modify or even access
+ memory referenced by them. But it can read from these internal
+ data structures created by the wrapper, and from any
+ argument-passing memory referenced by them, so we denote the
+ possibility of reading from multiple levels of indirection, but
+ only of reading because const/pure. */
+ if (apply_args)
+ {
+ nspec[curlen++] = 'r';
+ nspec[curlen++] = ' ';
+ }
+ if (is_stdarg)
+ {
+ nspec[curlen++] = (no_writes_p ? 'r' : '.');
+ nspec[curlen++] = (no_writes_p ? 't' : ' ');
+ }
+
+ nspec[curlen++] = 'W';
+ nspec[curlen++] = 't';
+
+ /* The type has already been copied before adding parameters. */
+ gcc_checking_assert (TYPE_ARG_TYPES (TREE_TYPE (nnode->decl))
+ != TYPE_ARG_TYPES (TREE_TYPE (onode->decl)));
+ TYPE_ATTRIBUTES (TREE_TYPE (nnode->decl))
+ = tree_cons (get_identifier ("fn spec"),
+ build_tree_list (NULL_TREE,
+ build_string (tgtlen, nspec)),
+ TYPE_ATTRIBUTES (TREE_TYPE (nnode->decl)));
+ }
+ }
+#endif
+
+ {
+ tree decl = onode->decl;
+ cgraph_node *target = nnode;
+
+ { // copied from create_wrapper
+
+ /* Preserve DECL_RESULT so we get right by reference flag. */
+ tree decl_result = DECL_RESULT (decl);
+
+ /* Remove the function's body but keep arguments to be reused
+ for thunk. */
+ onode->release_body (true);
+ onode->reset (/* unlike create_wrapper: preserve_comdat_group = */true);
+
+ DECL_UNINLINABLE (decl) = false;
+ DECL_RESULT (decl) = decl_result;
+ DECL_INITIAL (decl) = NULL;
+ allocate_struct_function (decl, false);
+ set_cfun (NULL);
+
+ /* Turn alias into thunk and expand it into GIMPLE representation. */
+ onode->definition = true;
+
+ thunk_info::get_create (onode);
+ onode->thunk = true;
+ onode->create_edge (target, NULL, onode->count);
+ onode->callees->can_throw_external = !TREE_NOTHROW (target->decl);
+
+ tree arguments = DECL_ARGUMENTS (decl);
+
+ while (arguments)
+ {
+ TREE_ADDRESSABLE (arguments) = false;
+ arguments = TREE_CHAIN (arguments);
+ }
+
+ {
+ tree alias = onode->callees->callee->decl;
+ tree thunk_fndecl = decl;
+ tree a;
+
+ int nxargs = 1 + is_stdarg + apply_args;
+
+ { // Simplified from expand_thunk.
+ tree restype;
+ basic_block bb, then_bb, else_bb, return_bb;
+ gimple_stmt_iterator bsi;
+ int nargs = 0;
+ tree arg;
+ int i;
+ tree resdecl;
+ tree restmp = NULL;
+
+ gcall *call;
+ greturn *ret;
+ bool alias_is_noreturn = TREE_THIS_VOLATILE (alias);
+
+ a = DECL_ARGUMENTS (thunk_fndecl);
+
+ current_function_decl = thunk_fndecl;
+
+ /* Ensure thunks are emitted in their correct sections. */
+ resolve_unique_section (thunk_fndecl, 0,
+ flag_function_sections);
+
+ bitmap_obstack_initialize (NULL);
+
+ /* Build the return declaration for the function. */
+ restype = TREE_TYPE (TREE_TYPE (thunk_fndecl));
+ if (DECL_RESULT (thunk_fndecl) == NULL_TREE)
+ {
+ resdecl = build_decl (input_location, RESULT_DECL, 0, restype);
+ DECL_ARTIFICIAL (resdecl) = 1;
+ DECL_IGNORED_P (resdecl) = 1;
+ DECL_CONTEXT (resdecl) = thunk_fndecl;
+ DECL_RESULT (thunk_fndecl) = resdecl;
+ }
+ else
+ resdecl = DECL_RESULT (thunk_fndecl);
+
+ profile_count cfg_count = onode->count;
+ if (!cfg_count.initialized_p ())
+ cfg_count = profile_count::from_gcov_type (BB_FREQ_MAX).guessed_local ();
+
+ bb = then_bb = else_bb = return_bb
+ = init_lowered_empty_function (thunk_fndecl, true, cfg_count);
+
+ bsi = gsi_start_bb (bb);
+
+ /* Build call to the function being thunked. */
+ if (!VOID_TYPE_P (restype)
+ && (!alias_is_noreturn
+ || TREE_ADDRESSABLE (restype)
+ || TREE_CODE (TYPE_SIZE_UNIT (restype)) != INTEGER_CST))
+ {
+ if (DECL_BY_REFERENCE (resdecl))
+ {
+ restmp = gimple_fold_indirect_ref (resdecl);
+ if (!restmp)
+ restmp = build2 (MEM_REF,
+ TREE_TYPE (TREE_TYPE (resdecl)),
+ resdecl,
+ build_int_cst (TREE_TYPE (resdecl), 0));
+ }
+ else if (!is_gimple_reg_type (restype))
+ {
+ if (aggregate_value_p (resdecl, TREE_TYPE (thunk_fndecl)))
+ {
+ restmp = resdecl;
+
+ if (VAR_P (restmp))
+ {
+ add_local_decl (cfun, restmp);
+ BLOCK_VARS (DECL_INITIAL (current_function_decl))
+ = restmp;
+ }
+ }
+ else
+ restmp = create_tmp_var (restype, "retval");
+ }
+ else
+ restmp = create_tmp_reg (restype, "retval");
+ }
+
+ for (arg = a; arg; arg = DECL_CHAIN (arg))
+ nargs++;
+ auto_vec<tree> vargs (nargs + nxargs);
+ i = 0;
+ arg = a;
+
+ if (nargs)
+ for (tree nparm = DECL_ARGUMENTS (nnode->decl);
+ i < nargs;
+ i++, arg = DECL_CHAIN (arg), nparm = DECL_CHAIN (nparm))
+ {
+ tree save_arg = arg;
+ tree tmp = arg;
+
+ /* Arrange to pass indirectly the parms, if we decided to do
+ so, and revert its type in the wrapper. */
+ if (indirect_nparms.contains (nparm))
+ {
+ tree ref_type = TREE_TYPE (nparm);
+ TREE_ADDRESSABLE (arg) = true;
+ tree addr = build1 (ADDR_EXPR, ref_type, arg);
+ tmp = arg = addr;
+ }
+ else
+ DECL_NOT_GIMPLE_REG_P (arg) = 0;
+
+ /* Convert the argument back to the type used by the calling
+ conventions, e.g. a non-prototyped float type is passed as
+ double, as in 930603-1.c, and needs to be converted back to
+ double to be passed on unchanged to the wrapped
+ function. */
+ if (TREE_TYPE (nparm) != DECL_ARG_TYPE (nparm))
+ arg = fold_convert (DECL_ARG_TYPE (nparm), arg);
+
+ if (!is_gimple_val (arg))
+ {
+ tmp = create_tmp_reg (TYPE_MAIN_VARIANT
+ (TREE_TYPE (arg)), "arg");
+ gimple *stmt = gimple_build_assign (tmp, arg);
+ gsi_insert_after (&bsi, stmt, GSI_NEW_STMT);
+ }
+ vargs.quick_push (tmp);
+ arg = save_arg;
+ }
+ /* These strub arguments are adjusted later. */
+ if (apply_args)
+ vargs.quick_push (null_pointer_node);
+ if (is_stdarg)
+ vargs.quick_push (null_pointer_node);
+ vargs.quick_push (null_pointer_node);
+ call = gimple_build_call_vec (build_fold_addr_expr_loc (0, alias),
+ vargs);
+ onode->callees->call_stmt = call;
+ // gimple_call_set_from_thunk (call, true);
+ if (DECL_STATIC_CHAIN (alias))
+ {
+ tree p = DECL_STRUCT_FUNCTION (alias)->static_chain_decl;
+ tree type = TREE_TYPE (p);
+ tree decl = build_decl (DECL_SOURCE_LOCATION (thunk_fndecl),
+ PARM_DECL, create_tmp_var_name ("CHAIN"),
+ type);
+ DECL_ARTIFICIAL (decl) = 1;
+ DECL_IGNORED_P (decl) = 1;
+ TREE_USED (decl) = 1;
+ DECL_CONTEXT (decl) = thunk_fndecl;
+ DECL_ARG_TYPE (decl) = type;
+ TREE_READONLY (decl) = 1;
+
+ struct function *sf = DECL_STRUCT_FUNCTION (thunk_fndecl);
+ sf->static_chain_decl = decl;
+
+ gimple_call_set_chain (call, decl);
+ }
+
+ /* Return slot optimization is always possible and in fact required to
+ return values with DECL_BY_REFERENCE. */
+ if (aggregate_value_p (resdecl, TREE_TYPE (thunk_fndecl))
+ && (!is_gimple_reg_type (TREE_TYPE (resdecl))
+ || DECL_BY_REFERENCE (resdecl)))
+ gimple_call_set_return_slot_opt (call, true);
+
+ if (restmp)
+ {
+ gimple_call_set_lhs (call, restmp);
+ gcc_assert (useless_type_conversion_p (TREE_TYPE (restmp),
+ TREE_TYPE (TREE_TYPE (alias))));
+ }
+ gsi_insert_after (&bsi, call, GSI_NEW_STMT);
+ if (!alias_is_noreturn)
+ {
+ /* Build return value. */
+ if (!DECL_BY_REFERENCE (resdecl))
+ ret = gimple_build_return (restmp);
+ else
+ ret = gimple_build_return (resdecl);
+
+ gsi_insert_after (&bsi, ret, GSI_NEW_STMT);
+ }
+ else
+ {
+ remove_edge (single_succ_edge (bb));
+ }
+
+ cfun->gimple_df->in_ssa_p = true;
+ update_max_bb_count ();
+ profile_status_for_fn (cfun)
+ = cfg_count.initialized_p () && cfg_count.ipa_p ()
+ ? PROFILE_READ : PROFILE_GUESSED;
+ /* FIXME: C++ FE should stop setting TREE_ASM_WRITTEN on thunks. */
+ // TREE_ASM_WRITTEN (thunk_fndecl) = false;
+ delete_unreachable_blocks ();
+ update_ssa (TODO_update_ssa);
+ checking_verify_flow_info ();
+ free_dominance_info (CDI_DOMINATORS);
+
+ /* Since we want to emit the thunk, we explicitly mark its name as
+ referenced. */
+ onode->thunk = false;
+ onode->lowered = true;
+ bitmap_obstack_release (NULL);
+ }
+ current_function_decl = NULL;
+ set_cfun (NULL);
+ }
+
+ thunk_info::remove (onode);
+
+ // some more of create_wrapper at the end of the next block.
+ }
+ }
+
+ {
+ tree aaval = NULL_TREE;
+ tree vaptr = NULL_TREE;
+ tree wmptr = NULL_TREE;
+ for (tree arg = DECL_ARGUMENTS (nnode->decl); arg; arg = DECL_CHAIN (arg))
+ {
+ aaval = vaptr;
+ vaptr = wmptr;
+ wmptr = arg;
+ }
+
+ if (!apply_args)
+ aaval = NULL_TREE;
+ /* The trailing args are [apply_args], [va_list_ptr], and
+ watermark. If we don't have a va_list_ptr, the penultimate
+ argument is apply_args.
+ */
+ else if (!is_stdarg)
+ aaval = vaptr;
+
+ if (!is_stdarg)
+ vaptr = NULL_TREE;
+
+ DECL_NAME (wmptr) = get_watermark_ptr ();
+ DECL_ARTIFICIAL (wmptr) = 1;
+ DECL_IGNORED_P (wmptr) = 1;
+ TREE_USED (wmptr) = 1;
+
+ if (is_stdarg)
+ {
+ DECL_NAME (vaptr) = get_va_list_ptr ();
+ DECL_ARTIFICIAL (vaptr) = 1;
+ DECL_IGNORED_P (vaptr) = 1;
+ TREE_USED (vaptr) = 1;
+ }
+
+ if (apply_args)
+ {
+ DECL_NAME (aaval) = get_apply_args ();
+ DECL_ARTIFICIAL (aaval) = 1;
+ DECL_IGNORED_P (aaval) = 1;
+ TREE_USED (aaval) = 1;
+ }
+
+ push_cfun (DECL_STRUCT_FUNCTION (nnode->decl));
+
+ {
+ edge e = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun));
+ gimple_seq seq = call_update_watermark (wmptr, nnode, e->src->count);
+ gsi_insert_seq_on_edge_immediate (e, seq);
+ }
+
+ bool any_indirect = !indirect_nparms.is_empty ();
+
+ if (any_indirect)
+ {
+ basic_block bb;
+ bool needs_commit = false;
+ FOR_EACH_BB_FN (bb, cfun)
+ {
+ for (gphi_iterator gsi = gsi_start_nonvirtual_phis (bb);
+ !gsi_end_p (gsi);
+ gsi_next_nonvirtual_phi (&gsi))
+ {
+ gphi *stmt = gsi.phi ();
+
+ walk_stmt_info wi = {};
+ wi.info = &indirect_nparms;
+ walk_gimple_op (stmt, walk_make_indirect, &wi);
+ if (wi.changed && !is_gimple_debug (gsi_stmt (gsi)))
+ if (walk_regimplify_phi (stmt))
+ needs_commit = true;
+ }
+
+ for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
+ !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple *stmt = gsi_stmt (gsi);
+
+ walk_stmt_info wi = {};
+ wi.info = &indirect_nparms;
+ walk_gimple_op (stmt, walk_make_indirect, &wi);
+ if (wi.changed)
+ {
+ if (!is_gimple_debug (stmt))
+ {
+ wi.info = &gsi;
+ walk_gimple_op (stmt, walk_regimplify_addr_expr,
+ &wi);
+ }
+ update_stmt (stmt);
+ }
+ }
+ }
+ if (needs_commit)
+ gsi_commit_edge_inserts ();
+ }
+
+ if (DECL_STRUCT_FUNCTION (nnode->decl)->calls_alloca
+ || is_stdarg || apply_args)
+ for (cgraph_edge *e = nnode->callees, *enext; e; e = enext)
+ {
+ if (!e->call_stmt)
+ continue;
+
+ gcall *call = e->call_stmt;
+ gimple_stmt_iterator gsi = gsi_for_stmt (call);
+ tree fndecl = e->callee->decl;
+
+ enext = e->next_callee;
+
+ if (gimple_alloca_call_p (call))
+ {
+ gimple_seq seq = call_update_watermark (wmptr, NULL,
+ gsi_bb (gsi)->count);
+ gsi_insert_finally_seq_after_call (gsi, seq);
+ }
+ else if (fndecl && is_stdarg
+ && fndecl_built_in_p (fndecl, BUILT_IN_VA_START))
+ {
+ /* Using a non-default stdarg ABI makes the function ineligible
+ for internal strub. */
+ gcc_checking_assert (builtin_decl_explicit (BUILT_IN_VA_START)
+ == fndecl);
+ tree bvacopy = builtin_decl_explicit (BUILT_IN_VA_COPY);
+ gimple_call_set_fndecl (call, bvacopy);
+ tree arg = vaptr;
+ /* The va_copy source must be dereferenced, unless it's an array
+ type, that would have decayed to a pointer. */
+ if (TREE_CODE (TREE_TYPE (TREE_TYPE (vaptr))) != ARRAY_TYPE)
+ {
+ arg = gimple_fold_indirect_ref (vaptr);
+ if (!arg)
+ arg = build2 (MEM_REF,
+ TREE_TYPE (TREE_TYPE (vaptr)),
+ vaptr,
+ build_int_cst (TREE_TYPE (vaptr), 0));
+ if (!is_gimple_val (arg))
+ arg = force_gimple_operand_gsi (&gsi, arg, true,
+ NULL_TREE, true, GSI_SAME_STMT);
+ }
+ gimple_call_set_arg (call, 1, arg);
+ update_stmt (call);
+ e->redirect_callee (cgraph_node::get_create (bvacopy));
+ }
+ else if (fndecl && apply_args
+ && fndecl_built_in_p (fndecl, BUILT_IN_APPLY_ARGS))
+ {
+ tree lhs = gimple_call_lhs (call);
+ gimple *assign = (lhs
+ ? gimple_build_assign (lhs, aaval)
+ : gimple_build_nop ());
+ gsi_replace (&gsi, assign, true);
+ cgraph_edge::remove (e);
+ }
+ }
+
+ { // a little more copied from create_wrapper
+
+ /* Inline summary set-up. */
+ nnode->analyze ();
+ // inline_analyze_function (nnode);
+ }
+
+ pop_cfun ();
+ }
+
+ {
+ push_cfun (DECL_STRUCT_FUNCTION (onode->decl));
+ gimple_stmt_iterator gsi
+ = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
+
+ gcall *wrcall;
+ while (!(wrcall = dyn_cast <gcall *> (gsi_stmt (gsi))))
+ gsi_next (&gsi);
+
+ tree swm = create_tmp_var (get_wmt (), ".strub.watermark");
+ TREE_ADDRESSABLE (swm) = true;
+ tree swmp = build1 (ADDR_EXPR, get_pwmt (), swm);
+
+ tree enter = get_enter ();
+ gcall *stptr = gimple_build_call (enter, 1, unshare_expr (swmp));
+ gimple_set_location (stptr, gimple_location (wrcall));
+ gsi_insert_before (&gsi, stptr, GSI_SAME_STMT);
+ onode->create_edge (cgraph_node::get_create (enter),
+ stptr, gsi_bb (gsi)->count, false);
+
+ int nargs = gimple_call_num_args (wrcall);
+
+ gimple_seq seq = NULL;
+
+ if (apply_args)
+ {
+ tree aalst = create_tmp_var (ptr_type_node, ".strub.apply_args");
+ tree bappargs = builtin_decl_explicit (BUILT_IN_APPLY_ARGS);
+ gcall *appargs = gimple_build_call (bappargs, 0);
+ gimple_call_set_lhs (appargs, aalst);
+ gimple_set_location (appargs, gimple_location (wrcall));
+ gsi_insert_before (&gsi, appargs, GSI_SAME_STMT);
+ gimple_call_set_arg (wrcall, nargs - 2 - is_stdarg, aalst);
+ onode->create_edge (cgraph_node::get_create (bappargs),
+ appargs, gsi_bb (gsi)->count, false);
+ }
+
+ if (is_stdarg)
+ {
+ tree valst = create_tmp_var (va_list_type_node, ".strub.va_list");
+ TREE_ADDRESSABLE (valst) = true;
+ tree vaptr = build1 (ADDR_EXPR,
+ build_pointer_type (va_list_type_node),
+ valst);
+ gimple_call_set_arg (wrcall, nargs - 2, unshare_expr (vaptr));
+
+ tree bvastart = builtin_decl_explicit (BUILT_IN_VA_START);
+ gcall *vastart = gimple_build_call (bvastart, 2,
+ unshare_expr (vaptr),
+ integer_zero_node);
+ gimple_set_location (vastart, gimple_location (wrcall));
+ gsi_insert_before (&gsi, vastart, GSI_SAME_STMT);
+ onode->create_edge (cgraph_node::get_create (bvastart),
+ vastart, gsi_bb (gsi)->count, false);
+
+ tree bvaend = builtin_decl_explicit (BUILT_IN_VA_END);
+ gcall *vaend = gimple_build_call (bvaend, 1, unshare_expr (vaptr));
+ gimple_set_location (vaend, gimple_location (wrcall));
+ gimple_seq_add_stmt (&seq, vaend);
+ }
+
+ gimple_call_set_arg (wrcall, nargs - 1, unshare_expr (swmp));
+ // gimple_call_set_tail (wrcall, false);
+ update_stmt (wrcall);
+
+ {
+#if !ATTR_FNSPEC_DECONST_WATERMARK
+ /* If the call will be assumed to not modify or even read the
+ watermark, make it read and modified ourselves. */
+ if ((gimple_call_flags (wrcall)
+ & (ECF_CONST | ECF_PURE | ECF_NOVOPS)))
+ {
+ vec<tree, va_gc> *inputs = NULL;
+ vec<tree, va_gc> *outputs = NULL;
+ vec_safe_push (outputs,
+ build_tree_list
+ (build_tree_list
+ (NULL_TREE, build_string (2, "=m")),
+ swm));
+ vec_safe_push (inputs,
+ build_tree_list
+ (build_tree_list
+ (NULL_TREE, build_string (1, "m")),
+ swm));
+ gasm *forcemod = gimple_build_asm_vec ("", inputs, outputs,
+ NULL, NULL);
+ gimple_seq_add_stmt (&seq, forcemod);
+
+ /* If the call will be assumed to not even read the watermark,
+ make sure it is already in memory before the call. */
+ if ((gimple_call_flags (wrcall) & ECF_CONST))
+ {
+ vec<tree, va_gc> *inputs = NULL;
+ vec_safe_push (inputs,
+ build_tree_list
+ (build_tree_list
+ (NULL_TREE, build_string (1, "m")),
+ swm));
+ gasm *force_store = gimple_build_asm_vec ("", inputs, NULL,
+ NULL, NULL);
+ gimple_set_location (force_store, gimple_location (wrcall));
+ gsi_insert_before (&gsi, force_store, GSI_SAME_STMT);
+ }
+ }
+#endif
+
+ gcall *sleave = gimple_build_call (get_leave (), 1,
+ unshare_expr (swmp));
+ gimple_seq_add_stmt (&seq, sleave);
+
+ gassign *clobber = gimple_build_assign (swm,
+ build_clobber
+ (TREE_TYPE (swm)));
+ gimple_seq_add_stmt (&seq, clobber);
+ }
+
+ gsi_insert_finally_seq_after_call (gsi, seq);
+
+ /* For nnode, we don't rebuild edges because we wish to retain
+ any redirections copied to it from earlier passes, so we add
+ call graph edges explicitly there, but for onode, we create a
+ fresh function, so we may as well just issue the calls and
+ then rebuild all cgraph edges. */
+ // cgraph_edge::rebuild_edges ();
+ onode->analyze ();
+ // inline_analyze_function (onode);
+
+ pop_cfun ();
+ }
+ }
+
+ return 0;
+}
+
+simple_ipa_opt_pass *
+make_pass_ipa_strub (gcc::context *ctxt)
+{
+ return new pass_ipa_strub (ctxt);
+}
+
+#include "gt-ipa-strub.h"
diff --git a/gcc/ipa-strub.h b/gcc/ipa-strub.h
new file mode 100644
index 0000000..f367a4a
--- /dev/null
+++ b/gcc/ipa-strub.h
@@ -0,0 +1,45 @@
+/* strub (stack scrubbing) infrastructure.
+ Copyright (C) 2021-2023 Free Software Foundation, Inc.
+ Contributed by Alexandre Oliva <oliva@adacore.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Return TRUE if CALLEE can be inlined into CALLER, as far as stack scrubbing
+ constraints are concerned. CALLEE doesn't have to be called directly by
+ CALLER, but the returned value says nothing about intervening functions. */
+extern bool strub_inlinable_to_p (cgraph_node *callee, cgraph_node *caller);
+
+/* Return FALSE if NODE is a strub context, and TRUE otherwise. */
+extern bool strub_splittable_p (cgraph_node *node);
+
+/* Locate and return the watermark_ptr parameter for FNDECL. If FNDECL is not a
+ strub context, return NULL. */
+extern tree strub_watermark_parm (tree fndecl);
+
+/* Make a function type or declaration callable. */
+extern void strub_make_callable (tree fndecl);
+
+/* Return zero iff ID is NOT an acceptable parameter for a user-supplied strub
+ attribute for a function. Otherwise, return >0 if it enables strub, <0 if it
+ does not. Return +/-1 if the attribute-modified type is compatible with the
+ type without the attribute, or +/-2 if it is not compatible. */
+extern int strub_validate_fn_attr_parm (tree id);
+
+/* Like comptypes, return 0 if t1 and t2 are not compatible, 1 if they are
+ compatible, and 2 if they are nearly compatible. Same strub mode is
+ compatible, interface-compatible strub modes are nearly compatible. */
+extern int strub_comptypes (tree t1, tree t2);
diff --git a/gcc/ira.cc b/gcc/ira.cc
index d7530f0..b5c4c0e 100644
--- a/gcc/ira.cc
+++ b/gcc/ira.cc
@@ -5970,7 +5970,7 @@ do_reload (void)
ira_destroy ();
- lra (ira_dump_file);
+ lra (ira_dump_file, internal_flag_ira_verbose);
/* ???!!! Move it before lra () when we use ira_reg_equiv in
LRA. */
vec_free (reg_equivs);
diff --git a/gcc/jit/ChangeLog b/gcc/jit/ChangeLog
index 4435195..11a0cb1 100644
--- a/gcc/jit/ChangeLog
+++ b/gcc/jit/ChangeLog
@@ -1,3 +1,22 @@
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * dummy-frontend.cc (jit_gnu_attribute_table): Add extra braces
+ to work around PR 16333 in older compilers.
+ (jit_format_attribute_table): Likewise.
+
+2023-12-02 Richard Sandiford <richard.sandiford@arm.com>
+
+ * dummy-frontend.cc (jit_format_attribute_table): Change type to
+ scoped_attribute_specs, using...
+ (jit_format_attributes): ...this as the underlying array.
+ (jit_attribute_table): Change to an array of scoped_attribute_specs
+ pointers, using...
+ (jit_gnu_attributes, jit_gnu_attribute_table): ...these new globals
+ for the original array. Include the format attributes.
+ (LANG_HOOKS_COMMON_ATTRIBUTE_TABLE): Delete.
+ (LANG_HOOKS_FORMAT_ATTRIBUTE_TABLE): Delete.
+ (LANG_HOOKS_ATTRIBUTE_TABLE): Define.
+
2023-11-09 Guillaume Gomez <guillaume1.gomez@gmail.com>
* libgccjit++.h:
diff --git a/gcc/jit/dummy-frontend.cc b/gcc/jit/dummy-frontend.cc
index a729086..1ea6ad3 100644
--- a/gcc/jit/dummy-frontend.cc
+++ b/gcc/jit/dummy-frontend.cc
@@ -87,7 +87,7 @@ static const struct attribute_spec::exclusions attr_const_pure_exclusions[] =
};
/* Table of machine-independent attributes supported in libgccjit. */
-const struct attribute_spec jit_attribute_table[] =
+static const attribute_spec jit_gnu_attributes[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
@@ -128,22 +128,36 @@ const struct attribute_spec jit_attribute_table[] =
/* For internal use only. The leading '*' both prevents its usage in
source code and signals that it may be overridden by machine tables. */
{ "*tm regparm", 0, 0, false, true, true, false,
- ignore_attribute, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
+ ignore_attribute, NULL }
+};
+
+static const scoped_attribute_specs jit_gnu_attribute_table =
+{
+ "gnu", { jit_gnu_attributes }
};
/* Give the specifications for the format attributes, used by C and all
descendants. */
-const struct attribute_spec jit_format_attribute_table[] =
+static const attribute_spec jit_format_attributes[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
{ "format", 3, 3, false, true, true, false,
handle_format_attribute, NULL },
{ "format_arg", 1, 1, false, true, true, false,
- handle_format_arg_attribute, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
+ handle_format_arg_attribute, NULL }
+};
+
+static const scoped_attribute_specs jit_format_attribute_table =
+{
+ "gnu", { jit_format_attributes }
+};
+
+static const scoped_attribute_specs *const jit_attribute_table[] =
+{
+ &jit_gnu_attribute_table,
+ &jit_format_attribute_table
};
/* Attribute handlers. */
@@ -719,10 +733,8 @@ jit_langhook_getdecls (void)
#define LANG_HOOKS_GETDECLS jit_langhook_getdecls
/* Attribute hooks. */
-#undef LANG_HOOKS_COMMON_ATTRIBUTE_TABLE
-#define LANG_HOOKS_COMMON_ATTRIBUTE_TABLE jit_attribute_table
-#undef LANG_HOOKS_FORMAT_ATTRIBUTE_TABLE
-#define LANG_HOOKS_FORMAT_ATTRIBUTE_TABLE jit_format_attribute_table
+#undef LANG_HOOKS_ATTRIBUTE_TABLE
+#define LANG_HOOKS_ATTRIBUTE_TABLE jit_attribute_table
#undef LANG_HOOKS_DEEP_UNSHARING
#define LANG_HOOKS_DEEP_UNSHARING true
diff --git a/gcc/langhooks-def.h b/gcc/langhooks-def.h
index c6d1852..042fd01 100644
--- a/gcc/langhooks-def.h
+++ b/gcc/langhooks-def.h
@@ -153,9 +153,7 @@ extern const char *lhd_get_sarif_source_language (const char *);
#define LANG_HOOKS_GET_SARIF_SOURCE_LANGUAGE lhd_get_sarif_source_language
/* Attribute hooks. */
-#define LANG_HOOKS_ATTRIBUTE_TABLE NULL
-#define LANG_HOOKS_COMMON_ATTRIBUTE_TABLE NULL
-#define LANG_HOOKS_FORMAT_ATTRIBUTE_TABLE NULL
+#define LANG_HOOKS_ATTRIBUTE_TABLE
/* Tree inlining hooks. */
#define LANG_HOOKS_TREE_INLINING_VAR_MOD_TYPE_P \
@@ -366,9 +364,7 @@ extern void lhd_end_section (void);
LANG_HOOKS_TYPES_COMPATIBLE_P, \
LANG_HOOKS_PRINT_ERROR_FUNCTION, \
LANG_HOOKS_TO_TARGET_CHARSET, \
- LANG_HOOKS_ATTRIBUTE_TABLE, \
- LANG_HOOKS_COMMON_ATTRIBUTE_TABLE, \
- LANG_HOOKS_FORMAT_ATTRIBUTE_TABLE, \
+ { LANG_HOOKS_ATTRIBUTE_TABLE }, \
LANG_HOOKS_TREE_INLINING_INITIALIZER, \
LANG_HOOKS_TREE_DUMP_INITIALIZER, \
LANG_HOOKS_DECLS, \
diff --git a/gcc/langhooks.h b/gcc/langhooks.h
index cca7528..2785a00 100644
--- a/gcc/langhooks.h
+++ b/gcc/langhooks.h
@@ -532,9 +532,7 @@ struct lang_hooks
table of attributes specific to the language, a table of
attributes common to two or more languages (to allow easy
sharing), and a table of attributes for checking formats. */
- const struct attribute_spec *attribute_table;
- const struct attribute_spec *common_attribute_table;
- const struct attribute_spec *format_attribute_table;
+ array_slice<const struct scoped_attribute_specs *const> attribute_table;
struct lang_hooks_for_tree_inlining tree_inlining;
diff --git a/gcc/lra-assigns.cc b/gcc/lra-assigns.cc
index d2ebcfd..7aa210e 100644
--- a/gcc/lra-assigns.cc
+++ b/gcc/lra-assigns.cc
@@ -1835,6 +1835,7 @@ lra_split_hard_reg_for (void)
if (spill_p)
{
bitmap_clear (&failed_reload_pseudos);
+ lra_dump_insns_if_possible ("changed func after splitting hard regs");
return true;
}
bitmap_clear (&non_reload_pseudos);
diff --git a/gcc/lra-coalesce.cc b/gcc/lra-coalesce.cc
index 04a5bbd..d8ca096 100644
--- a/gcc/lra-coalesce.cc
+++ b/gcc/lra-coalesce.cc
@@ -112,9 +112,7 @@ merge_pseudos (int regno1, int regno2)
= (lra_merge_live_ranges
(lra_reg_info[first].live_ranges,
lra_copy_live_range_list (lra_reg_info[first2].live_ranges)));
- if (partial_subreg_p (lra_reg_info[first].biggest_mode,
- lra_reg_info[first2].biggest_mode))
- lra_reg_info[first].biggest_mode = lra_reg_info[first2].biggest_mode;
+ lra_update_biggest_mode (first, lra_reg_info[first2].biggest_mode);
}
/* Change pseudos in *LOC on their coalescing group
diff --git a/gcc/lra-constraints.cc b/gcc/lra-constraints.cc
index 9b6a2af..177c765 100644
--- a/gcc/lra-constraints.cc
+++ b/gcc/lra-constraints.cc
@@ -5537,6 +5537,8 @@ lra_constraints (bool first_p)
lra_assert (df_regs_ever_live_p (hard_regno + j));
}
}
+ if (changed_p)
+ lra_dump_insns_if_possible ("changed func after local");
return changed_p;
}
@@ -7277,7 +7279,7 @@ lra_inheritance (void)
bitmap_release (&invalid_invariant_regs);
bitmap_release (&check_only_regs);
free (usage_insns);
-
+ lra_dump_insns_if_possible ("func after inheritance");
timevar_pop (TV_LRA_INHERITANCE);
}
@@ -7477,13 +7479,16 @@ remove_inheritance_pseudos (bitmap remove_pseudos)
== get_regno (lra_reg_info[prev_sregno].restore_rtx))))
&& ! bitmap_bit_p (remove_pseudos, prev_sregno))
{
+ int restore_regno = get_regno (lra_reg_info[sregno].restore_rtx);
+ if (restore_regno < 0)
+ restore_regno = prev_sregno;
lra_assert (GET_MODE (SET_SRC (prev_set))
- == GET_MODE (regno_reg_rtx[sregno]));
+ == GET_MODE (regno_reg_rtx[restore_regno]));
/* Although we have a single set, the insn can
contain more one sregno register occurrence
as a source. Change all occurrences. */
lra_substitute_pseudo_within_insn (curr_insn, sregno,
- SET_SRC (prev_set),
+ regno_reg_rtx[restore_regno],
false);
/* As we are finishing with processing the insn
here, check the destination too as it might
@@ -7745,5 +7750,7 @@ lra_undo_inheritance (void)
EXECUTE_IF_SET_IN_BITMAP (&lra_split_regs, 0, regno, bi)
lra_reg_info[regno].restore_rtx = NULL_RTX;
change_p = undo_optional_reloads () || change_p;
+ if (change_p)
+ lra_dump_insns_if_possible ("changed func after undoing inheritance");
return change_p;
}
diff --git a/gcc/lra-int.h b/gcc/lra-int.h
index d0752c2..5cdf92b 100644
--- a/gcc/lra-int.h
+++ b/gcc/lra-int.h
@@ -278,6 +278,7 @@ typedef class lra_insn_recog_data *lra_insn_recog_data_t;
/* lra.cc: */
extern FILE *lra_dump_file;
+extern int lra_verbose;
extern bool lra_hard_reg_split_p;
extern bool lra_asm_error_p;
@@ -312,6 +313,9 @@ extern void lra_emit_move (rtx, rtx);
extern void lra_update_dups (lra_insn_recog_data_t, signed char *);
extern void lra_asm_insn_error (rtx_insn *insn);
+extern void lra_dump_insns (FILE *f);
+extern void lra_dump_insns_if_possible (const char *title);
+
extern void lra_process_new_insns (rtx_insn *, rtx_insn *, rtx_insn *,
const char *);
@@ -531,4 +535,19 @@ lra_assign_reg_val (int from, int to)
lra_reg_info[to].offset = lra_reg_info[from].offset;
}
+/* Update REGNO's biggest recorded mode so that it includes a reference
+ in mode MODE. */
+inline void
+lra_update_biggest_mode (int regno, machine_mode mode)
+{
+ if (!ordered_p (GET_MODE_SIZE (lra_reg_info[regno].biggest_mode),
+ GET_MODE_SIZE (mode)))
+ {
+ gcc_checking_assert (HARD_REGISTER_NUM_P (regno));
+ lra_reg_info[regno].biggest_mode = reg_raw_mode[regno];
+ }
+ else if (partial_subreg_p (lra_reg_info[regno].biggest_mode, mode))
+ lra_reg_info[regno].biggest_mode = mode;
+}
+
#endif /* GCC_LRA_INT_H */
diff --git a/gcc/lra-lives.cc b/gcc/lra-lives.cc
index f60e564..0b20423 100644
--- a/gcc/lra-lives.cc
+++ b/gcc/lra-lives.cc
@@ -770,9 +770,7 @@ process_bb_lives (basic_block bb, int &curr_point, bool dead_insn_p)
{
int regno = reg->regno;
- if (partial_subreg_p (lra_reg_info[regno].biggest_mode,
- reg->biggest_mode))
- lra_reg_info[regno].biggest_mode = reg->biggest_mode;
+ lra_update_biggest_mode (regno, reg->biggest_mode);
if (HARD_REGISTER_NUM_P (regno))
lra_hard_reg_usage[regno] += freq;
}
diff --git a/gcc/lra-remat.cc b/gcc/lra-remat.cc
index 681dcf3..db76e95 100644
--- a/gcc/lra-remat.cc
+++ b/gcc/lra-remat.cc
@@ -1331,6 +1331,8 @@ lra_remat (void)
calculate_global_remat_bb_data ();
dump_candidates_and_remat_bb_data ();
result = do_remat ();
+ if (result)
+ lra_dump_insns_if_possible ("changed func after rematerialization");
all_cands.release ();
bitmap_clear (&temp_bitmap);
bitmap_clear (&subreg_regs);
diff --git a/gcc/lra.cc b/gcc/lra.cc
index bcc00ff..29e2a35 100644
--- a/gcc/lra.cc
+++ b/gcc/lra.cc
@@ -581,9 +581,8 @@ new_insn_reg (rtx_insn *insn, int regno, enum op_type type,
lra_insn_reg *ir = lra_insn_reg_pool.allocate ();
ir->type = type;
ir->biggest_mode = mode;
- if (NONDEBUG_INSN_P (insn)
- && partial_subreg_p (lra_reg_info[regno].biggest_mode, mode))
- lra_reg_info[regno].biggest_mode = mode;
+ if (NONDEBUG_INSN_P (insn))
+ lra_update_biggest_mode (regno, mode);
ir->subreg_p = subreg_p;
ir->early_clobber_alts = early_clobber_alts;
ir->regno = regno;
@@ -1879,6 +1878,24 @@ setup_sp_offset (rtx_insn *from, rtx_insn *last)
return offset;
}
+/* Dump all func insns in a slim form. */
+void
+lra_dump_insns (FILE *f)
+{
+ dump_rtl_slim (f, get_insns (), NULL, -1, 0);
+}
+
+/* Dump all func insns in a slim form with TITLE when the dump file is open and
+ lra_verbose >=7. */
+void
+lra_dump_insns_if_possible (const char *title)
+{
+ if (lra_dump_file == NULL || lra_verbose < 7)
+ return;
+ fprintf (lra_dump_file, "%s:", title);
+ lra_dump_insns (lra_dump_file);
+}
+
/* Emit insns BEFORE before INSN and insns AFTER after INSN. Put the
insns onto the stack. Print about emitting the insns with
TITLE. */
@@ -2297,6 +2314,9 @@ bitmap_head lra_subreg_reload_pseudos;
/* File used for output of LRA debug information. */
FILE *lra_dump_file;
+/* How verbose should be the debug information. */
+int lra_verbose;
+
/* True if we split hard reg after the last constraint sub-pass. */
bool lra_hard_reg_split_p;
@@ -2332,14 +2352,15 @@ setup_reg_spill_flag (void)
bool lra_simple_p;
/* Major LRA entry function. F is a file should be used to dump LRA
- debug info. */
+ debug info with given verbosity. */
void
-lra (FILE *f)
+lra (FILE *f, int verbose)
{
int i;
bool live_p, inserted_p;
lra_dump_file = f;
+ lra_verbose = verbose;
lra_asm_error_p = false;
lra_pmode_pseudo = gen_reg_rtx (Pmode);
diff --git a/gcc/lra.h b/gcc/lra.h
index 85dbf92..4c4c4cc 100644
--- a/gcc/lra.h
+++ b/gcc/lra.h
@@ -35,7 +35,7 @@ lra_get_allocno_class (int regno)
}
extern rtx lra_eliminate_regs (rtx, machine_mode, rtx);
-extern void lra (FILE *);
+extern void lra (FILE *, int);
extern void lra_init_once (void);
extern void lra_finish_once (void);
diff --git a/gcc/lto/ChangeLog b/gcc/lto/ChangeLog
index 4923319..71727d0 100644
--- a/gcc/lto/ChangeLog
+++ b/gcc/lto/ChangeLog
@@ -1,3 +1,22 @@
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * lto-lang.cc (lto_gnu_attribute_table): Add extra braces to work
+ around PR 16333 in older compilers.
+ (lto_format_attribute_table): Likewise.
+
+2023-12-02 Richard Sandiford <richard.sandiford@arm.com>
+
+ * lto-lang.cc (lto_format_attribute_table): Change type to
+ scoped_attribute_specs, using...
+ (lto_format_attributes): ...this as the underlying array.
+ (lto_attribute_table): Change to an array of scoped_attribute_specs
+ pointers, using...
+ (lto_gnu_attributes, lto_gnu_attribute_table): ...these new globals
+ for the original array. Include the format attributes.
+ (LANG_HOOKS_COMMON_ATTRIBUTE_TABLE): Delete.
+ (LANG_HOOKS_FORMAT_ATTRIBUTE_TABLE): Delete.
+ (LANG_HOOKS_ATTRIBUTE_TABLE): Define.
+
2023-11-07 Joseph Myers <joseph@codesourcery.com>
* lto-lang.cc (flag_isoc2x): Rename to flag_isoc23.
diff --git a/gcc/lto/lto-lang.cc b/gcc/lto/lto-lang.cc
index 00bd3de..62aaa9b 100644
--- a/gcc/lto/lto-lang.cc
+++ b/gcc/lto/lto-lang.cc
@@ -94,7 +94,7 @@ static const struct attribute_spec::exclusions attr_const_pure_exclusions[] =
};
/* Table of machine-independent attributes supported in GIMPLE. */
-const struct attribute_spec lto_attribute_table[] =
+static const attribute_spec lto_gnu_attributes[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
@@ -135,14 +135,18 @@ const struct attribute_spec lto_attribute_table[] =
/* For internal use only. The leading '*' both prevents its usage in
source code and signals that it may be overridden by machine tables. */
{ "*tm regparm", 0, 0, false, true, true, false,
- ignore_attribute, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
+ ignore_attribute, NULL }
+};
+
+static const scoped_attribute_specs lto_gnu_attribute_table =
+{
+ "gnu", { lto_gnu_attributes }
};
/* Give the specifications for the format attributes, used by C and all
descendants. */
-const struct attribute_spec lto_format_attribute_table[] =
+static const attribute_spec lto_format_attributes[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req,
affects_type_identity, handler, exclude } */
@@ -150,7 +154,17 @@ const struct attribute_spec lto_format_attribute_table[] =
handle_format_attribute, NULL },
{ "format_arg", 1, 1, false, true, true, false,
handle_format_arg_attribute, NULL },
- { NULL, 0, 0, false, false, false, false, NULL, NULL }
+};
+
+static const scoped_attribute_specs lto_format_attribute_table =
+{
+ "gnu", { lto_format_attributes }
+};
+
+static const scoped_attribute_specs *const lto_attribute_table[] =
+{
+ &lto_gnu_attribute_table,
+ &lto_format_attribute_table
};
enum built_in_attribute
@@ -1463,10 +1477,8 @@ static void lto_init_ts (void)
#define LANG_HOOKS_EH_PERSONALITY lto_eh_personality
/* Attribute hooks. */
-#undef LANG_HOOKS_COMMON_ATTRIBUTE_TABLE
-#define LANG_HOOKS_COMMON_ATTRIBUTE_TABLE lto_attribute_table
-#undef LANG_HOOKS_FORMAT_ATTRIBUTE_TABLE
-#define LANG_HOOKS_FORMAT_ATTRIBUTE_TABLE lto_format_attribute_table
+#undef LANG_HOOKS_ATTRIBUTE_TABLE
+#define LANG_HOOKS_ATTRIBUTE_TABLE lto_attribute_table
#undef LANG_HOOKS_BEGIN_SECTION
#define LANG_HOOKS_BEGIN_SECTION lto_obj_begin_section
diff --git a/gcc/m2/ChangeLog b/gcc/m2/ChangeLog
index 6c0b328..a1b3392 100644
--- a/gcc/m2/ChangeLog
+++ b/gcc/m2/ChangeLog
@@ -1,3 +1,23 @@
+2023-12-05 Gaius Mulley <gaiusmod2@gmail.com>
+
+ PR modula2/112865
+ * gm2-compiler/M2Quads.mod (BuildReFunction): Use
+ GetDType to retrieve the type of the operand when
+ converting the complex type to its scalar equivalent.
+ (BuildImFunction): Use GetDType to retrieve the type of the
+ operand when converting the complex type to its scalar
+ equivalent.
+
+2023-12-05 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * mc/mc.flex [__GNUC__]: Define alloca as __builtin_alloca.
+ (handleDate): Use strchr instead of index.
+
+2023-12-04 Gaius Mulley <gaiusmod2@gmail.com>
+
+ PR modula2/112825
+ * tools-src/makeSystem: Change all occurrences of -c to -S.
+
2023-11-01 Gaius Mulley <gaiusmod2@gmail.com>
PR modula2/102989
diff --git a/gcc/m2/gm2-compiler/M2Quads.mod b/gcc/m2/gm2-compiler/M2Quads.mod
index 02a7db4..83c9b99 100644
--- a/gcc/m2/gm2-compiler/M2Quads.mod
+++ b/gcc/m2/gm2-compiler/M2Quads.mod
@@ -9845,7 +9845,7 @@ BEGIN
IF IsVar(Var) OR IsConst(Var)
THEN
ReturnVar := MakeTemporary (combinedtok, AreConstant (IsConst (Var))) ;
- PutVar (ReturnVar, ComplexToScalar (GetSType (Var))) ;
+ PutVar (ReturnVar, ComplexToScalar (GetDType (Var))) ;
GenQuadO (combinedtok, StandardFunctionOp, ReturnVar, Re, Var, FALSE) ;
PopN (NoOfParam+1) ; (* destroy arguments to this function *)
PushTFtok (ReturnVar, GetSType (ReturnVar), combinedtok)
@@ -9913,7 +9913,7 @@ BEGIN
IF IsVar(Var) OR IsConst(Var)
THEN
ReturnVar := MakeTemporary (combinedtok, AreConstant (IsConst (Var))) ;
- PutVar (ReturnVar, ComplexToScalar (GetSType (Var))) ;
+ PutVar (ReturnVar, ComplexToScalar (GetDType (Var))) ;
GenQuadO (combinedtok, StandardFunctionOp, ReturnVar, Im, Var, FALSE) ;
PopN (NoOfParam+1) ; (* destroy arguments to this function *)
PushTFtok (ReturnVar, GetSType (ReturnVar), combinedtok)
diff --git a/gcc/m2/lang.opt b/gcc/m2/lang.opt
index 24f3c65..a60c03e 100644
--- a/gcc/m2/lang.opt
+++ b/gcc/m2/lang.opt
@@ -405,6 +405,10 @@ iquote
Modula-2
; Documented in c.opt
+isysroot
+Modula-2
+; Documented in c.opt
+
isystem
Modula-2
; Documented in c.opt
diff --git a/gcc/m2/mc/mc.flex b/gcc/m2/mc/mc.flex
index 4b37755..8ba4e24 100644
--- a/gcc/m2/mc/mc.flex
+++ b/gcc/m2/mc/mc.flex
@@ -28,6 +28,10 @@ along with GNU Modula-2; see the file COPYING3. If not see
#include <time.h>
#include <ctype.h>
+#ifdef __GNUC__
+#define alloca __builtin_alloca
+#endif
+
#if !defined(TRUE)
# define TRUE (1==1)
#endif
@@ -329,7 +333,7 @@ handleDate (void)
time_t clock = time ((long *)0);
char *sdate = ctime (&clock);
char *s = (char *)alloca (strlen (sdate)+2+1);
- char *p = index(sdate, '\n');
+ char *p = strchr(sdate, '\n');
if (p != NULL) {
*p = (char) 0;
diff --git a/gcc/m2/tools-src/makeSystem b/gcc/m2/tools-src/makeSystem
index b1156b5..5f162da 100644
--- a/gcc/m2/tools-src/makeSystem
+++ b/gcc/m2/tools-src/makeSystem
@@ -102,8 +102,8 @@ MINIMAL="-fno-scaffold-main -fno-scaffold-dynamic -fno-scaffold-static -fno-m2-p
rm -f ${OUTPUTFILE}
if ${COMPILER} ${DIALECT} ${LIBRARY} ${MINIMAL} \
- -c -fdump-system-exports ${SYSTEMMOD} -o /dev/null 2>&1 > /dev/null ; then
- types=`${COMPILER} ${DIALECT} ${LIBRARY} ${MINIMAL} -fno-m2-plugin -c -fdump-system-exports ${SYSTEMMOD} -o /dev/null | cut -f5 -d' '`
+ -S -fdump-system-exports ${SYSTEMMOD} -o /dev/null 2>&1 > /dev/null ; then
+ types=`${COMPILER} ${DIALECT} ${LIBRARY} ${MINIMAL} -fno-m2-plugin -S -fdump-system-exports ${SYSTEMMOD} -o /dev/null | cut -f5 -d' '`
touch ${OUTPUTFILE}
displayStart
displayExportedTypes
@@ -112,6 +112,6 @@ if ${COMPILER} ${DIALECT} ${LIBRARY} ${MINIMAL} \
displayEnd
else
${COMPILER} ${DIALECT} ${LIBRARY} ${MINIMAL} \
- -c -fdump-system-exports ${SYSTEMMOD} -o /dev/null
+ -S -fdump-system-exports ${SYSTEMMOD} -o /dev/null
exit $?
fi
diff --git a/gcc/match.pd b/gcc/match.pd
index 95225e4..4d554ba 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -1033,12 +1033,16 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
/* (nop_outer_cast)-(inner_cast)var -> -(outer_cast)(var)
if var is smaller in precision.
This is always safe for both doing the negative in signed or unsigned
- as the value for undefined will not show up. */
+ as the value for undefined will not show up.
+ Note the outer cast cannot be a boolean type as the only valid values
+ are 0,-1/1 (depending on the signedness of the boolean) and the negative
+ is there to get the correct value. */
(simplify
(convert (negate:s@1 (convert:s @0)))
(if (INTEGRAL_TYPE_P (type)
&& tree_nop_conversion_p (type, TREE_TYPE (@1))
- && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (@0)))
+ && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (@0))
+ && TREE_CODE (type) != BOOLEAN_TYPE)
(negate (convert @0))))
(for op (negate abs)
@@ -2243,6 +2247,9 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
&& (TYPE_UNSIGNED (TREE_TYPE (@1))
|| TYPE_PRECISION (TREE_TYPE (@1)) > 1)
+ && INTEGRAL_TYPE_P (type)
+ && (TYPE_UNSIGNED (type)
+ || TYPE_PRECISION (type) > 1)
&& wi::leu_p (tree_nonzero_bits (@1), 1))))
/* Transform { 0 or 1 } * { 0 or 1 } into { 0 or 1 } & { 0 or 1 }. */
@@ -8732,8 +8739,13 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(simplify
(plus (POPCOUNT:s @0) (POPCOUNT:s @1))
(if (INTEGRAL_TYPE_P (type)
- && wi::bit_and (tree_nonzero_bits (@0), tree_nonzero_bits (@1)) == 0)
- (POPCOUNT (bit_ior @0 @1))))
+ && (wi::bit_and (widest_int::from (tree_nonzero_bits (@0), UNSIGNED),
+ widest_int::from (tree_nonzero_bits (@1), UNSIGNED))
+ == 0))
+ (with { tree utype = TREE_TYPE (@0);
+ if (TYPE_PRECISION (utype) < TYPE_PRECISION (TREE_TYPE (@1)))
+ utype = TREE_TYPE (@1); }
+ (POPCOUNT (bit_ior (convert:utype @0) (convert:utype @1))))))
/* popcount(X) == 0 is X == 0, and related (in)equalities. */
(for popcount (POPCOUNT)
@@ -8877,7 +8889,14 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
/* parity(X)^parity(Y) is parity(X^Y). */
(simplify
(bit_xor (PARITY:s @0) (PARITY:s @1))
- (PARITY (bit_xor @0 @1)))
+ (if (types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
+ (PARITY (bit_xor @0 @1))
+ (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
+ && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
+ (with { tree utype = TREE_TYPE (@0);
+ if (TYPE_PRECISION (utype) < TYPE_PRECISION (TREE_TYPE (@1)))
+ utype = TREE_TYPE (@1); }
+ (PARITY (bit_xor (convert:utype @0) (convert:utype @1)))))))
#if GIMPLE
/* parity(zext(X)) == parity(X). */
diff --git a/gcc/objc/ChangeLog b/gcc/objc/ChangeLog
index fe3bd67..1670426 100644
--- a/gcc/objc/ChangeLog
+++ b/gcc/objc/ChangeLog
@@ -1,3 +1,13 @@
+2023-11-27 Alex Coplan <alex.coplan@arm.com>
+ Iain Sandoe <iain@sandoe.co.uk>
+
+ PR c++/60512
+ * objc-act.cc (struct objc_feature_info): New.
+ (objc_nonfragile_abi_p): New.
+ (objc_common_register_features): New.
+ * objc-act.h (objc_common_register_features): New.
+ * objc-lang.cc (c_family_register_lang_features): New.
+
2023-11-04 Jakub Jelinek <jakub@redhat.com>
* objc-act.h (objc_common_tree_size): Remove.
diff --git a/gcc/objcp/ChangeLog b/gcc/objcp/ChangeLog
index 019292d..3617b59 100644
--- a/gcc/objcp/ChangeLog
+++ b/gcc/objcp/ChangeLog
@@ -1,3 +1,9 @@
+2023-11-27 Alex Coplan <alex.coplan@arm.com>
+ Iain Sandoe <iain@sandoe.co.uk>
+
+ PR c++/60512
+ * objcp-lang.cc (c_family_register_lang_features): New.
+
2023-10-22 Patrick Palka <ppalka@redhat.com>
PR objc++/111920
diff --git a/gcc/passes.cc b/gcc/passes.cc
index 6f894a4..087aed5 100644
--- a/gcc/passes.cc
+++ b/gcc/passes.cc
@@ -2514,6 +2514,11 @@ should_skip_pass_p (opt_pass *pass)
if (strstr (pass->name, "build_cgraph_edges") != NULL)
return false;
+ /* We need to run ISEL as that lowers VEC_COND_EXPR but doesn't provide
+ a property. */
+ if (strstr (pass->name, "isel") != NULL)
+ return false;
+
/* Don't skip df init; later RTL passes need it. */
if (strstr (pass->name, "dfinit") != NULL
|| strstr (pass->name, "dfinish") != NULL)
diff --git a/gcc/passes.def b/gcc/passes.def
index 1e1950b..d3fccdf 100644
--- a/gcc/passes.def
+++ b/gcc/passes.def
@@ -52,6 +52,7 @@ along with GCC; see the file COPYING3. If not see
INSERT_PASSES_AFTER (all_small_ipa_passes)
NEXT_PASS (pass_ipa_free_lang_data);
NEXT_PASS (pass_ipa_function_and_variable_visibility);
+ NEXT_PASS (pass_ipa_strub_mode);
NEXT_PASS (pass_build_ssa_passes);
PUSH_INSERT_PASSES_WITHIN (pass_build_ssa_passes)
NEXT_PASS (pass_fixup_cfg);
@@ -115,6 +116,7 @@ along with GCC; see the file COPYING3. If not see
POP_INSERT_PASSES ()
NEXT_PASS (pass_ipa_remove_symbols);
+ NEXT_PASS (pass_ipa_strub);
NEXT_PASS (pass_ipa_oacc);
PUSH_INSERT_PASSES_WITHIN (pass_ipa_oacc)
NEXT_PASS (pass_ipa_pta);
@@ -533,6 +535,9 @@ along with GCC; see the file COPYING3. If not see
NEXT_PASS (pass_stack_regs_run);
POP_INSERT_PASSES ()
POP_INSERT_PASSES ()
+ NEXT_PASS (pass_late_thread_prologue_and_epilogue);
+ /* No target-independent code motion is allowed beyond this point,
+ excepting the legacy delayed-branch pass. */
NEXT_PASS (pass_late_compilation);
PUSH_INSERT_PASSES_WITHIN (pass_late_compilation)
NEXT_PASS (pass_zero_call_used_regs);
diff --git a/gcc/plugin.h b/gcc/plugin.h
index ee0a53e..f306adf 100644
--- a/gcc/plugin.h
+++ b/gcc/plugin.h
@@ -201,8 +201,7 @@ invoke_plugin_callbacks (int event ATTRIBUTE_UNUSED,
extern void register_attribute (const struct attribute_spec *attr);
/* The default argument for the third parameter is given in attribs.h. */
-extern struct scoped_attributes* register_scoped_attributes (const struct attribute_spec *,
- const char *,
+extern struct scoped_attributes* register_scoped_attributes (const struct scoped_attribute_spec &,
bool);
#endif /* PLUGIN_H */
diff --git a/gcc/range-op-mixed.h b/gcc/range-op-mixed.h
index 45e11df..7e3ee17 100644
--- a/gcc/range-op-mixed.h
+++ b/gcc/range-op-mixed.h
@@ -138,6 +138,9 @@ public:
const frange &) const final override;
void update_bitmask (irange &r, const irange &lh,
const irange &rh) const final override;
+ // Check op1 and op2 for compatibility.
+ bool operand_check_p (tree, tree t1, tree t2) const final override
+ { return range_compatible_p (t1, t2); }
};
class operator_not_equal : public range_operator
@@ -174,6 +177,9 @@ public:
const frange &) const final override;
void update_bitmask (irange &r, const irange &lh,
const irange &rh) const final override;
+ // Check op1 and op2 for compatibility.
+ bool operand_check_p (tree, tree t1, tree t2) const final override
+ { return range_compatible_p (t1, t2); }
};
class operator_lt : public range_operator
@@ -207,6 +213,9 @@ public:
const frange &) const final override;
void update_bitmask (irange &r, const irange &lh,
const irange &rh) const final override;
+ // Check op1 and op2 for compatibility.
+ bool operand_check_p (tree, tree t1, tree t2) const final override
+ { return range_compatible_p (t1, t2); }
};
class operator_le : public range_operator
@@ -243,6 +252,9 @@ public:
const frange &) const final override;
void update_bitmask (irange &r, const irange &lh,
const irange &rh) const final override;
+ // Check op1 and op2 for compatibility.
+ bool operand_check_p (tree, tree t1, tree t2) const final override
+ { return range_compatible_p (t1, t2); }
};
class operator_gt : public range_operator
@@ -278,6 +290,9 @@ public:
const frange &) const final override;
void update_bitmask (irange &r, const irange &lh,
const irange &rh) const final override;
+ // Check op1 and op2 for compatibility.
+ bool operand_check_p (tree, tree t1, tree t2) const final override
+ { return range_compatible_p (t1, t2); }
};
class operator_ge : public range_operator
@@ -314,6 +329,9 @@ public:
const frange &) const final override;
void update_bitmask (irange &r, const irange &lh,
const irange &rh) const final override;
+ // Check op1 and op2 for compatibility.
+ bool operand_check_p (tree, tree t1, tree t2) const final override
+ { return range_compatible_p (t1, t2); }
};
class operator_identity : public range_operator
@@ -409,7 +427,9 @@ public:
virtual bool overflow_free_p (const irange &lh, const irange &rh,
relation_trio = TRIO_VARYING) const;
-
+ // Check compatibility of all operands.
+ bool operand_check_p (tree t1, tree t2, tree t3) const final override
+ { return range_compatible_p (t1, t2) && range_compatible_p (t1, t3); }
private:
void wi_fold (irange &r, tree type, const wide_int &lh_lb,
const wide_int &lh_ub, const wide_int &rh_lb,
@@ -436,6 +456,9 @@ class operator_abs : public range_operator
relation_trio rel = TRIO_VARYING) const final override;
void update_bitmask (irange &r, const irange &lh,
const irange &rh) const final override;
+ // Check compatibility of LHS and op1.
+ bool operand_check_p (tree t1, tree t2, tree) const final override
+ { return range_compatible_p (t1, t2); }
private:
void wi_fold (irange &r, tree type, const wide_int &lh_lb,
const wide_int &lh_ub, const wide_int &rh_lb,
@@ -477,7 +500,9 @@ public:
virtual bool overflow_free_p (const irange &lh, const irange &rh,
relation_trio = TRIO_VARYING) const;
-
+ // Check compatibility of all operands.
+ bool operand_check_p (tree t1, tree t2, tree t3) const final override
+ { return range_compatible_p (t1, t2) && range_compatible_p (t1, t3); }
private:
void wi_fold (irange &r, tree type, const wide_int &lh_lb,
const wide_int &lh_ub, const wide_int &rh_lb,
@@ -506,6 +531,9 @@ class operator_negate : public range_operator
bool op1_range (frange &r, tree type,
const frange &lhs, const frange &op2,
relation_trio rel = TRIO_VARYING) const final override;
+ // Check compatibility of LHS and op1.
+ bool operand_check_p (tree t1, tree t2, tree) const final override
+ { return range_compatible_p (t1, t2); }
};
@@ -557,7 +585,9 @@ public:
relation_kind kind) const final override;
virtual bool overflow_free_p (const irange &lh, const irange &rh,
relation_trio = TRIO_VARYING) const;
-
+ // Check compatibility of all operands.
+ bool operand_check_p (tree t1, tree t2, tree t3) const final override
+ { return range_compatible_p (t1, t2) && range_compatible_p (t1, t3); }
};
class operator_addr_expr : public range_operator
@@ -586,6 +616,9 @@ public:
relation_trio rel = TRIO_VARYING) const final override;
void update_bitmask (irange &r, const irange &lh,
const irange &rh) const final override;
+ // Check compatibility of all operands.
+ bool operand_check_p (tree t1, tree t2, tree t3) const final override
+ { return range_compatible_p (t1, t2) && range_compatible_p (t1, t3); }
};
class operator_bitwise_xor : public range_operator
@@ -606,6 +639,9 @@ public:
relation_kind rel) const final override;
void update_bitmask (irange &r, const irange &lh,
const irange &rh) const final override;
+ // Check compatibility of all operands.
+ bool operand_check_p (tree t1, tree t2, tree t3) const final override
+ { return range_compatible_p (t1, t2) && range_compatible_p (t1, t3); }
private:
void wi_fold (irange &r, tree type, const wide_int &lh_lb,
const wide_int &lh_ub, const wide_int &rh_lb,
@@ -629,6 +665,9 @@ public:
relation_kind) const override;
void update_bitmask (irange &r, const irange &lh,
const irange &rh) const override;
+ // Check compatibility of all operands.
+ bool operand_check_p (tree t1, tree t2, tree t3) const final override
+ { return range_compatible_p (t1, t2) && range_compatible_p (t1, t3); }
protected:
void wi_fold (irange &r, tree type, const wide_int &lh_lb,
const wide_int &lh_ub, const wide_int &rh_lb,
@@ -651,6 +690,9 @@ public:
relation_trio rel = TRIO_VARYING) const override;
void update_bitmask (irange &r, const irange &lh,
const irange &rh) const override;
+ // Check compatibility of all operands.
+ bool operand_check_p (tree t1, tree t2, tree t3) const final override
+ { return range_compatible_p (t1, t2) && range_compatible_p (t1, t3); }
protected:
void wi_fold (irange &r, tree type, const wide_int &lh_lb,
const wide_int &lh_ub, const wide_int &rh_lb,
@@ -662,6 +704,9 @@ class operator_min : public range_operator
public:
void update_bitmask (irange &r, const irange &lh,
const irange &rh) const override;
+ // Check compatibility of all operands.
+ bool operand_check_p (tree t1, tree t2, tree t3) const final override
+ { return range_compatible_p (t1, t2) && range_compatible_p (t1, t3); }
protected:
void wi_fold (irange &r, tree type, const wide_int &lh_lb,
const wide_int &lh_ub, const wide_int &rh_lb,
@@ -673,6 +718,9 @@ class operator_max : public range_operator
public:
void update_bitmask (irange &r, const irange &lh,
const irange &rh) const override;
+ // Check compatibility of all operands.
+ bool operand_check_p (tree t1, tree t2, tree t3) const final override
+ { return range_compatible_p (t1, t2) && range_compatible_p (t1, t3); }
protected:
void wi_fold (irange &r, tree type, const wide_int &lh_lb,
const wide_int &lh_ub, const wide_int &rh_lb,
diff --git a/gcc/range-op.cc b/gcc/range-op.cc
index 6137f2a..5dbc4bb 100644
--- a/gcc/range-op.cc
+++ b/gcc/range-op.cc
@@ -201,6 +201,10 @@ range_op_handler::fold_range (vrange &r, tree type,
relation_trio rel) const
{
gcc_checking_assert (m_operator);
+#if CHECKING_P
+ if (!lh.undefined_p () && !rh.undefined_p ())
+ gcc_assert (m_operator->operand_check_p (type, lh.type (), rh.type ()));
+#endif
switch (dispatch_kind (r, lh, rh))
{
case RO_III:
@@ -237,9 +241,12 @@ range_op_handler::op1_range (vrange &r, tree type,
relation_trio rel) const
{
gcc_checking_assert (m_operator);
-
if (lhs.undefined_p ())
return false;
+#if CHECKING_P
+ if (!op2.undefined_p ())
+ gcc_assert (m_operator->operand_check_p (lhs.type (), type, op2.type ()));
+#endif
switch (dispatch_kind (r, lhs, op2))
{
case RO_III:
@@ -270,7 +277,10 @@ range_op_handler::op2_range (vrange &r, tree type,
gcc_checking_assert (m_operator);
if (lhs.undefined_p ())
return false;
-
+#if CHECKING_P
+ if (!op1.undefined_p ())
+ gcc_assert (m_operator->operand_check_p (lhs.type (), op1.type (), type));
+#endif
switch (dispatch_kind (r, lhs, op1))
{
case RO_III:
@@ -394,6 +404,13 @@ range_op_handler::overflow_free_p (const vrange &lh,
}
}
+bool
+range_op_handler::operand_check_p (tree t1, tree t2, tree t3) const
+{
+ gcc_checking_assert (m_operator);
+ return m_operator->operand_check_p (t1, t2, t3);
+}
+
// Update the known bitmasks in R when applying the operation CODE to
// LH and RH.
@@ -737,6 +754,14 @@ range_operator::update_bitmask (irange &, const irange &,
{
}
+// Check that operand types are OK. Default to always OK.
+
+bool
+range_operator::operand_check_p (tree, tree, tree) const
+{
+ return true;
+}
+
// Create and return a range from a pair of wide-ints that are known
// to have overflowed (or underflowed).
@@ -2466,6 +2491,9 @@ public:
void update_bitmask (irange &r, const irange &lh,
const irange &rh) const final override
{ update_known_bitmask (r, LSHIFT_EXPR, lh, rh); }
+ // Check compatibility of LHS and op1.
+ bool operand_check_p (tree t1, tree t2, tree) const final override
+ { return range_compatible_p (t1, t2); }
} op_lshift;
class operator_rshift : public cross_product_operator
@@ -2495,6 +2523,9 @@ public:
void update_bitmask (irange &r, const irange &lh,
const irange &rh) const final override
{ update_known_bitmask (r, RSHIFT_EXPR, lh, rh); }
+ // Check compatibility of LHS and op1.
+ bool operand_check_p (tree t1, tree t2, tree) const final override
+ { return range_compatible_p (t1, t2); }
} op_rshift;
@@ -3070,9 +3101,11 @@ public:
const irange &lhs,
const irange &op1,
relation_trio rel = TRIO_VARYING) const;
+ // Check compatibility of all operands.
+ bool operand_check_p (tree t1, tree t2, tree t3) const final override
+ { return range_compatible_p (t1, t2) && range_compatible_p (t1, t3); }
} op_logical_and;
-
bool
operator_logical_and::fold_range (irange &r, tree type,
const irange &lh,
@@ -3082,6 +3115,11 @@ operator_logical_and::fold_range (irange &r, tree type,
if (empty_range_varying (r, type, lh, rh))
return true;
+ // Precision of LHS and both operands must match.
+ if (TYPE_PRECISION (lh.type ()) != TYPE_PRECISION (type)
+ || TYPE_PRECISION (type) != TYPE_PRECISION (rh.type ()))
+ return false;
+
// 0 && anything is 0.
if ((wi::eq_p (lh.lower_bound (), 0) && wi::eq_p (lh.upper_bound (), 0))
|| (wi::eq_p (lh.lower_bound (), 0) && wi::eq_p (rh.upper_bound (), 0)))
@@ -3567,6 +3605,9 @@ public:
const irange &lhs,
const irange &op1,
relation_trio rel = TRIO_VARYING) const;
+ // Check compatibility of all operands.
+ bool operand_check_p (tree t1, tree t2, tree t3) const final override
+ { return range_compatible_p (t1, t2) && range_compatible_p (t1, t3); }
} op_logical_or;
bool
@@ -3993,6 +4034,9 @@ public:
const irange &lhs,
const irange &op2,
relation_trio rel = TRIO_VARYING) const;
+ // Check compatibility of LHS and op1.
+ bool operand_check_p (tree t1, tree t2, tree) const final override
+ { return range_compatible_p (t1, t2); }
} op_logical_not;
// Folding a logical NOT, oddly enough, involves doing nothing on the
@@ -4036,7 +4080,6 @@ operator_logical_not::op1_range (irange &r,
return fold_range (r, type, lhs, op2);
}
-
bool
operator_bitwise_not::fold_range (irange &r, tree type,
const irange &lh,
diff --git a/gcc/range-op.h b/gcc/range-op.h
index 282ce38..ab8f8a3 100644
--- a/gcc/range-op.h
+++ b/gcc/range-op.h
@@ -157,6 +157,10 @@ public:
virtual bool overflow_free_p (const irange &lh, const irange &rh,
relation_trio = TRIO_VARYING) const;
+
+  // Compatibility check for operands.
+ virtual bool operand_check_p (tree, tree, tree) const;
+
protected:
// Perform an integral operation between 2 sub-ranges and return it.
virtual void wi_fold (irange &r, tree type,
@@ -226,6 +230,7 @@ public:
const vrange &op2) const;
bool overflow_free_p (const vrange &lh, const vrange &rh,
relation_trio = TRIO_VARYING) const;
+ bool operand_check_p (tree, tree, tree) const;
protected:
unsigned dispatch_kind (const vrange &lhs, const vrange &op1,
const vrange& op2) const;
diff --git a/gcc/recog.cc b/gcc/recog.cc
index eaab79c..ed084fa 100644
--- a/gcc/recog.cc
+++ b/gcc/recog.cc
@@ -1990,13 +1990,17 @@ asm_noperands (const_rtx body)
{
/* Multiple output operands, or 1 output plus some clobbers:
body is
- [(set OUTPUT (asm_operands ...))... (clobber (reg ...))...]. */
- /* Count backwards through CLOBBERs to determine number of SETs. */
+ [(set OUTPUT (asm_operands ...))...
+ (use (reg ...))...
+ (clobber (reg ...))...]. */
+ /* Count backwards through USEs and CLOBBERs to determine
+ number of SETs. */
for (i = XVECLEN (body, 0); i > 0; i--)
{
if (GET_CODE (XVECEXP (body, 0, i - 1)) == SET)
break;
- if (GET_CODE (XVECEXP (body, 0, i - 1)) != CLOBBER)
+ if (GET_CODE (XVECEXP (body, 0, i - 1)) != USE
+ && GET_CODE (XVECEXP (body, 0, i - 1)) != CLOBBER)
return -1;
}
@@ -2023,10 +2027,13 @@ asm_noperands (const_rtx body)
else
{
/* 0 outputs, but some clobbers:
- body is [(asm_operands ...) (clobber (reg ...))...]. */
+ body is [(asm_operands ...)
+ (use (reg ...))...
+ (clobber (reg ...))...]. */
/* Make sure all the other parallel things really are clobbers. */
for (i = XVECLEN (body, 0) - 1; i > 0; i--)
- if (GET_CODE (XVECEXP (body, 0, i)) != CLOBBER)
+ if (GET_CODE (XVECEXP (body, 0, i)) != USE
+ && GET_CODE (XVECEXP (body, 0, i)) != CLOBBER)
return -1;
}
}
@@ -2093,7 +2100,8 @@ decode_asm_operands (rtx body, rtx *operands, rtx **operand_locs,
the SETs. Their constraints are in the ASM_OPERANDS itself. */
for (i = 0; i < nparallel; i++)
{
- if (GET_CODE (XVECEXP (body, 0, i)) == CLOBBER)
+ if (GET_CODE (XVECEXP (body, 0, i)) == USE
+ || GET_CODE (XVECEXP (body, 0, i)) == CLOBBER)
break; /* Past last SET */
gcc_assert (GET_CODE (XVECEXP (body, 0, i)) == SET);
if (operands)
diff --git a/gcc/target-def.h b/gcc/target-def.h
index 847698a..79fe8e2 100644
--- a/gcc/target-def.h
+++ b/gcc/target-def.h
@@ -118,6 +118,20 @@
#define TARGET_FUNCTION_INCOMING_ARG TARGET_FUNCTION_ARG
#endif
+/* Declare a target attribute table called NAME that only has GNU attributes.
+ There should be no null trailing element. E.g.:
+
+ TARGET_GNU_ATTRIBUTES (aarch64_attribute_table,
+ {
+ { "aarch64_vector_pcs", ... },
+ ...
+ }); */
+
+#define TARGET_GNU_ATTRIBUTES(NAME, ...) \
+ static const attribute_spec NAME##_2[] = __VA_ARGS__; \
+ static const scoped_attribute_specs NAME##_1 = { "gnu", { NAME##_2 } }; \
+ static const scoped_attribute_specs *const NAME[] = { &NAME##_1 }
+
#include "target-hooks-def.h"
#include "hooks.h"
diff --git a/gcc/target.def b/gcc/target.def
index eae7959..52b83e0 100644
--- a/gcc/target.def
+++ b/gcc/target.def
@@ -2218,15 +2218,36 @@ merging.",
merge_type_attributes)
/* Table of machine attributes and functions to handle them.
- Ignored if NULL. */
+ Ignored if empty. */
DEFHOOKPOD
(attribute_table,
- "If defined, this target hook points to an array of @samp{struct\n\
-attribute_spec} (defined in @file{tree-core.h}) specifying the machine\n\
-specific attributes for this target and some of the restrictions on the\n\
-entities to which these attributes are applied and the arguments they\n\
-take.",
- const struct attribute_spec *, NULL)
+ "If defined, this target hook provides an array of\n\
+@samp{scoped_attribute_spec}s (defined in @file{attribs.h}) that specify the\n\
+machine-specific attributes for this target. The information includes some\n\
+of the restrictions on the entities to which these attributes are applied\n\
+and the arguments that the attributes take.\n\
+\n\
+In C and C++, these attributes are associated with two syntaxes:\n\
+the traditional GNU @code{__attribute__} syntax and the standard\n\
+@samp{[[]]} syntax. Attributes that support the GNU syntax must be\n\
+placed in the @code{gnu} namespace. Such attributes can then also be\n\
+written @samp{[[gnu::@dots{}]]}. Attributes that use only the standard\n\
+syntax should be placed in whichever namespace the attribute specification\n\
+requires. For example, a target might choose to support vendor-specific\n\
+@samp{[[]]} attributes that the vendor places in their own namespace.\n\
+\n\
+Targets that only define attributes in the @code{gnu} namespace\n\
+can uase the following shorthand to define the table:\n\
+\n\
+@smallexample\n\
+TARGET_GNU_ATTRIBUTES (@var{cpu_attribute_table}, @{\n\
+ @{ \"@var{attribute1}\", @dots{} @},\n\
+ @{ \"@var{attribute2}\", @dots{} @},\n\
+ @dots{},\n\
+ @{ \"@var{attributen}\", @dots{} @},\n\
+@});\n\
+@end smallexample",
+ array_slice<const struct scoped_attribute_specs *const>,)
/* Return true iff attribute NAME expects a plain identifier as its first
argument. */
@@ -4132,6 +4153,36 @@ returns @code{VOIDmode}.",
machine_mode, (machine_mode m1, machine_mode m2),
default_cc_modes_compatible)
+DEFHOOK
+(use_late_prologue_epilogue,
+ "Return true if the current function's prologue and epilogue should\n\
+be emitted late in the pass pipeline, instead of at the usual point.\n\
+\n\
+Normally, the prologue and epilogue sequences are introduced soon after\n\
+register allocation is complete. The advantage of this approach is that\n\
+it allows the prologue and epilogue instructions to be optimized and\n\
+scheduled with other code in the function. However, some targets\n\
+require the prologue and epilogue to be the first and last sequences\n\
+executed by the function, with no variation allowed. This hook should\n\
+return true on such targets.\n\
+\n\
+The default implementation returns false, which is correct for most\n\
+targets. The hook should only return true if there is a specific\n\
+target limitation that cannot be described in RTL. For example,\n\
+the hook might return true if the prologue and epilogue need to switch\n\
+between instruction sets.",
+ bool, (),
+ hook_bool_void_false)
+
+DEFHOOK
+(emit_epilogue_for_sibcall,
+ "If defined, this hook emits an epilogue sequence for sibling (tail)\n\
+call instruction @var{call}. Another way of providing epilogues\n\
+for sibling calls is to define the @code{sibcall_epilogue} instruction\n\
+pattern; the main advantage of this hook over the pattern is that it\n\
+has access to the call instruction.",
+ void, (rtx_call_insn *call), NULL)
+
/* Do machine-dependent code transformations. Called just before
delayed-branch scheduling. */
DEFHOOK
@@ -4291,7 +4342,8 @@ DEFHOOK
(md_asm_adjust,
"This target hook may add @dfn{clobbers} to @var{clobbers} and\n\
@var{clobbered_regs} for any hard regs the port wishes to automatically\n\
-clobber for an asm. The @var{outputs} and @var{inputs} may be inspected\n\
+clobber for an asm. It can also add hard registers that are used by the\n\
+asm to @var{uses}. The @var{outputs} and @var{inputs} may be inspected\n\
to avoid clobbering a register that is already used by the asm. @var{loc}\n\
is the source location of the asm.\n\
\n\
@@ -4302,7 +4354,7 @@ changes to @var{inputs} must be accompanied by the corresponding changes\n\
to @var{input_modes}.",
rtx_insn *,
(vec<rtx>& outputs, vec<rtx>& inputs, vec<machine_mode>& input_modes,
- vec<const char *>& constraints, vec<rtx>& clobbers,
+     vec<const char *>& constraints, vec<rtx>& uses, vec<rtx>& clobbers,
HARD_REG_SET& clobbered_regs, location_t loc),
NULL)
@@ -4769,31 +4821,66 @@ not generate any instructions in this case.",
default_setup_incoming_varargs)
DEFHOOK
+(start_call_args,
+ "This target hook is invoked while generating RTL for a function call,\n\
+after the argument values have been computed, and after stack arguments\n\
+have been initialized, but before register arguments have been moved into\n\
+their ABI-defined hard register locations. It precedes calls to the related\n\
+hooks @code{TARGET_CALL_ARGS} and @code{TARGET_END_CALL_ARGS}.\n\
+The significance of this position in the call expansion is that:\n\
+\n\
+@itemize @bullet\n\
+@item\n\
+No argument registers are live.\n\
+@item\n\
+Although a call sequence can in general involve subcalls (such as using\n\
+@code{memcpy} to copy large arguments), no such subcall will occur between\n\
+the call to this hook and the generation of the main call instruction.\n\
+@end itemize\n\
+\n\
+The single argument @var{complete_args} is the state of the target\n\
+function's cumulative argument information after the final call to\n\
+@code{TARGET_FUNCTION_ARG}.\n\
+\n\
+The hook can be used for things like switching processor mode, in cases\n\
+where different calls need different processor modes. Most ports do not\n\
+need to implement anything for this hook.",
+ void, (cumulative_args_t complete_args),
+ hook_void_CUMULATIVE_ARGS)
+
+DEFHOOK
(call_args,
"While generating RTL for a function call, this target hook is invoked once\n\
for each argument passed to the function, either a register returned by\n\
@code{TARGET_FUNCTION_ARG} or a memory location. It is called just\n\
-before the point where argument registers are stored. The type of the\n\
-function to be called is also passed as the second argument; it is\n\
-@code{NULL_TREE} for libcalls. The @code{TARGET_END_CALL_ARGS} hook is\n\
-invoked just after the code to copy the return reg has been emitted.\n\
-This functionality can be used to perform special setup of call argument\n\
-registers if a target needs it.\n\
+before the point where argument registers are stored.\n\
+\n\
+@var{complete_args} is the state of the target function's cumulative\n\
+argument information after the final call to @code{TARGET_FUNCTION_ARG}.\n\
+@var{loc} is the location of the argument. @var{type} is the type of\n\
+the function being called, or @code{NULL_TREE} for libcalls.\n\
+\n\
For functions without arguments, the hook is called once with @code{pc_rtx}\n\
passed instead of an argument register.\n\
-Most ports do not need to implement anything for this hook.",
- void, (rtx, tree),
- hook_void_rtx_tree)
+\n\
+This functionality can be used to perform special setup of call argument\n\
+registers, if a target needs it. Most ports do not need to implement\n\
+anything for this hook.",
+ void, (cumulative_args_t complete_args, rtx loc, tree type),
+ hook_void_CUMULATIVE_ARGS_rtx_tree)
DEFHOOK
(end_call_args,
"This target hook is invoked while generating RTL for a function call,\n\
just after the point where the return reg is copied into a pseudo. It\n\
signals that all the call argument and return registers for the just\n\
-emitted call are now no longer in use.\n\
+emitted call are now no longer in use. @var{complete_args} is the\n\
+state of the target function's cumulative argument information after\n\
+the final call to @code{TARGET_FUNCTION_ARG}.\n\
+\n\
Most ports do not need to implement anything for this hook.",
- void, (void),
- hook_void_void)
+ void, (cumulative_args_t complete_args),
+ hook_void_CUMULATIVE_ARGS)
DEFHOOK
(push_argument,
diff --git a/gcc/targhooks.cc b/gcc/targhooks.cc
index a2dc733..afe91fe1 100644
--- a/gcc/targhooks.cc
+++ b/gcc/targhooks.cc
@@ -781,11 +781,21 @@ hook_int_CUMULATIVE_ARGS_arg_info_0 (cumulative_args_t,
}
void
+hook_void_CUMULATIVE_ARGS (cumulative_args_t)
+{
+}
+
+void
hook_void_CUMULATIVE_ARGS_tree (cumulative_args_t ca ATTRIBUTE_UNUSED,
tree ATTRIBUTE_UNUSED)
{
}
+void
+hook_void_CUMULATIVE_ARGS_rtx_tree (cumulative_args_t, rtx, tree)
+{
+}
+
/* Default implementation of TARGET_PUSH_ARGUMENT. */
bool
diff --git a/gcc/targhooks.h b/gcc/targhooks.h
index 26695ab..5a39e8e 100644
--- a/gcc/targhooks.h
+++ b/gcc/targhooks.h
@@ -142,8 +142,9 @@ extern bool hook_bool_CUMULATIVE_ARGS_arg_info_true
(cumulative_args_t, const function_arg_info &);
extern int hook_int_CUMULATIVE_ARGS_arg_info_0
(cumulative_args_t, const function_arg_info &);
-extern void hook_void_CUMULATIVE_ARGS_tree
- (cumulative_args_t, tree);
+extern void hook_void_CUMULATIVE_ARGS (cumulative_args_t);
+extern void hook_void_CUMULATIVE_ARGS_tree (cumulative_args_t, tree);
+extern void hook_void_CUMULATIVE_ARGS_rtx_tree (cumulative_args_t, rtx, tree);
extern const char *hook_invalid_arg_for_unprototyped_fn
(const_tree, const_tree, const_tree);
extern void default_function_arg_advance
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index e331429..b516297 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,3455 @@
+2023-12-06 Alexandre Oliva <oliva@adacore.com>
+
+ * c-c++-common/strub-O0.c: New.
+ * c-c++-common/strub-O1.c: New.
+ * c-c++-common/strub-O2.c: New.
+ * c-c++-common/strub-O2fni.c: New.
+ * c-c++-common/strub-O3.c: New.
+ * c-c++-common/strub-O3fni.c: New.
+ * c-c++-common/strub-Og.c: New.
+ * c-c++-common/strub-Os.c: New.
+ * c-c++-common/strub-all1.c: New.
+ * c-c++-common/strub-all2.c: New.
+ * c-c++-common/strub-apply1.c: New.
+ * c-c++-common/strub-apply2.c: New.
+ * c-c++-common/strub-apply3.c: New.
+ * c-c++-common/strub-apply4.c: New.
+ * c-c++-common/strub-at-calls1.c: New.
+ * c-c++-common/strub-at-calls2.c: New.
+ * c-c++-common/strub-defer-O1.c: New.
+ * c-c++-common/strub-defer-O2.c: New.
+ * c-c++-common/strub-defer-O3.c: New.
+ * c-c++-common/strub-defer-Os.c: New.
+ * c-c++-common/strub-internal1.c: New.
+ * c-c++-common/strub-internal2.c: New.
+ * c-c++-common/strub-parms1.c: New.
+ * c-c++-common/strub-parms2.c: New.
+ * c-c++-common/strub-parms3.c: New.
+ * c-c++-common/strub-relaxed1.c: New.
+ * c-c++-common/strub-relaxed2.c: New.
+ * c-c++-common/strub-short-O0-exc.c: New.
+ * c-c++-common/strub-short-O0.c: New.
+ * c-c++-common/strub-short-O1.c: New.
+ * c-c++-common/strub-short-O2.c: New.
+ * c-c++-common/strub-short-O3.c: New.
+ * c-c++-common/strub-short-Os.c: New.
+ * c-c++-common/strub-strict1.c: New.
+ * c-c++-common/strub-strict2.c: New.
+ * c-c++-common/strub-tail-O1.c: New.
+ * c-c++-common/strub-tail-O2.c: New.
+ * c-c++-common/torture/strub-callable1.c: New.
+ * c-c++-common/torture/strub-callable2.c: New.
+ * c-c++-common/torture/strub-const1.c: New.
+ * c-c++-common/torture/strub-const2.c: New.
+ * c-c++-common/torture/strub-const3.c: New.
+ * c-c++-common/torture/strub-const4.c: New.
+ * c-c++-common/torture/strub-data1.c: New.
+ * c-c++-common/torture/strub-data2.c: New.
+ * c-c++-common/torture/strub-data3.c: New.
+ * c-c++-common/torture/strub-data4.c: New.
+ * c-c++-common/torture/strub-data5.c: New.
+ * c-c++-common/torture/strub-indcall1.c: New.
+ * c-c++-common/torture/strub-indcall2.c: New.
+ * c-c++-common/torture/strub-indcall3.c: New.
+ * c-c++-common/torture/strub-inlinable1.c: New.
+ * c-c++-common/torture/strub-inlinable2.c: New.
+ * c-c++-common/torture/strub-ptrfn1.c: New.
+ * c-c++-common/torture/strub-ptrfn2.c: New.
+ * c-c++-common/torture/strub-ptrfn3.c: New.
+ * c-c++-common/torture/strub-ptrfn4.c: New.
+ * c-c++-common/torture/strub-pure1.c: New.
+ * c-c++-common/torture/strub-pure2.c: New.
+ * c-c++-common/torture/strub-pure3.c: New.
+ * c-c++-common/torture/strub-pure4.c: New.
+ * c-c++-common/torture/strub-run1.c: New.
+ * c-c++-common/torture/strub-run2.c: New.
+ * c-c++-common/torture/strub-run3.c: New.
+ * c-c++-common/torture/strub-run4.c: New.
+ * c-c++-common/torture/strub-run4c.c: New.
+ * c-c++-common/torture/strub-run4d.c: New.
+ * c-c++-common/torture/strub-run4i.c: New.
+ * g++.dg/strub-run1.C: New.
+ * g++.dg/torture/strub-init1.C: New.
+ * g++.dg/torture/strub-init2.C: New.
+ * g++.dg/torture/strub-init3.C: New.
+ * gnat.dg/strub_attr.adb, gnat.dg/strub_attr.ads: New.
+ * gnat.dg/strub_ind.adb, gnat.dg/strub_ind.ads: New.
+ * c-c++-common/strub-var1.c: New file.
+ * gnat.dg/strub_access.adb: New file.
+ * gnat.dg/strub_access1.adb: New file.
+ * gnat.dg/strub_disp.adb: New file.
+ * gnat.dg/strub_disp1.adb: New file.
+ * gnat.dg/strub_ind1.adb: New file.
+ * gnat.dg/strub_ind1.ads: New file.
+ * gnat.dg/strub_ind2.adb: New file.
+ * gnat.dg/strub_ind2.ads: New file.
+ * gnat.dg/strub_intf.adb: New file.
+ * gnat.dg/strub_intf1.adb: New file.
+ * gnat.dg/strub_intf2.adb: New file.
+ * gnat.dg/strub_renm.adb: New file.
+ * gnat.dg/strub_renm1.adb: New file.
+ * gnat.dg/strub_renm2.adb: New file.
+ * gnat.dg/strub_var.adb: New file.
+ * gnat.dg/strub_var1.adb: New file.
+
+2023-12-05 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112851
+ PR target/112852
+ * gcc.target/riscv/rvv/autovec/vls/consecutive-1.c: Add LMUL = 8 option.
+ * gcc.target/riscv/rvv/autovec/vls/consecutive-2.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/mod-1.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/mov-1.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/mov-10.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/mov-11.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/mov-12.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/mov-13.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/mov-14.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/mov-15.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/mov-16.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/mov-17.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/mov-3.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/mov-5.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/mov-7.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/mov-8.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/mov-9.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/spill-1.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/spill-2.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/spill-3.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/spill-5.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/vls/spill-6.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/zve32f-1.c: Adapt test.
+ * gcc.target/riscv/rvv/autovec/pr112851.c: New test.
+ * gcc.target/riscv/rvv/autovec/pr112852.c: New test.
+
+2023-12-05 David Faust <david.faust@oracle.com>
+
+ PR debug/112849
+ * gcc.dg/debug/btf/btf-datasec-3.c: New test.
+
+2023-12-05 Jakub Jelinek <jakub@redhat.com>
+
+ PR c++/112795
+ * g++.dg/ext/unroll-2.C: Use { target c++11 } instead of dg-skip-if for
+ -std=gnu++98.
+ * g++.dg/ext/unroll-3.C: Likewise.
+ * g++.dg/ext/unroll-7.C: New test.
+ * g++.dg/ext/unroll-8.C: New test.
+
+2023-12-05 Harald Anlauf <anlauf@gmx.de>
+ Tobias Burnus <tobias@codesourcery.com>
+
+ PR fortran/100988
+ * gfortran.dg/coarray_poly_6.f90: Adjust pattern.
+ * gfortran.dg/coarray_poly_7.f90: Likewise.
+ * gfortran.dg/coarray_poly_8.f90: Likewise.
+ * gfortran.dg/missing_optional_dummy_6a.f90: Likewise.
+ * gfortran.dg/pr100988.f90: New test.
+
+2023-12-05 Jakub Jelinek <jakub@redhat.com>
+
+ PR c++/110734
+ * g++.dg/DRs/dr2262.C: New test.
+ * g++.dg/cpp0x/gen-attrs-76.C (foo, bar): Don't expect errors
+ on attributes on asm definitions.
+ * g++.dg/gomp/attrs-11.C: Remove 2 expected errors.
+
+2023-12-05 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/112830
+ * gcc.target/avr/pr112830.c: New testcase.
+ * gcc.target/i386/pr112830.c: Likewise.
+
+2023-12-05 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/109689
+ PR tree-optimization/112856
+ * gcc.dg/torture/pr109689.c: New testcase.
+ * gcc.dg/torture/pr112856.c: Likewise.
+
+2023-12-05 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/112845
+ * gcc.dg/pr112845.c: New file.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/sve/acle/asm/test_sve_acle.h: Provide a way
+ for test functions to share ZT0.
+ (ATTR): Update accordingly.
+ (TEST_LOAD_COUNT, TEST_STORE_COUNT, TEST_PN, TEST_COUNT_PN)
+ (TEST_EXTRACT_PN, TEST_SELECT_P, TEST_COMPARE_S_X2, TEST_COMPARE_S_C)
+ (TEST_CREATE_B, TEST_GET_B, TEST_SET_B, TEST_XN, TEST_XN_SINGLE)
+ (TEST_XN_SINGLE_Z15, TEST_XN_SINGLE_AWKWARD, TEST_X2_NARROW)
+ (TEST_X4_NARROW): New macros.
+ * gcc.target/aarch64/sve/acle/asm/create2_1.c: Add _b tests.
+ * gcc.target/aarch64/sve/acle/general-c/binary_za_m_1.c: Remove
+ test for svmopa that becomes valid with SME2.
+ * gcc.target/aarch64/sve/acle/general-c/create_1.c: Adjust for
+ existence of svboolx2_t version of svcreate2.
+ * gcc.target/aarch64/sve/acle/general-c/store_1.c: Adjust error
+ messages to account for svcount_t predication.
+ * gcc.target/aarch64/sve/acle/general-c/store_2.c: Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/ternary_qq_lane_1.c: Adjust
+ error messages to account for new SME2 variants.
+ * gcc.target/aarch64/sve/acle/general-c/ternary_qq_opt_n_2.c: Likewise.
+ * g++.target/aarch64/sme2/aarch64-sme2-acle-asm.exp: New file.
+ * gcc.target/aarch64/sme/acle-asm/clamp_s16.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/clamp_s32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/clamp_s64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/clamp_s8.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/clamp_u16.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/clamp_u32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/clamp_u64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/clamp_u8.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/revd_bf16.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/revd_f16.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/revd_f32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/revd_f64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/revd_s16.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/revd_s32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/revd_s64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/revd_s8.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/revd_u16.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/revd_u32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/revd_u64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/revd_u8.c: New file.
+ * gcc.target/aarch64/sme/clamp_1.c: New file.
+ * gcc.target/aarch64/sme/clamp_2.c: New file.
+ * gcc.target/aarch64/sme/clamp_3.c: New file.
+ * gcc.target/aarch64/sme/clamp_4.c: New file.
+ * gcc.target/aarch64/sme2/aarch64-sme2-acle-asm.exp: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_s16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_s16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_s64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_s64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_s8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_s8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_u16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_u16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_u32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_u32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_u64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_u64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_u8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_u8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_write_za32_s32_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_write_za32_s32_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_write_za32_u32_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_write_za32_u32_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_write_za64_s64_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_write_za64_s64_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_write_za64_u64_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_write_za64_u64_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_za32_f32_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_za32_f32_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_za32_s32_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_za32_s32_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_za32_u32_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_za32_u32_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_za64_f64_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_za64_f64_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_za64_s64_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_za64_s64_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_za64_u64_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/add_za64_u64_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/bfmlslb_f32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/bfmlslb_lane_f32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/bfmlslt_f32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/bfmlslt_lane_f32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/bmopa_za32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/bmops_za32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_f16.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_f16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_f16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_f32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_f32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_f32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_f64.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_f64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_f64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_s16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_s16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_s64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_s64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_s8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_s8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_u16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_u16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_u32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_u32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_u64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_u64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_u8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/clamp_u8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/cntp_c16.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/cntp_c32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/cntp_c64.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/cntp_c8.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/cvt_bf16_f32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/cvt_f16_f32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/cvt_f32_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/cvt_f32_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/cvt_f32_u32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/cvt_f32_u32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/cvt_s32_f32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/cvt_s32_f32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/cvt_u32_f32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/cvt_u32_f32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/cvtn_bf16_f32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/cvtn_f16_f32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_f32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_lane_f32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_lane_s32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_lane_u32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_bf16_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_bf16_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_f16_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_f16_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_s16_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_s16_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_s8_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_s8_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_u16_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_u16_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_u8_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_u8_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_lane_za64_s16_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_lane_za64_s16_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_lane_za64_u16_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_lane_za64_u16_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_s32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_u32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_za32_bf16_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_za32_bf16_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_za32_f16_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_za32_f16_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_za32_s16_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_za32_s16_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_za32_s8_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_za32_s8_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_za32_u16_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_za32_u16_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_za32_u8_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_za32_u8_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_za64_s16_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_za64_s16_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_za64_u16_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/dot_za64_u16_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_bf16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_bf16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_f16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_f16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_f32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_f32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_f64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_f64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_s16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_s16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_s64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_s64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_s8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_s8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_u16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_u16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_u32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_u32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_u64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_u64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_u8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ld1_u8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_bf16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_bf16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_f16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_f16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_f32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_f32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_f64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_f64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_s16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_s16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_s64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_s64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_s8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_s8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_u16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_u16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_u32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_u32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_u64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_u64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_u8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldnt1_u8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ldr_zt.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_bf16.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_bf16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_bf16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_f16.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_f16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_f16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_f32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_f32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_f32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_s16.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_s16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_s16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_s32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_s8.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_s8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_s8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_u16.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_u16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_u16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_u32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_u32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_u32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_u8.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_u8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti2_u8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_bf16.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_bf16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_bf16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_f16.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_f16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_f16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_f32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_f32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_f32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_s16.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_s16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_s16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_s32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_s8.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_s8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_u16.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_u16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_u16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_u32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_u32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_u32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_u8.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/luti4_u8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/max_f16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/max_f16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/max_f32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/max_f32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/max_f64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/max_f64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/max_s16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/max_s16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/max_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/max_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/max_s64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/max_s64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/max_s8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/max_s8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/max_u16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/max_u16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/max_u32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/max_u32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/max_u64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/max_u64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/max_u8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/max_u8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/maxnm_f16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/maxnm_f16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/maxnm_f32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/maxnm_f32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/maxnm_f64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/maxnm_f64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/min_f16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/min_f16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/min_f32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/min_f32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/min_f64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/min_f64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/min_s16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/min_s16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/min_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/min_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/min_s64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/min_s64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/min_s8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/min_s8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/min_u16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/min_u16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/min_u32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/min_u32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/min_u64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/min_u64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/min_u8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/min_u8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/minnm_f16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/minnm_f16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/minnm_f32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/minnm_f32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/minnm_f64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/minnm_f64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_bf16_vg2x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_bf16_vg2x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_bf16_vg2x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f16_vg2x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f16_vg2x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f16_vg2x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f32_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f32_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s16_vg2x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s16_vg2x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s16_vg2x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s8_vg4x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s8_vg4x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s8_vg4x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u16_vg2x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u16_vg2x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u16_vg2x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u8_vg4x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u8_vg4x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u8_vg4x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_f64_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_f64_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_s16_vg4x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_s16_vg4x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_s16_vg4x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_u16_vg4x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_u16_vg4x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_u16_vg4x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za32_bf16_vg2x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za32_bf16_vg2x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za32_bf16_vg2x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za32_f16_vg2x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za32_f16_vg2x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za32_f16_vg2x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za32_f32_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za32_f32_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za32_s16_vg2x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za32_s16_vg2x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za32_s16_vg2x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za32_s8_vg4x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za32_s8_vg4x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za32_s8_vg4x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za32_u16_vg2x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za32_u16_vg2x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za32_u16_vg2x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za32_u8_vg4x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za32_u8_vg4x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za32_u8_vg4x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za64_f64_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za64_f64_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za64_s16_vg4x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za64_s16_vg4x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za64_s16_vg4x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za64_u16_vg4x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za64_u16_vg4x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mla_za64_u16_vg4x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_bf16_vg2x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_bf16_vg2x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_bf16_vg2x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f16_vg2x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f16_vg2x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f16_vg2x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f32_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f32_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s16_vg2x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s16_vg2x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s16_vg2x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s8_vg4x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s8_vg4x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s8_vg4x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u16_vg2x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u16_vg2x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u16_vg2x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u8_vg4x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u8_vg4x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u8_vg4x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_f64_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_f64_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_s16_vg4x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_s16_vg4x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_s16_vg4x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_u16_vg4x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_u16_vg4x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_u16_vg4x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za32_bf16_vg2x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za32_bf16_vg2x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za32_bf16_vg2x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za32_f16_vg2x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za32_f16_vg2x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za32_f16_vg2x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za32_f32_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za32_f32_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za32_s16_vg2x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za32_s16_vg2x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za32_s16_vg2x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za32_s8_vg4x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za32_s8_vg4x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za32_s8_vg4x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za32_u16_vg2x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za32_u16_vg2x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za32_u16_vg2x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za32_u8_vg4x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za32_u8_vg4x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za32_u8_vg4x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za64_f64_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za64_f64_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za64_s16_vg4x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za64_s16_vg4x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za64_s16_vg4x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za64_u16_vg4x1.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za64_u16_vg4x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mls_za64_u16_vg4x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mopa_za32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/mops_za32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/pext_c16.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/pext_c16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/pext_c32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/pext_c32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/pext_c64.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/pext_c64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/pext_c8.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/pext_c8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/pfalse_c.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/psel_b16.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/psel_b32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/psel_b64.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/psel_b8.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/psel_c16.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/psel_c32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/psel_c64.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/psel_c8.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ptrue_c16.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ptrue_c32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ptrue_c64.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/ptrue_c8.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qcvt_s16_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qcvt_s16_s64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qcvt_s8_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qcvt_u16_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qcvt_u16_s64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qcvt_u16_u32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qcvt_u16_u64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qcvt_u8_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qcvt_u8_u32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qcvtn_s16_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qcvtn_s16_s64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qcvtn_s8_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qcvtn_u16_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qcvtn_u16_s64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qcvtn_u16_u32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qcvtn_u16_u64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qcvtn_u8_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qcvtn_u8_u32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qdmulh_s16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qdmulh_s16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qdmulh_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qdmulh_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qdmulh_s64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qdmulh_s64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qdmulh_s8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qdmulh_s8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qrshr_s16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qrshr_s16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qrshr_s8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qrshr_u16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qrshr_u16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qrshr_u8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qrshrn_s16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qrshrn_s16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qrshrn_s8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qrshrn_u16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qrshrn_u16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qrshrn_u8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qrshru_u16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qrshru_u16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qrshru_u8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qrshrun_u16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qrshrun_u16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/qrshrun_u8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_hor_za16_vg2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_hor_za16_vg4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_hor_za32_vg2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_hor_za32_vg4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_hor_za64_vg2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_hor_za64_vg4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_hor_za8_vg2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_hor_za8_vg4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_ver_za16_vg2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_ver_za16_vg4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_ver_za32_vg2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_ver_za32_vg4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_ver_za64_vg2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_ver_za64_vg4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_ver_za8_vg2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_ver_za8_vg4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_za16_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_za16_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_za32_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_za32_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_za64_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_za64_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_za8_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/read_za8_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rinta_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rinta_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rintm_u32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rintm_u32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rintn_u32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rintn_u32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rintp_u32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rintp_u32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rshl_s16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rshl_s16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rshl_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rshl_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rshl_s64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rshl_s64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rshl_s8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rshl_s8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rshl_u16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rshl_u16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rshl_u32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rshl_u32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rshl_u64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rshl_u64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rshl_u8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/rshl_u8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_bf16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_bf16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_f16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_f16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_f32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_f32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_f64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_f64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_s16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_s16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_s64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_s64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_s8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_s8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_u16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_u16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_u32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_u32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_u64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_u64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_u8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sel_u8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_bf16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_bf16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_f16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_f16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_f32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_f32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_f64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_f64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_s16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_s16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_s64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_s64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_s8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_s8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_u16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_u16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_u32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_u32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_u64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_u64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_u8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/st1_u8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_bf16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_bf16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_f16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_f16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_f32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_f32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_f64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_f64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_s16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_s16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_s64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_s64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_s8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_s8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_u16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_u16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_u32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_u32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_u64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_u64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_u8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/stnt1_u8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/str_zt.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sub_write_za32_s32_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sub_write_za32_s32_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sub_write_za32_u32_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sub_write_za32_u32_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sub_write_za64_s64_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sub_write_za64_s64_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sub_write_za64_u64_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sub_write_za64_u64_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sub_za32_f32_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sub_za32_f32_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sub_za32_s32_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sub_za32_s32_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sub_za32_u32_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sub_za32_u32_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sub_za64_f64_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sub_za64_f64_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sub_za64_s64_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sub_za64_s64_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sub_za64_u64_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sub_za64_u64_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sudot_lane_za32_s8_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sudot_lane_za32_s8_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sudot_za32_s8_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/sudot_za32_s8_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/suvdot_lane_za32_s8_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/test_sme2_acle.h: New file.
+ * gcc.target/aarch64/sme2/acle-asm/unpk_s16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/unpk_s16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/unpk_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/unpk_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/unpk_s8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/unpk_s8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/unpk_u16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/unpk_u16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/unpk_u32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/unpk_u32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/unpk_u8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/unpk_u8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/usdot_lane_za32_u8_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/usdot_lane_za32_u8_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/usdot_za32_u8_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/usdot_za32_u8_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/usvdot_lane_za32_u8_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_bf16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_bf16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_f16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_f16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_f32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_f32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_f64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_f64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_s16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_s16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_s64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_s64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_s8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_s8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_u16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_u16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_u32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_u32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_u64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_u64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_u8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzp_u8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_bf16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_bf16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_f16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_f16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_f32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_f32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_f64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_f64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_s16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_s16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_s64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_s64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_s8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_s8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_u16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_u16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_u32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_u32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_u64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_u64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_u8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/uzpq_u8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_bf16_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_f16_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_s16_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_s8_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_u16_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_u8_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/vdot_lane_za64_s16_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/vdot_lane_za64_u16_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilege_b16.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilege_b32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilege_b64.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilege_b8.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilege_c16.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilege_c32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilege_c64.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilege_c8.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilegt_b16.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilegt_b32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilegt_b64.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilegt_b8.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilegt_c16.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilegt_c32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilegt_c64.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilegt_c8.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilele_b16.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilele_b32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilele_b64.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilele_b8.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilele_c16.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilele_c32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilele_c64.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilele_c8.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilelt_b16.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilelt_b32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilelt_b64.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilelt_b8.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilelt_c16.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilelt_c32.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilelt_c64.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/whilelt_c8.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_hor_za16_vg2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_hor_za16_vg4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_hor_za32_vg2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_hor_za32_vg4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_hor_za64_vg2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_hor_za64_vg4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_hor_za8_vg2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_hor_za8_vg4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_ver_za16_vg2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_ver_za16_vg4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_ver_za32_vg2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_ver_za32_vg4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_ver_za64_vg2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_ver_za64_vg4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_ver_za8_vg2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_ver_za8_vg4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_za16_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_za16_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_za32_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_za32_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_za64_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_za64_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_za8_vg1x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/write_za8_vg1x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zero_zt.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_bf16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_bf16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_f16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_f16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_f32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_f32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_f64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_f64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_s16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_s16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_s64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_s64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_s8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_s8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_u16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_u16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_u32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_u32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_u64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_u64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_u8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zip_u8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_bf16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_bf16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_f16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_f16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_f32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_f32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_f64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_f64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_s16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_s16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_s32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_s32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_s64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_s64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_s8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_s8_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_u16_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_u16_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_u32_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_u32_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_u64_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_u64_x4.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_u8_x2.c: New file.
+ * gcc.target/aarch64/sme2/acle-asm/zipq_u8_x4.c: New file.
+ * gcc.target/aarch64/sve/acle/asm/get2_b.c: New file.
+ * gcc.target/aarch64/sve/acle/asm/set2_b.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/binary_int_opt_single_n_1.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/binary_int_opt_single_n_2.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/binary_opt_single_n_1.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/binary_opt_single_n_2.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/binary_single_1.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/binary_za_slice_int_opt_single_1.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/binary_za_slice_lane_1.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/binary_za_slice_lane_2.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/binary_za_slice_lane_3.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/binary_za_slice_lane_4.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/binary_za_slice_opt_single_1.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/binary_za_slice_opt_single_2.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/binary_za_slice_opt_single_3.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/binary_za_slice_uint_opt_single_1.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/binaryxn_1.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/binaryxn_2.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/clamp_1.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/compare_scalar_count_1.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/dot_za_slice_int_lane_1.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/dot_za_slice_lane_1.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/dot_za_slice_lane_2.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/dot_za_slice_uint_lane_1.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowxn_1.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/storexn_1.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/ternary_qq_or_011_lane_1.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/unary_convertxn_1.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/unary_za_slice_1.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/unary_za_slice_2.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/unary_za_slice_3.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/unaryxn_1.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/write_za_1.c: New file.
+ * gcc.target/aarch64/sve/acle/general-c/write_za_slice_1.c: New file.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/sme/zt0_state_1.c: New test.
+ * gcc.target/aarch64/sme/zt0_state_2.c: Likewise.
+ * gcc.target/aarch64/sme/zt0_state_3.c: Likewise.
+ * gcc.target/aarch64/sme/zt0_state_4.c: Likewise.
+ * gcc.target/aarch64/sme/zt0_state_5.c: Likewise.
+ * gcc.target/aarch64/sme/zt0_state_6.c: Likewise.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/sve/pcs/struct_3_128.c (test_nonpst3): Adjust
+ stack offsets.
+ (ret_nonpst3): Remove XFAIL.
+ * gcc.target/aarch64/sve/acle/general-c/svboolx2_1.c: New test.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * g++.target/aarch64/sve/acle/general-c++/mangle_1.C: Add test
+ for svcount_t.
+ * g++.target/aarch64/sve/acle/general-c++/mangle_2.C: Likewise.
+ * g++.target/aarch64/sve/acle/general-c++/svcount_1.C: New test.
+ * gcc.target/aarch64/sve/acle/asm/test_sve_acle.h (TEST_DUAL_P)
+ (TEST_DUAL_P_REV): New macros.
+ * gcc.target/aarch64/sve/acle/asm/reinterpret_b.c: New test.
+ * gcc.target/aarch64/sve/acle/general-c/load_1.c: Test passing
+ an svcount_t.
+ * gcc.target/aarch64/sve/acle/general-c/svcount_1.c: New test.
+ * gcc.target/aarch64/sve/acle/general-c/unary_convert_1.c: Test
+ reinterprets involving svcount_t.
+ * gcc.target/aarch64/sve/acle/general/attributes_7.c: Test svcount_t.
+ * gcc.target/aarch64/sve/pcs/annotate_1.c: Likewise.
+ * gcc.target/aarch64/sve/pcs/annotate_2.c: Likewise.
+ * gcc.target/aarch64/sve/pcs/args_12.c: New test.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * lib/target-supports.exp (check_effective_target_aarch64_sme2): New
+ target test.
+ (check_effective_target_aarch64_asm_sme2_ok): Likewise.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/sme/sibcall_1.c: New test.
+ * gcc.target/aarch64/sme/sibcall_2.c: Likewise.
+ * gcc.target/aarch64/sme/sibcall_3.c: Likewise.
+ * gcc.target/aarch64/sme/sibcall_4.c: Likewise.
+ * gcc.target/aarch64/sme/sibcall_5.c: Likewise.
+ * gcc.target/aarch64/sme/sibcall_6.c: Likewise.
+ * gcc.target/aarch64/sme/sibcall_7.c: Likewise.
+ * gcc.target/aarch64/sme/sibcall_8.c: Likewise.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/sme/inlining_1.c: New test.
+ * gcc.target/aarch64/sme/inlining_2.c: Likewise.
+ * gcc.target/aarch64/sme/inlining_3.c: Likewise.
+ * gcc.target/aarch64/sme/inlining_4.c: Likewise.
+ * gcc.target/aarch64/sme/inlining_5.c: Likewise.
+ * gcc.target/aarch64/sme/inlining_6.c: Likewise.
+ * gcc.target/aarch64/sme/inlining_7.c: Likewise.
+ * gcc.target/aarch64/sme/inlining_8.c: Likewise.
+ * gcc.target/aarch64/sme/inlining_10.c: New file.
+ * gcc.target/aarch64/sme/inlining_11.c: New file.
+ * gcc.target/aarch64/sme/inlining_12.c: New file.
+ * gcc.target/aarch64/sme/inlining_13.c: New file.
+ * gcc.target/aarch64/sme/inlining_14.c: New file.
+ * gcc.target/aarch64/sme/inlining_15.c: New file.
+ * gcc.target/aarch64/sme/inlining_9.c: New file.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * g++.target/aarch64/sme/exceptions_2.C: New test.
+ * gcc.target/aarch64/sme/nonlocal_goto_1.c: Likewise.
+ * gcc.target/aarch64/sme/nonlocal_goto_2.c: Likewise.
+ * gcc.target/aarch64/sme/nonlocal_goto_3.c: Likewise.
+ * gcc.target/aarch64/sme/nonlocal_goto_4.c: Likewise.
+ * gcc.target/aarch64/sme/nonlocal_goto_5.c: Likewise.
+ * gcc.target/aarch64/sme/nonlocal_goto_6.c: Likewise.
+ * gcc.target/aarch64/sme/nonlocal_goto_7.c: Likewise.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/sme/locally_streaming_1.c: New test.
+ * gcc.target/aarch64/sme/locally_streaming_2.c: Likewise.
+ * gcc.target/aarch64/sme/locally_streaming_3.c: Likewise.
+ * gcc.target/aarch64/sme/locally_streaming_4.c: Likewise.
+ * gcc.target/aarch64/sme/keyword_macros_1.c: Add
+ __arm_locally_streaming.
+ * g++.target/aarch64/sme/keyword_macros_1.C: Likewise.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * lib/target-supports.exp: Add sme and sme-i16i64 features.
+ * gcc.target/aarch64/pragma_cpp_predefs_4.c: Test __ARM_FEATURE_SME*
+ macros.
+ * gcc.target/aarch64/sve/acle/asm/test_sve_acle.h: Allow functions
+ to be marked as __arm_streaming, __arm_streaming_compatible, and
+ __arm_inout("za").
+ * g++.target/aarch64/sve/acle/general-c++/func_redef_4.c: Mark the
+ function as __arm_streaming_compatible.
+ * g++.target/aarch64/sve/acle/general-c++/func_redef_5.c: Likewise.
+ * g++.target/aarch64/sve/acle/general-c++/func_redef_7.c: Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/func_redef_4.c: Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/func_redef_5.c: Likewise.
+ * g++.target/aarch64/sme/aarch64-sme-acle-asm.exp: New test harness.
+ * gcc.target/aarch64/sme/aarch64-sme-acle-asm.exp: Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/binary_za_int_m_1.c: New test.
+ * gcc.target/aarch64/sve/acle/general-c/binary_za_m_1.c: Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/binary_za_m_2.c: Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/binary_za_uint_m_1.c: Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/read_za_m_1.c: Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/unary_za_m_1.c: Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/write_za_m_1.c: Likewise.
+ * gcc.target/aarch64/sme/acle-asm/addha_za32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/addha_za64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/addva_za32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/addva_za64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/arm_has_sme_sc.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/arm_in_streaming_mode_ns.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/arm_in_streaming_mode_s.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/arm_in_streaming_mode_sc.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/cntsb_s.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/cntsb_sc.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/cntsd_s.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/cntsd_sc.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/cntsh_s.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/cntsh_sc.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/cntsw_s.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/cntsw_sc.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za128.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za16.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za8.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ld1_hor_za128.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ld1_hor_za16.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ld1_hor_za32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ld1_hor_za64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ld1_hor_za8.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za128.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za16.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za8.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ld1_ver_za128.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ld1_ver_za16.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ld1_ver_za32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ld1_ver_za64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ld1_ver_za8.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ldr_vnum_za_s.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ldr_vnum_za_sc.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ldr_za_s.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/ldr_za_sc.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/mopa_za32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/mopa_za64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/mops_za32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/mops_za64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/read_hor_za128.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/read_hor_za16.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/read_hor_za32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/read_hor_za64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/read_hor_za8.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/read_ver_za128.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/read_ver_za16.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/read_ver_za32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/read_ver_za64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/read_ver_za8.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za128.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za16.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za8.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/st1_hor_za128.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/st1_hor_za16.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/st1_hor_za32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/st1_hor_za64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/st1_hor_za8.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za128.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za16.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za8.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/st1_ver_za128.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/st1_ver_za16.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/st1_ver_za32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/st1_ver_za64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/st1_ver_za8.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/str_vnum_za_s.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/str_vnum_za_sc.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/str_za_s.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/str_za_sc.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/sumopa_za32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/sumopa_za64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/sumops_za32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/sumops_za64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/test_sme_acle.h: New file.
+ * gcc.target/aarch64/sme/acle-asm/undef_za.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/usmopa_za32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/usmopa_za64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/usmops_za32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/usmops_za64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/write_hor_za128.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/write_hor_za16.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/write_hor_za32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/write_hor_za64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/write_hor_za8.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/write_ver_za128.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/write_ver_za16.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/write_ver_za32.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/write_ver_za64.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/write_ver_za8.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/zero_mask_za.c: New file.
+ * gcc.target/aarch64/sme/acle-asm/zero_za.c: New file.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/sme/za_state_1.c: New test.
+ * gcc.target/aarch64/sme/za_state_2.c: Likewise.
+ * gcc.target/aarch64/sme/za_state_3.c: Likewise.
+ * gcc.target/aarch64/sme/za_state_4.c: Likewise.
+ * gcc.target/aarch64/sme/za_state_5.c: Likewise.
+ * gcc.target/aarch64/sme/za_state_6.c: Likewise.
+ * g++.target/aarch64/sme/exceptions_1.C: Likewise.
+ * gcc.target/aarch64/sme/keyword_macros_1.c: Add ZA macros.
+ * g++.target/aarch64/sme/keyword_macros_1.C: Likewise.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/sme/call_sm_switch_1.c: New test.
+ * gcc.target/aarch64/sme/call_sm_switch_2.c: Likewise.
+ * gcc.target/aarch64/sme/call_sm_switch_3.c: Likewise.
+ * gcc.target/aarch64/sme/call_sm_switch_4.c: Likewise.
+ * gcc.target/aarch64/sme/call_sm_switch_5.c: Likewise.
+ * gcc.target/aarch64/sme/call_sm_switch_6.c: Likewise.
+ * gcc.target/aarch64/sme/call_sm_switch_7.c: Likewise.
+ * gcc.target/aarch64/sme/call_sm_switch_8.c: Likewise.
+ * gcc.target/aarch64/sme/call_sm_switch_9.c: Likewise.
+ * gcc.target/aarch64/sme/call_sm_switch_10.c: Likewise.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * g++.target/aarch64/sve/aarch64-ssve.exp: New harness.
+ * g++.target/aarch64/sve/acle/aarch64-sve-acle-asm.exp: Add
+ -DSTREAMING_COMPATIBLE to the list of options.
+ * g++.target/aarch64/sve2/acle/aarch64-sve2-acle-asm.exp: Likewise.
+ * gcc.target/aarch64/sve/acle/aarch64-sve-acle-asm.exp: Likewise.
+ * gcc.target/aarch64/sve2/acle/aarch64-sve2-acle-asm.exp: Likewise.
+ Fix pasto in variable name.
+ * gcc.target/aarch64/sve/acle/asm/test_sve_acle.h: Mark functions
+ as streaming-compatible if STREAMING_COMPATIBLE is defined.
+ * gcc.target/aarch64/sve/acle/asm/adda_f16.c: Disable for
+ streaming-compatible code.
+ * gcc.target/aarch64/sve/acle/asm/adda_f32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/adda_f64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/adrb.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/adrd.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/adrh.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/adrw.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/bfmmla_f32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/compact_f32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/compact_f64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/compact_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/compact_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/compact_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/compact_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/expa_f16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/expa_f32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/expa_f64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1_gather_f32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1_gather_f64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1_gather_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1_gather_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1_gather_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1_gather_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1ro_bf16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1ro_f16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1ro_f32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1ro_f64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1ro_s16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1ro_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1ro_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1ro_s8.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1ro_u16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1ro_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1ro_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1ro_u8.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1sb_gather_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1sb_gather_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1sb_gather_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1sb_gather_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1sh_gather_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1sh_gather_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1sh_gather_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1sh_gather_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1sw_gather_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1sw_gather_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1ub_gather_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1ub_gather_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1ub_gather_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1ub_gather_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1uh_gather_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1uh_gather_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1uh_gather_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1uh_gather_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1uw_gather_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ld1uw_gather_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1_bf16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1_f16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1_f32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1_f64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1_gather_f32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1_gather_f64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1_gather_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1_gather_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1_gather_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1_gather_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1_s16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1_s8.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1_u16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1_u8.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1sb_s16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1sb_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1sb_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1sb_u16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1sb_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1sb_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1sh_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1sh_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1sh_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1sh_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1sw_gather_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1sw_gather_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1sw_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1sw_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1ub_s16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1ub_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1ub_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1ub_u16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1ub_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1ub_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1uh_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1uh_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1uh_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1uh_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1uw_gather_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1uw_gather_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1uw_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldff1uw_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1_bf16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1_f16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1_f32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1_f64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1_s16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1_s8.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1_u16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1_u8.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1sb_s16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1sb_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1sb_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1sb_u16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1sb_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1sb_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1sh_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1sh_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1sh_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1sh_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1sw_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1sw_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1ub_s16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1ub_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1ub_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1ub_u16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1ub_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1ub_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1uh_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1uh_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1uh_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1uh_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1uw_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/ldnf1uw_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/mmla_f32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/mmla_f64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/mmla_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/mmla_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/prfb_gather.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/prfd_gather.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/prfh_gather.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/prfw_gather.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/rdffr_1.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/st1_scatter_f32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/st1_scatter_f64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/st1_scatter_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/st1_scatter_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/st1_scatter_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/st1_scatter_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/st1b_scatter_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/st1b_scatter_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/st1b_scatter_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/st1b_scatter_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/st1h_scatter_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/st1h_scatter_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/st1h_scatter_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/st1h_scatter_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/st1w_scatter_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/st1w_scatter_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/tmad_f16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/tmad_f32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/tmad_f64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/tsmul_f16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/tsmul_f32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/tsmul_f64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/tssel_f16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/tssel_f32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/tssel_f64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/usmmla_s32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/aesd_u8.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/aese_u8.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/aesimc_u8.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/aesmc_u8.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/bdep_u16.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/bdep_u32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/bdep_u64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/bdep_u8.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/bext_u16.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/bext_u32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/bext_u64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/bext_u8.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/bgrp_u16.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/bgrp_u32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/bgrp_u64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/bgrp_u8.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/histcnt_s32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/histcnt_s64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/histcnt_u32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/histcnt_u64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/histseg_s8.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/histseg_u8.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_f32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_f64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_s32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_s64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_u32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_u64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_s32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_s64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_u32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_u64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_s32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_s64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_u32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_u64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1sw_gather_s64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1sw_gather_u64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_s32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_s64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_u32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_u64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_s32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_s64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_u32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_u64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1uw_gather_s64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/ldnt1uw_gather_u64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/match_s16.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/match_s8.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/match_u16.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/match_u8.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/nmatch_s16.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/nmatch_s8.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/nmatch_u16.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/nmatch_u8.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/pmullb_pair_u64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/pmullt_pair_u64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/rax1_s64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/rax1_u64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/sm4e_u32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/sm4ekey_u32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_f32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_f64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_s32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_s64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_u32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_u64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_s32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_s64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_u32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_u64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_s32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_s64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_u32.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_u64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/stnt1w_scatter_s64.c: Likewise.
+ * gcc.target/aarch64/sve2/acle/asm/stnt1w_scatter_u64.c: Likewise.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/movdf_2.c: New test.
+ * gcc.target/aarch64/movdi_3.c: Likewise.
+ * gcc.target/aarch64/movhf_2.c: Likewise.
+ * gcc.target/aarch64/movhi_2.c: Likewise.
+ * gcc.target/aarch64/movqi_2.c: Likewise.
+ * gcc.target/aarch64/movsf_2.c: Likewise.
+ * gcc.target/aarch64/movsi_2.c: Likewise.
+ * gcc.target/aarch64/movtf_3.c: Likewise.
+ * gcc.target/aarch64/movtf_4.c: Likewise.
+ * gcc.target/aarch64/movti_3.c: Likewise.
+ * gcc.target/aarch64/movti_4.c: Likewise.
+ * gcc.target/aarch64/movv16qi_4.c: Likewise.
+ * gcc.target/aarch64/movv16qi_5.c: Likewise.
+ * gcc.target/aarch64/movv8qi_4.c: Likewise.
+ * gcc.target/aarch64/sme/arm_neon_1.c: Likewise.
+ * gcc.target/aarch64/sme/arm_neon_2.c: Likewise.
+ * gcc.target/aarch64/sme/arm_neon_3.c: Likewise.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * lib/target-supports.exp (check_effective_target_aarch64_sme): New
+ target test.
+ * gcc.target/aarch64/sme/aarch64-sme.exp: Force SME to be enabled
+ if it isn't by default.
+ * g++.target/aarch64/sme/aarch64-sme.exp: Likewise.
+ * gcc.target/aarch64/sme/streaming_mode_3.c: New test.
+ * gcc.target/aarch64/sme/streaming_mode_4.c: New file.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/sme/aarch64-sme.exp: New harness.
+ * gcc.target/aarch64/sme/streaming_mode_1.c: New test.
+ * gcc.target/aarch64/sme/streaming_mode_2.c: Likewise.
+ * gcc.target/aarch64/sme/keyword_macros_1.c: Likewise.
+ * g++.target/aarch64/sme/aarch64-sme.exp: New harness.
+ * g++.target/aarch64/sme/streaming_mode_1.C: New test.
+ * g++.target/aarch64/sme/streaming_mode_2.C: Likewise.
+ * g++.target/aarch64/sme/keyword_macros_1.C: Likewise.
+ * gcc.target/aarch64/auto-init-1.c: Only expect the call insn
+ to contain 1 (const_int 0), not 2.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/sve/acle/asm/test_sve_acle.h (TEST_DUAL_XN):
+ New macro.
+ * gcc.target/aarch64/sve/acle/asm/reinterpret_bf16.c: Add tests for
+ tuple forms.
+ * gcc.target/aarch64/sve/acle/asm/reinterpret_f16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/reinterpret_f32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/reinterpret_f64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/reinterpret_s16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/reinterpret_s32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/reinterpret_s64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/reinterpret_s8.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/reinterpret_u16.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/reinterpret_u32.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/reinterpret_u64.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/reinterpret_u8.c: Likewise.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/sve/acle/general-c/set_1.c: Tweak expected
+ error message.
+ * gcc.target/aarch64/sve/acle/general-c/set_3.c: Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/set_5.c: Likewise.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/sve/acle/general-c/*: Replace "but previous
+ arguments had" with "but argument N had".
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/sve/acle/general-c/*: Update expected error
+ messages.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/sve/acle/asm/cntb.c: Tweak expected output.
+ * gcc.target/aarch64/sve/acle/asm/cnth.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/cntw.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/cntd.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/prfb.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/prfh.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/prfw.c: Likewise.
+ * gcc.target/aarch64/sve/acle/asm/prfd.c: Likewise.
+ * gcc.target/aarch64/sve/loop_add_4.c: Expect RDVL to be used
+ to calculate the -17 and 17 factors.
+ * gcc.target/aarch64/sve/pcs/stack_clash_1.c: Likewise the 18 factor.
+
+2023-12-05 Szabolcs Nagy <szabolcs.nagy@arm.com>
+
+ * gcc.target/aarch64/eh_return-3.c: Fix when retaa is available.
+
+2023-12-05 Thomas Schwinge <thomas@codesourcery.com>
+
+ * gcc.dg/gnu23-builtins-no-dfp-1.c: Remove '-fpermissive'.
+ 'dg-error "implicit"' instead of 'dg-warning "implicit"'.
+
+2023-12-05 Kito Cheng <kito.cheng@sifive.com>
+
+ * gcc.target/riscv/arch-29.c: New test.
+ * gcc.target/riscv/arch-30.c: New test.
+
+2023-12-05 Richard Sandiford <richard.sandiford@arm.com>
+
+ PR rtl-optimization/112278
+ * gcc.target/aarch64/sve/pr112278.c: New test.
+
+2023-12-05 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/112843
+ * gcc.dg/bitint-47.c: New test.
+
+2023-12-05 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/112816
+ * gcc.target/i386/pr112816.c: New test.
+
+2023-12-05 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * g++.target/riscv/rvv/autovec/bug-2.C: New test.
+
+2023-12-05 Richard Biener <rguenther@suse.de>
+
+ PR c/89270
+ * gcc.target/avr/pr89270.c: New testcase.
+
+2023-12-05 Richard Biener <rguenther@suse.de>
+
+ PR c/86869
+ * gcc.target/avr/pr86869.c: New testcase.
+
+2023-12-05 liuhongt <hongtao.liu@intel.com>
+
+ * gcc.target/i386/udotprodint8_emulate.c: New test.
+
+2023-12-05 Marek Polacek <polacek@redhat.com>
+
+ PR c++/107687
+ PR c++/110997
+ * g++.dg/cpp23/consteval-if10.C: Remove dg-error.
+ * g++.dg/cpp23/consteval-if2.C: Likewise.
+ * g++.dg/cpp23/feat-cxx2b.C: Adjust expected value of __cpp_consteval.
+ * g++.dg/cpp26/feat-cxx26.C: Likewise.
+ * g++.dg/cpp2a/consteval-memfn1.C: Add dg-error.
+ * g++.dg/cpp2a/consteval11.C: Likewise.
+ * g++.dg/cpp2a/consteval3.C: Adjust dg-error.
+ * g++.dg/cpp2a/consteval34.C: Add dg-error.
+ * g++.dg/cpp2a/consteval36.C: Likewise.
+ * g++.dg/cpp2a/consteval9.C: Likewise.
+ * g++.dg/cpp2a/feat-cxx2a.C: Adjust expected value of __cpp_consteval.
+ * g++.dg/cpp2a/spaceship-synth9.C: Adjust dg-error.
+ * g++.dg/cpp2a/consteval-prop1.C: New test.
+ * g++.dg/cpp2a/consteval-prop10.C: New test.
+ * g++.dg/cpp2a/consteval-prop11.C: New test.
+ * g++.dg/cpp2a/consteval-prop12.C: New test.
+ * g++.dg/cpp2a/consteval-prop13.C: New test.
+ * g++.dg/cpp2a/consteval-prop14.C: New test.
+ * g++.dg/cpp2a/consteval-prop15.C: New test.
+ * g++.dg/cpp2a/consteval-prop16.C: New test.
+ * g++.dg/cpp2a/consteval-prop17.C: New test.
+ * g++.dg/cpp2a/consteval-prop18.C: New test.
+ * g++.dg/cpp2a/consteval-prop19.C: New test.
+ * g++.dg/cpp2a/consteval-prop20.C: New test.
+ * g++.dg/cpp2a/consteval-prop2.C: New test.
+ * g++.dg/cpp2a/consteval-prop3.C: New test.
+ * g++.dg/cpp2a/consteval-prop4.C: New test.
+ * g++.dg/cpp2a/consteval-prop5.C: New test.
+ * g++.dg/cpp2a/consteval-prop6.C: New test.
+ * g++.dg/cpp2a/consteval-prop7.C: New test.
+ * g++.dg/cpp2a/consteval-prop8.C: New test.
+ * g++.dg/cpp2a/consteval-prop9.C: New test.
+
+2023-12-04 Jason Merrill <jason@redhat.com>
+
+ * g++.dg/cpp0x/constexpr-noreturn1.C: New test.
+
+2023-12-04 Robin Dapp <rdapp@ventanamicro.com>
+
+ * gcc.target/riscv/rvv/autovec/binop/copysign-zvfh-run.c:
+ Replace riscv_zvfh_hw with riscv_zvfh.
+ * gcc.target/riscv/rvv/autovec/binop/vadd-zvfh-run.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/binop/vdiv-zvfh-run.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/binop/vmax-zvfh-run.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/binop/vmin-zvfh-run.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/binop/vmul-zvfh-run.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/cond/cond_copysign-zvfh-run.c:
+ Ditto.
+ * gcc.target/riscv/rvv/autovec/struct/struct_vect_run-10.c:
+ Ditto.
+ * gcc.target/riscv/rvv/autovec/struct/struct_vect_run-6.c: Allow
+ overriding N.
+	* gcc.target/riscv/rvv/autovec/unop/abs-zvfh-run.c: Replace
+	riscv_zvfh_hw with riscv_zvfh.
+ * gcc.target/riscv/rvv/autovec/unop/vneg-zvfh-run.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-1.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-10.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-11.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-12.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-2.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-3.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-5.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-6.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-7.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-8.c: Ditto.
+ * lib/target-supports.exp: Remove riscv_vector_hw and
+ riscv_zvfh_hw.
+
+2023-12-04 Robin Dapp <rdapp@ventanamicro.com>
+
+ * gcc.target/riscv/rvv/autovec/pr112552.c: Add
+ -Wno-incompatible-pointer-types.
+ * gcc.target/riscv/rvv/autovec/struct/struct_vect_run-10.c:
+ Add -std=gnu99.
+
+2023-12-04 Robin Dapp <rdapp@ventanamicro.com>
+
+ * gcc.target/riscv/rvv/base/cpymem-strategy-1.c: Change to
+ -mstringop-strategy.
+ * gcc.target/riscv/rvv/base/cpymem-strategy-2.c: Ditto.
+ * gcc.target/riscv/rvv/base/cpymem-strategy-3.c: Ditto.
+ * gcc.target/riscv/rvv/base/cpymem-strategy-4.c: Ditto.
+ * gcc.target/riscv/rvv/base/cpymem-strategy-5.c: Ditto.
+
+2023-12-04 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112431
+ * gcc.target/riscv/rvv/base/pr112431-34.c: New test.
+
+2023-12-04 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112431
+ * gcc.target/riscv/rvv/base/pr112431-37.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-38.c: New test.
+
+2023-12-04 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/112818
+ * gcc.dg/vect/pr112818.c: New testcase.
+
+2023-12-04 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * g++.target/riscv/rvv/autovec/bug-01.C: Moved to...
+ * g++.target/riscv/rvv/autovec/bug-1.C: ...here.
+
+2023-12-04 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/112827
+ * gcc.dg/torture/pr112827-1.c: New testcase.
+ * gcc.dg/torture/pr112827-2.c: Likewise.
+
+2023-12-04 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112431
+ * gcc.target/riscv/rvv/base/pr112431-35.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-36.c: New test.
+
+2023-12-04 Indu Bhagat <indu.bhagat@oracle.com>
+
+ PR debug/112656
+ * gcc.dg/debug/btf/btf-function-7.c: New test.
+
+2023-12-04 Indu Bhagat <indu.bhagat@oracle.com>
+
+ PR debug/112768
+ * gcc.dg/debug/btf/btf-function-6.c: Empty string expected with
+ BTF_KIND_FUNC_PROTO.
+
+2023-12-04 Pan Li <pan2.li@intel.com>
+
+ PR target/112813
+ * gcc.target/riscv/rvv/vsetvl/pr112813-1.c: New test.
+
+2023-12-04 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/112837
+ * gcc.dg/pr112837.c: New test.
+
+2023-12-04 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/112816
+ * gcc.target/i386/sse2-pr112816.c: New test.
+
+2023-12-04 Jakub Jelinek <jakub@redhat.com>
+
+ PR c++/112795
+ * g++.dg/ext/unroll-5.C: New test.
+ * g++.dg/ext/unroll-6.C: New test.
+
+2023-12-04 Feng Wang <wangfeng@eswincomputing.com>
+
+ * gcc.target/riscv/zvkn-1.c: Replace zvbb with zvkb.
+ * gcc.target/riscv/zvkn.c: Ditto.
+	* gcc.target/riscv/zvknc-1.c: Ditto.
+	* gcc.target/riscv/zvknc-2.c: Ditto.
+	* gcc.target/riscv/zvknc.c: Ditto.
+	* gcc.target/riscv/zvkng-1.c: Ditto.
+	* gcc.target/riscv/zvkng-2.c: Ditto.
+	* gcc.target/riscv/zvkng.c: Ditto.
+	* gcc.target/riscv/zvks-1.c: Ditto.
+	* gcc.target/riscv/zvks.c: Ditto.
+	* gcc.target/riscv/zvksc-1.c: Ditto.
+	* gcc.target/riscv/zvksc-2.c: Ditto.
+	* gcc.target/riscv/zvksc.c: Ditto.
+	* gcc.target/riscv/zvksg-1.c: Ditto.
+	* gcc.target/riscv/zvksg-2.c: Ditto.
+	* gcc.target/riscv/zvksg.c: Ditto.
+
+2023-12-04 Fei Gao <gaofei@eswincomputing.com>
+ Xiao Zeng <zengxiao@eswincomputing.com>
+
+ * gcc.target/riscv/zicond-sfb-primitiveSemantics.c: New test.
+
+2023-12-04 Kito Cheng <kito.cheng@sifive.com>
+
+ * gcc.target/riscv/mcpu-sifive-x280.c: New test.
+
+2023-12-04 Hu, Lin1 <lin1.hu@intel.com>
+
+	* gcc.target/i386/user_msr-1.c: Correct the MSR index to give the
+	user a proper example.
+
+2023-12-03 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/112406
+ * gcc.target/aarch64/pr112406.c (MagickPixelPacket): Add missing
+ semicolon.
+ (GetImageChannelMoments_image): Avoid using implicit int.
+ (SetMagickPixelPacket): Use void return type instead of implicit int.
+ (GetImageChannelMoments): Likewise. Use __builtin_atan instead of
+ atan.
+
+2023-12-03 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/112807
+ * gcc.dg/bitint-46.c: New test.
+
+2023-12-03 Saurabh Jha <saurabh.jha@arm.com>
+
+ * gcc.target/arm/mve/pr112337.c: Use int32_t instead of int.
+
+2023-12-03 Pan Li <pan2.li@intel.com>
+
+ * gcc.target/riscv/rvv/base/pr112743-1.c: Fix typo.
+ * gcc.target/riscv/rvv/base/pr112743-2.c: Ditto.
+
+2023-12-03 Jeff Law <jlaw@ventanamicro.com>
+
+ * gcc.dg/gnu23-builtins-no-dfp-1.c: Add -fpermissive.
+
+2023-12-03 Jeff Law <jlaw@ventanamicro.com>
+
+ * gcc.c-torture/execute/pr65369.c: Fix type mismatch.
+
+2023-12-03 Jeff Law <jlaw@ventanamicro.com>
+
+ * gcc.c-torture/execute/comp-goto-1.c: Fix return value of main for
+ 16 bit targets.
+
+2023-12-03 Jeff Law <jlaw@ventanamicro.com>
+
+ * gcc.target/arc/lra-1.c: Fix missing prototypes and implicit
+ types in variable definitions.
+ * gcc.target/arc/pic-1.c: Similarly.
+ * gcc.target/arc/pr9001191897.c: Similarly.
+ * gcc.target/arc/pr9001195952.c: Add -fpermissive.
+
+2023-12-03 Jeff Law <jlaw@ventanamicro.com>
+
+ * gcc.target/nios2/cdx-ldstwm-1.c: Add -fpermissive.
+	* gcc.target/nios2/cdx-ldstwm-2.c: Add prototypes for abort and exit.
+
+2023-12-03 Jeff Law <jlaw@ventanamicro.com>
+
+ * gcc.target/h8300/pr58400.c: Add -fpermissive.
+ * gcc.target/h8300/pr17306-2.c: Add missing prototype.
+
+2023-12-02 Harald Anlauf <anlauf@gmx.de>
+
+ PR fortran/93762
+ PR fortran/100651
+ * gfortran.dg/optional_deferred_char_1.f90: New test.
+
+2023-12-02 Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
+
+ * gcc.target/i386/libcall-1.c: Skip on darwin.
+
+2023-12-02 Li Wei <liwei@loongson.cn>
+
+ * gcc.target/loongarch/lasx-extract-even_odd-opt.c: New test.
+
+2023-12-02 Li Wei <liwei@loongson.cn>
+
+ * gcc.target/loongarch/popcnt.c: New test.
+ * gcc.target/loongarch/popcount.c: New test.
+
+2023-12-02 chenxiaolong <chenxiaolong@loongson.cn>
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c: Remove
+	the default settings to run the behavior.
+	* gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvadd.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvadda.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvaddi.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvand.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvandi.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvandn.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitset.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvclo.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvclz.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvextrins.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvffinth.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvftintl.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvilvh.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvilvl.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvld.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvldi.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmadd.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmsub.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmul.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvneg.c: Ditto.
+	* gcc.target/loongarch/vector/lasx/lasx-xvnor.c: Ditto.
+ * gcc.target/loongarch/vector/lasx/lasx-xvnori.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvor.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvori.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvorn.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvpackev.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvpackod.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvpickev.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvpickod.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvpickve.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvprem.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvpremi.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvreplve.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvrotr.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvrotri.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvseq.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvseqi.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsll.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvslli.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsra.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsrai.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsran.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsrani.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsrar.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsrari.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsrl.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsrli.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsrln.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvssran.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvssrani.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvssrln.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvst.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsub.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsubi.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvxor.c:Dito.
+ * gcc.target/loongarch/vector/lasx/lasx-xvxori.c:Dito.
+ * gcc.target/loongarch/vector/loongarch-vector.exp: Add hardware
+ detection to set the behavior of program execution based on the
+ characteristics of the hardware.
+ * gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c: Remove the default
+ settings to run the behavior.
+ * gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vadd.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vadda.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vaddi.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vand.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vandi.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vandn.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vavg-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vavg-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vbitclr.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vbitclri.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vbitrev.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vbitsel.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vbitseli.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vbitset.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vbitseti.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vbsll.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vbsrl.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vclo.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vclz.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vexth-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vexth-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vextl-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vextl-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vextrins.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vffint-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vffint-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vffint-3.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfrstp.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vftint-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vftint-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vftint-3.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vftint-4.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vilvh.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vilvl.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vld.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vldi.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmadd.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmax-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmax-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmin-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmin-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmini-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmini-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmod-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmod-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmskgez.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmskltz.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmsknz.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmsub.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmul.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vneg.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vnor.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vnori.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vor.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vori.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vorn.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vpackev.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vpackod.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vpcnt.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vpickev.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vpickod.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vpremi.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vreplve.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vreplvei.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vrotr.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vrotri.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsat-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsat-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vseq.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vseqi.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vshuf.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsigncov.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsle-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsle-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vslei-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vslei-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsll.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vslli.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vslt-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vslt-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vslti-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vslti-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsra.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsrai.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsran.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsrani.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsrar.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsrari.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsrarn.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsrarni.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsrl.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsrli.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsrln.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsrlni.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsrlr.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsrlri.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vssran.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vssrani.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vssrarn.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vssrarni.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vssrln.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vssrlni.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vssub-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vssub-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vst.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsub.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsubi.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vxor.c:Dito.
+ * gcc.target/loongarch/vector/lsx/lsx-vxori.c:Dito.
+
+2023-12-02 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112801
+ * gcc.target/riscv/rvv/autovec/pr112801.c: New test.
+
+2023-12-02 Pan Li <pan2.li@intel.com>
+
+ PR target/112743
+ * gcc.target/riscv/rvv/base/pr112743-2.c: New test.
+
+2023-12-01 Harald Anlauf <anlauf@gmx.de>
+
+ PR fortran/112772
+ * gfortran.dg/missing_optional_dummy_7.f90: New test.
+
+2023-12-01 Jason Merrill <jason@redhat.com>
+
+ * g++.dg/cpp2a/nontype-class4.C: Specify ABI v18.
+ * g++.dg/cpp2a/nontype-class4a.C: New test.
+
+2023-12-01 Jason Merrill <jason@redhat.com>
+
+ * g++.dg/abi/mangle10.C: Disable compat aliases.
+ * g++.dg/abi/mangle52.C: Specify ABI 18.
+ * g++.dg/cpp2a/class-deduction-alias3.C
+ * g++.dg/cpp2a/class-deduction-alias8.C:
+ Avoid builtins in requires-clauses.
+ * g++.dg/abi/mangle-concepts1.C: New test.
+ * g++.dg/abi/mangle-ttp1.C: New test.
+
+2023-12-01 Alexandre Oliva <oliva@adacore.com>
+
+ PR target/112334
+ * c-c++-common/torture/harden-cfr-bret.c: Rework for stricter
+ untyped_return requirements. Require untyped_assembly.
+ * c-c++-common/torture/harden-cfr-bret-except.c: New.
+ * c-c++-common/torture/harden-cfr-bret-always.c: Require
+ untyped_assembly.
+ * c-c++-common/torture/harden-cfr-bret-never.c: Likewise.
+ * c-c++-common/torture/harden-cfr-bret-noopt.c: Likewise.
+ * c-c++-common/torture/harden-cfr-bret-noret.c: Likewise.
+ * c-c++-common/torture/harden-cfr-bret-no-xthrow.c: Likewise.
+ * c-c++-common/torture/harden-cfr-bret-nothrow.c: Likewise.
+ * c-c++-common/torture/harden-cfr-bret-retcl.c: Likewise.
+
+2023-12-01 Vladimir N. Makarov <vmakarov@redhat.com>
+
+ PR target/112445
+ * gcc.target/i386/pr112445.c: New test.
+
+2023-12-01 David Malcolm <dmalcolm@redhat.com>
+
+ * gcc.dg/analyzer/fd-accept.c: Update for fix to missing CWE
+ metadata for -Wanalyzer-fd-phase-mismatch.
+ * gcc.dg/analyzer/fd-bind.c: Likewise.
+ * gcc.dg/analyzer/fd-socket-misuse.c: Likewise.
+ * gcc.dg/plugin/analyzer_cpython_plugin.c: Update for changes to
+ pending_diagnostic::emit.
+ * gcc.dg/plugin/analyzer_gil_plugin.c: Likewise.
+
+2023-12-01 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112431
+ * gcc.target/riscv/rvv/base/pr112431-22.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-23.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-24.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-25.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-26.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-27.c: New test.
+
+2023-12-01 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112431
+ * gcc.target/riscv/rvv/base/pr112431-28.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-29.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-30.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-31.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-32.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-33.c: New test.
+
+2023-12-01 Jakub Jelinek <jakub@redhat.com>
+
+ * gcc.target/x86_64/abi/avx512fp16/m512h/test_passing_m512.c
+ (fun_check_passing_m512_8_values, fun_check_passing_m512h_8_values):
+ Add missing void return type.
+ * gcc.target/x86_64/abi/avx512fp16/m256h/test_passing_m256.c
+ (fun_check_passing_m256_8_values, fun_check_passing_m256h_8_values):
+ Likewise.
+ * gcc.dg/graphite/pr83126.c (ew): Add missing casts to __INTPTR_TYPE__
+ and then to int *.
+
+2023-12-01 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/112770
+ * gcc.dg/bitint-45.c: New test.
+
+2023-12-01 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/112771
+ * gcc.dg/bitint-44.c: New test.
+
+2023-12-01 Jose E. Marchesi <jose.marchesi@oracle.com>
+
+ * gcc.target/bpf/section-name-quoting-1.c: New test.
+
+2023-12-01 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/112750
+ * gcc.dg/bitint-41.c: Use -std=c23 rather than -std=c2x.
+ * gcc.dg/torture/bitint-43.c: Likewise.
+ * gcc.dg/torture/bitint-44.c: Likewise.
+ * gcc.dg/torture/bitint-45.c: New test.
+
+2023-12-01 Juergen Christ <jchrist@linux.ibm.com>
+
+ * gcc.target/s390/pr112753.c: New test.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ * gcc.dg/permerror-default.c (missing_parameter_type):
+ Expect error.
+ * gcc.dg/permerror-fpermissive.c (missing_parameter_type):
+ Expect -Wdeclaration-missing-parameter-type warning.
+ * gcc.dg/permerror-gnu89-nopermissive.c (missing_parameter_type):
+ Expect -Wdeclaration-missing-parameter-type error.
+ * gcc.dg/permerror-gnu89-pedantic.c (missing_parameter_type):
+ Likewise.
+ * gcc.dg/permerror-gnu89.c (missing_parameter_type):
+ Expect -Wdeclaration-missing-parameter-type warning.
+ * gcc.dg/permerror-noerror.c: Add
+ -Wno-error=declaration-missing-parameter-type to build flags.
+ (missing_parameter_type): Expect
+ -Wdeclaration-missing-parameter-type warning.
+ * gcc.dg/permerror-nowarning.c: Build with
+ -Wno-declaration-missing-parameter-type. Remove previously
+ expected warning.
+ * gcc.dg/permerror-fpermissive-nowarning.c: Likewise.
+ * gcc.dg/permerror-pedantic.c (missing_parameter_type):
+ Expect -Wdeclaration-missing-parameter-type error.
+ * gcc.dg/permerror-system.c (missing_parameter_type):
+ Likewise.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ * gcc.dg/permerror-default.c (incompatible_pointer_types):
+ Expect new permerror.
+ * gcc.dg/permerror-gnu89-nopermissive.c
+ (incompatible_pointer_types): Likewise.
+ * gcc.dg/permerror-pedantic.c (incompatible_pointer_types):
+ Likewise.
+ * gcc.dg/permerror-system.c: Likewise.
+ * gcc.dg/Wincompatible-pointer-types-2.c: Compile with
+ -fpermissive due to expected errors.
+ * gcc.dg/Wincompatible-pointer-types-5.c: New test. Copied
+ from gcc.dg/Wincompatible-pointer-types-2.c. Expect errors.
+ * gcc.dg/anon-struct-11.c: Compile with -fpermissive
+ due to expected errors.
+ * gcc.dg/anon-struct-11a.c: New test. Copied from
+ gcc.dg/anon-struct-11.c. Expect errors.
+ * gcc.dg/anon-struct-13.c: Compile with -fpermissive
+ due to expected errors.
+ * gcc.dg/anon-struct-13a.c: New test. Copied from
+ gcc.dg/anon-struct-13.c. Expect errors.
+ * gcc.dg/builtin-arith-overflow-4.c: Compile with -fpermissive
+ due to expected errors.
+ * gcc.dg/builtin-arith-overflow-4a.c: New test. Copied from
+ gcc.dg/builtin-arith-overflow-4.c. Expect errors.
+ * gcc.dg/c23-qual-4.c: Expect -Wincompatible-pointer-types errors.
+ * gcc.dg/dfp/composite-type.c: Compile with -fpermissive
+ due to expected errors.
+ * gcc.dg/dfp/composite-type-2.c: New test. Copied from
+ gcc.dg/dfp/composite-type.c. Expect errors.
+ * gcc.dg/diag-aka-1.c: Compile with -fpermissive
+ due to expected errors.
+ * gcc.dg/diag-aka-1a.c: New test. Copied from
+ gcc.dg/diag-aka-1.c. Expect errors.
+ * gcc.dg/enum-compat-1.c: Compile with -fpermissive
+ due to expected errors.
+ * gcc.dg/enum-compat-2.c: New test. Copied from
+ gcc.dg/enum-compat-1.c. Expect errors.
+ * gcc.dg/func-ptr-conv-1.c: Compile with -fpermissive
+ due to expected errors.
+ * gcc.dg/func-ptr-conv-2.c: New test. Copied from
+ gcc.dg/func-ptr-conv-1.c. Expect errors.
+ * gcc.dg/init-bad-7.c: Compile with -fpermissive
+ due to expected errors.
+ * gcc.dg/init-bad-7a.c: New test. Copied from gcc.dg/init-bad-7.c.
+ Expect errors.
+ * gcc.dg/noncompile/incomplete-3.c (foo): Expect
+ -Wincompatible-pointer-types error.
+ * gcc.dg/param-type-mismatch-2.c (test8): Likewise.
+ * gcc.dg/pointer-array-atomic.c: Compile with -fpermissive
+ due to expected errors.
+ * gcc.dg/pointer-array-atomic-2.c: New test. Copied from
+ gcc.dg/pointer-array-atomic.c. Expect errors.
+ * gcc.dg/pointer-array-quals-1.c (test): Expect
+ -Wincompatible-pointer-types errors.
+ * gcc.dg/transparent-union-1.c: Compile with -fpermissive
+ due to expected errors.
+ * gcc.dg/transparent-union-1a.c: New test. Copied from
+ gcc.dg/transparent-union-1.c. Expect errors.
+ * gcc.target/aarch64/acle/memtag_2a.c
+ (test_memtag_warning_return_qualifier): Expect additional
+ errors.
+ * gcc.target/aarch64/sve/acle/general-c/load_2.c (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_1.c
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_2.c
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_3.c
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_4.c
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_5.c
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_1.c
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_2.c
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_3.c
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_4.c
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/sizeless-1.c (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/sizeless-2.c (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/store_1.c (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/store_2.c (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/store_scatter_index_1.c
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/store_scatter_index_restricted_1.c
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/store_scatter_offset_2.c
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/store_scatter_offset_restricted_1.c
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general/attributes_7.c
+ (f1): Likewise.
+ * gcc.target/i386/sse2-bfloat16-scalar-typecheck.c (footest):
+ Expect -Wincompatible-pointer-types errors.
+ * gcc.target/i386/vect-bfloat16-typecheck_1.c (footest): Likewise.
+ * gcc.target/i386/vect-bfloat16-typecheck_2.c (footest): Likewise.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ * gcc.dg/permerror-default.c (return_mismatch_1)
+ (return_mismatch_2): Expect new permerror.
+ * gcc.dg/permerror-gnu89-nopermissive.c (return_mismatch_1):
+ Likewise.
+ * gcc.dg/permerror-system.c: Likewise.
+ * gcc.dg/20030906-1.c: Compile with -fpermissive due to
+ expected -Wreturn-mismatch error.
+ * gcc.dg/20030906-1a.c: New test. Copied from
+ gcc.dg/20030906-1.c. Expect the error.
+ * gcc.dg/20030906-2.c: Compile with -fpermissive due to
+ expected -Wreturn-mismatch error.
+ * gcc.dg/20030906-2a.c: New test. Copied from
+ gcc.dg/20030906-2.c. Expect the error.
+ * gcc.dg/Wreturn-mismatch-1.c: Compile with -fpermissive due to
+ expected -Wreturn-mismatch error.
+ * gcc.dg/Wreturn-mismatch-1a.c: New test. Copied from
+ gcc.dg/Wreturn-mismatch-1.c. Expect the error.
+ * gcc.dg/Wreturn-mismatch-2.c: Compile with -fpermissive due to
+ expected -Wreturn-mismatch error.
+ * gcc.dg/Wreturn-mismatch-2a.c: New test. Copied from
+ gcc.dg/Wreturn-mismatch-2.c. Expect the error.
+ * gcc.dg/diagnostic-range-bad-return.c: Compile with
+ -fpermissive due to expected -Wreturn-mismatch error.
+ * gcc.dg/diagnostic-range-bad-return-2.c: New test.
+ Copied from gcc.dg/diagnostic-range-bad-return.c. Expect the
+ error.
+ * gcc.dg/pr105635-2.c: Expect -Wreturn-mismatch error.
+ * gcc.dg/pr23075.c: Build with -fpermissive due to
+ expected -Wreturn-mismatch error.
+ * gcc.dg/pr23075-2.c: New test. Copied from gcc.dg/pr23075.c.
+ Expect the error.
+ * gcc.dg/pr29521.c: Compile with -fpermissive due to expected
+ -Wreturn-mismatch error.
+ * gcc.dg/pr29521-a.c: New test. Copied from gcc.dg/pr29521.c.
+ Expect error.
+ * gcc.dg/pr67730.c: Compile with -fpermissive due to expected
+ -Wreturn-mismatch error.
+ * gcc.dg/pr67730-a.c: New test. Copied from
+ gcc.dg/pr67730.c. Expect error.
+ * gcc.target/powerpc/conditional-return.c: Compile with
+ -fpermissive due to expected -Wreturn-mismatch error.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ * gcc.dg/permerror-system.c: Expect all -Wimplicit-int
+ permerrors.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ * gcc.dg/permerror-default.c (implicit_int_1, implicit_int_2)
+ (implicit_int_3, implicit_int_4): Expect new permerror.
+ * gcc.dg/permerror-system.c: Expect a single new permerror.
+ * gcc.dg/Wimplicit-int-1.c: Compile with -fpermissive due to
+ expected warning.
+ * gcc.dg/Wimplicit-int-4.c: Likewise.
+ * gcc.dg/Wimplicit-int-1a.c: New test. Copied from
+ gcc.dg/Wimplicit-int-1.c, but expect errors.
+ * gcc.dg/Wimplicit-int-4a.c: New test. Copied from
+ gcc.dg/Wimplicit-int-4.c, but expect errors.
+ * gcc.dg/gnu23-attr-syntax-2.c: Compile with -fpermissive
+ due to expected implicit-int error.
+ * gcc.dg/gnu23-attr-syntax-3.c: New test. Copied from
+ gcc.dg/gnu23-attr-syntax-2.c, but expect an error.
+ * gcc.dg/pr105635.c: Build with -fpermissive due to implicit
+ int.
+ * gcc.dg/pr105635-2.c: New test. Copied from
+ gcc.dg/pr105635.c. Expect implicit int error.
+ * gcc.dg/noncompile/pr79758.c: Build with -fpermissive due to
+ implicit int.
+ * gcc.dg/noncompile/pr79758-2.c: New test. Copied from
+ gcc.dg/noncompile/pr79758.c. Expect implicit int error.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ * gcc.dg/permerror-default.c (implicit_function_declaration):
+ Expect the new permerror.
+ * gcc.dg/permerror-system.c: Likewise.
+ * c-c++-common/spellcheck-reserved.c (test, test_2): Expect
+ error instead of warning.
+ (f): Expect error instead of warning.
+ * gcc.dg/Wimplicit-function-declaration-c99.c: Compile with
+ -fpermissive due to expected warning.
+ * gcc.dg/Wimplicit-function-declaration-c99-2.c: New test.
+ Copied from gcc.dg/Wimplicit-function-declaration-c99.c.
+ Expect error.
+ * gcc.dg/missing-header-fixit-1.c: Compile with -fpermissive
+ due to expected error.
+ * gcc.dg/missing-header-fixit-1a.c: New test. Copied from
+ gcc.dg/missing-header-fixit-1.c, but expect error.
+ * gcc.dg/missing-header-fixit-2.c: Compile with -fpermissive
+ due to expected error.
+ * gcc.dg/missing-header-fixit-2a.c: New test. Copied from
+ gcc.dg/missing-header-fixit-2.c, but expect error.
+ * gcc.dg/missing-header-fixit-4.c: Compile with -fpermissive
+ due to expected error.
+ * gcc.dg/missing-header-fixit-4a.c: New test. Copied from
+ gcc.dg/missing-header-fixit-4.c, but expect error.
+ * gcc.dg/missing-header-fixit-5.c: Compile with -fpermissive
+ due to expected error.
+ * gcc.dg/missing-header-fixit-5a.c: New test. Copied from
+ gcc.dg/missing-header-fixit-5.c, but expect error.
+ * gcc.dg/pr61852.c: Expect implicit-function-declaration
+ error instead of warning.
+ * gcc.dg/spellcheck-identifiers-2.c: Compile with
+ -fpermissive due to expected warnings.
+ * gcc.dg/spellcheck-identifiers-2a.c: New test. Copied
+ from gcc.dg/spellcheck-identifiers-2.c. Expect errors.
+ * gcc.dg/spellcheck-identifiers-3.c: Compile with
+ -fpermissive due to expected warnings.
+	* gcc.dg/spellcheck-identifiers-3a.c: New test.  Copied
+	from gcc.dg/spellcheck-identifiers-3.c.  Expect errors.
+ * gcc.dg/spellcheck-identifiers-4.c: Compile with
+ -fpermissive due to expected warnings.
+	* gcc.dg/spellcheck-identifiers-4a.c: New test.  Copied
+	from gcc.dg/spellcheck-identifiers-4.c.  Expect error.
+ * gcc.dg/spellcheck-identifiers.c: Compile with
+ -fpermissive due to expected warnings.
+ * gcc.dg/spellcheck-identifiers-1a.c: New test. Copied
+ from gcc.dg/spellcheck-identifiers.c. Expect errors.
+ * gcc.target/aarch64/sve/acle/general-c/ld1sh_gather_1.c (f1):
+ Expect error.
+ * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_index_1.c:
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_index_restricted_1.c:
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_1.c:
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_2.c:
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_3.c:
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_4.c:
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_5.c:
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_1.c:
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_2.c:
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_3.c:
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_4.c:
+ (f1): Likewise.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ * gcc.dg/permerror-default.c (int_conversion_1)
+ (int_conversion_2): Expect the new permerrors.
+ * gcc.dg/permerror-gnu89-nopermissive.c (int_conversion_1)
+ (int_conversion_2): Likewise.
+ * gcc.dg/permerror-system.c: Likewise.
+ * c-c++-common/pr77624-1.c (foo, bar): Expect
+ error instead of warning.
+ * gcc.dg/Wint-conversion-2.c: Compile with -fpermissive due
+ to expected int-conversion warning.
+ * gcc.dg/Wint-conversion-3.c: Likewise.
+ * gcc.dg/Wint-conversion-4.c: New test. Based on
+ gcc.dg/Wint-conversion-3.c. Expect int-conversion errors.
+ * gcc.dg/assign-warn-1.c: Compile with -fpermissive.
+	* gcc.dg/assign-warn-4.c: New file.  Extracted from
+	assign-warn-1.c.  Expect int-conversion errors.
+ * gcc.dg/diagnostic-types-1.c: Compile with -fpermissive.
+ * gcc.dg/diagnostic-types-2.c: New file. Extracted from
+ gcc.dg/diagnostic-types-1.c. Expect some errors instead of
+ warnings.
+ * gcc.dg/gomp/pr35738.c: Compile with -fpermissive due to
+ expected int-conversion error.
+ * gcc.dg/gomp/pr35738-2.c: New test. Based on
+	gcc.dg/gomp/pr35738.c.  Expect int-conversion errors.
+	* gcc.dg/init-excess-3.c: Expect int-conversion errors.
+ * gcc.dg/overflow-warn-1.c: Likewise.
+ * gcc.dg/overflow-warn-3.c: Likewise.
+ * gcc.dg/param-type-mismatch.c: Compile with -fpermissive.
+ * gcc.dg/param-type-mismatch-2.c: New test. Copied from
+ gcc.dg/param-type-mismatch.c. Expect errors.
+ * gcc.dg/pr61162-2.c: Compile with -fpermissive.
+ * gcc.dg/pr61162-3.c: New test. Extracted from
+ gcc.dg/pr61162-2.c. Expect int-conversion errors.
+ * gcc.dg/spec-barrier-3.c: Use -fpermissive due to expected
+ int-conversion error.
+ * gcc.dg/spec-barrier-3a.c: New test. Based on
+ gcc.dg/spec-barrier-3.c. Expect int-conversion errors.
+ * gcc.target/aarch64/acle/memtag_2.c: Use -fpermissive due to expected
+ int-conversion error.
+ * gcc.target/aarch64/acle/memtag_2a.c: New test. Copied from
+ gcc.target/aarch64/acle/memtag_2.c. Expect error.
+ * gcc.target/aarch64/sve/acle/general-c/load_3.c (f1): Expect
+ error.
+ * gcc.target/aarch64/sve/acle/general-c/store_2.c (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/store_scatter_index_1.c
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/store_scatter_index_restricted_1.c
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/store_scatter_offset_2.c
+ (f1): Likewise.
+ * gcc.target/aarch64/sve/acle/general-c/store_scatter_offset_restricted_1.c
+ (f1): Likewise.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ * gcc.dg/permerror-default.c: New test.
+ * gcc.dg/permerror-fpermissive.c: Likewise.
+ * gcc.dg/permerror-fpermissive-nowarning.c: Likewise.
+ * gcc.dg/permerror-gnu89-nopermissive.c: Likewise.
+ No permerrors yet, so this matches gcc.dg/permerror-gnu89.c
+ for now.
+ * gcc.dg/permerror-gnu89-pedantic.c: New test.
+ * gcc.dg/permerror-gnu89.c: Likewise.
+ * gcc.dg/permerror-noerror.c: Likewise.
+ * gcc.dg/permerror-nowarning.c: Likewise.
+ * gcc.dg/permerror-pedantic.c: Likewise.
+ * gcc.dg/permerror-system.c: Likewise.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ * gm2/link/externalscaffold/pass/scaffold.c (m2pim_M2RTS_Terminate):
+ Declare.
+
+2023-12-01 Florian Weimer <fweimer@redhat.com>
+
+ * gcc.target/aarch64/aapcs64/ice_1.c (foo): Call named.
+
+2023-12-01 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112776
+ * gcc.target/riscv/rvv/vsetvl/avl_single-84.c: Adapt test.
+ * gcc.target/riscv/rvv/vsetvl/pr111037-3.c: Ditto.
+ * gcc.target/riscv/rvv/vsetvl/pr112776.c: New test.
+
+2023-11-30 Marek Polacek <polacek@redhat.com>
+
+ PR c++/112744
+ * g++.dg/lookup/scoped11.C: New test.
+ * g++.dg/lookup/scoped12.C: New test.
+ * g++.dg/lookup/scoped13.C: New test.
+ * g++.dg/lookup/scoped14.C: New test.
+ * g++.dg/lookup/scoped15.C: New test.
+
+2023-11-30 Christophe Lyon <christophe.lyon@linaro.org>
+
+ PR target/112698
+ * gcc.target/arm/bfloat16_vector_typecheck_1.c: Update expected
+ error message.
+ * gcc.target/arm/bfloat16_vector_typecheck_2.c: Likewise.
+
+2023-11-30 Thomas Schwinge <thomas@codesourcery.com>
+
+ * gcc.target/gcn/avgpr-mem-double.c: Remove
+ 'dg-skip-if "incompatible ISA" [...]'.
+ * gcc.target/gcn/avgpr-mem-int.c: Likewise.
+ * gcc.target/gcn/avgpr-mem-long.c: Likewise.
+ * gcc.target/gcn/avgpr-mem-short.c: Likewise.
+ * gcc.target/gcn/avgpr-spill-double.c: Likewise.
+ * gcc.target/gcn/avgpr-spill-int.c: Likewise.
+ * gcc.target/gcn/avgpr-spill-long.c: Likewise.
+ * gcc.target/gcn/avgpr-spill-short.c: Likewise.
+
+2023-11-30 Harald Anlauf <anlauf@gmx.de>
+
+ PR fortran/112764
+ * gfortran.dg/associate_62.f90: New test.
+
+2023-11-30 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/112767
+ * gcc.dg/tree-ssa/pr112767.c: New testcase.
+ * gcc.dg/graphite/pr83255.c: Disable SCCP.
+
+2023-11-30 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/112766
+ * g++.dg/torture/uninit-pr112766.C: New testcase.
+
+2023-11-30 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112431
+ * gcc.target/riscv/rvv/base/pr112431-16.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-17.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-18.c: New test.
+
+2023-11-30 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112431
+ * gcc.target/riscv/rvv/base/pr112431-19.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-20.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-21.c: New test.
+
+2023-11-30 Jakub Jelinek <jakub@redhat.com>
+
+ PR c++/110349
+ * g++.dg/cpp26/name-independent-decl1.C: New test.
+ * g++.dg/cpp26/name-independent-decl2.C: New test.
+ * g++.dg/cpp26/name-independent-decl3.C: New test.
+ * g++.dg/cpp26/name-independent-decl4.C: New test.
+ * g++.dg/cpp26/name-independent-decl5.C: New test.
+ * g++.dg/cpp26/name-independent-decl6.C: New test.
+ * g++.dg/cpp26/feat-cxx26.C: Add __cpp_placeholder_variables test.
+
+2023-11-30 liuhongt <hongtao.liu@intel.com>
+
+ * gcc.target/i386/sdotprodint8_emulate.c: New test.
+
+2023-11-30 Alexandre Oliva <oliva@gnu.org>
+
+ Revert:
+ 2023-11-20 Alexandre Oliva <oliva@adacore.com>
+
+ * c-c++-common/analyzer/null-deref-pr108251-smp_fetch_ssl_fc_has_early-O2.c:
+ Expect "unaligned pointer value" warning on short_enums
+ targets, but not in c++.
+ * c-c++-common/analyzer/null-deref-pr108251-smp_fetch_ssl_fc_has_early.c:
+ Likewise.
+
+2023-11-30 Tsukasa OI <research_trasio@irq.a4lg.com>
+
+ * gcc.target/riscv/predef-13.c: Fix 'E' extension version to test.
+
+2023-11-30 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112431
+ * gcc.target/riscv/rvv/base/pr112431-10.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-11.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-12.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-13.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-14.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-15.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-7.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-8.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-9.c: New test.
+
+2023-11-30 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.target/riscv/rvv/base/unop_v_constraint-2.c: Adapt test.
+ * gcc.target/riscv/rvv/base/pr112431-4.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-5.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-6.c: New test.
+
+2023-11-29 Marek Polacek <polacek@redhat.com>
+
+ PR c++/106650
+ * g++.dg/cpp0x/constexpr-array-ptr6.C: Remove dg-error.
+ * g++.dg/cpp0x/constexpr-ref12.C: Likewise.
+ * g++.dg/cpp0x/constexpr-ref2.C: Adjust dg-error.
+ * g++.dg/cpp0x/noexcept34.C: Remove dg-error.
+ * g++.dg/cpp1y/lambda-generic-const10.C: Likewise.
+ * g++.dg/cpp0x/constexpr-ref13.C: New test.
+ * g++.dg/cpp1z/constexpr-ref1.C: New test.
+ * g++.dg/cpp1z/constexpr-ref2.C: New test.
+ * g++.dg/cpp2a/constexpr-ref1.C: New test.
+
+2023-11-29 Patrick Palka <ppalka@redhat.com>
+
+ PR c++/112765
+ * g++.dg/warn/Wparentheses-33.C: Compile with -Wparentheses.
+
+2023-11-29 Patrick Palka <ppalka@redhat.com>
+
+ PR c++/112765
+ * g++.dg/warn/Wparentheses-33.C: New test.
+
+2023-11-29 David Faust <david.faust@oracle.com>
+
+ * gcc.target/bpf/core-builtin-enumvalue-opt.c: Change dg-final
+ scans to not assume a specific comment character.
+ * gcc.target/bpf/core-builtin-enumvalue.c: Likewise.
+ * gcc.target/bpf/core-builtin-type-based.c: Likewise.
+ * gcc.target/bpf/core-builtin-type-id.c: Likewise.
+
+2023-11-29 Andrew MacLeod <amacleod@redhat.com>
+
+ PR tree-optimization/111922
+ * gcc.dg/pr111922.c: New.
+
+2023-11-29 Martin Jambor <mjambor@suse.cz>
+
+ PR tree-optimization/112711
+ PR tree-optimization/112721
+ * g++.dg/tree-ssa/pr112711.C: New test.
+ * gcc.dg/tree-ssa/pr112721.c: Likewise.
+
+2023-11-29 Thomas Schwinge <thomas@codesourcery.com>
+
+ * lib/scanoffload.exp (only_for_offload_target): New 'proc'.
+
+2023-11-29 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ PR testsuite/112729
+ * lib/target-supports.exp (check_effective_target_cfi): New proc.
+ * gcc.target/i386/apx-interrupt-1.c: Require cfi instead of
+ skipping on *-*-darwin*.
+ * gcc.target/i386/apx-push2pop2_force_drap-1.c: Likewise.
+ * gcc.target/i386/apx-push2pop2-1.c: Likewise.
+
+2023-11-29 Thomas Schwinge <thomas@codesourcery.com>
+
+ * g++.dg/cpp26/static_assert1.C: Fix for '-fno-exceptions'
+ configurations.
+
+2023-11-29 Thomas Schwinge <thomas@codesourcery.com>
+
+ * g++.dg/ext/has-feature.C: Adjust for default-'-fno-exceptions',
+ '-fno-rtti' configurations.
+
+2023-11-29 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/112733
+ * gcc.dg/pr112733.c: New test.
+
+2023-11-29 Iain Sandoe <iains.gcc@gmail.com>
+
+ * lib/target-supports.exp: Test an asm line that fails on broken
+ Darwin assembler versions.
+
+2023-11-29 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * g++.dg/opt/devirt2.C: Adjust scan-assembler-count on sparc for
+ removal of -inline from regexp. Update comment.
+
+2023-11-29 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+ kito-cheng <kito.cheng@sifive.com>
+ kito-cheng <kito.cheng@gmail.com>
+
+ PR target/112431
+ * gcc.target/riscv/rvv/base/pr112431-1.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-2.c: New test.
+ * gcc.target/riscv/rvv/base/pr112431-3.c: New test.
+
+2023-11-29 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ PR testsuite/112728
+ * lib/scanasm.exp (dg-scan): Allow for double-quoted LTO section names.
+ (scan-assembler-times): Likewise.
+ (scan-assembler-dem-not): Likewise.
+
+2023-11-29 Jose E. Marchesi <jose.marchesi@oracle.com>
+
+ * gcc.c-torture/compile/libcall-2.c: Remove.
+ * gcc.target/i386/libcall-1.c: Moved from
+ gcc.c-torture/compile/libcall-2.c and adapted to use
+ effective-target for int128_t.
+
+2023-11-29 Jakub Jelinek <jakub@redhat.com>
+
+ PR bootstrap/111601
+ * g++.dg/opt/pr111601.C: New test.
+
+2023-11-29 Xi Ruoyao <xry111@xry111.site>
+
+ * gcc.target/loongarch/vect-frint-scalar.c: New test.
+ * gcc.target/loongarch/vect-frint-scalar-no-inexact.c: New test.
+
+2023-11-29 Xi Ruoyao <xry111@xry111.site>
+
+ * gcc.target/loongarch/vect-rotr.c: New test.
+
+2023-11-29 Xi Ruoyao <xry111@xry111.site>
+
+ * gcc.target/loongarch/vect-muh.c: New test.
+
+2023-11-29 Xi Ruoyao <xry111@xry111.site>
+
+ PR target/112578
+ * gcc.target/loongarch/vect-frint.c: New test.
+ * gcc.target/loongarch/vect-frint-no-inexact.c: New test.
+ * gcc.target/loongarch/vect-ftint.c: New test.
+ * gcc.target/loongarch/vect-ftint-no-inexact.c: New test.
+
+2023-11-29 Alexandre Oliva <oliva@adacore.com>
+
+ * gcc.dg/hardbool-err.c: New.
+ * gcc.dg/hardbool-trap.c: New.
+ * gcc.dg/torture/hardbool.c: New.
+ * gcc.dg/torture/hardbool-s.c: New.
+ * gcc.dg/torture/hardbool-us.c: New.
+ * gcc.dg/torture/hardbool-i.c: New.
+ * gcc.dg/torture/hardbool-ul.c: New.
+ * gcc.dg/torture/hardbool-ll.c: New.
+ * gcc.dg/torture/hardbool-5a.c: New.
+ * gcc.dg/torture/hardbool-s-5a.c: New.
+ * gcc.dg/torture/hardbool-us-5a.c: New.
+ * gcc.dg/torture/hardbool-i-5a.c: New.
+ * gcc.dg/torture/hardbool-ul-5a.c: New.
+ * gcc.dg/torture/hardbool-ll-5a.c: New.
+
+2023-11-29 Alexandre Oliva <oliva@adacore.com>
+
+ * g++.dg/warn/Wuse-after-free3.C: xfail on arm_eabi.
+
+2023-11-29 Alexandre Oliva <oliva@adacore.com>
+
+ * gcc.dg/torture/inline-mem-cmp-1.c: New.
+ * gcc.dg/torture/inline-mem-cpy-1.c: New.
+ * gcc.dg/torture/inline-mem-cpy-cmp-1.c: New.
+ * gcc.dg/torture/inline-mem-move-1.c: New.
+ * gcc.dg/torture/inline-mem-set-1.c: New.
+
+2023-11-29 Pan Li <pan2.li@intel.com>
+
+ PR target/112743
+ * gcc.target/riscv/rvv/base/pr112743-1.c: New test.
+
+2023-11-29 Jose E. Marchesi <jose.marchesi@oracle.com>
+
+ * gcc.c-torture/compile/libcall-2.c: Skip test in -m32.
+
+2023-11-29 Hongyu Wang <hongyu.wang@intel.com>
+
+ PR target/112729
+ * gcc.target/i386/apx-interrupt-1.c: Add -fomit-frame-pointer.
+ * gcc.target/i386/apx-push2pop2-1.c: Likewise.
+ * gcc.target/i386/apx-push2pop2_force_drap-1.c: Likewise.
+
+2023-11-28 Jason Merrill <jason@redhat.com>
+
+ PR c++/94264
+ PR c++/53220
+ * c-c++-common/array-lit.c: Adjust.
+ * g++.dg/cpp1z/array-prvalue1.C: New test.
+ * g++.dg/ext/complit17.C: New test.
+
+2023-11-28 Roger Sayle <roger@nextmovesoftware.com>
+
+ * gcc.target/arc/jli-1.c: Update dg-final whitespace.
+ * gcc.target/arc/jli-2.c: Likewise.
+ * gcc.target/arc/naked-1.c: Likewise.
+ * gcc.target/arc/naked-2.c: Likewise.
+ * gcc.target/arc/tmac-1.c: Likewise.
+ * gcc.target/arc/tmac-2.c: Likewise.
+
+2023-11-28 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/112741
+ * gcc.dg/ubsan/pr112741.c: New testcase.
+
+2023-11-28 Cupertino Miranda <cupertino.miranda@oracle.com>
+
+ Revert:
+ 2023-11-28 Cupertino Miranda <cupertino.miranda@oracle.com>
+
+ * gcc.target/bpf/atomic-cmpxchg-2.c: Adapted.
+ * gcc.target/bpf/atomic-fetch-op-3.c: Adapted.
+ * gcc.target/bpf/atomic-op-3.c: Adapted.
+ * gcc.target/bpf/atomic-xchg-2.c: Adapted.
+ * gcc.target/bpf/diag-sdiv.c: Adapted.
+ * gcc.target/bpf/diag-smod.c: Adapted.
+
+2023-11-28 Andrew Jenner <andrew@codesourcery.com>
+ Tobias Burnus <tobias@codesourcery.com>
+
+ PR fortran/110415
+ * gfortran.dg/pr110415.f90: New test.
+ * gfortran.dg/asan/pr110415-2.f90: New test.
+ * gfortran.dg/asan/pr110415-3.f90: New test.
+
+2023-11-28 Jose E. Marchesi <jose.marchesi@oracle.com>
+
+ PR target/109253
+ * gcc.target/bpf/divmod-libcall-1.c: New test.
+ * gcc.target/bpf/divmod-libcall-2.c: Likewise.
+ * gcc.c-torture/compile/libcall-2.c: Likewise.
+
+2023-11-28 Cupertino Miranda <cupertino.miranda@oracle.com>
+
+ * gcc.dg/debug/btf/btf-enum-small.c: Added test.
+
+2023-11-28 Cupertino Miranda <cupertino.miranda@oracle.com>
+
+ * gcc.target/bpf/atomic-cmpxchg-2.c: Adapted.
+ * gcc.target/bpf/atomic-fetch-op-3.c: Adapted.
+ * gcc.target/bpf/atomic-op-3.c: Adapted.
+ * gcc.target/bpf/atomic-xchg-2.c: Adapted.
+ * gcc.target/bpf/diag-sdiv.c: Adapted.
+ * gcc.target/bpf/diag-smod.c: Adapted.
+
+2023-11-28 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * gcc.dg/pr111409.c: Allow for " before .debug_macro.
+ Quote literals dots.
+
+2023-11-28 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112694
+ * gcc.target/riscv/rvv/autovec/pr112694-2.c: New test.
+ * gcc.target/riscv/rvv/autovec/pr112694-3.c: New test.
+
+2023-11-28 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/111754
+ * gcc.dg/vect/pr111754.c: Use dg-additional-options rather than
+ dg-options, add -Wno-psabi and use -fdump-tree-forwprop1 rather than
+ -fdump-tree-optimized. Scan forwprop1 dump rather than optimized and
+ scan for either direct return or setting of <retval> to the vector.
+
+2023-11-28 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/112719
+ * gcc.dg/bitint-43.c: New test.
+
+2023-11-28 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/112719
+ * gcc.dg/pr112719.c: New file.
+
+2023-11-28 Lewis Hyatt <lhyatt@gmail.com>
+
+ PR preprocessor/112701
+ * gcc.dg/cpp/expr.c: Add additional tests to cover divide by 0 in an
+ unevaluated context, where the unsignedness still matters.
+
+2023-11-28 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ PR target/112713
+ * gcc.target/riscv/rvv/vsetvl/pr112713-1.c: New test.
+ * gcc.target/riscv/rvv/vsetvl/pr112713-2.c: New test.
+
+2023-11-27 Andrew Pinski <quic_apinski@quicinc.com>
+
+ * gcc.target/aarch64/csinc-3.c: New test.
+
+2023-11-27 Andrew Pinski <quic_apinski@quicinc.com>
+
+ PR testsuite/112689
+ * gcc.dg/tree-prof/time-profiler-3.c: Add -fno-ipa-vrp.
+
+2023-11-27 Prathamesh Kulkarni <prathamesh.kulkarni@linaro.org>
+ Richard Sandiford <richard.sandiford@arm.com>
+
+ PR middle-end/111754
+ * gcc.target/aarch64/sve/slp_3.c: Adjust code-gen.
+ * gcc.target/aarch64/sve/slp_4.c: Likewise.
+ * gcc.dg/vect/pr111754.c: New test.
+
+2023-11-27 Szabolcs Nagy <szabolcs.nagy@arm.com>
+
+ * gcc.target/aarch64/aapcs64/func-ret-1.c: Disable branch-protection.
+ * gcc.target/aarch64/aapcs64/func-ret-2.c: Likewise.
+ * gcc.target/aarch64/aapcs64/func-ret-3.c: Likewise.
+ * gcc.target/aarch64/aapcs64/func-ret-4.c: Likewise.
+ * gcc.target/aarch64/aapcs64/func-ret-64x1_1.c: Likewise.
+
+2023-11-27 Szabolcs Nagy <szabolcs.nagy@arm.com>
+
+ * gcc.target/aarch64/eh_return-2.c: New test.
+ * gcc.target/aarch64/eh_return-3.c: New test.
+
+2023-11-27 Szabolcs Nagy <szabolcs.nagy@arm.com>
+
+ * gcc.target/aarch64/return_address_sign_1.c: Move func4 to ...
+ * gcc.target/aarch64/return_address_sign_2.c: ... here and fix the
+ scan asm check.
+ * gcc.target/aarch64/return_address_sign_b_1.c: Move func4 to ...
+ * gcc.target/aarch64/return_address_sign_b_2.c: ... here and fix the
+ scan asm check.
+
+2023-11-27 Richard Sandiford <richard.sandiford@arm.com>
+
+ PR target/106326
+ * gcc.target/aarch64/sve/acle/general/pr106326_1.c: New test.
+
+2023-11-27 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/112653
+ * gcc.dg/tree-ssa/pta-return-1.c: New testcase.
+
+2023-11-27 Richard Biener <rguenther@suse.de>
+ Richard Sandiford <richard.sandiford@arm.com>
+
+ * g++.dg/vect/pr36648.cc: Remove XFAIL for VLA load-lanes.
+
+2023-11-27 Alex Coplan <alex.coplan@arm.com>
+ Iain Sandoe <iain@sandoe.co.uk>
+
+ PR c++/60512
+ * c-c++-common/has-feature-common.c: New test.
+ * c-c++-common/has-feature-pedantic.c: New test.
+ * g++.dg/ext/has-feature.C: New test.
+ * gcc.dg/asan/has-feature-asan.c: New test.
+ * gcc.dg/has-feature.c: New test.
+ * gcc.dg/ubsan/has-feature-ubsan.c: New test.
+ * obj-c++.dg/has-feature.mm: New test.
+ * objc.dg/has-feature.m: New test.
+
+2023-11-27 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/112706
+ * gcc.dg/tree-ssa/pr112706.c: New testcase.
+
+2023-11-27 Stefan Schulze Frielinghaus <stefansf@linux.ibm.com>
+
+ * gcc.target/s390/zvector/vec-nnpa-fp16-convert.c: Replace V8HI
+ types with UV8HI.
+ * gcc.target/s390/zvector/vec-nnpa-fp32-convert-1.c: Dito.
+ * gcc.target/s390/zvector/vec_convert_from_fp16.c: Dito.
+ * gcc.target/s390/zvector/vec_convert_to_fp16.c: Dito.
+ * gcc.target/s390/zvector/vec_extend_to_fp32_hi.c: Dito.
+ * gcc.target/s390/zvector/vec_extend_to_fp32_lo.c: Dito.
+ * gcc.target/s390/zvector/vec_round_from_fp32.c: Dito.
+
+2023-11-27 Juzhe-Zhong <juzhe.zhong@rivai.ai>
+
+ * gcc.target/riscv/rvv/autovec/gather-scatter/mask_gather_load_32-1.c: Adapt test.
+ * gcc.target/riscv/rvv/autovec/gather-scatter/mask_gather_load_32-10.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/gather-scatter/mask_gather_load_32-11.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/gather-scatter/mask_gather_load_32-2.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/gather-scatter/mask_gather_load_32-3.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/gather-scatter/mask_gather_load_32-4.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/gather-scatter/mask_gather_load_32-5.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/gather-scatter/mask_gather_load_32-6.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/gather-scatter/mask_gather_load_32-7.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/gather-scatter/mask_gather_load_32-8.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/gather-scatter/mask_gather_load_32-9.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/gather-scatter/mask_scatter_store_32-1.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/gather-scatter/mask_scatter_store_32-10.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/gather-scatter/mask_scatter_store_32-2.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/gather-scatter/mask_scatter_store_32-3.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/gather-scatter/mask_scatter_store_32-4.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/gather-scatter/mask_scatter_store_32-5.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/gather-scatter/mask_scatter_store_32-6.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/gather-scatter/mask_scatter_store_32-7.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/gather-scatter/mask_scatter_store_32-8.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/gather-scatter/mask_scatter_store_32-9.c: Ditto.
+
+2023-11-27 Tsukasa OI <research_trasio@irq.a4lg.com>
+
+ * gcc.target/riscv/predef-1.c: Test for __riscv_64e.
+ * gcc.target/riscv/predef-2.c: Ditto.
+ * gcc.target/riscv/predef-3.c: Ditto.
+ * gcc.target/riscv/predef-4.c: Ditto.
+ * gcc.target/riscv/predef-5.c: Ditto.
+ * gcc.target/riscv/predef-6.c: Ditto.
+ * gcc.target/riscv/predef-7.c: Ditto.
+ * gcc.target/riscv/predef-8.c: Ditto.
+ * gcc.target/riscv/predef-9.c: New test for RV64E and LP64E,
+ based on predef-7.c.
+
+2023-11-27 Jose E. Marchesi <jose.marchesi@oracle.com>
+
+ * gcc.target/bpf/helper-bind.c: Do not include bpf-helpers.h.
+ * gcc.target/bpf/helper-skb-ancestor-cgroup-id.c: Likewise, and
+ renamed from skb-ancestor-cgroup-id.c.
+ * gcc.target/bpf/helper-bpf-redirect.c: Remove.
+ * gcc.target/bpf/helper-clone-redirect.c: Likewise.
+ * gcc.target/bpf/helper-csum-diff.c: Likewise.
+ * gcc.target/bpf/helper-csum-update.c: Likewise.
+ * gcc.target/bpf/helper-current-task-under-cgroup.c: Likewise.
+ * gcc.target/bpf/helper-fib-lookup.c: Likewise.
+ * gcc.target/bpf/helper-get-cgroup-classid.c: Likewise.
+ * gcc.target/bpf/helper-get-current-cgroup-id.c: Likewise.
+ * gcc.target/bpf/helper-get-current-comm.c: Likewise.
+ * gcc.target/bpf/helper-get-current-pid-tgid.c: Likewise.
+ * gcc.target/bpf/helper-get-current-task.c: Likewise.
+ * gcc.target/bpf/helper-get-current-uid-gid.c: Likewise.
+ * gcc.target/bpf/helper-get-hash-recalc.c: Likewise.
+ * gcc.target/bpf/helper-get-listener-sock.c: Likewise.
+ * gcc.target/bpf/helper-get-local-storage.c: Likewise.
+ * gcc.target/bpf/helper-get-numa-node-id.c: Likewise.
+ * gcc.target/bpf/helper-get-prandom-u32.c: Likewise.
+ * gcc.target/bpf/helper-get-route-realm.c: Likewise.
+ * gcc.target/bpf/helper-get-smp-processor-id.c: Likewise.
+ * gcc.target/bpf/helper-get-socket-cookie.c: Likewise.
+ * gcc.target/bpf/helper-get-socket-uid.c: Likewise.
+ * gcc.target/bpf/helper-get-stack.c: Likewise.
+ * gcc.target/bpf/helper-get-stackid.c: Likewise.
+ * gcc.target/bpf/helper-getsockopt.c: Likewise.
+ * gcc.target/bpf/helper-ktime-get-ns.c: Likewise.
+ * gcc.target/bpf/helper-l3-csum-replace.c: Likewise.
+ * gcc.target/bpf/helper-l4-csum-replace.c: Likewise.
+ * gcc.target/bpf/helper-lwt-push-encap.c: Likewise.
+ * gcc.target/bpf/helper-lwt-seg6-action.c: Likewise.
+ * gcc.target/bpf/helper-lwt-seg6-adjust-srh.c: Likewise.
+ * gcc.target/bpf/helper-lwt-seg6-store-bytes.c: Likewise.
+ * gcc.target/bpf/helper-map-delete-elem.c: Likewise.
+ * gcc.target/bpf/helper-map-lookup-elem.c: Likewise.
+ * gcc.target/bpf/helper-map-peek-elem.c: Likewise.
+ * gcc.target/bpf/helper-map-pop-elem.c: Likewise.
+ * gcc.target/bpf/helper-map-push-elem.c: Likewise.
+ * gcc.target/bpf/helper-map-update-elem.c: Likewise.
+ * gcc.target/bpf/helper-msg-apply-bytes.c: Likewise.
+ * gcc.target/bpf/helper-msg-cork-bytes.c: Likewise.
+ * gcc.target/bpf/helper-msg-pop-data.c: Likewise.
+ * gcc.target/bpf/helper-msg-pull-data.c: Likewise.
+ * gcc.target/bpf/helper-msg-push-data.c: Likewise.
+ * gcc.target/bpf/helper-msg-redirect-hash.c: Likewise.
+ * gcc.target/bpf/helper-msg-redirect-map.c: Likewise.
+ * gcc.target/bpf/helper-override-return.c: Likewise.
+ * gcc.target/bpf/helper-perf-event-output.c: Likewise.
+ * gcc.target/bpf/helper-perf-event-read-value.c: Likewise.
+ * gcc.target/bpf/helper-perf-event-read.c: Likewise.
+ * gcc.target/bpf/helper-perf-prog-read-value.c: Likewise.
+ * gcc.target/bpf/helper-probe-read-str.c: Likewise.
+ * gcc.target/bpf/helper-probe-read.c: Likewise.
+ * gcc.target/bpf/helper-probe-write-user.c: Likewise.
+ * gcc.target/bpf/helper-rc-keydown.c: Likewise.
+ * gcc.target/bpf/helper-rc-pointer-rel.c: Likewise.
+ * gcc.target/bpf/helper-rc-repeat.c: Likewise.
+ * gcc.target/bpf/helper-redirect-map.c: Likewise.
+ * gcc.target/bpf/helper-set-hash-invalid.c: Likewise.
+ * gcc.target/bpf/helper-set-hash.c: Likewise.
+ * gcc.target/bpf/helper-setsockopt.c: Likewise.
+ * gcc.target/bpf/helper-sk-fullsock.c: Likewise.
+ * gcc.target/bpf/helper-sk-lookup-tcp.c: Likewise.
+ * gcc.target/bpf/helper-sk-lookup-upd.c: Likewise.
+ * gcc.target/bpf/helper-sk-redirect-hash.c: Likewise.
+ * gcc.target/bpf/helper-sk-redirect-map.c: Likewise.
+ * gcc.target/bpf/helper-sk-release.c: Likewise.
+ * gcc.target/bpf/helper-sk-select-reuseport.c: Likewise.
+ * gcc.target/bpf/helper-sk-storage-delete.c: Likewise.
+ * gcc.target/bpf/helper-sk-storage-get.c: Likewise.
+ * gcc.target/bpf/helper-skb-adjust-room.c: Likewise.
+ * gcc.target/bpf/helper-skb-cgroup-id.c: Likewise.
+ * gcc.target/bpf/helper-skb-change-head.c: Likewise.
+ * gcc.target/bpf/helper-skb-change-proto.c: Likewise.
+ * gcc.target/bpf/helper-skb-change-tail.c: Likewise.
+ * gcc.target/bpf/helper-skb-change-type.c: Likewise.
+ * gcc.target/bpf/helper-skb-ecn-set-ce.c: Likewise.
+ * gcc.target/bpf/helper-skb-get-tunnel-key.c: Likewise.
+ * gcc.target/bpf/helper-skb-get-tunnel-opt.c: Likewise.
+ * gcc.target/bpf/helper-skb-get-xfrm-state.c: Likewise.
+ * gcc.target/bpf/helper-skb-load-bytes-relative.c: Likewise.
+ * gcc.target/bpf/helper-skb-load-bytes.c: Likewise.
+ * gcc.target/bpf/helper-skb-pull-data.c: Likewise.
+ * gcc.target/bpf/helper-skb-set-tunnel-key.c: Likewise.
+ * gcc.target/bpf/helper-skb-set-tunnel-opt.c: Likewise.
+ * gcc.target/bpf/helper-skb-store-bytes.c: Likewise.
+ * gcc.target/bpf/helper-skb-under-cgroup.c: Likewise.
+ * gcc.target/bpf/helper-skb-vlan-pop.c: Likewise.
+ * gcc.target/bpf/helper-skb-vlan-push.c: Likewise.
+ * gcc.target/bpf/helper-skc-lookup-tcp.c: Likewise.
+ * gcc.target/bpf/helper-sock-hash-update.c: Likewise.
+ * gcc.target/bpf/helper-sock-map-update.c: Likewise.
+ * gcc.target/bpf/helper-sock-ops-cb-flags-set.c: Likewise.
+ * gcc.target/bpf/helper-spin-lock.c: Likewise.
+ * gcc.target/bpf/helper-spin-unlock.c: Likewise.
+ * gcc.target/bpf/helper-strtol.c: Likewise.
+ * gcc.target/bpf/helper-strtoul.c: Likewise.
+ * gcc.target/bpf/helper-sysctl-get-current-value.c: Likewise.
+ * gcc.target/bpf/helper-sysctl-get-name.c: Likewise.
+ * gcc.target/bpf/helper-sysctl-get-new-value.c: Likewise.
+ * gcc.target/bpf/helper-sysctl-set-new-value.c: Likewise.
+ * gcc.target/bpf/helper-tail-call.c: Likewise.
+ * gcc.target/bpf/helper-tcp-check-syncookie.c: Likewise.
+ * gcc.target/bpf/helper-tcp-sock.c: Likewise.
+ * gcc.target/bpf/helper-trace-printk.c: Likewise.
+ * gcc.target/bpf/helper-xdp-adjust-head.c: Likewise.
+ * gcc.target/bpf/helper-xdp-adjust-meta.c: Likewise.
+ * gcc.target/bpf/helper-xdp-adjust-tail.c: Likewise.
+ * gcc.target/bpf/skb-ancestor-cgroup-id.c: Likewise.
+
+2023-11-27 Guo Jie <guojie@loongson.cn>
+
+ * gcc.target/loongarch/imm-load1.c: Change old check.
+
2023-11-26 Hans-Peter Nilsson <hp@axis.com>
* gcc.dg/uninit-pred-9_b.c: Remove xfail for line 20. Pass
diff --git a/gcc/testsuite/c-c++-common/analyzer/null-deref-pr108251-smp_fetch_ssl_fc_has_early-O2.c b/gcc/testsuite/c-c++-common/analyzer/null-deref-pr108251-smp_fetch_ssl_fc_has_early-O2.c
index aaa2031..c46ffe9 100644
--- a/gcc/testsuite/c-c++-common/analyzer/null-deref-pr108251-smp_fetch_ssl_fc_has_early-O2.c
+++ b/gcc/testsuite/c-c++-common/analyzer/null-deref-pr108251-smp_fetch_ssl_fc_has_early-O2.c
@@ -61,7 +61,7 @@ static inline enum obj_type obj_type(const enum obj_type *t)
}
static inline struct connection *__objt_conn(enum obj_type *t)
{
- return ((struct connection *)(((char *)(t)) - ((long)&((struct connection *)0)->obj_type))); /* { dg-warning "unaligned pointer value" "warning" { target { short_enums && { ! c++ } } } } */
+ return ((struct connection *)(((char *)(t)) - ((long)&((struct connection *)0)->obj_type)));
}
static inline struct connection *objt_conn(enum obj_type *t)
{
diff --git a/gcc/testsuite/c-c++-common/analyzer/null-deref-pr108251-smp_fetch_ssl_fc_has_early.c b/gcc/testsuite/c-c++-common/analyzer/null-deref-pr108251-smp_fetch_ssl_fc_has_early.c
index 6c96f5a..ef34a76 100644
--- a/gcc/testsuite/c-c++-common/analyzer/null-deref-pr108251-smp_fetch_ssl_fc_has_early.c
+++ b/gcc/testsuite/c-c++-common/analyzer/null-deref-pr108251-smp_fetch_ssl_fc_has_early.c
@@ -60,7 +60,7 @@ static inline enum obj_type obj_type(const enum obj_type *t)
}
static inline struct connection *__objt_conn(enum obj_type *t)
{
- return ((struct connection *)(((char *)(t)) - ((long)&((struct connection *)0)->obj_type))); /* { dg-warning "unaligned pointer value" "warning" { target { short_enums && { ! c++ } } } } */
+ return ((struct connection *)(((char *)(t)) - ((long)&((struct connection *)0)->obj_type)));
}
static inline struct connection *objt_conn(enum obj_type *t)
{
diff --git a/gcc/testsuite/c-c++-common/array-lit.c b/gcc/testsuite/c-c++-common/array-lit.c
index 6505c20..a6b3adf 100644
--- a/gcc/testsuite/c-c++-common/array-lit.c
+++ b/gcc/testsuite/c-c++-common/array-lit.c
@@ -1,10 +1,11 @@
/* { dg-options "-std=c99 -Wc++-compat -Werror" { target c } } */
+/* { dg-options "-Werror=dangling-pointer=1" { target c++ } } */
/* { dg-prune-output "treated as errors" } */
#include <stdio.h>
int main()
{
- for (int *p = (int[]){ 1, 2, 3, 0 }; /* { dg-error "array" } */
+ for (int *p = (int[]){ 1, 2, 3, 0 }; /* { dg-error "array|temporary" } */
*p; ++p) {
printf("%d\n", *p);
}
diff --git a/gcc/testsuite/c-c++-common/fhardened-1.c b/gcc/testsuite/c-c++-common/fhardened-1.c
index 7e67406..23478be 100644
--- a/gcc/testsuite/c-c++-common/fhardened-1.c
+++ b/gcc/testsuite/c-c++-common/fhardened-1.c
@@ -1,7 +1,7 @@
/* { dg-do compile { target *-*-linux* *-*-gnu* } } */
/* { dg-options "-fhardened -O" } */
-#ifndef __SSP_STRONG__
+#if !defined(__SSP_STRONG__) && !defined(__hppa__)
# error "-fstack-protector-strong not enabled"
#endif
diff --git a/gcc/testsuite/c-c++-common/fhardened-2.c b/gcc/testsuite/c-c++-common/fhardened-2.c
index 280ff96..6ac66f9 100644
--- a/gcc/testsuite/c-c++-common/fhardened-2.c
+++ b/gcc/testsuite/c-c++-common/fhardened-2.c
@@ -4,7 +4,7 @@
#ifdef __SSP_STRONG__
# error "-fstack-protector-strong enabled when it should not be"
#endif
-#ifndef __SSP__
+#if !defined(__SSP__) && !defined(__hppa__)
# error "-fstack-protector not enabled"
#endif
diff --git a/gcc/testsuite/c-c++-common/pr77624-1.c b/gcc/testsuite/c-c++-common/pr77624-1.c
index 3567e9b..e25469e 100644
--- a/gcc/testsuite/c-c++-common/pr77624-1.c
+++ b/gcc/testsuite/c-c++-common/pr77624-1.c
@@ -4,11 +4,11 @@
int
foo (int a)
{
- return __atomic_is_lock_free (2, a); /* { dg-warning "pointer from integer" "" { target c } } */
+ return __atomic_is_lock_free (2, a); /* { dg-error "pointer from integer" "" { target c } } */
} /* { dg-error "invalid conversion" "" { target c++ } .-1 } */
int
bar (int a)
{
- return __atomic_always_lock_free (2, a); /* { dg-warning "pointer from integer" "" { target c } } */
+ return __atomic_always_lock_free (2, a); /* { dg-error "pointer from integer" "" { target c } } */
} /* { dg-error "invalid conversion" "" { target c++ } .-1 } */
diff --git a/gcc/testsuite/c-c++-common/spellcheck-reserved.c b/gcc/testsuite/c-c++-common/spellcheck-reserved.c
index 56e59dc..0be35c56 100644
--- a/gcc/testsuite/c-c++-common/spellcheck-reserved.c
+++ b/gcc/testsuite/c-c++-common/spellcheck-reserved.c
@@ -29,7 +29,7 @@ SOME_MACRO foo; /* { dg-bogus "__SOME_MACRO" } */
void test (const char *buf, char ch)
{
__builtin_strtchr (buf, ch); /* { dg-line misspelled_reserved } */
- /* { dg-warning "did you mean '__builtin_strchr'" "" { target c } misspelled_reserved } */
+ /* { dg-error "did you mean '__builtin_strchr'" "" { target c } misspelled_reserved } */
/* { dg-error "'__builtin_strtchr' was not declared in this scope; did you mean '__builtin_strrchr'\\?" "" { target c++ } misspelled_reserved } */
}
@@ -38,7 +38,7 @@ void test (const char *buf, char ch)
void test_2 (const char *buf, char ch)
{
_builtin_strchr (buf, ch); /* { dg-line misspelled_one_underscore } */
- /* { dg-warning "did you mean '__builtin_strchr'" "" { target c } misspelled_one_underscore } */
+ /* { dg-error "did you mean '__builtin_strchr'" "" { target c } misspelled_one_underscore } */
/* { dg-error "'_builtin_strchr' was not declared in this scope; did you mean '__builtin_strchr'\\?" "" { target c++ } misspelled_one_underscore } */
}
diff --git a/gcc/testsuite/c-c++-common/strub-O0.c b/gcc/testsuite/c-c++-common/strub-O0.c
new file mode 100644
index 0000000..c7a79a6
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-O0.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-O0 -fstrub=strict -fdump-rtl-expand" } */
+
+/* At -O0, none of the strub builtins are expanded inline. */
+
+int __attribute__ ((__strub__)) var;
+
+int f() {
+ return var;
+}
+
+/* { dg-final { scan-rtl-dump "strub_enter" "expand" } } */
+/* { dg-final { scan-rtl-dump "strub_update" "expand" } } */
+/* { dg-final { scan-rtl-dump "strub_leave" "expand" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-O1.c b/gcc/testsuite/c-c++-common/strub-O1.c
new file mode 100644
index 0000000..96285c9
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-O1.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-O1 -fstrub=strict -fdump-rtl-expand" } */
+
+/* At -O1, without -fno-inline, we fully expand enter, but neither update nor
+ leave. */
+
+int __attribute__ ((__strub__)) var;
+
+int f() {
+ return var;
+}
+
+/* { dg-final { scan-rtl-dump-not "strub_enter" "expand" } } */
+/* { dg-final { scan-rtl-dump "strub_update" "expand" } } */
+/* { dg-final { scan-rtl-dump "strub_leave" "expand" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-O2.c b/gcc/testsuite/c-c++-common/strub-O2.c
new file mode 100644
index 0000000..8edc0d8
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-O2.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fstrub=strict -fdump-rtl-expand" } */
+
+/* At -O2, without -fno-inline, we fully expand enter and update, and add a test
+ around the leave call. */
+
+int __attribute__ ((__strub__)) var;
+
+int f() {
+ return var;
+}
+
+/* { dg-final { scan-rtl-dump-not "strub_enter" "expand" } } */
+/* { dg-final { scan-rtl-dump-not "strub_update" "expand" } } */
+/* { dg-final { scan-rtl-dump "strub_leave" "expand" } } */
+/* { dg-final { scan-rtl-dump "\[(\]call\[^\n\]*strub_leave.*\n\[(\]code_label" "expand" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-O2fni.c b/gcc/testsuite/c-c++-common/strub-O2fni.c
new file mode 100644
index 0000000..c6d900c
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-O2fni.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fstrub=strict -fdump-rtl-expand -fno-inline" } */
+
+/* With -fno-inline, none of the strub builtins are inlined. */
+
+int __attribute__ ((__strub__)) var;
+
+int f() {
+ return var;
+}
+
+/* { dg-final { scan-rtl-dump "strub_enter" "expand" } } */
+/* { dg-final { scan-rtl-dump "strub_update" "expand" } } */
+/* { dg-final { scan-rtl-dump "strub_leave" "expand" } } */
+/* { dg-final { scan-rtl-dump-not "\[(\]call\[^\n\]*strub_leave.*\n\[(\]code_label" "expand" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-O3.c b/gcc/testsuite/c-c++-common/strub-O3.c
new file mode 100644
index 0000000..33ee465
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-O3.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-O3 -fstrub=strict -fdump-rtl-expand" } */
+
+int __attribute__ ((__strub__)) var;
+
+int f() {
+ return var;
+}
+
+/* { dg-final { scan-rtl-dump-not "strub_enter" "expand" } } */
+/* { dg-final { scan-rtl-dump-not "strub_update" "expand" } } */
+/* { dg-final { scan-rtl-dump-not "strub_leave" "expand" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-O3fni.c b/gcc/testsuite/c-c++-common/strub-O3fni.c
new file mode 100644
index 0000000..2936f82
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-O3fni.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-O3 -fstrub=strict -fdump-rtl-expand -fno-inline" } */
+
+/* With -fno-inline, none of the strub builtins are inlined. */
+
+int __attribute__ ((__strub__)) var;
+
+int f() {
+ return var;
+}
+
+/* { dg-final { scan-rtl-dump "strub_enter" "expand" } } */
+/* { dg-final { scan-rtl-dump "strub_update" "expand" } } */
+/* { dg-final { scan-rtl-dump "strub_leave" "expand" } } */
+/* { dg-final { scan-rtl-dump-not "\[(\]call\[^\n\]*strub_leave.*\n\[(\]code_label" "expand" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-Og.c b/gcc/testsuite/c-c++-common/strub-Og.c
new file mode 100644
index 0000000..479746e
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-Og.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-Og -fstrub=strict -fdump-rtl-expand" } */
+
+/* At -Og, without -fno-inline, we fully expand enter, but neither update nor
+ leave. */
+
+int __attribute__ ((__strub__)) var;
+
+int f() {
+ return var;
+}
+
+/* { dg-final { scan-rtl-dump-not "strub_enter" "expand" } } */
+/* { dg-final { scan-rtl-dump "strub_update" "expand" } } */
+/* { dg-final { scan-rtl-dump "strub_leave" "expand" } } */
+/* { dg-final { scan-rtl-dump-not "\[(\]call\[^\n\]*strub_leave.*\n\[(\]code_label" "expand" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-Os.c b/gcc/testsuite/c-c++-common/strub-Os.c
new file mode 100644
index 0000000..2241d4e
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-Os.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-options "-Os -fstrub=strict -fdump-rtl-expand" } */
+
+/* At -Os, without -fno-inline, we fully expand enter, and also update. The
+ expanded update might be larger than a call proper, but argument saving and
+ restoring required by the call will most often make it larger. The leave
+ call is left untouched. */
+
+int __attribute__ ((__strub__)) var;
+
+int f() {
+ return var;
+}
+
+/* { dg-final { scan-rtl-dump-not "strub_enter" "expand" } } */
+/* { dg-final { scan-rtl-dump-not "strub_update" "expand" } } */
+/* { dg-final { scan-rtl-dump "strub_leave" "expand" } } */
+/* { dg-final { scan-rtl-dump-not "\[(\]call\[^\n\]*strub_leave.*\n\[(\]code_label" "expand" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-all1.c b/gcc/testsuite/c-c++-common/strub-all1.c
new file mode 100644
index 0000000..a322bcc
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-all1.c
@@ -0,0 +1,32 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=all -fdump-ipa-strubm -fdump-ipa-strub" } */
+
+/* h becomes STRUB_CALLABLE, rather than STRUB_INLINABLE, because of the
+ strub-enabling -fstrub flag, and gets inlined before pass_ipa_strub. */
+static inline void
+__attribute__ ((__always_inline__))
+h() {
+}
+
+/* g becomes STRUB_AT_CALLS, because of the flag. */
+static inline void
+g() {
+ h();
+}
+
+/* f becomes STRUB_INTERNAL because of the flag, and gets split into
+ STRUB_WRAPPER and STRUB_WRAPPED. */
+void
+f() {
+ g();
+}
+
+/* { dg-final { scan-ipa-dump-times "strub \[(\]" 3 "strubm" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]callable\[)\]" 1 "strubm" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]at-calls\[)\]" 1 "strubm" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]internal\[)\]" 1 "strubm" } } */
+
+/* { dg-final { scan-ipa-dump-times "strub \[(\]" 3 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]at-calls\[)\]" 1 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]wrapped\[)\]" 1 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]wrapper\[)\]" 1 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-all2.c b/gcc/testsuite/c-c++-common/strub-all2.c
new file mode 100644
index 0000000..db60026
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-all2.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=all -fdump-ipa-strubm -fdump-ipa-strub" } */
+
+/* g becomes STRUB_INTERNAL, because of the flag. Without inline, force_output
+ is set for static non-inline functions when not optimizing, and that keeps
+ only_called_directly_p from returning true, which makes STRUB_AT_CALLS
+ non-viable. */
+static void
+g() {
+}
+
+/* f becomes STRUB_INTERNAL because of the flag, and gets split into
+ STRUB_WRAPPER and STRUB_WRAPPED. */
+void
+f() {
+ g();
+}
+
+/* { dg-final { scan-ipa-dump-times "strub \[(\]" 2 "strubm" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]internal\[)\]" 2 "strubm" } } */
+
+/* { dg-final { scan-ipa-dump-times "strub \[(\]" 4 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]wrapped\[)\]" 2 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]wrapper\[)\]" 2 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-apply1.c b/gcc/testsuite/c-c++-common/strub-apply1.c
new file mode 100644
index 0000000..2f462ad
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-apply1.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict" } */
+
+void __attribute__ ((__strub__ ("callable")))
+apply_function (void *args)
+{
+ __builtin_apply (0, args, 0);
+}
+
+void __attribute__ ((__strub__ ("internal")))
+apply_args (int i, int j, double d)
+{
+ void *args = __builtin_apply_args ();
+ apply_function (args);
+}
diff --git a/gcc/testsuite/c-c++-common/strub-apply2.c b/gcc/testsuite/c-c++-common/strub-apply2.c
new file mode 100644
index 0000000..a5d7551
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-apply2.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict" } */
+
+extern void __attribute__ ((__strub__))
+apply_function (void *args);
+
+void __attribute__ ((__strub__))
+apply_args (int i, int j, double d) /* { dg-error "selected" } */
+{
+ void *args = __builtin_apply_args (); /* { dg-message "does not support" } */
+ apply_function (args);
+}
diff --git a/gcc/testsuite/c-c++-common/strub-apply3.c b/gcc/testsuite/c-c++-common/strub-apply3.c
new file mode 100644
index 0000000..64422a0
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-apply3.c
@@ -0,0 +1,8 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict" } */
+
+void __attribute__ ((__strub__))
+apply_function (void *args)
+{
+ __builtin_apply (0, args, 0); /* { dg-error "in .strub. context" } */
+}
diff --git a/gcc/testsuite/c-c++-common/strub-apply4.c b/gcc/testsuite/c-c++-common/strub-apply4.c
new file mode 100644
index 0000000..15ffaa0
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-apply4.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fstrub=strict -fdump-ipa-strubm" } */
+
+/* Check that implicit enabling of strub mode selects internal strub when the
+ function uses __builtin_apply_args, that prevents the optimization to
+ at-calls mode. */
+
+int __attribute__ ((__strub__)) var;
+
+static inline void
+apply_args (int i, int j, double d)
+{
+ var++;
+ __builtin_apply_args ();
+}
+
+void f() {
+ apply_args (1, 2, 3);
+}
+
+/* { dg-final { scan-ipa-dump-times "strub \[(\]internal\[)\]" 1 "strubm" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-at-calls1.c b/gcc/testsuite/c-c++-common/strub-at-calls1.c
new file mode 100644
index 0000000..b70843b
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-at-calls1.c
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=at-calls -fdump-ipa-strubm -fdump-ipa-strub" } */
+
+/* h becomes STRUB_CALLABLE, rather than STRUB_INLINABLE, because of the
+ strub-enabling -fstrub flag, and gets inlined before pass_ipa_strub. */
+static inline void
+__attribute__ ((__always_inline__))
+h() {
+}
+
+/* g becomes STRUB_AT_CALLS, because of the flag. */
+static inline void
+g() {
+ h();
+}
+
+/* f does NOT become STRUB_AT_CALLS because it is visible; it becomes
+ STRUB_CALLABLE. */
+void
+f() {
+ g();
+}
+
+/* { dg-final { scan-ipa-dump-times "strub \[(\]" 3 "strubm" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]at-calls\[)\]" 1 "strubm" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]callable\[)\]" 2 "strubm" } } */
+
+/* { dg-final { scan-ipa-dump-times "strub \[(\]" 2 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]at-calls\[)\]" 1 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]callable\[)\]" 1 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-at-calls2.c b/gcc/testsuite/c-c++-common/strub-at-calls2.c
new file mode 100644
index 0000000..97a3988
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-at-calls2.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=at-calls -fdump-ipa-strubm -fdump-ipa-strub" } */
+
+/* g does NOT become STRUB_AT_CALLS because it's not viable. Without inline,
+ force_output is set for static non-inline functions when not optimizing, and
+ that keeps only_called_directly_p from returning true, which makes
+ STRUB_AT_CALLS non-viable. It becomes STRUB_CALLABLE instead. */
+static void
+g() {
+}
+
+/* f does NOT become STRUB_AT_CALLS because it is visible; it becomes
+ STRUB_CALLABLE. */
+void
+f() {
+ g();
+}
+
+/* { dg-final { scan-ipa-dump-times "strub \[(\]" 2 "strubm" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]callable\[)\]" 2 "strubm" } } */
+
+/* { dg-final { scan-ipa-dump-times "strub \[(\]" 2 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]callable\[)\]" 2 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-defer-O1.c b/gcc/testsuite/c-c++-common/strub-defer-O1.c
new file mode 100644
index 0000000..3d73431
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-defer-O1.c
@@ -0,0 +1,7 @@
+/* { dg-do run } */
+/* { dg-options "-fstrub=strict -O1" } */
+
+/* Check that a strub function called by another strub function does NOT defer
+ the strubbing to its caller at -O1. */
+
+#include "strub-defer-O2.c"
diff --git a/gcc/testsuite/c-c++-common/strub-defer-O2.c b/gcc/testsuite/c-c++-common/strub-defer-O2.c
new file mode 100644
index 0000000..fddf3c7
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-defer-O2.c
@@ -0,0 +1,8 @@
+/* { dg-do run } */
+/* { dg-options "-fstrub=strict -O2" } */
+
+/* Check that a strub function called by another strub function does NOT defer
+ the strubbing to its caller at -O2. */
+
+#define EXPECT_DEFERRAL !
+#include "strub-defer-O3.c"
diff --git a/gcc/testsuite/c-c++-common/strub-defer-O3.c b/gcc/testsuite/c-c++-common/strub-defer-O3.c
new file mode 100644
index 0000000..7ebc65b
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-defer-O3.c
@@ -0,0 +1,110 @@
+/* { dg-do run } */
+/* { dg-options "-fstrub=strict -O3" } */
+
+/* Check that a strub function called by another strub function defers the
+ strubbing to its caller at -O3. */
+
+#ifndef EXPECT_DEFERRAL
+/* Other strub-defer*.c tests override this macro. */
+# define EXPECT_DEFERRAL
+#endif
+
+const char test_string[] = "\x55\xde\xad\xbe\xef\xc0\x1d\xca\xfe\x55\xaa";
+
+/* Pad before and after the string on the stack, so that it's not overwritten by
+ regular stack use. */
+#define PAD 7
+
+static inline __attribute__ ((__always_inline__, __strub__ ("callable")))
+char *
+leak_string (void)
+{
+ /* We use this variable to avoid any stack red zone. Stack scrubbing covers
+ it, but __builtin_stack_address, that we take as a reference, doesn't, so
+ if e.g. callable() were to store the string in the red zone, we wouldn't
+ find it because it would be outside the range we searched. */
+ typedef void __attribute__ ((__strub__ ("callable"))) callable_t (char *);
+ callable_t *f = 0;
+
+ char s[2 * PAD + 1][sizeof (test_string)];
+ __builtin_strcpy (s[PAD], test_string);
+ asm ("" : "+m" (s), "+r" (f));
+
+ if (__builtin_expect (!f, 1))
+ return (char*)__builtin_stack_address ();
+
+ f (s[PAD]);
+ return 0;
+}
+
+static inline __attribute__ ((__always_inline__, __strub__ ("callable")))
+int
+look_for_string (char *e)
+{
+ char *p = (char*)__builtin_stack_address ();
+
+ if (p == e)
+ __builtin_abort ();
+
+ if (p > e)
+ {
+ char *q = p;
+ p = e;
+ e = q;
+ }
+
+ for (char *re = e - sizeof (test_string); p < re; p++)
+ for (int i = 0; p[i] == test_string[i]; i++)
+ if (i == sizeof (test_string) - 1)
+ return i;
+
+ return 0;
+}
+
+static __attribute__ ((__strub__ ("at-calls"), __noinline__, __noclone__))
+char *
+at_calls ()
+{
+ return leak_string ();
+}
+
+static __attribute__ ((__strub__ ("at-calls")))
+char *
+deferred_at_calls ()
+{
+ char *ret;
+ int i = 1;
+ /* Since these test check stack contents above the top of the stack, an
+ unexpected asynchronous signal or interrupt might overwrite the bits we
+ expect to find and cause spurious fails. Tolerate one such overall
+ spurious fail by retrying. */
+ while (EXPECT_DEFERRAL !look_for_string ((ret = at_calls ())))
+ if (!i--) __builtin_abort ();
+ return ret;
+}
+
+static __attribute__ ((__strub__ ("internal")))
+char *
+deferred_internal ()
+{
+ int i = 1;
+ char *ret;
+ while (EXPECT_DEFERRAL !look_for_string ((ret = at_calls ())))
+ if (!i--) __builtin_abort ();
+ return ret;
+}
+
+int main ()
+{
+ int i = 1;
+ /* These calls should not be subject to spurious fails: whether or not some
+ asynchronous event overwrites the scrubbed stack space, the string won't
+ remain there. Unless the asynchronous event happens to write the string
+ where we look for it, but what are the odds? Anyway, it doesn't hurt to
+ retry, even if just for symmetry. */
+ while (look_for_string (deferred_at_calls ()))
+ if (!i--) __builtin_abort ();
+ while (look_for_string (deferred_internal ()))
+ if (!i--) __builtin_abort ();
+ __builtin_exit (0);
+}
diff --git a/gcc/testsuite/c-c++-common/strub-defer-Os.c b/gcc/testsuite/c-c++-common/strub-defer-Os.c
new file mode 100644
index 0000000..fbaf85f
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-defer-Os.c
@@ -0,0 +1,7 @@
+/* { dg-do run } */
+/* { dg-options "-fstrub=strict -Os" } */
+
+/* Check that a strub function called by another strub function defers the
+ strubbing to its caller at -Os. */
+
+#include "strub-defer-O3.c"
diff --git a/gcc/testsuite/c-c++-common/strub-internal1.c b/gcc/testsuite/c-c++-common/strub-internal1.c
new file mode 100644
index 0000000..e9d7b7b
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-internal1.c
@@ -0,0 +1,31 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=internal -fdump-ipa-strubm -fdump-ipa-strub" } */
+
+/* h becomes STRUB_CALLABLE, rather than STRUB_INLINABLE, because of the
+ strub-enabling -fstrub flag, and gets inlined before pass_ipa_strub. */
+static inline void
+__attribute__ ((__always_inline__))
+h() {
+}
+
+/* g becomes STRUB_INTERNAL because of the flag, and gets split into
+ STRUB_WRAPPER and STRUB_WRAPPED. */
+static inline void
+g() {
+ h();
+}
+
+/* f becomes STRUB_INTERNAL because of the flag, and gets split into
+ STRUB_WRAPPER and STRUB_WRAPPED. */
+void
+f() {
+ g();
+}
+
+/* { dg-final { scan-ipa-dump-times "strub \[(\]" 3 "strubm" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]callable\[)\]" 1 "strubm" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]internal\[)\]" 2 "strubm" } } */
+
+/* { dg-final { scan-ipa-dump-times "strub \[(\]" 4 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]wrapped\[)\]" 2 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]wrapper\[)\]" 2 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-internal2.c b/gcc/testsuite/c-c++-common/strub-internal2.c
new file mode 100644
index 0000000..8b8e15a
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-internal2.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=internal -fdump-ipa-strubm -fdump-ipa-strub" } */
+
+/* g becomes STRUB_INTERNAL, because of the flag. */
+static void
+g() {
+}
+
+/* f becomes STRUB_INTERNAL because of the flag, and gets split into
+ STRUB_WRAPPER and STRUB_WRAPPED. */
+void
+f() {
+ g();
+}
+
+/* { dg-final { scan-ipa-dump-times "strub \[(\]" 2 "strubm" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]internal\[)\]" 2 "strubm" } } */
+
+/* { dg-final { scan-ipa-dump-times "strub \[(\]" 4 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]wrapped\[)\]" 2 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]wrapper\[)\]" 2 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-parms1.c b/gcc/testsuite/c-c++-common/strub-parms1.c
new file mode 100644
index 0000000..0a4a753
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-parms1.c
@@ -0,0 +1,48 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict -fdump-ipa-strub" } */
+
+#include <stdarg.h>
+
+void __attribute__ ((__strub__ ("internal")))
+small_args (int i, long long l, void *p, void **q, double d, char c)
+{
+}
+
+/* { dg-final { scan-ipa-dump "\n(void )?\[^ \]*small_args\[^ \]*.strub.\[0-9\]* \[(\]int i, long long int l, void \\* p, void \\* \\* q, double d, char c, void \\* &\[^&,\]*.strub.watermark_ptr\[)\]" "strub" } } */
+/* { dg-final { scan-ipa-dump " \[^ \]*small_args\[^ \]*.strub.\[0-9\]* \[(\]\[^&\]*&.strub.watermark.\[0-9\]*\[)\]" "strub" } } */
+
+
+struct large_arg {
+ int x[128];
+};
+
+void __attribute__ ((__strub__ ("internal")))
+large_byref_arg (struct large_arg la)
+{
+}
+
+/* { dg-final { scan-ipa-dump "\n(void )?\[^ \]*large_byref_arg\[^ \]*.strub.\[0-9\]* \[(\]struct large_arg & la, void \\* &\[^&,\]*.strub.watermark_ptr\[)\]" "strub" } } */
+/* { dg-final { scan-ipa-dump " \[^ \]*large_byref_arg\[^ \]*.strub.\[0-9\]* \[(\]&\[^&\]*&.strub.watermark.\[0-9\]*\[)\]" "strub" } } */
+
+void __attribute__ ((__strub__ ("internal")))
+std_arg (int i, ...)
+{
+ va_list vl;
+ va_start (vl, i);
+ va_end (vl);
+}
+
+/* { dg-final { scan-ipa-dump "\n(void )?\[^ \]*std_arg\[^ \]*.strub.\[0-9\]* \[(\]int i, \[^&,\]* &\[^&,\]*.strub.va_list_ptr, void \\* &\[^&,\]*.strub.watermark_ptr\[)\]" "strub" } } */
+/* { dg-final { scan-ipa-dump " \[^ \]*std_arg\[^ \]*.strub.\[0-9\]* \[(\]\[^&\]*&.strub.va_list.\[0-9\]*, &.strub.watermark.\[0-9\]*\[)\]" "strub" } } */
+/* { dg-final { scan-ipa-dump-times "va_start \\(" 1 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "va_copy \\(" 1 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "va_end \\(" 2 "strub" } } */
+
+void __attribute__ ((__strub__ ("internal")))
+apply_args (int i, int j, double d)
+{
+ __builtin_apply_args ();
+}
+
+/* { dg-final { scan-ipa-dump "\n(void )?\[^ \]*apply_args\[^ \]*.strub.\[0-9\]* \[(\]int i, int j, double d, void \\*\[^&,\]*.strub.apply_args, void \\* &\[^&,\]*.strub.watermark_ptr\[)\]" "strub" } } */
+/* { dg-final { scan-ipa-dump " \[^ \]*apply_args\[^ \]*.strub.\[0-9\]* \[(\]\[^&\]*.strub.apply_args.\[0-9\]*_\[0-9\]*, &.strub.watermark.\[0-9\]*\[)\]" "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-parms2.c b/gcc/testsuite/c-c++-common/strub-parms2.c
new file mode 100644
index 0000000..147171d
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-parms2.c
@@ -0,0 +1,36 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict -fdump-ipa-strub" } */
+
+#include <stdarg.h>
+
+void __attribute__ ((__strub__ ("at-calls")))
+small_args (int i, long long l, void *p, void **q, double d, char c)
+{
+}
+
+/* { dg-final { scan-ipa-dump "\n(void )?\[^ \]*small_args\[^ \]* \[(\]int i, long long int l, void \\* p, void \\* \\* q, double d, char c, void \\* &\[^&,\]*.strub.watermark_ptr\[)\]" "strub" } } */
+
+
+struct large_arg {
+ int x[128];
+};
+
+void __attribute__ ((__strub__ ("at-calls")))
+large_byref_arg (struct large_arg la)
+{
+}
+
+/* { dg-final { scan-ipa-dump "\n(void )?\[^ \]*large_byref_arg\[^ \]* \[(\]struct large_arg la, void \\* &\[^&,\]*.strub.watermark_ptr\[)\]" "strub" } } */
+
+void __attribute__ ((__strub__ ("at-calls")))
+std_arg (int i, ...)
+{
+ va_list vl;
+ va_start (vl, i);
+ va_end (vl);
+}
+
+/* { dg-final { scan-ipa-dump "\n(void )?\[^ \]*std_arg\[^ \]* \[(\]int i, void \\* &\[^&,\]*.strub.watermark_ptr\[, .]*\[)\]" "strub" } } */
+/* { dg-final { scan-ipa-dump-times "va_start \\(" 1 "strub" } } */
+/* { dg-final { scan-ipa-dump-not "va_copy \\(" "strub" } } */
+/* { dg-final { scan-ipa-dump-times "va_end \\(" 1 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-parms3.c b/gcc/testsuite/c-c++-common/strub-parms3.c
new file mode 100644
index 0000000..4e92682
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-parms3.c
@@ -0,0 +1,58 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict -fdump-ipa-strub" } */
+
+/* Check that uses of a strub variable implicitly enables internal strub for
+ publicly-visible functions, and causes the same transformations to their
+ signatures as those in strub-parms1.c. */
+
+#include <stdarg.h>
+
+int __attribute__ ((__strub__)) var;
+
+void
+small_args (int i, long long l, void *p, void **q, double d, char c)
+{
+ var++;
+}
+
+/* { dg-final { scan-ipa-dump "\n(void )?\[^ \]*small_args\[^ \]*.strub.\[0-9\]* \[(\]int i, long long int l, void \\* p, void \\* \\* q, double d, char c, void \\* &\[^&,\]*.strub.watermark_ptr\[)\]" "strub" } } */
+/* { dg-final { scan-ipa-dump " \[^ \]*small_args\[^ \]*.strub.\[0-9\]* \[(\]\[^&\]*&.strub.watermark.\[0-9\]*\[)\]" "strub" } } */
+
+
+struct large_arg {
+ int x[128];
+};
+
+void
+large_byref_arg (struct large_arg la)
+{
+ var++;
+}
+
+/* { dg-final { scan-ipa-dump "\n(void )?\[^ \]*large_byref_arg\[^ \]*.strub.\[0-9\]* \[(\]struct large_arg & la, void \\* &\[^&,\]*.strub.watermark_ptr\[)\]" "strub" } } */
+/* { dg-final { scan-ipa-dump " \[^ \]*large_byref_arg\[^ \]*.strub.\[0-9\]* \[(\]&\[^&\]*&.strub.watermark.\[0-9\]*\[)\]" "strub" } } */
+
+void
+std_arg (int i, ...)
+{
+ va_list vl;
+ va_start (vl, i);
+ var++;
+ va_end (vl);
+}
+
+/* { dg-final { scan-ipa-dump "\n(void )?\[^ \]*std_arg\[^ \]*.strub.\[0-9\]* \[(\]int i, \[^&,\]* &\[^&,\]*.strub.va_list_ptr, void \\* &\[^&,\]*.strub.watermark_ptr\[)\]" "strub" } } */
+/* { dg-final { scan-ipa-dump " \[^ \]*std_arg\[^ \]*.strub.\[0-9\]* \[(\]\[^&\]*&.strub.va_list.\[0-9\]*, &.strub.watermark.\[0-9\]*\[)\]" "strub" } } */
+/* { dg-final { scan-ipa-dump-times "va_start \\(" 1 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "va_copy \\(" 1 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "va_end \\(" 2 "strub" } } */
+
+void
+apply_args (int i, int j, double d)
+{
+ var++;
+ __builtin_apply_args ();
+}
+
+/* { dg-final { scan-ipa-dump "\n(void )?\[^ \]*apply_args\[^ \]*.strub.\[0-9\]* \[(\]int i, int j, double d, void \\*\[^&,\]*.strub.apply_args, void \\* &\[^&,\]*.strub.watermark_ptr\[)\]" "strub" } } */
+/* { dg-final { scan-ipa-dump " \[^ \]*apply_args\[^ \]*.strub.\[0-9\]* \[(\]\[^&\]*.strub.apply_args.\[0-9\]*_\[0-9\]*, &.strub.watermark.\[0-9\]*\[)\]" "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-relaxed1.c b/gcc/testsuite/c-c++-common/strub-relaxed1.c
new file mode 100644
index 0000000..e2f9d8a
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-relaxed1.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=relaxed -fdump-ipa-strubm -fdump-ipa-strub" } */
+
+/* The difference between relaxed and strict in this case is that we accept the
+ call from one internal-strub function to another. Without the error,
+ inlining takes place. */
+
+#include "strub-strict1.c"
+
+/* { dg-final { scan-ipa-dump-times "strub \[(\]" 3 "strubm" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]inlinable\[)\]" 1 "strubm" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]at-calls-opt\[)\]" 1 "strubm" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]internal\[)\]" 1 "strubm" } } */
+
+/* { dg-final { scan-ipa-dump-times "strub \[(\]" 3 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]at-calls-opt\[)\]" 1 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]wrapped\[)\]" 1 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]wrapper\[)\]" 1 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-relaxed2.c b/gcc/testsuite/c-c++-common/strub-relaxed2.c
new file mode 100644
index 0000000..9847443
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-relaxed2.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=relaxed -fdump-ipa-strubm -fdump-ipa-strub" } */
+
+/* The difference between relaxed and strict in this case is that we accept the
+ call from one internal-strub function to another. */
+
+#include "strub-strict2.c"
+
+/* { dg-final { scan-ipa-dump-times "strub \[(\]" 2 "strubm" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]internal\[)\]" 2 "strubm" } } */
+
+/* { dg-final { scan-ipa-dump-times "strub \[(\]" 4 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]wrapped\[)\]" 2 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]wrapper\[)\]" 2 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-short-O0-exc.c b/gcc/testsuite/c-c++-common/strub-short-O0-exc.c
new file mode 100644
index 0000000..1de1534
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-short-O0-exc.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-O0 -fstrub=strict -fexceptions -fdump-ipa-strub" } */
+
+/* Check that the expected strub calls are issued. */
+
+#include "torture/strub-callable1.c"
+
+/* { dg-final { scan-ipa-dump-times "strub_enter" 45 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub_update" 4 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub_leave" 89 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-short-O0.c b/gcc/testsuite/c-c++-common/strub-short-O0.c
new file mode 100644
index 0000000..f9209c8
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-short-O0.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-O0 -fstrub=strict -fno-exceptions -fdump-ipa-strub" } */
+
+/* Check that the expected strub calls are issued. */
+
+#include "torture/strub-callable1.c"
+
+/* { dg-final { scan-ipa-dump-times "strub_enter" 45 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub_update" 4 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub_leave" 45 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-short-O1.c b/gcc/testsuite/c-c++-common/strub-short-O1.c
new file mode 100644
index 0000000..bed1dcf
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-short-O1.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-O1 -fstrub=strict -fno-exceptions -fdump-ipa-strub" } */
+
+/* Check that the expected strub calls are issued. */
+
+#include "torture/strub-callable1.c"
+
+/* { dg-final { scan-ipa-dump-times "strub_enter" 45 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub_update" 4 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub_leave" 45 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-short-O2.c b/gcc/testsuite/c-c++-common/strub-short-O2.c
new file mode 100644
index 0000000..6bf0071
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-short-O2.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fstrub=strict -fno-exceptions -fdump-ipa-strub" } */
+
+/* Check that the expected strub calls are issued. */
+
+#include "torture/strub-callable1.c"
+
+/* { dg-final { scan-ipa-dump-times "strub_enter" 45 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub_update" 4 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub_leave" 45 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-short-O3.c b/gcc/testsuite/c-c++-common/strub-short-O3.c
new file mode 100644
index 0000000..4732f51
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-short-O3.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-O3 -fstrub=strict -fno-exceptions -fdump-ipa-strub" } */
+
+/* Check that the expected strub calls are issued. At -O3 and -Os, we omit
+ enter and leave calls within strub contexts, passing on the enclosing
+ watermark. */
+
+#include "torture/strub-callable1.c"
+
+/* { dg-final { scan-ipa-dump-times "strub_enter" 15 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub_update" 4 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub_leave" 15 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-short-Os.c b/gcc/testsuite/c-c++-common/strub-short-Os.c
new file mode 100644
index 0000000..8d6424c
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-short-Os.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-Os -fstrub=strict -fno-exceptions -fdump-ipa-strub" } */
+
+/* Check that the expected strub calls are issued. At -O3 and -Os, we omit
+ enter and leave calls within strub contexts, passing on the enclosing
+ watermark. */
+
+#include "torture/strub-callable1.c"
+
+/* { dg-final { scan-ipa-dump-times "strub_enter" 15 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub_update" 4 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub_leave" 15 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-strict1.c b/gcc/testsuite/c-c++-common/strub-strict1.c
new file mode 100644
index 0000000..3685224
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-strict1.c
@@ -0,0 +1,36 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict -fdump-ipa-strubm" } */
+
+static int __attribute__ ((__strub__)) var;
+
+/* h becomes STRUB_INLINABLE, because of the use of the strub variable,
+ and the always_inline flag. It would get inlined before pass_ipa_strub, if
+ it weren't for the error. */
+static inline void
+__attribute__ ((__always_inline__))
+h() {
+ var++;
+}
+
+/* g becomes STRUB_AT_CALLS_OPT, because of the use of the strub variable, and
+ the viability of at-calls strubbing. Though internally a strub context, its
+ interface is not strub-enabled, so it's not callable from within strub
+ contexts. */
+static inline void
+g() {
+ var--;
+ h();
+}
+
+/* f becomes STRUB_INTERNAL because of the use of the strub variable, and gets
+ split into STRUB_WRAPPER and STRUB_WRAPPED. */
+void
+f() {
+ var++;
+ g(); /* { dg-error "calling non-.strub." } */
+}
+
+/* { dg-final { scan-ipa-dump-times "strub \[(\]" 3 "strubm" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]inlinable\[)\]" 1 "strubm" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]at-calls-opt\[)\]" 1 "strubm" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]internal\[)\]" 1 "strubm" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-strict2.c b/gcc/testsuite/c-c++-common/strub-strict2.c
new file mode 100644
index 0000000..b4f2888
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-strict2.c
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict -fdump-ipa-strubm" } */
+
+static int __attribute__ ((__strub__)) var;
+
+/* g becomes STRUB_INTERNAL because of the use of the strub variable, and gets
+ split into STRUB_WRAPPER and STRUB_WRAPPED. It's not STRUB_AT_CALLS_OPT
+ because force_output is set for static non-inline functions when not
+ optimizing, and that keeps only_called_directly_p from returning true, which
+ makes STRUB_AT_CALLS[_OPT] non-viable. */
+static void
+g() {
+ var--;
+}
+
+/* f becomes STRUB_INTERNAL because of the use of the strub variable, and gets
+ split into STRUB_WRAPPER and STRUB_WRAPPED. */
+void
+f() {
+ var++;
+ g(); /* { dg-error "calling non-.strub." } */
+}
+
+/* { dg-final { scan-ipa-dump-times "strub \[(\]" 2 "strubm" } } */
+/* { dg-final { scan-ipa-dump-times "strub \[(\]internal\[)\]" 2 "strubm" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-tail-O1.c b/gcc/testsuite/c-c++-common/strub-tail-O1.c
new file mode 100644
index 0000000..e48e061
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-tail-O1.c
@@ -0,0 +1,8 @@
+/* { dg-do compile } */
+/* { dg-options "-O1 -fstrub=strict -fno-exceptions -fdump-ipa-strub" } */
+
+#include "strub-tail-O2.c"
+
+/* { dg-final { scan-ipa-dump-times "strub_enter" 2 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub_update" 2 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub_leave" 2 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-tail-O2.c b/gcc/testsuite/c-c++-common/strub-tail-O2.c
new file mode 100644
index 0000000..87cda7a
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-tail-O2.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fstrub=strict -fno-exceptions -fdump-ipa-strub" } */
+
+/* Check that the expected strub calls are issued.
+ Tail calls are short-circuited at -O2+. */
+
+int __attribute__ ((__strub__))
+g (int i, int j) {
+ return g (i, j);
+}
+
+/* { dg-final { scan-ipa-dump-times "strub_enter" 0 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub_update" 2 "strub" } } */
+/* { dg-final { scan-ipa-dump-times "strub_leave" 0 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/strub-var1.c b/gcc/testsuite/c-c++-common/strub-var1.c
new file mode 100644
index 0000000..eb6250f
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/strub-var1.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+
+int __attribute__ ((strub)) x;
+float __attribute__ ((strub)) f;
+double __attribute__ ((strub)) d;
+
+/* The attribute applies to the type of the declaration, i.e., to the pointer
+ variable p, not to the pointed-to integer. */
+int __attribute__ ((strub)) *
+p = &x; /* { dg-message "incompatible|invalid conversion" } */
+
+typedef int __attribute__ ((strub)) strub_int;
+strub_int *q = &x; /* Now this is compatible. */
+
+int __attribute__ ((strub))
+a[2]; /* { dg-warning "does not apply to elements" } */
+
+int __attribute__ ((vector_size (4 * sizeof (int))))
+ __attribute__ ((strub))
+v; /* { dg-warning "does not apply to elements" } */
+
+struct s {
+ int i, j;
+} __attribute__ ((strub)) w; /* { dg-warning "does not apply to fields" } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-always.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-always.c
index 779896c..3406c4e 100644
--- a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-always.c
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-always.c
@@ -1,5 +1,6 @@
/* { dg-do compile } */
-/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=always -fdump-tree-hardcfr -ffat-lto-objects" } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=always -fno-exceptions -fdump-tree-hardcfr -ffat-lto-objects" } */
+/* { dg-require-effective-target untyped_assembly } */
/* Check that, even enabling all checks before noreturn calls (leaving
returning calls enabled), we get checks before __builtin_return without
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-except.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-except.c
new file mode 100644
index 0000000..3acb61c
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-except.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fexceptions -fdump-tree-hardcfr -ffat-lto-objects" } */
+/* { dg-require-effective-target untyped_assembly } */
+
+/* Check that, with exceptions enabled, even in C, the calls initiated by
+ builtin_apply are enclosed in cleanup handlers that add extra checks.
+ Unfortunately, declaring foobar as nothrow is not enough to avoid the
+ handler around the builtin_apply call, so the other bret tests all use
+ -fno-exceptions. */
+
+#include "harden-cfr-bret.c"
+
+/* With exceptions, we get an extra check per function, to check before
+ propagating exceptions, so it's 3 in f and 2 in g. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 5 "hardcfr" } } */
+/* The extra check in g also removes the possibility of inlining the check. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 0 "hardcfr" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-never.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-never.c
index 49ce17f..7f8fb64 100644
--- a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-never.c
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-never.c
@@ -1,5 +1,6 @@
/* { dg-do compile } */
-/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fdump-tree-hardcfr -ffat-lto-objects" } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fno-exceptions -fdump-tree-hardcfr -ffat-lto-objects" } */
+/* { dg-require-effective-target untyped_assembly } */
/* Check that, even enabling checks before never noreturn calls (leaving
returning calls enabled), we get checks before __builtin_return without
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-no-xthrow.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-no-xthrow.c
index 78e5bf4..07588e8 100644
--- a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-no-xthrow.c
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-no-xthrow.c
@@ -1,5 +1,6 @@
/* { dg-do compile } */
-/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=no-xthrow -fdump-tree-hardcfr -ffat-lto-objects" } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=no-xthrow -fno-exceptions -fdump-tree-hardcfr -ffat-lto-objects" } */
+/* { dg-require-effective-target untyped_assembly } */
/* Check that, even enabling checks before no-xthrow-throwing noreturn calls
(leaving returning calls enabled), we get checks before __builtin_return
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noopt.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noopt.c
index 1512614..716d929 100644
--- a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noopt.c
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noopt.c
@@ -1,5 +1,6 @@
/* { dg-do compile } */
-/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fno-hardcfr-check-returning-calls -fdump-tree-hardcfr -ffat-lto-objects" } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fno-hardcfr-check-returning-calls -fno-exceptions -fdump-tree-hardcfr -ffat-lto-objects" } */
+/* { dg-require-effective-target untyped_assembly } */
/* Check that, even disabling checks before both noreturn and returning
calls, we still get checks before __builtin_return. */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noret.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noret.c
index fd95bb7..c6d2baa 100644
--- a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noret.c
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noret.c
@@ -1,5 +1,6 @@
/* { dg-do compile } */
-/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-returning-calls -fdump-tree-hardcfr -ffat-lto-objects" } */
+/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-returning-calls -fno-exceptions -fdump-tree-hardcfr -ffat-lto-objects" } */
+/* { dg-require-effective-target untyped_assembly } */
/* Check that, even disabling checks before returning calls (leaving noreturn
calls enabled), we still get checks before __builtin_return. */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-nothrow.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-nothrow.c
index c5c3612..2fd0d82 100644
--- a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-nothrow.c
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-nothrow.c
@@ -1,5 +1,6 @@
/* { dg-do compile } */
-/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=nothrow -fdump-tree-hardcfr -ffat-lto-objects" } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=nothrow -fno-exceptions -fdump-tree-hardcfr -ffat-lto-objects" } */
+/* { dg-require-effective-target untyped_assembly } */
/* Check that, even enabling checks before nothrow noreturn calls (leaving
returning calls enabled), we get checks before __builtin_return without
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-retcl.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-retcl.c
index 137dfbb..b070294 100644
--- a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-retcl.c
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-retcl.c
@@ -1,5 +1,6 @@
/* { dg-do compile } */
-/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fdump-tree-hardcfr -ffat-lto-objects" } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fno-exceptions -fdump-tree-hardcfr -ffat-lto-objects" } */
+/* { dg-require-effective-target untyped_assembly } */
/* Check that, even disabling checks before noreturn calls (leaving returning
calls enabled), we still get checks before __builtin_return. */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret.c
index b459ff6..b6630a6 100644
--- a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret.c
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret.c
@@ -1,14 +1,27 @@
/* { dg-do compile } */
-/* { dg-options "-fharden-control-flow-redundancy -fdump-tree-hardcfr -ffat-lto-objects" } */
+/* { dg-options "-fharden-control-flow-redundancy -fno-exceptions -fdump-tree-hardcfr -ffat-lto-objects" } */
+/* { dg-require-effective-target untyped_assembly } */
-int f(int i) {
+extern int foobar (void);
+
+#if __cplusplus
+typedef void (*fnt)(...);
+#else
+typedef void (*fnt)();
+#endif
+
+int i;
+
+int f(void) {
if (i)
- __builtin_return (&i);
+ __builtin_return (__builtin_apply ((fnt)foobar,
+ __builtin_apply_args (), 0));
return i;
}
-int g(int i) {
- __builtin_return (&i);
+int g(void) {
+ __builtin_return (__builtin_apply ((fnt)foobar,
+ __builtin_apply_args (), 0));
}
/* Out-of-line checking, before both builtin_return and return in f. */
diff --git a/gcc/testsuite/c-c++-common/torture/strub-callable1.c b/gcc/testsuite/c-c++-common/torture/strub-callable1.c
new file mode 100644
index 0000000..b5e45ab
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-callable1.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict" } */
+
+/* Check that strub and non-strub functions can be called from non-strub
+ contexts, and that strub and callable functions can be called from strub
+ contexts. */
+
+#define OMIT_IMPERMISSIBLE_CALLS 1
+#include "strub-callable2.c"
diff --git a/gcc/testsuite/c-c++-common/torture/strub-callable2.c b/gcc/testsuite/c-c++-common/torture/strub-callable2.c
new file mode 100644
index 0000000..96aa7fe
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-callable2.c
@@ -0,0 +1,264 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict" } */
+
+/* Check that impermissible (cross-strub-context) calls are reported. */
+
+extern int __attribute__ ((__strub__ ("callable"))) xcallable (void);
+extern int __attribute__ ((__strub__ ("internal"))) xinternal (void);
+extern int __attribute__ ((__strub__ ("at-calls"))) xat_calls (void);
+extern int __attribute__ ((__strub__ ("disabled"))) xdisabled (void);
+
+int __attribute__ ((__strub__ ("callable"))) callable (void);
+int __attribute__ ((__strub__ ("internal"))) internal (void);
+int __attribute__ ((__strub__ ("at-calls"))) at_calls (void);
+int __attribute__ ((__strub__ ("disabled"))) disabled (void);
+
+int __attribute__ ((__strub__)) var;
+int var_user (void);
+
+static inline int __attribute__ ((__always_inline__, __strub__ ("callable")))
+icallable (void);
+static inline int __attribute__ ((__always_inline__, __strub__ ("internal")))
+iinternal (void);
+static inline int __attribute__ ((__always_inline__, __strub__ ("at-calls")))
+iat_calls (void);
+static inline int __attribute__ ((__always_inline__, __strub__ ("disabled")))
+idisabled (void);
+static inline int __attribute__ ((__always_inline__))
+ivar_user (void);
+
+static inline int __attribute__ ((__always_inline__, __strub__ ("callable")))
+i_callable (void) { return 0; }
+static inline int __attribute__ ((__always_inline__, __strub__ ("internal")))
+i_internal (void) { return var; }
+static inline int __attribute__ ((__always_inline__, __strub__ ("at-calls")))
+i_at_calls (void) { return var; }
+static inline int __attribute__ ((__always_inline__, __strub__ ("disabled")))
+i_disabled (void) { return 0; }
+static inline int __attribute__ ((__always_inline__))
+i_var_user (void) { return var; }
+
+#define CALLS_GOOD_FOR_STRUB_CONTEXT(ISEP) \
+ do { \
+ ret += i ## ISEP ## at_calls (); \
+ ret += i ## ISEP ## internal (); \
+ ret += i ## ISEP ## var_user (); \
+ } while (0)
+
+#define CALLS_GOOD_FOR_NONSTRUB_CONTEXT(ISEP) \
+ do { \
+ ret += internal (); \
+ ret += disabled (); \
+ ret += var_user (); \
+ \
+ ret += i ## ISEP ## disabled (); \
+ \
+ ret += xinternal (); \
+ ret += xdisabled (); \
+ } while (0)
+
+#define CALLS_GOOD_FOR_EITHER_CONTEXT(ISEP) \
+ do { \
+ ret += i ## ISEP ## callable (); \
+ \
+ ret += callable (); \
+ ret += at_calls (); \
+ \
+ ret += xat_calls (); \
+ ret += xcallable (); \
+ } while (0)
+
+/* Not a strub context, so it can call anything.
+ Explicitly declared as callable even from within strub contexts. */
+int __attribute__ ((__strub__ ("callable")))
+callable (void) {
+ int ret = 0;
+
+ /* CALLS_GOOD_FOR_STRUB_CONTEXT(); */
+#if !OMIT_IMPERMISSIBLE_CALLS
+ ret += iat_calls (); /* { dg-error "in non-.strub. context" } */
+ ret += iinternal (); /* { dg-error "in non-.strub. context" } */
+ ret += ivar_user (); /* { dg-error "in non-.strub. context" } */
+#endif
+ CALLS_GOOD_FOR_EITHER_CONTEXT();
+ CALLS_GOOD_FOR_NONSTRUB_CONTEXT();
+
+ return ret;
+}
+
+/* Internal strubbing means the body is a strub context, so it can only call
+ strub functions, and it's not itself callable from strub functions. */
+int __attribute__ ((__strub__ ("internal")))
+internal (void) {
+ int ret = var;
+
+ CALLS_GOOD_FOR_STRUB_CONTEXT();
+ CALLS_GOOD_FOR_EITHER_CONTEXT();
+ /* CALLS_GOOD_FOR_NONSTRUB_CONTEXT(); */
+#if !OMIT_IMPERMISSIBLE_CALLS
+ ret += internal (); /* { dg-error "in .strub. context" } */
+ ret += disabled (); /* { dg-error "in .strub. context" } */
+ ret += var_user (); /* { dg-error "in .strub. context" } */
+
+ ret += idisabled (); /* { dg-error "in .strub. context" } */
+
+ ret += xinternal (); /* { dg-error "in .strub. context" } */
+ ret += xdisabled (); /* { dg-error "in .strub. context" } */
+#endif
+
+ return ret;
+}
+
+int __attribute__ ((__strub__ ("at-calls")))
+at_calls (void) {
+ int ret = var;
+
+ CALLS_GOOD_FOR_STRUB_CONTEXT();
+ CALLS_GOOD_FOR_EITHER_CONTEXT();
+ /* CALLS_GOOD_FOR_NONSTRUB_CONTEXT(); */
+#if !OMIT_IMPERMISSIBLE_CALLS
+ ret += internal (); /* { dg-error "in .strub. context" } */
+ ret += disabled (); /* { dg-error "in .strub. context" } */
+ ret += var_user (); /* { dg-error "in .strub. context" } */
+
+ ret += idisabled (); /* { dg-error "in .strub. context" } */
+
+ ret += xinternal (); /* { dg-error "in .strub. context" } */
+ ret += xdisabled (); /* { dg-error "in .strub. context" } */
+#endif
+
+ return ret;
+}
+
+int __attribute__ ((__strub__ ("disabled")))
+disabled () {
+ int ret = 0;
+
+ /* CALLS_GOOD_FOR_STRUB_CONTEXT(); */
+#if !OMIT_IMPERMISSIBLE_CALLS
+ ret += iat_calls (); /* { dg-error "in non-.strub. context" } */
+ ret += iinternal (); /* { dg-error "in non-.strub. context" } */
+ ret += ivar_user (); /* { dg-error "in non-.strub. context" } */
+#endif
+ CALLS_GOOD_FOR_EITHER_CONTEXT();
+ CALLS_GOOD_FOR_NONSTRUB_CONTEXT();
+
+ return ret;
+}
+
+int
+var_user (void) {
+ int ret = var;
+
+ CALLS_GOOD_FOR_STRUB_CONTEXT();
+ CALLS_GOOD_FOR_EITHER_CONTEXT();
+ /* CALLS_GOOD_FOR_NONSTRUB_CONTEXT(); */
+#if !OMIT_IMPERMISSIBLE_CALLS
+ ret += internal (); /* { dg-error "in .strub. context" } */
+ ret += disabled (); /* { dg-error "in .strub. context" } */
+ ret += var_user (); /* { dg-error "in .strub. context" } */
+
+ ret += idisabled (); /* { dg-error "in .strub. context" } */
+
+ ret += xinternal (); /* { dg-error "in .strub. context" } */
+ ret += xdisabled (); /* { dg-error "in .strub. context" } */
+#endif
+
+ return ret;
+}
+
+int
+icallable (void)
+{
+ int ret = 0;
+
+ /* CALLS_GOOD_FOR_STRUB_CONTEXT(_); */
+#if !OMIT_IMPERMISSIBLE_CALLS
+ ret += i_at_calls (); /* { dg-error "in non-.strub. context" } */
+ ret += i_internal (); /* { dg-error "in non-.strub. context" } */
+ ret += i_var_user (); /* { dg-error "in non-.strub. context" } */
+#endif
+ CALLS_GOOD_FOR_EITHER_CONTEXT(_);
+ CALLS_GOOD_FOR_NONSTRUB_CONTEXT(_);
+
+ return ret;
+}
+
+int
+iinternal (void) {
+ int ret = var;
+
+ CALLS_GOOD_FOR_STRUB_CONTEXT(_);
+ CALLS_GOOD_FOR_EITHER_CONTEXT(_);
+ /* CALLS_GOOD_FOR_NONSTRUB_CONTEXT(_); */
+#if !OMIT_IMPERMISSIBLE_CALLS
+ ret += internal (); /* { dg-error "in .strub. context" } */
+ ret += disabled (); /* { dg-error "in .strub. context" } */
+ ret += var_user (); /* { dg-error "in .strub. context" } */
+
+ ret += i_disabled (); /* { dg-error "in .strub. context" } */
+
+ ret += xinternal (); /* { dg-error "in .strub. context" } */
+ ret += xdisabled (); /* { dg-error "in .strub. context" } */
+#endif
+
+ return ret;
+}
+
+int __attribute__ ((__always_inline__, __strub__ ("at-calls")))
+iat_calls (void) {
+ int ret = var;
+
+ CALLS_GOOD_FOR_STRUB_CONTEXT(_);
+ CALLS_GOOD_FOR_EITHER_CONTEXT(_);
+ /* CALLS_GOOD_FOR_NONSTRUB_CONTEXT(_); */
+#if !OMIT_IMPERMISSIBLE_CALLS
+ ret += internal (); /* { dg-error "in .strub. context" } */
+ ret += disabled (); /* { dg-error "in .strub. context" } */
+ ret += var_user (); /* { dg-error "in .strub. context" } */
+
+ ret += i_disabled (); /* { dg-error "in .strub. context" } */
+
+ ret += xinternal (); /* { dg-error "in .strub. context" } */
+ ret += xdisabled (); /* { dg-error "in .strub. context" } */
+#endif
+
+ return ret;
+}
+
+int
+idisabled () {
+ int ret = 0;
+
+ /* CALLS_GOOD_FOR_STRUB_CONTEXT(_); */
+#if !OMIT_IMPERMISSIBLE_CALLS
+ ret += i_at_calls (); /* { dg-error "in non-.strub. context" } */
+ ret += i_internal (); /* { dg-error "in non-.strub. context" } */
+ ret += i_var_user (); /* { dg-error "in non-.strub. context" } */
+#endif
+ CALLS_GOOD_FOR_EITHER_CONTEXT(_);
+ CALLS_GOOD_FOR_NONSTRUB_CONTEXT(_);
+
+ return ret;
+}
+
+int
+ivar_user (void) {
+ int ret = var;
+
+ CALLS_GOOD_FOR_STRUB_CONTEXT(_);
+ CALLS_GOOD_FOR_EITHER_CONTEXT(_);
+ /* CALLS_GOOD_FOR_NONSTRUB_CONTEXT(_); */
+#if !OMIT_IMPERMISSIBLE_CALLS
+ ret += internal (); /* { dg-error "in .strub. context" } */
+ ret += disabled (); /* { dg-error "in .strub. context" } */
+ ret += var_user (); /* { dg-error "in .strub. context" } */
+
+ ret += i_disabled (); /* { dg-error "in .strub. context" } */
+
+ ret += xinternal (); /* { dg-error "in .strub. context" } */
+ ret += xdisabled (); /* { dg-error "in .strub. context" } */
+#endif
+
+ return ret;
+}
diff --git a/gcc/testsuite/c-c++-common/torture/strub-const1.c b/gcc/testsuite/c-c++-common/torture/strub-const1.c
new file mode 100644
index 0000000..5e956cb
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-const1.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict -fdump-ipa-strub" } */
+
+/* Check that, along with a strub const function call, we issue an asm
+ statement to make sure the watermark passed to it is held in memory before
+ the call, and another to make sure it is not assumed to be unchanged. f
+ should not be inlined into g, but if it were too simple it might be folded
+ by interprocedural value-range propagation. */
+
+extern int __attribute__ ((__strub__ ("callable"),
+ __const__, __nothrow__)) c ();
+
+int __attribute__ ((__strub__, __const__))
+f () {
+ return c ();
+}
+
+int
+g () {
+ return f ();
+}
+
+/* { dg-final { scan-ipa-dump-times "__asm__" 2 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/strub-const2.c b/gcc/testsuite/c-c++-common/torture/strub-const2.c
new file mode 100644
index 0000000..73d6502
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-const2.c
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict -fdump-ipa-strub" } */
+
+/* Check that, along with a strub implicitly-const function call, we issue an
+ asm statement to make sure the watermark passed to it is held in memory
+ before the call, and another to make sure it is not assumed to be
+ unchanged. */
+
+extern int __attribute__ ((__strub__ ("callable"),
+ __const__, __nothrow__)) c ();
+
+int __attribute__ ((__strub__))
+#if ! __OPTIMIZE__
+__attribute__ ((__const__))
+#endif
+f () {
+ return c ();
+}
+
+int
+g () {
+ return f ();
+}
+
+/* { dg-final { scan-ipa-dump-times "__asm__" 2 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/strub-const3.c b/gcc/testsuite/c-c++-common/torture/strub-const3.c
new file mode 100644
index 0000000..2584f1f
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-const3.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict -fdump-ipa-strub" } */
+
+/* Check that, along with a strub const wrapping call, we issue an asm statement
+ to make sure the watermark passed to it is held in memory before the call,
+ and another to make sure it is not assumed to be unchanged. */
+
+extern int __attribute__ ((__strub__ ("callable"),
+ __const__, __nothrow__)) c ();
+
+int __attribute__ ((__strub__ ("internal"), __const__))
+f () {
+ return c ();
+}
+
+/* { dg-final { scan-ipa-dump-times "__asm__" 2 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/strub-const4.c b/gcc/testsuite/c-c++-common/torture/strub-const4.c
new file mode 100644
index 0000000..d819f54
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-const4.c
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict -fdump-ipa-strub" } */
+
+/* Check that, along with a strub implicitly-const wrapping call, we issue an
+ asm statement to make sure the watermark passed to it is held in memory
+ before the call, and another to make sure it is not assumed to be
+ unchanged. */
+
+extern int __attribute__ ((__strub__ ("callable"),
+ __const__, __nothrow__)) c ();
+
+int __attribute__ ((__strub__ ("internal")))
+#if ! __OPTIMIZE__
+__attribute__ ((__const__))
+#endif
+f () {
+ return c ();
+}
+
+/* { dg-final { scan-ipa-dump-times "__asm__" 2 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/strub-data1.c b/gcc/testsuite/c-c++-common/torture/strub-data1.c
new file mode 100644
index 0000000..7c27a2a
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-data1.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict -fdump-ipa-strub" } */
+
+/* The pointed-to data enables strubbing if accessed. */
+int __attribute__ ((__strub__)) var;
+
+int f() {
+ return var;
+}
+
+/* { dg-final { scan-ipa-dump "strub_enter" "strub" } } */
+/* { dg-final { scan-ipa-dump "strub_leave" "strub" } } */
+/* { dg-final { scan-ipa-dump "strub_update" "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/strub-data2.c b/gcc/testsuite/c-c++-common/torture/strub-data2.c
new file mode 100644
index 0000000..e66d903
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-data2.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict -fdump-ipa-strub" } */
+
+/* The pointer itself is a strub variable, enabling internal strubbing when
+ its value is used. */
+int __attribute__ ((__strub__)) *ptr;
+
+int *f() {
+ return ptr;
+}
+
+/* { dg-final { scan-ipa-dump "strub_enter" "strub" } } */
+/* { dg-final { scan-ipa-dump "strub_leave" "strub" } } */
+/* { dg-final { scan-ipa-dump "strub_update" "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/strub-data3.c b/gcc/testsuite/c-c++-common/torture/strub-data3.c
new file mode 100644
index 0000000..5e08e0e
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-data3.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict -fdump-ipa-strub" } */
+
+/* The pointer itself is a strub variable, that would enable internal strubbing
+ if its value was used. Here, it's only overwritten, so no strub. */
+int __attribute__ ((__strub__)) var;
+
+void f() {
+ var = 0;
+}
+
+/* { dg-final { scan-ipa-dump-not "strub_enter" "strub" } } */
+/* { dg-final { scan-ipa-dump-not "strub_leave" "strub" } } */
+/* { dg-final { scan-ipa-dump-not "strub_update" "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/strub-data4.c b/gcc/testsuite/c-c++-common/torture/strub-data4.c
new file mode 100644
index 0000000..a818e7a
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-data4.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict -fdump-ipa-strub" } */
+
+/* The pointer itself is a strub variable, that would enable internal strubbing
+ if its value was used. Here, it's only overwritten, so no strub. */
+int __attribute__ ((__strub__)) *ptr;
+
+void f() {
+ ptr = 0;
+}
+
+/* { dg-final { scan-ipa-dump-not "strub_enter" "strub" } } */
+/* { dg-final { scan-ipa-dump-not "strub_leave" "strub" } } */
+/* { dg-final { scan-ipa-dump-not "strub_update" "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/strub-data5.c b/gcc/testsuite/c-c++-common/torture/strub-data5.c
new file mode 100644
index 0000000..ddb0b5c
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-data5.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict" } */
+
+/* It would be desirable to issue at least warnings for these. */
+
+typedef int __attribute__ ((__strub__)) strub_int;
+strub_int *ptr;
+
+int *f () {
+ return ptr; /* { dg-message "incompatible|invalid conversion" } */
+}
+
+strub_int *g () {
+ return f (); /* { dg-message "incompatible|invalid conversion" } */
+}
diff --git a/gcc/testsuite/c-c++-common/torture/strub-indcall1.c b/gcc/testsuite/c-c++-common/torture/strub-indcall1.c
new file mode 100644
index 0000000..c165f31
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-indcall1.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict -fdump-ipa-strub" } */
+
+typedef void __attribute__ ((__strub__)) fntype ();
+fntype (*ptr);
+
+void f() {
+ ptr ();
+}
+
+/* { dg-final { scan-ipa-dump "strub_enter" "strub" } } */
+/* { dg-final { scan-ipa-dump "(&\.strub\.watermark\.\[0-9\]\+)" "strub" } } */
+/* { dg-final { scan-ipa-dump "strub_leave" "strub" } } */
+/* { dg-final { scan-ipa-dump-not "strub_update" "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/strub-indcall2.c b/gcc/testsuite/c-c++-common/torture/strub-indcall2.c
new file mode 100644
index 0000000..69fcff8
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-indcall2.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict -fdump-ipa-strub" } */
+
+typedef void __attribute__ ((__strub__)) fntype (int, int);
+fntype (*ptr);
+
+void f() {
+ ptr (0, 0);
+}
+
+/* { dg-final { scan-ipa-dump "strub_enter" "strub" } } */
+/* { dg-final { scan-ipa-dump "(0, 0, &\.strub\.watermark\.\[0-9\]\+)" "strub" } } */
+/* { dg-final { scan-ipa-dump "strub_leave" "strub" } } */
+/* { dg-final { scan-ipa-dump-not "strub_update" "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/strub-indcall3.c b/gcc/testsuite/c-c++-common/torture/strub-indcall3.c
new file mode 100644
index 0000000..ff00622
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-indcall3.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict -fdump-ipa-strub" } */
+
+typedef void __attribute__ ((__strub__)) fntype (int, int, ...);
+fntype (*ptr);
+
+void f() {
+ ptr (0, 0, 1, 1);
+}
+
+/* { dg-final { scan-ipa-dump "strub_enter" "strub" } } */
+/* { dg-final { scan-ipa-dump "(0, 0, &\.strub\.watermark\.\[0-9\]\+, 1, 1)" "strub" } } */
+/* { dg-final { scan-ipa-dump "strub_leave" "strub" } } */
+/* { dg-final { scan-ipa-dump-not "strub_update" "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/strub-inlinable1.c b/gcc/testsuite/c-c++-common/torture/strub-inlinable1.c
new file mode 100644
index 0000000..614b022
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-inlinable1.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=relaxed" } */
+
+inline void __attribute__ ((strub ("internal"), always_inline))
+inl_int_ali (void)
+{
+ /* No internal wrapper, so this body ALWAYS gets inlined,
+ but it cannot be called from non-strub contexts. */
+}
+
+void
+bat (void)
+{
+ /* Not allowed, not a strub context. */
+ inl_int_ali (); /* { dg-error "context" } */
+}
diff --git a/gcc/testsuite/c-c++-common/torture/strub-inlinable2.c b/gcc/testsuite/c-c++-common/torture/strub-inlinable2.c
new file mode 100644
index 0000000..f9a6b4a
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-inlinable2.c
@@ -0,0 +1,7 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=all" } */
+
+#include "strub-inlinable1.c"
+
+/* With -fstrub=all, the caller becomes a strub context, so the strub-inlinable
+ callee is not rejected. */
diff --git a/gcc/testsuite/c-c++-common/torture/strub-ptrfn1.c b/gcc/testsuite/c-c++-common/torture/strub-ptrfn1.c
new file mode 100644
index 0000000..b4a7f39
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-ptrfn1.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict" } */
+
+typedef void ft (void);
+typedef void ft2 (int, int);
+extern ft __attribute__ ((__strub__)) fnac;
+
+ft * f (void) {
+ return fnac; /* { dg-message "incompatible|invalid conversion" } */
+}
diff --git a/gcc/testsuite/c-c++-common/torture/strub-ptrfn2.c b/gcc/testsuite/c-c++-common/torture/strub-ptrfn2.c
new file mode 100644
index 0000000..ef634d3
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-ptrfn2.c
@@ -0,0 +1,55 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=relaxed -Wpedantic" } */
+
+/* C++ does not warn about the partial incompatibilities.
+
+ The d_p () calls are actually rejected, even in C++, but they are XFAILed
+ here because we don't get far enough in the compilation as to observe them,
+ because the incompatibilities are errors without -fpermissive.
+ strub-ptrfn3.c uses -fpermissive to check those.
+ */
+
+extern int __attribute__ ((strub ("callable"))) bac (void);
+extern int __attribute__ ((strub ("disabled"))) bad (void);
+extern int __attribute__ ((strub ("internal"))) bar (void);
+extern int __attribute__ ((strub ("at-calls"))) bal (void);
+
+void __attribute__ ((strub))
+bap (void)
+{
+ int __attribute__ ((strub ("disabled"))) (*d_p) (void) = bad;
+ int __attribute__ ((strub ("callable"))) (*c_p) (void) = bac;
+ int __attribute__ ((strub ("at-calls"))) (*a_p) (void) = bal;
+
+ d_p = bac; /* { dg-warning "not quite compatible" "" { xfail c++ } } */
+ c_p = bad; /* { dg-warning "not quite compatible" "" { xfail c++ } } */
+ c_p = bar; /* { dg-warning "not quite compatible" "" { xfail c++ } } */
+ c_p = bal; /* { dg-message "incompatible|invalid conversion" } */
+ a_p = bac; /* { dg-message "incompatible|invalid conversion" } */
+
+ d_p (); /* { dg-error "indirect non-.strub. call in .strub. context" "" { xfail *-*-* } } */
+ c_p ();
+ a_p ();
+}
+
+void __attribute__ ((strub))
+baP (void)
+{
+ typedef int __attribute__ ((strub ("disabled"))) d_fn_t (void);
+ typedef int __attribute__ ((strub ("callable"))) c_fn_t (void);
+ typedef int __attribute__ ((strub ("at-calls"))) a_fn_t (void);
+
+ d_fn_t *d_p = bad;
+ c_fn_t *c_p = bac;
+ a_fn_t *a_p = bal;
+
+ d_p = bac; /* { dg-warning "not quite compatible" "" { xfail c++ } } */
+ c_p = bad; /* { dg-warning "not quite compatible" "" { xfail c++ } } */
+ c_p = bar; /* { dg-warning "not quite compatible" "" { xfail c++ } } */
+ c_p = bal; /* { dg-message "incompatible|invalid conversion" } */
+ a_p = bac; /* { dg-message "incompatible|invalid conversion" } */
+
+ d_p (); /* { dg-error "indirect non-.strub. call in .strub. context" "" { xfail *-*-* } } */
+ c_p ();
+ a_p ();
+}
diff --git a/gcc/testsuite/c-c++-common/torture/strub-ptrfn3.c b/gcc/testsuite/c-c++-common/torture/strub-ptrfn3.c
new file mode 100644
index 0000000..e1f179e
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-ptrfn3.c
@@ -0,0 +1,50 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=relaxed -Wpedantic -fpermissive" } */
+/* { dg-prune-output "command-line option .-fpermissive." } */
+
+/* See strub-ptrfn2.c. */
+
+extern int __attribute__ ((strub ("callable"))) bac (void);
+extern int __attribute__ ((strub ("disabled"))) bad (void);
+extern int __attribute__ ((strub ("internal"))) bar (void);
+extern int __attribute__ ((strub ("at-calls"))) bal (void);
+
+void __attribute__ ((strub))
+bap (void)
+{
+ int __attribute__ ((strub ("disabled"))) (*d_p) (void) = bad;
+ int __attribute__ ((strub ("callable"))) (*c_p) (void) = bac;
+ int __attribute__ ((strub ("at-calls"))) (*a_p) (void) = bal;
+
+ d_p = bac; /* { dg-warning "not quite compatible" "" { xfail c++ } } */
+ c_p = bad; /* { dg-warning "not quite compatible" "" { xfail c++ } } */
+ c_p = bar; /* { dg-warning "not quite compatible" "" { xfail c++ } } */
+ c_p = bal; /* { dg-message "incompatible|invalid conversion" } */
+ a_p = bac; /* { dg-message "incompatible|invalid conversion" } */
+
+ d_p (); /* { dg-error "indirect non-.strub. call in .strub. context" } */
+ c_p ();
+ a_p ();
+}
+
+void __attribute__ ((strub))
+baP (void)
+{
+ typedef int __attribute__ ((strub ("disabled"))) d_fn_t (void);
+ typedef int __attribute__ ((strub ("callable"))) c_fn_t (void);
+ typedef int __attribute__ ((strub ("at-calls"))) a_fn_t (void);
+
+ d_fn_t *d_p = bad;
+ c_fn_t *c_p = bac;
+ a_fn_t *a_p = bal;
+
+ d_p = bac; /* { dg-warning "not quite compatible" "" { xfail c++ } } */
+ c_p = bad; /* { dg-warning "not quite compatible" "" { xfail c++ } } */
+ c_p = bar; /* { dg-warning "not quite compatible" "" { xfail c++ } } */
+ c_p = bal; /* { dg-message "incompatible|invalid conversion" } */
+ a_p = bac; /* { dg-message "incompatible|invalid conversion" } */
+
+ d_p (); /* { dg-error "indirect non-.strub. call in .strub. context" } */
+ c_p ();
+ a_p ();
+}
diff --git a/gcc/testsuite/c-c++-common/torture/strub-ptrfn4.c b/gcc/testsuite/c-c++-common/torture/strub-ptrfn4.c
new file mode 100644
index 0000000..70b558a
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-ptrfn4.c
@@ -0,0 +1,43 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=relaxed" } */
+
+/* This is strub-ptrfn2.c without -Wpedantic.
+
+ Even C doesn't report the (not-quite-)compatible conversions without it. */
+
+extern int __attribute__ ((strub ("callable"))) bac (void);
+extern int __attribute__ ((strub ("disabled"))) bad (void);
+extern int __attribute__ ((strub ("internal"))) bar (void);
+extern int __attribute__ ((strub ("at-calls"))) bal (void);
+
+void __attribute__ ((strub))
+bap (void)
+{
+ int __attribute__ ((strub ("disabled"))) (*d_p) (void) = bad;
+ int __attribute__ ((strub ("callable"))) (*c_p) (void) = bac;
+ int __attribute__ ((strub ("at-calls"))) (*a_p) (void) = bal;
+
+ d_p = bac;
+ c_p = bad;
+ c_p = bar;
+ c_p = bal; /* { dg-message "incompatible|invalid conversion" } */
+ a_p = bac; /* { dg-message "incompatible|invalid conversion" } */
+}
+
+void __attribute__ ((strub))
+baP (void)
+{
+ typedef int __attribute__ ((strub ("disabled"))) d_fn_t (void);
+ typedef int __attribute__ ((strub ("callable"))) c_fn_t (void);
+ typedef int __attribute__ ((strub ("at-calls"))) a_fn_t (void);
+
+ d_fn_t *d_p = bad;
+ c_fn_t *c_p = bac;
+ a_fn_t *a_p = bal;
+
+ d_p = bac;
+ c_p = bad;
+ c_p = bar;
+ c_p = bal; /* { dg-message "incompatible|invalid conversion" } */
+ a_p = bac; /* { dg-message "incompatible|invalid conversion" } */
+}
diff --git a/gcc/testsuite/c-c++-common/torture/strub-pure1.c b/gcc/testsuite/c-c++-common/torture/strub-pure1.c
new file mode 100644
index 0000000..a262a08
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-pure1.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict -fdump-ipa-strub" } */
+
+/* Check that, along with a strub pure function call, we issue an asm statement
+ to make sure the watermark passed to it is not assumed to be unchanged. */
+
+int __attribute__ ((__strub__, __pure__))
+f() {
+ static int i; /* Stop it from being detected as const. */
+ return i;
+}
+
+int
+g() {
+ return f();
+}
+
+/* { dg-final { scan-ipa-dump-times "__asm__" 1 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/strub-pure2.c b/gcc/testsuite/c-c++-common/torture/strub-pure2.c
new file mode 100644
index 0000000..4c4bd50
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-pure2.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict -fdump-ipa-strub" } */
+
+/* Check that, along with a strub implicitly-pure function call, we issue an asm
+ statement to make sure the watermark passed to it is not assumed to be
+ unchanged. */
+
+int __attribute__ ((__strub__))
+#if ! __OPTIMIZE__ /* At -O0, implicit pure detection doesn't run. */
+__attribute__ ((__pure__))
+#endif
+f() {
+ static int i; /* Stop it from being detected as const. */
+ return i;
+}
+
+int
+g() {
+ return f();
+}
+
+/* { dg-final { scan-ipa-dump-times "__asm__" 1 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/strub-pure3.c b/gcc/testsuite/c-c++-common/torture/strub-pure3.c
new file mode 100644
index 0000000..ce195c6
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-pure3.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict -fdump-ipa-strub" } */
+
+/* Check that, along with a strub pure wrapping call, we issue an asm statement
+ to make sure the watermark passed to it is not assumed to be unchanged. */
+
+int __attribute__ ((__strub__ ("internal"), __pure__))
+f() {
+ static int i; /* Stop it from being detected as const. */
+ return i;
+}
+
+/* { dg-final { scan-ipa-dump-times "__asm__" 1 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/strub-pure4.c b/gcc/testsuite/c-c++-common/torture/strub-pure4.c
new file mode 100644
index 0000000..75cd54c
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-pure4.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict -fdump-ipa-strub" } */
+
+/* Check that, along with a strub implicitly-pure wrapping call, we issue an asm
+ statement to make sure the watermark passed to it is not assumed to be
+ unchanged. */
+
+int __attribute__ ((__strub__ ("internal")))
+#if ! __OPTIMIZE__ /* At -O0, implicit pure detection doesn't run. */
+__attribute__ ((__pure__))
+#endif
+f() {
+ static int i; /* Stop it from being detected as const. */
+ return i;
+}
+
+/* { dg-final { scan-ipa-dump-times "__asm__" 1 "strub" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/strub-run1.c b/gcc/testsuite/c-c++-common/torture/strub-run1.c
new file mode 100644
index 0000000..7458b3f
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-run1.c
@@ -0,0 +1,95 @@
+/* { dg-do run } */
+/* { dg-options "-fstrub=strict" } */
+
+/* Check that a non-strub function leaves a string behind in the stack, and that
+ equivalent strub functions don't. Avoid the use of red zones by avoiding
+ leaf functions. */
+
+const char test_string[] = "\x55\xde\xad\xbe\xef\xc0\x1d\xca\xfe\x55\xaa";
+
+/* Pad before and after the string on the stack, so that it's not overwritten by
+ regular stack use. */
+#define PAD 7
+
+static inline __attribute__ ((__always_inline__, __strub__ ("callable")))
+char *
+leak_string (void)
+{
+ /* We use this variable to avoid any stack red zone. Stack scrubbing covers
+ it, but __builtin_stack_address, that we take as a reference, doesn't, so
+ if e.g. callable() were to store the string in the red zone, we wouldn't
+ find it because it would be outside the range we searched. */
+ typedef void __attribute__ ((__strub__ ("callable"))) callable_t (char *);
+ callable_t *f = 0;
+
+ char s[2 * PAD + 1][sizeof (test_string)];
+ __builtin_strcpy (s[PAD], test_string);
+ asm ("" : "+m" (s), "+r" (f));
+
+ if (__builtin_expect (!f, 1))
+ return (char *) __builtin_stack_address ();
+
+ f (s[PAD]);
+ return 0;
+}
+
+static inline __attribute__ ((__always_inline__))
+int
+look_for_string (char *e)
+{
+ char *p = (char *) __builtin_stack_address ();
+
+ if (p == e)
+ __builtin_abort ();
+
+ if (p > e)
+ {
+ char *q = p;
+ p = e;
+ e = q;
+ }
+
+ for (char *re = e - sizeof (test_string); p < re; p++)
+ for (int i = 0; p[i] == test_string[i]; i++)
+ if (i == sizeof (test_string) - 1)
+ return i;
+
+ return 0;
+}
+
+static __attribute__ ((__noinline__, __noclone__))
+char *
+callable ()
+{
+ return leak_string ();
+}
+
+static __attribute__ ((__strub__ ("at-calls")))
+char *
+at_calls ()
+{
+ return leak_string ();
+}
+
+static __attribute__ ((__strub__ ("internal")))
+char *
+internal ()
+{
+ return leak_string ();
+}
+
+int main ()
+{
+  /* Since these tests check stack contents above the top of the stack, an
+ unexpected asynchronous signal or interrupt might overwrite the bits we
+ expect to find and cause spurious fails. Tolerate one such overall
+ spurious fail by retrying. */
+ int i = 1;
+ while (!look_for_string (callable ()))
+ if (!i--) __builtin_abort ();
+ while (look_for_string (at_calls ()))
+ if (!i--) __builtin_abort ();
+ while (look_for_string (internal ()))
+ if (!i--) __builtin_abort ();
+ __builtin_exit (0);
+}
diff --git a/gcc/testsuite/c-c++-common/torture/strub-run2.c b/gcc/testsuite/c-c++-common/torture/strub-run2.c
new file mode 100644
index 0000000..5d60a77
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-run2.c
@@ -0,0 +1,84 @@
+/* { dg-do run } */
+/* { dg-options "-fstrub=strict" } */
+
+/* Check that a non-strub function leaves a string behind in the stack, and that
+ equivalent strub functions don't. Allow red zones to be used. */
+
+const char test_string[] = "\x55\xde\xad\xbe\xef\xc0\x1d\xca\xfe\x55\xaa";
+
+/* Pad before and after the string on the stack, so that it's not overwritten by
+ regular stack use. */
+#define PAD 7
+
+static inline __attribute__ ((__always_inline__, __strub__ ("callable")))
+char *
+leak_string (void)
+{
+ int len = sizeof (test_string);
+ asm ("" : "+rm" (len));
+ char s[2 * PAD + 1][len];
+ __builtin_strcpy (s[PAD], test_string);
+ asm ("" : "+m" (s));
+ return (char *) __builtin_stack_address ();
+}
+
+static inline __attribute__ ((__always_inline__))
+int
+look_for_string (char *e)
+{
+ char *p = (char *) __builtin_stack_address ();
+
+ if (p == e)
+ __builtin_abort ();
+
+ if (p > e)
+ {
+ char *q = p;
+ p = e;
+ e = q;
+ }
+
+ for (char *re = e - sizeof (test_string); p < re; p++)
+ for (int i = 0; p[i] == test_string[i]; i++)
+ if (i == sizeof (test_string) - 1)
+ return i;
+
+ return 0;
+}
+
+static __attribute__ ((__noinline__, __noclone__))
+char *
+callable ()
+{
+ return leak_string ();
+}
+
+static __attribute__ ((__strub__ ("at-calls")))
+char *
+at_calls ()
+{
+ return leak_string ();
+}
+
+static __attribute__ ((__strub__ ("internal")))
+char *
+internal ()
+{
+ return leak_string ();
+}
+
+int main ()
+{
+  /* Since these tests check stack contents above the top of the stack, an
+ unexpected asynchronous signal or interrupt might overwrite the bits we
+ expect to find and cause spurious fails. Tolerate one such overall
+ spurious fail by retrying. */
+ int i = 1;
+ while (!look_for_string (callable ()))
+ if (!i--) __builtin_abort ();
+ while (look_for_string (at_calls ()))
+ if (!i--) __builtin_abort ();
+ while (look_for_string (internal ()))
+ if (!i--) __builtin_abort ();
+ __builtin_exit (0);
+}
diff --git a/gcc/testsuite/c-c++-common/torture/strub-run3.c b/gcc/testsuite/c-c++-common/torture/strub-run3.c
new file mode 100644
index 0000000..c2ad710
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-run3.c
@@ -0,0 +1,80 @@
+/* { dg-do run } */
+/* { dg-options "-fstrub=strict" } */
+/* { dg-require-effective-target alloca } */
+
+/* Check that a non-strub function leaves a string behind in the stack, and that
+ equivalent strub functions don't. */
+
+const char test_string[] = "\x55\xde\xad\xbe\xef\xc0\x1d\xca\xfe\x55\xaa";
+
+static inline __attribute__ ((__always_inline__, __strub__ ("callable")))
+char *
+leak_string (void)
+{
+ int len = sizeof (test_string);
+ char *s = (char *) __builtin_alloca (len);
+ __builtin_strcpy (s, test_string);
+ asm ("" : "+m" (s));
+ return (char *) __builtin_stack_address ();
+}
+
+static inline __attribute__ ((__always_inline__))
+int
+look_for_string (char *e)
+{
+ char *p = (char *) __builtin_stack_address ();
+
+ if (p == e)
+ __builtin_abort ();
+
+ if (p > e)
+ {
+ char *q = p;
+ p = e;
+ e = q;
+ }
+
+ for (char *re = e - sizeof (test_string); p < re; p++)
+ for (int i = 0; p[i] == test_string[i]; i++)
+ if (i == sizeof (test_string) - 1)
+ return i;
+
+ return 0;
+}
+
+static __attribute__ ((__noinline__, __noclone__))
+char *
+callable ()
+{
+ return leak_string ();
+}
+
+static __attribute__ ((__strub__ ("at-calls")))
+char *
+at_calls ()
+{
+ return leak_string ();
+}
+
+static __attribute__ ((__strub__ ("internal")))
+char *
+internal ()
+{
+ return leak_string ();
+}
+
+int main ()
+{
+  /* Since these tests check stack contents above the top of the stack, an
+ unexpected asynchronous signal or interrupt might overwrite the bits we
+ expect to find and cause spurious fails. Tolerate one such overall
+ spurious fail by retrying. */
+ int i = 1;
+ while (!look_for_string (callable ()))
+ if (!i--) __builtin_abort ();
+ while (look_for_string (at_calls ()))
+ if (!i--) __builtin_abort ();
+ while (look_for_string (internal ()))
+ if (!i--) __builtin_abort ();
+ __builtin_exit (0);
+}
diff --git a/gcc/testsuite/c-c++-common/torture/strub-run4.c b/gcc/testsuite/c-c++-common/torture/strub-run4.c
new file mode 100644
index 0000000..3b36b8e
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-run4.c
@@ -0,0 +1,106 @@
+/* { dg-do run } */
+/* { dg-options "-fstrub=all" } */
+/* { dg-require-effective-target alloca } */
+
+/* Check that multi-level, multi-inlined functions still get cleaned up as
+ expected, without overwriting temporary stack allocations while they should
+ still be available. */
+
+#ifndef ATTR_STRUB_AT_CALLS
+# define ATTR_STRUB_AT_CALLS /* Defined in strub-run4d.c. */
+#endif
+
+const char test_string[] = "\x55\xde\xad\xbe\xef\xc0\x1d\xca\xfe\x55\xaa";
+
+static inline __attribute__ ((__always_inline__))
+char *
+leak_string (void)
+{
+ int __attribute__ ((__strub__)) len = 512;
+ asm ("" : "+r" (len));
+ char s[len];
+ __builtin_strcpy (s, test_string);
+ __builtin_strcpy (s + len - sizeof (test_string), test_string);
+ asm ("" : "+m" (s));
+ return (char *) __builtin_stack_address ();
+}
+
+static inline __attribute__ ((__always_inline__))
+int
+look_for_string (char *e)
+{
+ char *p = (char *) __builtin_stack_address ();
+
+ if (p == e)
+ __builtin_abort ();
+
+ if (p > e)
+ {
+ char *q = p;
+ p = e;
+ e = q;
+ }
+
+ for (char *re = e - sizeof (test_string); p < re; p++)
+ for (int i = 0; p[i] == test_string[i]; i++)
+ if (i == sizeof (test_string) - 1)
+ return i;
+
+ return 0;
+}
+
+static inline ATTR_STRUB_AT_CALLS
+char *
+innermost ()
+{
+ int __attribute__ ((__strub__)) len = 512;
+ asm ("" : "+r" (len));
+ char s[len];
+ __builtin_strcpy (s, test_string);
+ __builtin_strcpy (s + len - sizeof (test_string), test_string);
+ asm ("" : "+m" (s));
+ char *ret = leak_string ();
+ if (__builtin_strcmp (s, test_string) != 0)
+ __builtin_abort ();
+ if (__builtin_strcmp (s + len - sizeof (test_string), test_string) != 0)
+ __builtin_abort ();
+ return ret;
+}
+
+static inline ATTR_STRUB_AT_CALLS
+char *
+intermediate ()
+{
+ int __attribute__ ((__strub__)) len = 512;
+ asm ("" : "+r" (len));
+ char s[len];
+ __builtin_strcpy (s, test_string);
+ __builtin_strcpy (s + len - sizeof (test_string), test_string);
+ asm ("" : "+m" (s));
+ char *ret = innermost ();
+ if (__builtin_strcmp (s, test_string) != 0)
+ __builtin_abort ();
+ if (__builtin_strcmp (s + len - sizeof (test_string), test_string) != 0)
+ __builtin_abort ();
+ return ret;
+}
+
+static inline __attribute__ ((__strub__ ("internal")))
+char *
+internal ()
+{
+ return intermediate ();
+}
+
+int __attribute__ ((__strub__ ("disabled")))
+main ()
+{
+  /* Since these tests check stack contents above the top of the stack, an
+ unexpected asynchronous signal or interrupt might overwrite the bits we
+ expect to find and cause spurious fails. Tolerate one such overall
+ spurious fail by retrying. */
+ int i = 1;
+ while (look_for_string (internal ()))
+ if (!i--) __builtin_abort ();
+ __builtin_exit (0);
+}
diff --git a/gcc/testsuite/c-c++-common/torture/strub-run4c.c b/gcc/testsuite/c-c++-common/torture/strub-run4c.c
new file mode 100644
index 0000000..57f9baf
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-run4c.c
@@ -0,0 +1,5 @@
+/* { dg-do run } */
+/* { dg-options "-fstrub=at-calls" } */
+/* { dg-require-effective-target alloca } */
+
+#include "strub-run4.c"
diff --git a/gcc/testsuite/c-c++-common/torture/strub-run4d.c b/gcc/testsuite/c-c++-common/torture/strub-run4d.c
new file mode 100644
index 0000000..08de3f1
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-run4d.c
@@ -0,0 +1,7 @@
+/* { dg-do run } */
+/* { dg-options "-fstrub=strict" } */
+/* { dg-require-effective-target alloca } */
+
+#define ATTR_STRUB_AT_CALLS __attribute__ ((__strub__ ("at-calls")))
+
+#include "strub-run4.c"
diff --git a/gcc/testsuite/c-c++-common/torture/strub-run4i.c b/gcc/testsuite/c-c++-common/torture/strub-run4i.c
new file mode 100644
index 0000000..459f688
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/strub-run4i.c
@@ -0,0 +1,5 @@
+/* { dg-do run } */
+/* { dg-options "-fstrub=internal" } */
+/* { dg-require-effective-target alloca } */
+
+#include "strub-run4.c"
diff --git a/gcc/testsuite/g++.dg/DRs/dr2262.C b/gcc/testsuite/g++.dg/DRs/dr2262.C
new file mode 100644
index 0000000..88b8a31
--- /dev/null
+++ b/gcc/testsuite/g++.dg/DRs/dr2262.C
@@ -0,0 +1,16 @@
+// DR 2262 - Attributes for asm-definition
+// { dg-do compile { target c++11 } }
+// { dg-options "-Wattributes" }
+
+[[]] asm ("nop");
+[[foo::bar]] asm ("nop"); // { dg-warning "attributes ignored on 'asm' declaration" }
+
+void
+foo ()
+{
+ int i = 42;
+ [[]] asm ("nop");
+ [[foo::bar]] asm ("nop"); // { dg-warning "attributes ignored on 'asm' declaration" }
+ [[]] asm ("nop" : "+r" (i));
+ [[foo::bar]] [[bar::baz]] asm ("nop" : "+r" (i)); // { dg-warning "attributes ignored on 'asm' declaration" }
+}
diff --git a/gcc/testsuite/g++.dg/abi/mangle-concepts1.C b/gcc/testsuite/g++.dg/abi/mangle-concepts1.C
new file mode 100644
index 0000000..eac520c
--- /dev/null
+++ b/gcc/testsuite/g++.dg/abi/mangle-concepts1.C
@@ -0,0 +1,88 @@
+// { dg-do compile { target c++20 } }
+
+template <class T> concept C = true;
+template <class T, class U> concept C2 = true;
+template <class T> concept D = true;
+template <class T> concept E = true;
+template <class T> concept F = true;
+template <class T> using Ref = T&;
+
+// { dg-final { scan-assembler "_Z1fIiQ1CIT_EEvv" } }
+template <class T> requires C<T> void f() {}
+template void f<int>();
+
+// { dg-final { scan-assembler "_Z2f2ITk1CiEvv" } }
+template <C T> void f2() {}
+template void f2<int>();
+
+// { dg-final { scan-assembler "_Z2f3IiEvvQ1CIT_E" } }
+template <class T> void f3() requires C<T> {}
+template void f3<int>();
+
+// { dg-final { scan-assembler "_Z2f4ITk1CiEvT_" } }
+void f4(C auto c) {}
+template void f4(int);
+
+// ??? The constraints end up out of order in the mangled name, may
+// need to change the equivalence rule.
+// { dg-final { scan-assembler "_Z2f5ITk1CicTk1EfTk1FsQ1DIT0_EEvT1_T2_" } }
+template <C T, class U> requires D<U> void f5(E auto c, F auto f) {}
+template void f5<int,char>(float,short);
+
+// { dg-final { scan-assembler "_Z2f6ITk2C2IiEsEvv" } }
+template <C2<int> T> void f6() {}
+template void f6<short>();
+
+// { dg-final { scan-assembler "_ZN1AIiE1fEvQ1CIT_E" } }
+template <class T> struct A {
+ void f() requires C<T> { };
+};
+template struct A<int>;
+
+// { dg-final { scan-assembler "_Z1gIiQrqXcvT__ETRS0_Q1CIS0_EXpscvS0__ENR1CEEvv" } }
+template <class T>
+requires requires { T();
+ typename Ref<T>;
+ requires C<T>;
+ { +T() } noexcept -> C;
+}
+void g() {}
+template void g<int>();
+
+// { dg-final { scan-assembler "_Z1hIiQrQT__Xpsfp_EEvv" } }
+template <class T>
+requires requires (T t) { +t; }
+void h() {}
+template void h<int>();
+
+// { dg-final { scan-assembler "_Z3fn1IiEvT_QrQS0__XpsfL0p_Xpsfp_E" } }
+template <class T>
+void fn1(T t1)
+ requires requires (T t2) { +t1; +t2; }
+{}
+template void fn1<int>(int);
+
+// { dg-final { scan-assembler "_Z3fn3IiTk2C2IDtfL0p_EEiEvT_T0_" } }
+template<typename T> void fn3(T t, C2<decltype(t)> auto) {}
+template void fn3(int, int);
+
+// { dg-final { scan-assembler "_Z3fn4IiiEvT_T0_Q2C2IS1_FDTcl3fn3fL0p_fp_EES0_EE" } }
+template<typename T, typename U> void fn4(T t, U u)
+ requires C2<U, auto (T u) -> decltype(fn3(t, u))> {}
+template void fn4(int, int);
+
+// { dg-final { scan-assembler "_Z3fn5ITpTk1CJicfEEvDpT_" } }
+template<C... T> void fn5(T...) { }
+template void fn5(int,char,float);
+
+// { dg-final { scan-assembler "_ZN2A2IiE1BIiE1fIiiEEvvQ2C2IT_TL1_0_E" } }
+template <class T> struct A2 {
+ template <class X> struct B {
+ template <class U, class V> void f() requires C2<T,V> {}
+ };
+};
+template void A2<int>::B<int>::f<int,int>();
+
+template<C auto N> void f7() {}
+// { dg-final { scan-assembler "_Z2f7ITnDk1CLi5EEvv" } }
+template void f7<5>();
diff --git a/gcc/testsuite/g++.dg/abi/mangle-ttp1.C b/gcc/testsuite/g++.dg/abi/mangle-ttp1.C
new file mode 100644
index 0000000..2f5878f
--- /dev/null
+++ b/gcc/testsuite/g++.dg/abi/mangle-ttp1.C
@@ -0,0 +1,27 @@
+// ABI #47 "natural" template parameter mangling
+// { dg-do compile { target c++17 } }
+
+template <template <class...> class TT> class A { };
+template <int... T> class B { };
+
+template <auto... T>
+void f(B<T...> b);
+
+template <template <auto...> class TT>
+void g(TT<42>);
+
+template <template <int...> class TT>
+void h(TT<42>);
+
+template <class T> struct C {
+ template <template <T...> class TT> static void j(TT<42>);
+};
+
+int main()
+{
+ B<42> b;
+ f(b); // { dg-final { scan-assembler "_Z1fITpTnDaJLi42EEEv1BIJXspT_EEE" } }
+ g(b); // { dg-final { scan-assembler "_Z1gITtTpTnDaE1BEvT_IJLi42EEE" } }
+ h(b); // { dg-final { scan-assembler "_Z1hI1BEvT_IJLi42EEE" } }
+ C<int>::j(b); // { dg-final { scan-assembler "_ZN1CIiE1jI1BEEvT_IJLi42EEE" } }
+}
diff --git a/gcc/testsuite/g++.dg/abi/mangle10.C b/gcc/testsuite/g++.dg/abi/mangle10.C
index d5782ba..fcbb815 100644
--- a/gcc/testsuite/g++.dg/abi/mangle10.C
+++ b/gcc/testsuite/g++.dg/abi/mangle10.C
@@ -1,4 +1,4 @@
-// { dg-options "-fabi-version=0" }
+// { dg-options "-fabi-version=0 -fabi-compat-version=0" }
template <template <typename> class Q>
void f (typename Q<int>::X) {}
diff --git a/gcc/testsuite/g++.dg/abi/mangle52.C b/gcc/testsuite/g++.dg/abi/mangle52.C
index 0b9a72f..1e7eca0 100644
--- a/gcc/testsuite/g++.dg/abi/mangle52.C
+++ b/gcc/testsuite/g++.dg/abi/mangle52.C
@@ -1,4 +1,4 @@
-// { dg-options "-fabi-version=0 -Wabi=2" }
+// { dg-options "-fabi-version=18 -Wabi=2" }
template <unsigned int> struct helper {};
// { dg-final { scan-assembler "\n_?_Z6check1IiEvP6helperIXszscT_Li1EEE\[: \t\n\]" } }
diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-array-ptr6.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-array-ptr6.C
index 1c06512..d212665 100644
--- a/gcc/testsuite/g++.dg/cpp0x/constexpr-array-ptr6.C
+++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-array-ptr6.C
@@ -12,7 +12,7 @@ constexpr auto sz_d = size(array_double);
static_assert(sz_d == 3, "Array size failure");
void f(bool (&param)[2]) {
- static_assert(size(param) == 2, "Array size failure"); // { dg-error "" }
+ static_assert(size(param) == 2, "Array size failure");
short data[] = {-1, 2, -45, 6, 88, 99, -345};
static_assert(size(data) == 7, "Array size failure");
}
diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-noreturn1.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-noreturn1.C
new file mode 100644
index 0000000..08c10e8
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-noreturn1.C
@@ -0,0 +1,12 @@
+// { dg-do compile { target c++11 } }
+// { dg-additional-options -Winvalid-constexpr }
+
+// We were giving a wrong error about loading a volatile value instead of the
+// proper error about calling a non-constexpr function.
+
+[[noreturn]] void f();
+
+constexpr int g()
+{
+ return f(), 42; // { dg-message "call to non-'constexpr' function" }
+}
diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-ref12.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-ref12.C
index 7c3ce66..f450014 100644
--- a/gcc/testsuite/g++.dg/cpp0x/constexpr-ref12.C
+++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-ref12.C
@@ -40,7 +40,7 @@ void f(a ap, a& arp)
static_assert (g(ar2),""); // { dg-error "constant" }
static_assert (h(ar2),""); // { dg-error "constant" }
- static_assert (arp.g(),""); // { dg-error "constant" }
- static_assert (g(arp),""); // { dg-error "constant" }
+ static_assert (arp.g(),"");
+ static_assert (g(arp),"");
static_assert (h(arp),""); // { dg-error "constant" }
}
diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-ref13.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-ref13.C
new file mode 100644
index 0000000..f260275
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-ref13.C
@@ -0,0 +1,41 @@
+// P2280R4 - Using unknown pointers and references in constant expressions
+// PR c++/106650
+// { dg-do compile { target c++11 } }
+
+using size_t = decltype(sizeof(42));
+
+template <typename T, size_t N>
+constexpr auto array_size(T (&)[N]) -> size_t {
+ return N;
+}
+
+extern int (&r)[42];
+constexpr int i = array_size (r);
+
+void check(int const (&param)[3]) {
+ int local[] = {1, 2, 3};
+ constexpr auto s0 = array_size(local);
+ constexpr auto s1 = array_size(param);
+}
+
+template <typename T, size_t N>
+constexpr size_t array_size_ptr(T (*)[N]) {
+ return N;
+}
+
+void check_ptr(int const (*param)[3]) {
+ constexpr auto s2 = array_size_ptr(param); // { dg-error "not a constant" }
+}
+
+struct A
+{
+ constexpr int f() { return 42; }
+ void g() { constexpr int i = f(); }
+ void g2() { constexpr int i = this->f(); }
+};
+
+struct B {
+ constexpr static bool b = false;
+ void g() noexcept(b) { }
+ void g2() noexcept(this->b) { }
+};
diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-ref2.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-ref2.C
index 7697363..d5327c2 100644
--- a/gcc/testsuite/g++.dg/cpp0x/constexpr-ref2.C
+++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-ref2.C
@@ -4,8 +4,8 @@
extern int *p;
constexpr int& ri = *p; // { dg-error "p" }
-extern constexpr int &er; // { dg-error "not a definition" }
-constexpr int& ri2 = er; // { dg-error "er" }
+extern constexpr int &er; // { dg-error "not a definition|not a constant" }
+constexpr int& ri2 = er;
void f(int j)
{
diff --git a/gcc/testsuite/g++.dg/cpp0x/gen-attrs-76.C b/gcc/testsuite/g++.dg/cpp0x/gen-attrs-76.C
index 72cd4b3..eba88fb 100644
--- a/gcc/testsuite/g++.dg/cpp0x/gen-attrs-76.C
+++ b/gcc/testsuite/g++.dg/cpp0x/gen-attrs-76.C
@@ -8,9 +8,9 @@ namespace P {}
void
foo ()
{
- [[]] asm (""); // { dg-error "expected" }
+ [[]] asm ("");
[[]] __extension__ asm (""); // { dg-error "expected" }
- __extension__ [[]] asm (""); // { dg-error "expected" }
+ __extension__ [[]] asm ("");
[[]] namespace M = ::N; // { dg-error "expected" }
[[]] using namespace N; // { dg-bogus "expected" }
using namespace P [[]]; // { dg-error "expected" }
@@ -22,9 +22,9 @@ foo ()
void
bar ()
{
- [[gnu::unused]] asm (""); // { dg-error "expected" }
+ [[gnu::unused]] asm ("");
[[gnu::unused]] __extension__ asm (""); // { dg-error "expected" }
- __extension__ [[gnu::unused]] asm (""); // { dg-error "expected" }
+ __extension__ [[gnu::unused]] asm ("");
[[gnu::unused]] namespace M = ::N; // { dg-error "expected" }
[[gnu::unused]] using namespace N; // { dg-bogus "expected" }
using namespace P [[gnu::unused]]; // { dg-error "expected" }
diff --git a/gcc/testsuite/g++.dg/cpp0x/noexcept34.C b/gcc/testsuite/g++.dg/cpp0x/noexcept34.C
index 963881b..5cb9967 100644
--- a/gcc/testsuite/g++.dg/cpp0x/noexcept34.C
+++ b/gcc/testsuite/g++.dg/cpp0x/noexcept34.C
@@ -7,13 +7,13 @@ template<typename> struct A
{
constexpr int f () { return 0; }
bool b = true;
- void g () noexcept (f()) { } // { dg-error ".this. is not a constant" }
- void g2 () noexcept (this->f()) { } // { dg-error ".this. is not a constant" }
+ void g () noexcept (f()) { }
+ void g2 () noexcept (this->f()) { }
void g3 () noexcept (b) { } // { dg-error "use of .this. in a constant expression|use of parameter|.this. is not a constant" }
void g4 (int i) noexcept (i) { } // { dg-error "use of parameter" }
- void g5 () noexcept (A::f()) { } // { dg-error ".this. is not a constant" }
+ void g5 () noexcept (A::f()) { }
void g6 () noexcept (foo(b)) { } // { dg-error "use of .this. in a constant expression|use of parameter|.this. is not a constant" }
- void g7 () noexcept (int{f()}) { } // { dg-error ".this. is not a constant" }
+ void g7 () noexcept (int{f()}) { }
};
int main ()
diff --git a/gcc/testsuite/g++.dg/cpp1y/lambda-generic-const10.C b/gcc/testsuite/g++.dg/cpp1y/lambda-generic-const10.C
index 2f48dae..47a49f5 100644
--- a/gcc/testsuite/g++.dg/cpp1y/lambda-generic-const10.C
+++ b/gcc/testsuite/g++.dg/cpp1y/lambda-generic-const10.C
@@ -11,7 +11,7 @@ int main()
constexpr auto x = f(); //ok, call constexpr const non-static method
[](auto const &f) {
- constexpr auto x = f(); // { dg-error "" }
+ constexpr auto x = f();
}(f);
[&]() {
diff --git a/gcc/testsuite/g++.dg/cpp1z/array-prvalue1.C b/gcc/testsuite/g++.dg/cpp1z/array-prvalue1.C
new file mode 100644
index 0000000..e837d32
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp1z/array-prvalue1.C
@@ -0,0 +1,7 @@
+// PR c++/94264
+// { dg-do compile { target c++17 } }
+
+int main() {
+ using T = int[];
+ T{1, 2} == nullptr;
+}
diff --git a/gcc/testsuite/g++.dg/cpp1z/constexpr-ref1.C b/gcc/testsuite/g++.dg/cpp1z/constexpr-ref1.C
new file mode 100644
index 0000000..8277181
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp1z/constexpr-ref1.C
@@ -0,0 +1,26 @@
+// P2280R4 - Using unknown pointers and references in constant expressions
+// PR c++/106650
+// { dg-do compile { target c++17 } }
+
+#include <type_traits>
+
+template <typename T, typename U>
+constexpr bool is_type(U &&)
+{
+ return std::is_same_v<T, std::decay_t<U>>;
+}
+
+auto visitor = [](auto&& v) {
+ if constexpr(is_type<int>(v)) {
+ // ...
+ } else if constexpr(is_type<char>(v)) {
+ // ...
+ }
+};
+
+void
+g (int i)
+{
+ visitor (i);
+ constexpr bool b = is_type<int>(i);
+}
diff --git a/gcc/testsuite/g++.dg/cpp1z/constexpr-ref2.C b/gcc/testsuite/g++.dg/cpp1z/constexpr-ref2.C
new file mode 100644
index 0000000..ca73437
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp1z/constexpr-ref2.C
@@ -0,0 +1,23 @@
+// P2280R4 - Using unknown pointers and references in constant expressions
+// PR c++/106650
+// { dg-do compile { target c++17 } }
+
+template <bool V>
+struct Widget {
+ struct Config {
+ static constexpr bool value = V;
+ } config;
+
+ void f() {
+ if constexpr (config.value) {
+ // ...
+ }
+ }
+};
+
+void
+g ()
+{
+ Widget<false> w;
+ w.f();
+}
diff --git a/gcc/testsuite/g++.dg/cpp23/consteval-if10.C b/gcc/testsuite/g++.dg/cpp23/consteval-if10.C
index 4c0523f..b8709be 100644
--- a/gcc/testsuite/g++.dg/cpp23/consteval-if10.C
+++ b/gcc/testsuite/g++.dg/cpp23/consteval-if10.C
@@ -2,6 +2,9 @@
// { dg-do compile { target c++20 } }
// { dg-options "" }
+// We used to give errors but the lambdas are now promoted to consteval
+// and are in a immediate function context, so no errors.
+
consteval int foo (int x) { return x; }
constexpr int
@@ -10,7 +13,7 @@ bar (int x)
int r = 0;
if consteval // { dg-warning "'if consteval' only available with" "" { target c++20_only } }
{
- auto y = [=] { foo (x); }; // { dg-error "'x' is not a constant expression" }
+ auto y = [=] { foo (x); };
y ();
}
return r;
@@ -23,7 +26,7 @@ baz (T x)
T r = 0;
if consteval // { dg-warning "'if consteval' only available with" "" { target c++20_only } }
{
- auto y = [=] { foo (x); }; // { dg-error "'x' is not a constant expression" }
+ auto y = [=] { foo (x); };
y ();
}
return r;
diff --git a/gcc/testsuite/g++.dg/cpp23/consteval-if2.C b/gcc/testsuite/g++.dg/cpp23/consteval-if2.C
index b2c5472..3b25871 100644
--- a/gcc/testsuite/g++.dg/cpp23/consteval-if2.C
+++ b/gcc/testsuite/g++.dg/cpp23/consteval-if2.C
@@ -33,7 +33,7 @@ baz (int x)
int r = 0;
if not consteval // { dg-warning "'if consteval' only available with" "" { target c++20_only } }
{
- r += foo (x); // { dg-error "'x' is not a constant expression" }
+ r += foo (x); // { dg-error "not a constant expression" }
}
else
{
@@ -45,11 +45,11 @@ baz (int x)
}
else
{
- r += foo (8 * x); // { dg-error "'x' is not a constant expression" }
+ r += foo (8 * x); // { dg-error "is not a constant expression" }
}
if ! consteval // { dg-warning "'if consteval' only available with" "" { target c++20_only } }
{
- r += foo (32 * x);// { dg-error "'x' is not a constant expression" }
+ r += foo (32 * x);// { dg-error "not a constant expression" }
}
if consteval // { dg-warning "'if consteval' only available with" "" { target c++20_only } }
{
@@ -98,7 +98,7 @@ corge (T x)
T r = 0;
if not consteval // { dg-warning "'if consteval' only available with" "" { target c++20_only } }
{
- r += foo (x); // { dg-error "'x' is not a constant expression" }
+ r += foo (x);
}
else
{
@@ -110,11 +110,11 @@ corge (T x)
}
else
{
- r += foo (8 * x); // { dg-error "is not a constant expression" }
+ r += foo (8 * x);
}
if ! consteval // { dg-warning "'if consteval' only available with" "" { target c++20_only } }
{
- r += foo (32 * x);// { dg-error "is not a constant expression" }
+ r += foo (32 * x);
}
if consteval // { dg-warning "'if consteval' only available with" "" { target c++20_only } }
{
@@ -126,5 +126,5 @@ corge (T x)
int
garply (int x)
{
- return corge (x);
+ return corge (x); // { dg-error "is not a constant expression" }
}
diff --git a/gcc/testsuite/g++.dg/cpp23/feat-cxx2b.C b/gcc/testsuite/g++.dg/cpp23/feat-cxx2b.C
index 9e29b01..2b21bd1 100644
--- a/gcc/testsuite/g++.dg/cpp23/feat-cxx2b.C
+++ b/gcc/testsuite/g++.dg/cpp23/feat-cxx2b.C
@@ -480,8 +480,8 @@
#ifndef __cpp_consteval
# error "__cpp_consteval"
-#elif __cpp_consteval != 201811
-# error "__cpp_consteval != 201811"
+#elif __cpp_consteval != 202211L
+# error "__cpp_consteval != 202211L"
#endif
#ifndef __cpp_concepts
diff --git a/gcc/testsuite/g++.dg/cpp26/feat-cxx26.C b/gcc/testsuite/g++.dg/cpp26/feat-cxx26.C
index 80e8ef6..4507ea0 100644
--- a/gcc/testsuite/g++.dg/cpp26/feat-cxx26.C
+++ b/gcc/testsuite/g++.dg/cpp26/feat-cxx26.C
@@ -480,8 +480,8 @@
#ifndef __cpp_consteval
# error "__cpp_consteval"
-#elif __cpp_consteval != 201811
-# error "__cpp_consteval != 201811"
+#elif __cpp_consteval != 202211L
+# error "__cpp_consteval != 202211L"
#endif
#ifndef __cpp_concepts
@@ -584,7 +584,7 @@
# error "__cpp_auto_cast != 202110"
#endif
-// C++23 attributes:
+// C++23 attributes:
#ifdef __has_cpp_attribute
# if ! __has_cpp_attribute(assume)
@@ -595,3 +595,11 @@
#else
# error "__has_cpp_attribute"
#endif
+
+// C++26 features:
+
+#ifndef __cpp_placeholder_variables
+# error "__cpp_placeholder_variables"
+#elif __cpp_placeholder_variables != 202306
+# error "__cpp_placeholder_variables != 202306"
+#endif
diff --git a/gcc/testsuite/g++.dg/cpp26/name-independent-decl1.C b/gcc/testsuite/g++.dg/cpp26/name-independent-decl1.C
new file mode 100644
index 0000000..0830ce8
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp26/name-independent-decl1.C
@@ -0,0 +1,194 @@
+// P2169R4 - A nice placeholder with no name
+// { dg-do compile { target c++11 } }
+// { dg-options "-Wunused-variable -Wunused-but-set-variable -Wunused-parameter -Wshadow" }
+
+int a[3];
+
+void
+foo ()
+{
+ {
+ int _ = 1;
+ ++_;
+ }
+ {
+ int _ = 3;
+ ++_;
+ int _ = 4; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ }
+ {
+ int _ = 5;
+ --_;
+ int _ = 6; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ int _ = 7; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ }
+ {
+ auto [i, j, _] = a; // { dg-warning "structured bindings only available with" "" { target c++14_down } }
+ ++i;
+ ++_;
+ }
+ {
+ auto [_, _, k] = a; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ ++k; // { dg-warning "structured bindings only available with" "" { target c++14_down } .-1 }
+ }
+ {
+ auto [i, j, _] = a; // { dg-warning "structured bindings only available with" "" { target c++14_down } }
+ auto [_, k, l] = a; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ ++i; // { dg-warning "structured bindings only available with" "" { target c++14_down } .-1 }
+ ++l;
+ }
+ {
+ int _;
+ _ = 1;
+ }
+ {
+ int _ = 1;
+ }
+ {
+ int _;
+ }
+ {
+ static int _; // { dg-warning "unused variable" }
+ int _ = 1; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ }
+ {
+ extern int _ (int);
+ extern long _ (long);
+ extern float _ (float);
+ int _ = 1; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ }
+ {
+ extern double _ (double);
+ extern short _ (short);
+ int _ = 1; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ int _ = 2; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ }
+ {
+ int _ = 1;
+ {
+ int _ = 2;
+ ++_;
+ }
+ {
+ static int _ = 3;
+ ++_;
+ }
+ {
+ auto [i, j, _] = a; // { dg-warning "structured bindings only available with" "" { target c++14_down } }
+ ++_;
+ }
+ }
+}
+
+int
+bar (int _ = 0) // { dg-warning "unused parameter '_'" }
+{
+ int _ = 1; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ return 0;
+}
+
+void
+baz ()
+{
+ if (int _ = bar ())
+ int _ = 2; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ else
+ int _ = 3; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ while (int _ = bar ())
+ int _ = 4; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ for (int _ = bar (); _; ++_)
+ int _ = 5; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ if (int _ = bar ())
+ {
+ int _ = 6; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ }
+ else
+ {
+ int _ = 7; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ }
+ while (int _ = bar ())
+ {
+ int _ = 8; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ }
+ for (int _ = bar (); _; ++_)
+ {
+ int _ = 9; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ }
+}
+
+void
+qux (short _ = 0) // { dg-warning "unused parameter '_'" }
+{
+ {
+ long _ = 1;
+ }
+}
+
+void
+corge ()
+{
+ auto b = [_ = 1] () { (void) _; }; // { dg-warning "lambda capture initializers only available with" "" { target c++11_down } }
+ // { dg-warning "variable 'b' set but not used" "" { target *-*-* } .-1 }
+ auto c = [_ = 2, _ = 3] () {};// { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ // { dg-warning "lambda capture initializers only available with" "" { target c++11_down } .-1 }
+ // { dg-warning "variable 'c' set but not used" "" { target *-*-* } .-2 }
+ {
+ int _ = 4;
+ auto d = [_, _ = 5] () {}; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ } // { dg-warning "lambda capture initializers only available with" "" { target c++11_down } .-1 }
+ // { dg-warning "variable 'd' set but not used" "" { target *-*-* } .-2 }
+ {
+ int _ = 5;
+ auto e = [_ = 6] () {}; // { dg-warning "lambda capture initializers only available with" "" { target c++11_down } }
+ } // { dg-warning "variable 'e' set but not used" "" { target *-*-* } .-1 }
+}
+
+namespace A {
+ int _ = 11;
+}
+
+void
+garply (int x, // { dg-warning "unused parameter 'x'" }
+ int _, // { dg-warning "unused parameter '_'" }
+ int)
+{
+}
+
+void
+fred ()
+{
+ try {
+ } catch (int _) {
+ int _ = 5; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ }
+}
+
+void
+waldo (int _) // { dg-warning "unused parameter '_'" }
+try
+{
+}
+catch (int _) // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+{
+ int _ = 7;
+}
+
+void
+grault (int _) // { dg-warning "unused parameter '_'" }
+try
+{
+}
+catch (int)
+{
+ int _ = 8; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+}
+
+void
+plugh (int _) // { dg-warning "unused parameter '_'" }
+try
+{
+ int _ = 1; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+}
+catch (int)
+{
+}
diff --git a/gcc/testsuite/g++.dg/cpp26/name-independent-decl2.C b/gcc/testsuite/g++.dg/cpp26/name-independent-decl2.C
new file mode 100644
index 0000000..84aa27d
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp26/name-independent-decl2.C
@@ -0,0 +1,171 @@
+// P2169R4 - A nice placeholder with no name
+// { dg-do compile { target c++11 } }
+// { dg-options "" }
+
+int a[3];
+
+void
+foo ()
+{
+ {
+ extern int _ (int);
+ int _ = 2; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ extern long _ (long); // { dg-error "redeclared as different kind of entity" }
+ }
+ {
+ int _ = 3;
+ extern int _ (int); // { dg-error "redeclared as different kind of entity" }
+ }
+ {
+ int _ = 4;
+ static int _ = 5; // { dg-error "redeclaration of 'int _'" }
+ } // { dg-message "static variable is not name-independent" "" { target c++26 } .-1 }
+ {
+ int _ = 6;
+ int _ = 7; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ ++_; // { dg-error "reference to '_' is ambiguous" }
+ }
+ {
+ int _ = 8;
+ int _ = 9; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ int _ = 10; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ ++_; // { dg-error "reference to '_' is ambiguous" }
+ }
+ {
+ static int _ = 11;
+ static int _ = 12; // { dg-error "redeclaration of 'int _'" }
+ int _ = 13; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ } // { dg-message "static variable is not name-independent" "" { target c++26 } .-2 }
+ {
+ extern int _ (int);
+ extern long _ (long);
+ extern float _ (float);
+ int _ = 1; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ ++_; // { dg-error "reference to '_' is ambiguous" }
+ }
+ {
+ extern double _ (double);
+ extern short _ (short);
+ int _ = 1; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ ++_; // { dg-error "reference to '_' is ambiguous" }
+ int _ = 2; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ ++_; // { dg-error "reference to '_' is ambiguous" }
+ }
+ {
+ auto [i, _, _] = a; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ // { dg-warning "structured bindings only available with" "" { target c++14_down } .-1 }
+ ++_; // { dg-error "reference to '_' is ambiguous" }
+ }
+ {
+ auto [i, j, _] = a; // { dg-warning "structured bindings only available with" "" { target c++14_down } }
+ auto [k, _, l] = a; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ // { dg-warning "structured bindings only available with" "" { target c++14_down } .-1 }
+ ++_; // { dg-error "reference to '_' is ambiguous" }
+ }
+ {
+ static auto [i, _, _] = a; // { dg-error "redeclaration of 'auto _'" }
+ // { dg-warning "structured bindings only available with" "" { target c++14_down } .-1 }
+ // { dg-warning "structured binding declaration can be 'static' only in" "" { target c++17_down } .-2 }
+ } // { dg-message "static structured binding is not name-independent" "" { target c++26 } .-3 }
+}
+
+int
+bar (int _ = 0)
+{
+ int _ = 1; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ ++_; // { dg-error "reference to '_' is ambiguous" }
+ return 0;
+}
+
+void
+baz ()
+{
+ if (int _ = bar ())
+ {
+ int _ = 6; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ ++_; // { dg-error "reference to '_' is ambiguous" }
+ }
+ else
+ {
+ int _ = 7; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ ++_; // { dg-error "reference to '_' is ambiguous" }
+ }
+ while (int _ = bar ())
+ {
+ int _ = 8; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ ++_; // { dg-error "reference to '_' is ambiguous" }
+ }
+ for (int _ = bar (); _; ++_)
+ {
+ int _ = 9; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ ++_; // { dg-error "reference to '_' is ambiguous" }
+ }
+}
+
+namespace A
+{
+ int _ = 1;
+ int _ = 1; // { dg-error "redefinition of 'int A::_'" }
+} // { dg-message "variable at namespace scope is not name-independent" "" { target c++26 } .-1 }
+
+namespace B
+{
+ auto [_, _, _] = a; // { dg-error "redefinition of 'auto B::_'" }
+ // { dg-warning "structured bindings only available with" "" { target c++14_down } .-1 }
+} // { dg-message "structured binding at namespace scope is not name-independent" "" { target c++26 } .-2 }
+
+void
+qux ()
+{
+ auto c = [_ = 2, _ = 3] () { // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ // { dg-warning "lambda capture initializers only available with" "" { target c++11_down } .-1 }
+ (void) _; // { dg-error "reference to '_' is ambiguous" }
+ };
+ {
+ int _ = 4;
+ auto d = [_, _ = 5] () { // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ // { dg-warning "lambda capture initializers only available with" "" { target c++11_down } .-1 }
+ (void) _; // { dg-error "reference to '_' is ambiguous" }
+ };
+ }
+ auto e = [_ = 1] (int _) {}; // { dg-warning "lambda capture initializers only available with" "" { target c++11_down } }
+} // { dg-error "lambda parameter '_' previously declared as a capture" "" { target *-*-* } .-1 }
+
+void
+corge (int _, int _) // { dg-error "redefinition of 'int _'" }
+{ // { dg-message "parameter declaration is not name-independent" "" { target c++26 } .-1 }
+}
+
+namespace C
+{
+ typedef int _;
+ typedef int _;
+}
+
+namespace D
+{
+ namespace {
+ int _;
+ int _; // { dg-error "redefinition of 'int D::.anonymous.::_'" }
+ } // { dg-message "variable at namespace scope is not name-independent" "" { target c++26 } .-1 }
+}
+
+namespace E
+{
+ int _ (int);
+ int _ (int);
+ int _ (int) { return 0; }
+ int _ (int) { return 0; } // { dg-error "redefinition of 'int E::_\\\(int\\\)'" }
+ long _ (long) { return 1; }
+}
+
+template <int _, int _> // { dg-error "redefinition of 'int _'" }
+void
+garply ()
+{
+}
+
+#if __cpp_concepts >= 202002L
+template <typename T>
+concept F = requires (T _, T _) { T{}; }; // { dg-error "redefinition of 'T _'" "" { target c++20 } }
+#endif // { dg-message "parameter declaration is not name-independent" "" { target c++26 } .-1 }
diff --git a/gcc/testsuite/g++.dg/cpp26/name-independent-decl3.C b/gcc/testsuite/g++.dg/cpp26/name-independent-decl3.C
new file mode 100644
index 0000000..3963d02
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp26/name-independent-decl3.C
@@ -0,0 +1,12 @@
+// P2169R4 - A nice placeholder with no name
+// { dg-do compile { target c++11 } }
+// { dg-options "" }
+
+void
+foo ()
+{
+ extern int _;
+ extern int _;
+ ++_;
+ int _; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+}
diff --git a/gcc/testsuite/g++.dg/cpp26/name-independent-decl4.C b/gcc/testsuite/g++.dg/cpp26/name-independent-decl4.C
new file mode 100644
index 0000000..79e97e9
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp26/name-independent-decl4.C
@@ -0,0 +1,12 @@
+// P2169R4 - A nice placeholder with no name
+// { dg-do compile { target c++11 } }
+// { dg-options "" }
+
+void
+foo ()
+{
+ extern int _;
+ extern int _;
+ int _; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ ++_; // { dg-error "reference to '_' is ambiguous" }
+}
diff --git a/gcc/testsuite/g++.dg/cpp26/name-independent-decl5.C b/gcc/testsuite/g++.dg/cpp26/name-independent-decl5.C
new file mode 100644
index 0000000..cf28807
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp26/name-independent-decl5.C
@@ -0,0 +1,92 @@
+// P2169R4 - A nice placeholder with no name
+// { dg-do compile { target c++11 } }
+// { dg-options "" }
+
+struct S {
+ int _;
+ int _; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+};
+S s = { 1, 2 };
+
+struct T {
+ int _ = 3;
+ int _ = 4; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+};
+T t1;
+#if __cplusplus >= 201402L
+T t2 = { 5, 6 };
+#endif
+
+struct U {
+ int _ (int) { return 1; }
+ long _ (long) { return 2; }
+ int _; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+};
+U u = { 7 };
+
+struct V {
+ static int _;
+ int _; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+};
+V v = { 8 };
+
+struct W : public S, T { int _; };
+struct X : public S, T {
+ int _;
+ int _; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+};
+
+struct Y {
+ int _;
+ int &foo () { return _; }
+};
+
+struct Z : public Y {
+ int _;
+ int bar ();
+};
+
+int
+Z::bar ()
+{
+ return _ + Y::_;
+}
+
+struct A {
+ int _;
+ void foo () {
+ int _;
+ _ = 42;
+ _ += ({ int _ = 0; _; });
+ }
+};
+
+struct B {
+ union { int _; };
+ void foo () { ++_; };
+};
+
+struct C {
+ int _;
+ union { int x; };
+ void foo () { ++_; };
+};
+
+struct D {
+ struct { int _; };
+ void foo () { ++_; };
+};
+
+struct E {
+ struct _ {};
+ int _;
+ void foo () { ++_; int _; _ = 5; }
+};
+typedef struct E::_ E_;
+
+struct F {
+ struct _ {};
+ int _;
+ int _; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+};
+typedef struct F::_ F_;
diff --git a/gcc/testsuite/g++.dg/cpp26/name-independent-decl6.C b/gcc/testsuite/g++.dg/cpp26/name-independent-decl6.C
new file mode 100644
index 0000000..afb47ce
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp26/name-independent-decl6.C
@@ -0,0 +1,135 @@
+// P2169R4 - A nice placeholder with no name
+// { dg-do compile { target c++11 } }
+// { dg-options "" }
+
+struct S {
+ int _;
+ int _; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ int foo ();
+ S () : _ (1) {} // { dg-error "request for member '_' is ambiguous" }
+ void bar () { ++_; } // { dg-error "reference to '_' is ambiguous" }
+};
+
+int
+S::foo ()
+{
+ int x = _; // { dg-error "reference to '_' is ambiguous" }
+ x += S::_; // { dg-error "reference to '_' is ambiguous" }
+ return x;
+}
+
+struct T {
+ int _;
+ int _; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+};
+T t = { ._ = 1 }; // { dg-error "request for member '_' is ambiguous" }
+
+auto o = __builtin_offsetof (T, _); // { dg-error "request for member '_' is ambiguous" }
+int T::* p = &T::_; // { dg-error "reference to '_' is ambiguous" }
+
+struct U {
+ U () : _ (42) {} // { dg-error "request for member '_' is ambiguous" }
+ int _;
+ int _; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+};
+
+struct V {
+ V ();
+ int _;
+ int _; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+};
+
+V::V () : _(42) // { dg-error "request for member '_' is ambiguous" }
+{
+}
+
+struct A {
+ int _;
+ union { int _; }; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ A() : _(42) {} // { dg-error "request for member '_' is ambiguous" }
+};
+
+struct B {
+ union { int _, _; }; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ union { int _, _; }; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ B() : _(42) {} // { dg-error "request for member '_' is ambiguous" }
+};
+
+void
+bar ()
+{
+ union { int _;
+ int _; }; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ _ = 42; // { dg-error "reference to '_' is ambiguous" }
+}
+
+namespace C
+{
+ static union { int _ = 1; };
+ static union { int _ = 2; }; // { dg-error "redeclaration of 'int _'" }
+}
+
+void
+baz ()
+{
+ static union { int _ = 3; };
+ static union { int _ = 4; }; // { dg-error "redeclaration of 'int _'" }
+} // { dg-message "static variable is not name-independent" "" { target c++26 } .-1 }
+
+struct D {
+ int _;
+ int _; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+};
+
+struct E : public D {};
+
+void
+qux ()
+{
+ D {}._; // { dg-error "request for member '_' is ambiguous" }
+ E {}._; // { dg-error "request for member '_' is ambiguous" }
+}
+
+struct F {
+ struct _ {};
+ int _;
+ int _; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ void foo () { ++_; } // { dg-error "reference to '_' is ambiguous" }
+ void bar ();
+};
+typedef struct F::_ F_;
+
+void
+F::bar ()
+{
+ ++_; // { dg-error "reference to '_' is ambiguous" }
+}
+
+struct G {
+ int _ (int) { return 1; }
+ int _; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ void foo () { ++_; } // { dg-error "reference to '_' is ambiguous" }
+ void bar ();
+};
+
+void
+G::bar ()
+{
+ ++_; // { dg-error "reference to '_' is ambiguous" }
+ this->_ (0); // { dg-error "request for member '_' is ambiguous" }
+}
+
+struct H {
+ int _ (int) { return 1; }
+ long _ (float) { return 2; }
+ int _; // { dg-warning "name-independent declarations only available with" "" { target c++23_down } }
+ void foo () { ++_; } // { dg-error "reference to '_' is ambiguous" }
+ void bar ();
+};
+
+void
+H::bar ()
+{
+ ++_; // { dg-error "reference to '_' is ambiguous" }
+ this->_ (0); // { dg-error "request for member '_' is ambiguous" }
+}
diff --git a/gcc/testsuite/g++.dg/cpp26/static_assert1.C b/gcc/testsuite/g++.dg/cpp26/static_assert1.C
index 9dec52b..59724ae 100644
--- a/gcc/testsuite/g++.dg/cpp26/static_assert1.C
+++ b/gcc/testsuite/g++.dg/cpp26/static_assert1.C
@@ -1,6 +1,8 @@
// C++26 P2741R3 - user-generated static_assert messages
// { dg-do compile { target c++11 } }
// { dg-options "" }
+// Override any default-'-fno-exceptions':
+// { dg-additional-options -fexceptions }
static_assert (true, "");
static_assert (true, ("")); // { dg-warning "'static_assert' with non-string message only available with" "" { target c++23_down } }
diff --git a/gcc/testsuite/g++.dg/cpp2a/class-deduction-alias3.C b/gcc/testsuite/g++.dg/cpp2a/class-deduction-alias3.C
index 318d4c9..b43a8c8 100644
--- a/gcc/testsuite/g++.dg/cpp2a/class-deduction-alias3.C
+++ b/gcc/testsuite/g++.dg/cpp2a/class-deduction-alias3.C
@@ -1,8 +1,11 @@
// PR c++/95486
// { dg-do compile { target c++20 } }
+template <class T>
+concept Int = __is_same (T, int);
+
template<class T, class U>
-struct X { X(U) requires __is_same(U, int) {} };
+struct X { X(U) requires Int<U> {} };
template<class U>
using Y = X<void, U>;
diff --git a/gcc/testsuite/g++.dg/cpp2a/class-deduction-alias8.C b/gcc/testsuite/g++.dg/cpp2a/class-deduction-alias8.C
index ec005956..9de0a72 100644
--- a/gcc/testsuite/g++.dg/cpp2a/class-deduction-alias8.C
+++ b/gcc/testsuite/g++.dg/cpp2a/class-deduction-alias8.C
@@ -1,8 +1,11 @@
// PR c++/95486
// { dg-do compile { target c++20 } }
+template <class T>
+concept Int = __is_same (T, int);
+
template<class T, class U>
-struct X { X(U) requires __is_same(U, int) {} };
+struct X { X(U) requires Int<U> {} };
template<class U>
X(U) -> X<char, U>;
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval-memfn1.C b/gcc/testsuite/g++.dg/cpp2a/consteval-memfn1.C
index 46eed13..ca92351 100644
--- a/gcc/testsuite/g++.dg/cpp2a/consteval-memfn1.C
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval-memfn1.C
@@ -20,10 +20,13 @@ template<class>
void VerifyHash(fixed_string s) {
s.size(0); // { dg-bogus "" }
s.size(-1); // { dg-message "expansion of" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
s.size_static(0); // { dg-bogus "" }
s.size_static(-1); // { dg-message "expansion of" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
fixed_string::size_static(0); // { dg-bogus "" }
fixed_string::size_static(-1); // { dg-message "expansion of" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
s(); // { dg-bogus "" }
}
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval-prop1.C b/gcc/testsuite/g++.dg/cpp2a/consteval-prop1.C
new file mode 100644
index 0000000..5e7b208
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval-prop1.C
@@ -0,0 +1,169 @@
+// P2564R3
+// { dg-do compile { target c++20 } }
+// Some of these were cribbed from clang's cxx2b-consteval-propagate.cpp.
+
+consteval int id(int i) { return i; }
+
+template <typename T>
+constexpr int
+f0 (T t)
+{
+ // OK, f0<int> promoted to consteval.
+ return id (t); // { dg-message "immediate-escalating expression .id\\(t\\)." }
+}
+
+constexpr auto a0 = f0 (3);
+
+// As a consequence of f0<int> being promoted to an immediate function, we
+// can't take its address.
+auto p0 = &f0<int>; // { dg-error "taking address of an immediate function" }
+
+template <typename T>
+constexpr int
+f1 (T t)
+{
+ // OK, f1<int> promoted to consteval.
+ return t + id (t); // { dg-message "immediate-escalating expression .id\\(t\\)." }
+}
+
+constexpr auto a1 = f1 (3);
+
+// As a consequence of f1<int> being promoted to an immediate function, we
+// can't take its address.
+auto p1 = &f1<int>; // { dg-error "taking address of an immediate function" }
+
+template <typename T>
+constexpr int
+f2 (T)
+{
+ // This produces a constant; f2 *not* promoted to consteval.
+ return id (42);
+}
+
+// ... so we can take its address.
+auto p2 = &f2<int>;
+
+constexpr int
+f3 (int i)
+{
+ // f3 isn't a function template and those don't get upgraded to consteval.
+ return id (i); // { dg-error "not a constant expression" }
+}
+
+auto p3 = &f3;
+
+template<typename T>
+constexpr int
+f4 (T t)
+{
+ auto p = id; // { dg-message "immediate-escalating expression .id." }
+ (void) p;
+ return t;
+}
+
+auto p6 = &f4<int>; // { dg-error "taking address of an immediate function" }
+
+static_assert (f4 (42) == 42);
+
+// Constructors.
+consteval int zero (int)
+{
+ return 0;
+}
+
+struct A {
+ // A::A(auto) promoted to consteval.
+ constexpr A(auto i) { zero (i); }
+};
+
+constexpr void
+f5 (auto i)
+{
+ A a{i};
+}
+
+constexpr void
+f5_nt (int i)
+{
+ A a{i}; // { dg-error "call to consteval function|not a constant" }
+}
+
+void
+f6 ()
+{
+ f5 (0);
+}
+
+struct B {
+ constexpr B(int) { }
+};
+
+B b1(f0<int>((f1<int>(7))));
+
+template<typename T>
+constexpr int cid(T t) { return t; }
+
+auto p4 = &cid<int>;
+auto p5 = &cid<char>;
+
+int g = 7; // { dg-message ".int g. is not const" }
+
+B b2(f0<int>(cid<int>(g))); // { dg-error "call to consteval function|not usable" }
+
+struct C {
+ consteval C (int) {};
+};
+
+constexpr int
+f7 (auto t)
+{
+ C c(t); // { dg-message "immediate-escalating expression .c.C::C\\(t\\)." }
+ return 0;
+}
+
+int i1 = f7 (g); // { dg-error "call to consteval function|not usable" }
+
+struct Y {
+ int y;
+ int x = id (y);
+ consteval Y (int i) : y (id (i)) {}
+};
+
+Y y1(1);
+Y y2(g); // { dg-error "call to consteval function|not usable" }
+
+struct Y2 {
+ int y;
+ int x = id (y);
+ constexpr Y2 (auto i) : y (id (i)) {}
+};
+
+Y2 y3(1);
+Y2 y4(g); // { dg-error "call to consteval function|not usable" }
+
+auto l1 = [](int i) constexpr {
+ int t = id (i);
+ return id (0);
+};
+
+int (*pl1)(int) = l1; // { dg-error "call to consteval function|returns address of immediate function" }
+
+auto l2 = [](int i) {
+ int t = id (i);
+ return id (0);
+};
+
+int (*pl2)(int) = l2; // { dg-error "call to consteval function|returns address of immediate function" }
+
+// Not defined = won't produce a constant expression.
+consteval int undef (); // { dg-warning "used but never defined" }
+
+struct S {
+ int a = [] { return undef (); }();
+};
+
+struct S2 { // { dg-error "used before its definition" }
+ int a = [] (int u = undef ()) {
+ return u;
+ }();
+} s2; // { dg-error "call to consteval function" }
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval-prop10.C b/gcc/testsuite/g++.dg/cpp2a/consteval-prop10.C
new file mode 100644
index 0000000..4e33e6e
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval-prop10.C
@@ -0,0 +1,41 @@
+// P2564R3
+// { dg-do compile { target c++20 } }
+// Test default arguments.
+
+consteval int id (int i) { return i; }
+
+template<typename>
+constexpr int
+f1 (int i = id (42))
+{
+ return i;
+}
+
+int non_const; // { dg-message ".int non_const. is not const" }
+
+template<typename>
+constexpr int
+f2 (int i = id (non_const))
+{
+ return i;
+}
+
+constexpr int
+f3 (auto)
+{
+ return f2<int>(); // { dg-message "contains an immediate-escalating expression .id\\(non_const\\)." }
+}
+
+auto a = &f3<int>; // { dg-error "taking address of an immediate function" }
+
+void
+g (int i)
+{
+ f1<int> (42);
+ f1<int> (i);
+ f1<int> ();
+ f2<int> (42);
+ f2<int> (i);
+ f2<int> (); // { dg-error "call to consteval function .id\\(non_const\\). is not a constant expression" }
+// { dg-error ".non_const. is not usable in a constant expression" "" { target *-*-* } .-1 }
+}
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval-prop11.C b/gcc/testsuite/g++.dg/cpp2a/consteval-prop11.C
new file mode 100644
index 0000000..aca9675
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval-prop11.C
@@ -0,0 +1,49 @@
+// P2564R3
+// { dg-do compile { target c++20 } }
+// { dg-options "-fdiagnostics-show-caret" }
+// Test diagnostic.
+
+consteval int id (int i) { return i; }
+constexpr int foo (int i ) { return i; }
+
+constexpr int
+foobar (auto i)
+{
+ return i + id (i);
+ /* { dg-begin-multiline-output "" }
+ return i + id (i);
+ ~~~^~~
+ { dg-end-multiline-output "" } */
+}
+
+void
+g (int x)
+{
+ foobar (x); // { dg-error "10:call to consteval function .foobar<int>\\(x\\). is not a constant expression" }
+// { dg-error ".x. is not a constant expression" "" { target *-*-* } .-1 }
+ /* { dg-begin-multiline-output "" }
+foobar (x);
+ ~~~~~~~^~~
+ { dg-end-multiline-output "" } */
+}
+
+constexpr int
+f2 (auto i)
+{
+ auto p = &id;
+ /* { dg-begin-multiline-output "" }
+ auto p = &id;
+ ^~~
+ { dg-end-multiline-output "" } */
+ return p (i);
+}
+
+void
+g2 (int x)
+{
+ f2 (x); // { dg-error "6:call to consteval function .f2<int>\\(x\\). is not a constant expression|not a constant expression" }
+ /* { dg-begin-multiline-output "" }
+f2 (x);
+ ~~~^~~
+ { dg-end-multiline-output "" } */
+}
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval-prop12.C b/gcc/testsuite/g++.dg/cpp2a/consteval-prop12.C
new file mode 100644
index 0000000..2949ab8
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval-prop12.C
@@ -0,0 +1,30 @@
+// P2564R3
+// { dg-do compile { target c++20 } }
+
+consteval int
+zero (int)
+{
+ return 0;
+}
+
+constexpr int
+f (auto i)
+{
+ return zero (i);
+}
+
+constexpr int
+g (auto)
+{
+ // This call is a constant expression, so don't promote g.
+ return f (42);
+}
+
+void
+do_test ()
+{
+ g (2);
+}
+
+// Must work.
+auto q = &g<int>;
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval-prop13.C b/gcc/testsuite/g++.dg/cpp2a/consteval-prop13.C
new file mode 100644
index 0000000..6c20b98
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval-prop13.C
@@ -0,0 +1,23 @@
+// P2564R3
+// { dg-do compile { target c++20 } }
+// Verify we don't recurse endlessly while determining whether a function
+// should be propagated to consteval.
+
+consteval int id (int i) { return i; }
+
+constexpr int f2 (auto);
+
+constexpr int
+f1 (auto i)
+{
+ return f2 (i);
+}
+
+constexpr int
+f2 (auto i)
+{
+ return f1 (i);
+}
+
+auto p = &f1<int>;
+auto q = &f2<int>;
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval-prop14.C b/gcc/testsuite/g++.dg/cpp2a/consteval-prop14.C
new file mode 100644
index 0000000..cdc1f6d
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval-prop14.C
@@ -0,0 +1,78 @@
+// P2564R3
+// { dg-do compile { target c++20 } }
+// Test more CALL_EXPRs in a function, some of which are escalating.
+
+consteval int id (int i) { return i; }
+constexpr int neg (int i) { return -i; }
+constexpr int foo (auto i) { return id (i); }
+
+constexpr int
+f1 (auto i)
+{
+ auto x = id (i); // { dg-message "promoted to an immediate function because its body contains an immediate-escalating expression .id\\(i\\)." }
+ auto y = neg (i);
+ return x + y;
+}
+
+constexpr int
+f2 (auto i)
+{
+ return neg (id (i)); // { dg-message "promoted to an immediate function because its body contains an immediate-escalating expression .id\\(i\\)." }
+}
+
+constexpr int
+f3 (auto i)
+{
+ auto x = i + neg (neg (neg (id (neg (neg (i)))))); // { dg-message "promoted to an immediate function because its body contains an immediate-escalating expression .id\\(neg\\(neg\\(i\\)\\)\\)." }
+ return x;
+}
+
+constexpr int
+f4 (auto i)
+{
+ return i + neg ((id (2 * i) + neg (i)) / 2); // { dg-message "promoted to an immediate function because its body contains an immediate-escalating expression .id\\(\\(i \\* 2\\)\\)." }
+}
+
+constexpr int
+f5 (auto i)
+{
+ (void) neg (i);
+ (void) neg (i);
+ (void) neg (i);
+ (void) neg (i);
+ (void) neg (i);
+ (void) neg (i);
+ (void) +id (i); // { dg-message "promoted to an immediate function because its body contains an immediate-escalating expression .id\\(i\\)." }
+ (void) neg (i);
+ return i;
+}
+
+constexpr int
+f6 (auto i)
+{
+ auto x = neg (i + foo (i)); // { dg-message "promoted to an immediate function because its body contains an immediate-escalating expression .foo<int>\\(i\\)." }
+ return x;
+}
+
+void
+g (int i)
+{
+ f1 (i); // { dg-error "call to consteval function .f1<int>\\(i\\). is not a constant expression" }
+// { dg-error ".i. is not a constant expression" "" { target *-*-* } .-1 }
+ f1 (42);
+ f2 (i); // { dg-error "call to consteval function .f2<int>\\(i\\). is not a constant expression" }
+// { dg-error ".i. is not a constant expression" "" { target *-*-* } .-1 }
+ f2 (42);
+ f3 (i); // { dg-error "call to consteval function .f3<int>\\(i\\). is not a constant expression" }
+// { dg-error ".i. is not a constant expression" "" { target *-*-* } .-1 }
+ f3 (42);
+ f4 (i); // { dg-error "call to consteval function .f4<int>\\(i\\). is not a constant expression" }
+// { dg-error ".i. is not a constant expression" "" { target *-*-* } .-1 }
+ f4 (42);
+ f5 (i); // { dg-error "call to consteval function .f5<int>\\(i\\). is not a constant expression" }
+// { dg-error ".i. is not a constant expression" "" { target *-*-* } .-1 }
+ f5 (42);
+ f6 (i); // { dg-error "call to consteval function .f6<int>\\(i\\). is not a constant expression" }
+// { dg-error ".i. is not a constant expression" "" { target *-*-* } .-1 }
+ f6 (42);
+}
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval-prop15.C b/gcc/testsuite/g++.dg/cpp2a/consteval-prop15.C
new file mode 100644
index 0000000..3341c51
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval-prop15.C
@@ -0,0 +1,107 @@
+// P2564R3
+// { dg-do compile { target c++20 } }
+// { dg-options "-Wno-c++23-extensions" }
+
+consteval int id (int i) { return i; }
+
+constexpr int
+f1 (auto i)
+{
+ auto p = &id; // { dg-message "promoted to an immediate function because its body contains an immediate-escalating expression .id." }
+ (void) p;
+ return i;
+}
+
+constexpr int
+f2 (auto i)
+{
+ return f1 (i);
+}
+
+constexpr int
+f3 (auto i)
+{
+ return f2 (i);
+}
+
+constexpr int
+f4 (auto i)
+{
+ return f3 (i);
+}
+
+constexpr int
+f5 (auto i)
+{
+ return f4 (i); // { dg-message "promoted to an immediate function because its body contains an immediate-escalating expression .f4<int>\\(i\\)." }
+}
+
+constexpr int
+f6 (auto)
+{
+ // This call is a constant expression, so don't promote f6.
+ return f4 (42);
+}
+
+constexpr int
+f7 (auto i)
+{
+ if consteval {
+ auto p = &id;
+ (void) p;
+ }
+ return i;
+}
+
+constexpr int
+f8 (auto i)
+{
+ if not consteval {
+ (void) 0;
+ } else {
+ auto p = &id;
+ (void) p;
+ }
+ return i;
+}
+
+constexpr int
+f9 (auto i)
+{
+ if consteval {
+ return id(i);
+ }
+ return i;
+}
+
+constexpr int
+f10 (auto i)
+{
+ if not consteval {
+ (void) 0;
+ } else {
+ return id(i);
+ }
+ return i;
+}
+
+void
+g (int non_const)
+{
+ f1 (42);
+ f1 (non_const); // { dg-error "call to consteval function .f1<int>\\(non_const\\). is not a constant expression" }
+// { dg-error ".non_const. is not a constant expression" "" { target *-*-* } .-1 }
+ f5 (42);
+ f5 (non_const); // { dg-error "call to consteval function .f5<int>\\(non_const\\). is not a constant expression" }
+// { dg-error ".non_const. is not a constant expression" "" { target *-*-* } .-1 }
+ f6 (42);
+ f6 (non_const);
+ f7 (42);
+ f7 (non_const);
+ f8 (42);
+ f8 (non_const);
+ f9 (42);
+ f9 (non_const);
+ f10 (42);
+ f10 (non_const);
+}
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval-prop16.C b/gcc/testsuite/g++.dg/cpp2a/consteval-prop16.C
new file mode 100644
index 0000000..7952d49
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval-prop16.C
@@ -0,0 +1,73 @@
+// P2564R3
+// { dg-do compile { target c++20 } }
+// Test unevaluated operands.
+
+consteval int id (int i) { return i; }
+
+constexpr int
+f1 (auto i)
+{
+ // Unevaluated operand -> don't promote.
+ auto p = sizeof (&id);
+ (void) p;
+ return i;
+}
+
+constexpr int
+f2 (auto i)
+{
+ // Unevaluated operand -> don't promote.
+ auto p = noexcept (id);
+ (void) p;
+ return i;
+}
+
+constexpr int
+f3 (auto i)
+{
+ // Unevaluated operand -> don't promote.
+ auto p = noexcept (id (i));
+ (void) p;
+ return i;
+}
+
+constexpr int
+f4 (auto i)
+{
+ // Unevaluated operand -> don't promote.
+ decltype(id) p;
+ (void) p;
+ return i;
+}
+
+constexpr int
+f5 (auto i)
+{
+ // Unevaluated operand -> don't promote.
+ __extension__ auto p = alignof (id (i));
+ (void) p;
+ return i;
+}
+
+constexpr int
+f6 (auto i) requires requires { id (i); }
+{
+ return i;
+}
+
+void
+g (int non_const)
+{
+ f1 (42);
+ f1 (non_const);
+ f2 (42);
+ f2 (non_const);
+ f3 (42);
+ f3 (non_const);
+ f4 (42);
+ f4 (non_const);
+ f5 (42);
+ f5 (non_const);
+ f6 (42);
+ f6 (non_const);
+}
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval-prop17.C b/gcc/testsuite/g++.dg/cpp2a/consteval-prop17.C
new file mode 100644
index 0000000..47ec9b6
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval-prop17.C
@@ -0,0 +1,17 @@
+// P2564R3
+// { dg-do compile { target c++20 } }
+// { dg-options "-fno-immediate-escalation" }
+
+consteval int id(int i) { return i; }
+
+constexpr int
+f (auto i)
+{
+ return id (i); // { dg-error "not a constant expression" }
+}
+
+int
+g ()
+{
+ return f (42);
+}
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval-prop18.C b/gcc/testsuite/g++.dg/cpp2a/consteval-prop18.C
new file mode 100644
index 0000000..a18106f
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval-prop18.C
@@ -0,0 +1,20 @@
+// P2564R3
+// { dg-do compile { target c++20 } }
+
+consteval int id(int i) { return i; }
+
+constexpr int
+f (auto t)
+{
+ return t + id (t);
+}
+
+constexpr int
+f2 (auto t)
+{
+ return t + f(t); // { dg-message "immediate-escalating expression .f<int>\\(t\\)." }
+}
+
+int z; // { dg-message "not const" }
+auto y1 = f2 (42);
+auto y2 = f2 (z); // { dg-error "value of .z. is not usable in a constant expression|call to consteval function" }
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval-prop19.C b/gcc/testsuite/g++.dg/cpp2a/consteval-prop19.C
new file mode 100644
index 0000000..3ceb05e
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval-prop19.C
@@ -0,0 +1,7 @@
+// P2564R3
+// { dg-do compile { target c++20 } }
+
+consteval int g(int p) { return p; }
+template<typename T> constexpr auto f(T) { return g; }
+int r = f(1)(2); // proposed ok
+int s = f(1)(2) + r; // { dg-error "call to consteval function|returns address of immediate function" }
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval-prop2.C b/gcc/testsuite/g++.dg/cpp2a/consteval-prop2.C
new file mode 100644
index 0000000..30129a4
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval-prop2.C
@@ -0,0 +1,90 @@
+// P2564R3
+// { dg-do compile { target c++20 } }
+// Testcase from P2564R3.
+
+consteval int id(int i) { return i; }
+constexpr char id(char c) { return c; }
+
+template<class T>
+constexpr int f(T t) {
+ return t + id(t); // { dg-message "immediate-escalating expression .id\\(t\\)." }
+}
+
+auto a = &f<char>; // OK, f<char> is not an immediate function
+auto b = &f<int>; // { dg-error "taking address of an immediate function" }
+
+static_assert(f(3) == 6); // OK
+
+template<class T>
+constexpr int g(T t) { // g<int> is not an immediate function
+ return t + id(42); // because id(42) is already a constant
+}
+
+template<class T, class F>
+constexpr bool is_not(T t, F f) {
+ return not f(t);
+}
+
+consteval bool is_even(int i) { return i % 2 == 0; }
+
+static_assert(is_not(5, is_even)); // OK
+
+int x = 0;
+
+template<class T>
+constexpr T h(T t = id(x)) { // h<int> is not an immediate function
+ return t;
+}
+
+template<class T>
+constexpr T hh() { // hh<int> is an immediate function
+ return h<T>(); // { dg-error "the value of .x. is not usable in a constant expression" }
+// { dg-message "immediate-escalating expression .id\\(x\\)." "" { target *-*-* } .-1 }
+}
+
+int i = hh<int>(); // { dg-error "call to consteval function|called in a constant expression" }
+ // error: hh<int>() is an immediate-escalating expression
+ // outside of an immediate-escalating function
+struct A {
+ int x;
+ int y = id(x);
+};
+
+// [expr.const]#example-9 says:
+// k<int> is not an immediate function because A(42) is a
+// constant expression and thus not immediate-escalating
+// In the evaluation of A(42), the member x has just been initialized
+// to constant 42. And A(42) is constant-evaluated because "An aggregate
+// initialization is an immediate invocation if it evaluates a default
+// member initializer that has a subexpression that is an
+// immediate-escalating expression."
+template<class T>
+constexpr int k(int) {
+ return A(42).y;
+}
+
+int
+test (int i)
+{
+ int r = g (42) + g(i);
+ int t = k<int>(42)
+ + k<int>(i); // { dg-bogus "call to|constant" "" { xfail *-*-* } }
+ return r + t;
+}
+
+// Just like above, but make the call to id(x) actually a constant.
+struct A2 {
+ static constexpr int x = 42;
+ int y = id(x);
+};
+
+template<class T>
+constexpr int k2(int) {
+ return A2(42).y;
+}
+
+int
+test2 (int i)
+{
+ return k2<int>(42) + k2<int>(i);
+}
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval-prop20.C b/gcc/testsuite/g++.dg/cpp2a/consteval-prop20.C
new file mode 100644
index 0000000..f1bb08e
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval-prop20.C
@@ -0,0 +1,21 @@
+// P2564R3
+// { dg-do compile { target c++20 } }
+// { dg-options "-Wno-c++23-extensions" }
+
+consteval int id(int i) { return i; }
+
+constexpr int
+f (auto i)
+{
+ return id (i);
+}
+
+void
+g ()
+{
+ auto p = &f<int>; // { dg-error "taking address" }
+ decltype(&f<int>) x;
+ if consteval {
+ auto q = &f<int>;
+ }
+}
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval-prop3.C b/gcc/testsuite/g++.dg/cpp2a/consteval-prop3.C
new file mode 100644
index 0000000..f181cb3
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval-prop3.C
@@ -0,0 +1,27 @@
+// P2564R3
+// { dg-do compile { target c++20 } }
+// Cribbed from clang's cxx2b-consteval-propagate.cpp.
+
+consteval int id(int i) { return i; }
+
+template <typename T>
+constexpr int f(T t);
+
+auto a1 = &f<char>;
+auto b1 = &f<int>;
+
+template <typename T>
+constexpr int f(T t) {
+ return id(0);
+}
+
+template <typename T>
+constexpr int f2(T);
+
+auto a2 = &f2<char>; // { dg-error "taking address" }
+auto b2 = &f2<int>; // { dg-error "taking address" }
+
+template <typename T>
+constexpr int f2(T t) {
+ return id(t);
+}
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval-prop4.C b/gcc/testsuite/g++.dg/cpp2a/consteval-prop4.C
new file mode 100644
index 0000000..3a2e09b
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval-prop4.C
@@ -0,0 +1,30 @@
+// P2564R3
+// { dg-do compile { target c++20 } }
+// From clang's cxx2b-consteval-propagate.cpp. This test ICEd when I worked on
+// P2564.
+
+consteval int f (int);
+
+struct S {
+ int a = 0;
+ int b = f (a);
+};
+
+constexpr bool
+g (auto i)
+{
+ S s{i};
+ return s.b == 2 *i;
+}
+
+consteval int
+f (int i)
+{
+ return 2 * i;
+}
+
+void
+test ()
+{
+ static_assert(g(42));
+}
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval-prop5.C b/gcc/testsuite/g++.dg/cpp2a/consteval-prop5.C
new file mode 100644
index 0000000..3bd1b9d
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval-prop5.C
@@ -0,0 +1,27 @@
+// P2564R3
+// { dg-do compile { target c++20 } }
+
+consteval int f (int i) { return i; }
+
+struct S {
+ int x = f(42);
+};
+
+constexpr S
+immediate (auto)
+{
+ return S{};
+}
+
+void
+g ()
+{
+ immediate (0);
+}
+
+consteval void
+test ()
+{
+ constexpr S s = immediate(0);
+ static_assert(s.x == 42);
+}
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval-prop6.C b/gcc/testsuite/g++.dg/cpp2a/consteval-prop6.C
new file mode 100644
index 0000000..93ed398
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval-prop6.C
@@ -0,0 +1,59 @@
+// P2564R3
+// { dg-do compile { target c++20 } }
+// From cxx2b-consteval-propagate.cpp.
+
+void side_effect();
+
+consteval int
+f (int x)
+{
+ if (!x)
+ side_effect(); // { dg-error "call to non-.constexpr. function" }
+ return x;
+}
+
+struct SS {
+ int y = f(1);
+ int x = f(0);
+ SS();
+};
+SS::SS(){} // { dg-error "call to consteval function" }
+
+consteval int
+f2 (int x)
+{
+ if (!__builtin_is_constant_evaluated ())
+ side_effect();
+ return x;
+}
+
+struct S2 {
+ int x = f2(0);
+ constexpr S2();
+};
+
+constexpr S2::S2(){}
+S2 s = {};
+constinit S2 s2 = {};
+
+struct S3 {
+ int x = f2(0);
+ S3();
+};
+S3::S3(){}
+
+consteval int undef (int x); // { dg-warning "never defined" }
+
+struct X {
+ int a = sizeof(undef(0));
+ int x = undef(0);
+
+ X() = default; // { dg-error "modification of .x. is not a constant expression" }
+};
+
+void
+test ()
+{
+ [[maybe_unused]] X x; // { dg-error "call to consteval function" }
+// { dg-message "promoted to an immediate function" "" { target *-*-* } .-1 }
+}
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval-prop7.C b/gcc/testsuite/g++.dg/cpp2a/consteval-prop7.C
new file mode 100644
index 0000000..118cf57
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval-prop7.C
@@ -0,0 +1,76 @@
+// P2564R3
+// { dg-do compile { target c++20 } }
+// The problem here was that while parsing, we first process calling
+// 'f' from 'g' but only when instantiating 'f<int>' do we promote 'f'
+// to consteval. When the var we're initializing is marked constexpr,
+// store_init_value detects the problem that we're calling a consteval
+// function with non-const argument.
+
+consteval int id(int i) { return i; }
+
+// Don't let the instantiations confuse us, e.g. instantiating a fn
+// prior to entering 'g'.
+template <typename T>
+constexpr int f1(T t) { return id (t); }
+
+template <typename T>
+constexpr int f2(T t) { return id (t); }
+
+template <typename T>
+constexpr int f3(T t) { return id (t); }
+
+template <typename T>
+constexpr int f4(T t) { return id (t); }
+
+template <typename T>
+constexpr int f5(T t) { return id (t); }
+
+template <typename T>
+constexpr int f6(T t) { return id (t); }
+
+template <typename T>
+constexpr int f7(T t) { return id (t); }
+
+template <typename T>
+constexpr int f8(T t) { return id (t); }
+
+template <typename T>
+constexpr int f9(T t) { return id (t); }
+
+template <typename T>
+constexpr int f10(T t) { return id (t); }
+
+template <typename T>
+constexpr int g1(T t) { auto p = id; return p (t); }
+
+int non_const;
+
+auto a1 = f1 (non_const); // { dg-error "call to consteval function|not usable" }
+constexpr auto a2 = f2 (non_const); // { dg-error "not a constant|not usable" }
+auto a3 = f3 (42);
+constexpr auto a4 = f4 (42);
+
+void
+g ()
+{
+ auto a5 = f5 (non_const); // { dg-error "not a constant|not usable" }
+ constexpr auto a6 = f6 (non_const); // { dg-error "not usable" }
+ auto a7 = f7 (42);
+ constexpr auto a8 = f8 (42);
+ (void) f9 (non_const); // { dg-error "not a constant|not usable" }
+ (void) f10 (42);
+ (void) g1 (non_const); // { dg-error "not a constant|not usable" }
+}
+
+struct S {
+ int y;
+ int x = id (y);
+ // Promoted to consteval.
+ template<typename T>
+ constexpr S(T t) : y (id (t)) {}
+};
+
+S s1(1);
+S s2(non_const); // { dg-error "call to consteval function|not usable" }
+constexpr S s3(1);
+constexpr S s4(non_const); // { dg-error "not usable" }
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval-prop8.C b/gcc/testsuite/g++.dg/cpp2a/consteval-prop8.C
new file mode 100644
index 0000000..080fc76
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval-prop8.C
@@ -0,0 +1,82 @@
+// P2564R3
+// { dg-do compile { target c++20 } }
+// { dg-options "-Wno-c++23-extensions" }
+
+consteval int zero (int)
+{
+ return 0;
+}
+
+struct A {
+ // A::A(auto) promoted to consteval.
+ constexpr A(auto i) { zero (i); }
+};
+
+// 'f1<int>' is an immediate function because its body contains a call to an
+// immediate constructor 'A<int>' and that call is not a constant expression
+constexpr void
+f1 (auto i)
+{
+ A a{i};
+}
+
+// 'f2<int>' is an immediate function because its body contains a call to an
+// immediate constructor 'A<int>' and that call is not a constant expression
+constexpr void
+f2 (auto i)
+{
+ A a{i};
+}
+
+void
+f3 (int i)
+{
+ A a{i}; // { dg-error "not a constant expression" }
+}
+
+inline void
+f7 (int i)
+{
+ A a{i}; // { dg-error "not a constant expression" }
+}
+
+constexpr void
+f8 (int i)
+{
+ A a{i}; // { dg-error "not a constant expression" }
+}
+
+/* "An expression or conversion is immediate-escalating if it is not initially
+ in an immediate function context" but this one is, so we do *not* promote
+ f4 to consteval. */
+constexpr void
+f4 (auto i)
+{
+ if consteval {
+ A a{i};
+ }
+}
+
+constexpr void
+f5 (auto i)
+{
+ if not consteval {
+ (void) 0;
+ } else {
+ A a{i};
+ }
+}
+
+void
+f6 (int x)
+{
+ f1 (0);
+ f1 (x); // { dg-error "not a constant expression" }
+ f2 (0);
+ f2 (x); // { dg-error "not a constant expression" }
+ f3 (0);
+ f4 (x);
+ f4 (0);
+ f5 (x);
+ f5 (0);
+}
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval-prop9.C b/gcc/testsuite/g++.dg/cpp2a/consteval-prop9.C
new file mode 100644
index 0000000..9c4a233
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval-prop9.C
@@ -0,0 +1,67 @@
+// P2564R3
+// { dg-do compile { target c++20 } }
+
+consteval int
+zero (int)
+{
+ return 0;
+}
+
+constexpr int
+f1 (auto i)
+{
+ return zero (i);
+}
+
+constexpr int
+f2 (auto i)
+{
+ return f1 (i);
+}
+
+constexpr int
+f3 (auto i)
+{
+ return f2 (i);
+}
+
+constexpr int
+f4 (auto i)
+{
+ return f3 (i);
+}
+
+constexpr int
+f5 (auto i)
+{
+ return f4 (i);
+}
+
+constexpr int
+f6 (auto)
+{
+ // This call is a constant expression, so don't promote f6.
+ return f5 (42);
+}
+
+constexpr int
+f7 (auto)
+{
+ // This call is a constant expression, so don't promote f7.
+ return zero (42);
+}
+
+auto p1 = &f5<int>; // { dg-error "taking address" }
+static auto p2 = &f4<int>; // { dg-error "taking address" }
+auto p3 = &f6<int>;
+static auto p4 = &f6<int>;
+auto p5 = &f7<int>;
+static auto p6 = &f7<int>;
+
+void
+g ()
+{
+ static auto q1 = &f4<int>; // { dg-error "taking address" }
+ static auto q2 = &f6<int>;
+ static auto q3 = &f7<int>;
+}
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval11.C b/gcc/testsuite/g++.dg/cpp2a/consteval11.C
index 05cecea..c2ee3c7 100644
--- a/gcc/testsuite/g++.dg/cpp2a/consteval11.C
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval11.C
@@ -8,9 +8,11 @@ constexpr int a = bar (1);
constexpr int b = bar (2); // { dg-message "in 'constexpr' expansion of" }
constexpr int c = 0 ? bar (3) : 1;
const int d = bar (4); // { dg-message "in 'constexpr' expansion of" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
const int e = 0 ? bar (5) : 1;
int f = bar (1);
int g = bar (6); // { dg-message "in 'constexpr' expansion of" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
int h = 0 ? bar (7) : 1;
void
@@ -20,25 +22,35 @@ foo ()
constexpr int b = bar (2); // { dg-message "in 'constexpr' expansion of" }
constexpr int c = 0 ? bar (3) : 1;
const int d = bar (4); // { dg-message "in 'constexpr' expansion of" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
const int e = 0 ? bar (5) : 1;
int f = bar (1);
int g = bar (6); // { dg-message "in 'constexpr' expansion of" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
int h = 0 ? bar (7) : 1; // { dg-message "in 'constexpr' expansion of" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
h += 0 ? bar (8) : 1; // { dg-message "in 'constexpr' expansion of" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
if (0)
bar (9); // { dg-message "in 'constexpr' expansion of" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
else
bar (10); // { dg-message "in 'constexpr' expansion of" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
if (1)
bar (11); // { dg-message "in 'constexpr' expansion of" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
else
bar (12); // { dg-message "in 'constexpr' expansion of" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
if constexpr (0)
bar (13);
else
bar (14); // { dg-message "in 'constexpr' expansion of" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
if constexpr (1)
bar (15); // { dg-message "in 'constexpr' expansion of" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
else
bar (16);
}
@@ -121,18 +133,24 @@ quux ()
{
if (0)
bar ((T) 2); // { dg-message "in 'constexpr' expansion of" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
else
bar ((T) 3); // { dg-message "in 'constexpr' expansion of" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
if (1)
bar ((T) 4); // { dg-message "in 'constexpr' expansion of" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
else
bar ((T) 5); // { dg-message "in 'constexpr' expansion of" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
if constexpr (0)
bar ((T) 6);
else
bar ((T) 7); // { dg-message "in 'constexpr' expansion of" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
if constexpr (1)
bar ((T) 8); // { dg-message "in 'constexpr' expansion of" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
else
bar ((T) 9);
}
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval3.C b/gcc/testsuite/g++.dg/cpp2a/consteval3.C
index 9efac8c..1199e9d 100644
--- a/gcc/testsuite/g++.dg/cpp2a/consteval3.C
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval3.C
@@ -16,8 +16,8 @@ consteval auto [ b, c ] = S (); // { dg-error "structured binding declaration c
int f5 (consteval int x) { return x; } // { dg-error "a parameter cannot be declared 'consteval'" }
consteval int f6 (int x) { return x; }
int d = 6; // { dg-message "'int d' is not const" }
-int e = f6 (d); // { dg-error "the value of 'd' is not usable in a constant expression" }
-constexpr int f7 (int x) { return f6 (x); } // { dg-error "'x' is not a constant expression" }
+int e = f6 (d); // { dg-error "the value of 'd' is not usable in a constant expression|call to consteval function" }
+constexpr int f7 (int x) { return f6 (x); } // { dg-error "'x' is not a constant expression|call to consteval function" }
constexpr int f = f7 (5);
using fnptr = int (int);
fnptr *g = f6; // { dg-error "taking address of an immediate function 'consteval int f6\\(int\\)'" }
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval34.C b/gcc/testsuite/g++.dg/cpp2a/consteval34.C
index 068827b..7562f40 100644
--- a/gcc/testsuite/g++.dg/cpp2a/consteval34.C
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval34.C
@@ -7,6 +7,7 @@ constexpr int
foo (bool b)
{
return b ? bar (3) : 2; // { dg-message "in .constexpr. expansion" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
}
static_assert (foo (false) == 2);
@@ -22,13 +23,20 @@ void
g ()
{
__extension__ int a1[bar(3)]; // { dg-message "in .constexpr. expansion" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
int a2[sizeof (bar(3))];
int a3 = false ? (1 + bar (8)) : 1; // { dg-message "in .constexpr. expansion" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
a3 += false ? (1 + bar (8)) : 1; // { dg-message "in .constexpr. expansion" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
__extension__ int a4 = false ?: (1 + bar (8)); // { dg-message "in .constexpr. expansion" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
__extension__ int a5 = true ?: (1 + bar (8)); // { dg-message "in .constexpr. expansion" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
int a6 = bar (2) ? 1 : 2; // { dg-message "in .constexpr. expansion" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
int a7 = bar (2) - 1 ? 1 : 2; // { dg-message "in .constexpr. expansion" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
}
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval36.C b/gcc/testsuite/g++.dg/cpp2a/consteval36.C
index 9c470e4..8e27f2e 100644
--- a/gcc/testsuite/g++.dg/cpp2a/consteval36.C
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval36.C
@@ -6,17 +6,17 @@ consteval int id (int i) { return i; }
void
g (int i)
{
- 1 ? 1 : ((1 ? 1 : 1), id (i)); // { dg-error "'i' is not a constant expression" }
- 1 ? 1 : ((1 ? 1 : 1), id (i), 1); // { dg-error "'i' is not a constant expression" }
- 1 ? 1 : ((i ? 1 : 1), id (i), 1); // { dg-error "'i' is not a constant expression" }
- 1 ? 1 : ((1 ? i : 1), id (i), 1); // { dg-error "'i' is not a constant expression" }
- 1 ? 1 : ((1 ? 1 : i), id (i), 1); // { dg-error "'i' is not a constant expression" }
- 1 ? 1 : ((i ? -i : i), id (i), 1); // { dg-error "'i' is not a constant expression" }
- 1 ? 1 : ((1 ? 1 : id (i)), id (42), 1); // { dg-error "'i' is not a constant expression" }
- 1 ? 1 : ((1 ? 1 : id (42)), id (i)); // { dg-error "'i' is not a constant expression" }
- 1 ? 1 : ((1 ? 1 : id (42)), id (i), 1); // { dg-error "'i' is not a constant expression" }
- id (i) ? 1 : ((1 ? 1 : 1), id (i)); // { dg-error "'i' is not a constant expression" }
- 1 ? 1 : ((1 ? 1 : id (i)), id (i)); // { dg-error "'i' is not a constant expression" }
- 1 ? id (i) : ((1 ? 1 : id (i)), id (i)); // { dg-error "'i' is not a constant expression" }
- 1 ? 1 : ((id (i) ? 1 : 1), id (i)); // { dg-error "'i' is not a constant expression" }
+ 1 ? 1 : ((1 ? 1 : 1), id (i)); // { dg-error "call to consteval function|'i' is not a constant expression" }
+ 1 ? 1 : ((1 ? 1 : 1), id (i), 1); // { dg-error "call to consteval function|'i' is not a constant expression" }
+ 1 ? 1 : ((i ? 1 : 1), id (i), 1); // { dg-error "call to consteval function|'i' is not a constant expression" }
+ 1 ? 1 : ((1 ? i : 1), id (i), 1); // { dg-error "call to consteval function|'i' is not a constant expression" }
+ 1 ? 1 : ((1 ? 1 : i), id (i), 1); // { dg-error "call to consteval function|'i' is not a constant expression" }
+ 1 ? 1 : ((i ? -i : i), id (i), 1); // { dg-error "call to consteval function|'i' is not a constant expression" }
+ 1 ? 1 : ((1 ? 1 : id (i)), id (42), 1); // { dg-error "call to consteval function|'i' is not a constant expression" }
+ 1 ? 1 : ((1 ? 1 : id (42)), id (i)); // { dg-error "call to consteval function|'i' is not a constant expression" }
+ 1 ? 1 : ((1 ? 1 : id (42)), id (i), 1); // { dg-error "call to consteval function|'i' is not a constant expression" }
+ id (i) ? 1 : ((1 ? 1 : 1), id (i)); // { dg-error "call to consteval function|'i' is not a constant expression" }
+ 1 ? 1 : ((1 ? 1 : id (i)), id (i)); // { dg-error "call to consteval function|'i' is not a constant expression" }
+ 1 ? id (i) : ((1 ? 1 : id (i)), id (i)); // { dg-error "call to consteval function|'i' is not a constant expression" }
+ 1 ? 1 : ((id (i) ? 1 : 1), id (i)); // { dg-error "call to consteval function|'i' is not a constant expression" }
}
diff --git a/gcc/testsuite/g++.dg/cpp2a/consteval9.C b/gcc/testsuite/g++.dg/cpp2a/consteval9.C
index 051a3d4..ad882d5 100644
--- a/gcc/testsuite/g++.dg/cpp2a/consteval9.C
+++ b/gcc/testsuite/g++.dg/cpp2a/consteval9.C
@@ -14,6 +14,7 @@ template <int N>
void qux ()
{
int a = bar (N); // { dg-message "in 'constexpr' expansion of 'bar\\(2\\)'" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
}
// This function is not instantiated so NDR.
@@ -31,3 +32,4 @@ baz ()
}
int a = bar (2); // { dg-message "in 'constexpr' expansion of 'bar\\(2\\)'" }
+// { dg-error "call to consteval function" "" { target *-*-* } .-1 }
diff --git a/gcc/testsuite/g++.dg/cpp2a/constexpr-ref1.C b/gcc/testsuite/g++.dg/cpp2a/constexpr-ref1.C
new file mode 100644
index 0000000..2ea865f
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/constexpr-ref1.C
@@ -0,0 +1,54 @@
+// P2280R4 - Using unknown pointers and references in constant expressions
+// PR c++/106650
+// { dg-do compile { target c++20 } }
+
+#include <typeinfo>
+
+using size_t = decltype(sizeof(42));
+
+template <typename T, size_t N>
+constexpr size_t array_size(T (&)[N]) {
+ return N;
+}
+
+void use_array(int const (&gold_medal_mel)[2]) {
+ constexpr auto gold = array_size(gold_medal_mel); // OK
+}
+
+constexpr auto olympic_mile() {
+ const int ledecky = 1500;
+ return []{ return ledecky; };
+}
+static_assert(olympic_mile()() == 1500); // OK
+
+struct Swim {
+ constexpr int phelps() { return 28; }
+ virtual constexpr int lochte() { return 12; }
+ int coughlin = 12;
+};
+
+constexpr int how_many(Swim& swam) {
+ Swim* p = &swam;
+ return (p + 1 - 1)->phelps();
+}
+
+void splash(Swim& swam) {
+ static_assert(swam.phelps() == 28); // OK
+ static_assert((&swam)->phelps() == 28); // OK
+
+ Swim* pswam = &swam;
+ static_assert(pswam->phelps() == 28); // { dg-error "non-constant|not usable" }
+
+ static_assert(how_many(swam) == 28); // OK
+ static_assert(Swim().lochte() == 12); // OK
+
+ static_assert(swam.lochte() == 12); // { dg-error "non-constant|not a constant" }
+
+ static_assert(swam.coughlin == 12); // { dg-error "non-constant|not a constant" }
+}
+
+extern Swim dc;
+extern Swim& trident;
+
+constexpr auto& sandeno = typeid(dc); // OK, can only be typeid(Swim)
+constexpr auto& gallagher = typeid(trident); // { dg-error "not a constant" }
diff --git a/gcc/testsuite/g++.dg/cpp2a/feat-cxx2a.C b/gcc/testsuite/g++.dg/cpp2a/feat-cxx2a.C
index 16bc0b8..fc268d4 100644
--- a/gcc/testsuite/g++.dg/cpp2a/feat-cxx2a.C
+++ b/gcc/testsuite/g++.dg/cpp2a/feat-cxx2a.C
@@ -480,8 +480,8 @@
#ifndef __cpp_consteval
# error "__cpp_consteval"
-#elif __cpp_consteval != 201811
-# error "__cpp_consteval != 201811"
+#elif __cpp_consteval != 202211L
+# error "__cpp_consteval != 202211L"
#endif
#ifndef __cpp_concepts
diff --git a/gcc/testsuite/g++.dg/cpp2a/nontype-class4.C b/gcc/testsuite/g++.dg/cpp2a/nontype-class4.C
index 6235fc8..5dd4b03 100644
--- a/gcc/testsuite/g++.dg/cpp2a/nontype-class4.C
+++ b/gcc/testsuite/g++.dg/cpp2a/nontype-class4.C
@@ -1,11 +1,12 @@
// { dg-do compile { target c++20 } }
+// { dg-additional-options "-fabi-version=18 -fabi-compat-version=18 -Wabi=0" }
template <class T>
struct A {
constexpr A(T) {}
// auto operator<=> (const A&) = default;
};
-template <A a> void f();
+template <A a> void f(); // { dg-warning "mangled name" }
int main()
{
diff --git a/gcc/testsuite/g++.dg/cpp2a/nontype-class4a.C b/gcc/testsuite/g++.dg/cpp2a/nontype-class4a.C
new file mode 100644
index 0000000..717876d
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp2a/nontype-class4a.C
@@ -0,0 +1,18 @@
+// { dg-do compile { target c++20 } }
+// { dg-additional-options "-fabi-version=0 -fabi-compat-version=0 -Wabi=18" }
+
+template <class T>
+struct A {
+ constexpr A(T) {}
+ // auto operator<=> (const A&) = default;
+};
+template <A a> void f(); // { dg-warning "mangled name" }
+
+int main()
+{
+ constexpr A a = 1;
+ f<a>();
+ f<1>();
+}
+
+// { dg-final { scan-assembler "_Z1fITn1AXtlS0_IiEEEEvv" } }
diff --git a/gcc/testsuite/g++.dg/cpp2a/spaceship-synth9.C b/gcc/testsuite/g++.dg/cpp2a/spaceship-synth9.C
index 33b547d..ecb46b0 100644
--- a/gcc/testsuite/g++.dg/cpp2a/spaceship-synth9.C
+++ b/gcc/testsuite/g++.dg/cpp2a/spaceship-synth9.C
@@ -22,6 +22,6 @@ struct Z: Y<int>
int main()
{
X<char>() == X<char>(); // { dg-error "no match" }
- X<int> x; x == x; // { dg-error "x' is not usable in a constant expression" }
+ X<int> x; x == x; // { dg-error "x' is not usable in a constant expression|call to consteval function" }
Y<int>() == Y<int>(); // { dg-warning "nodiscard" }
}
diff --git a/gcc/testsuite/g++.dg/ext/complit17.C b/gcc/testsuite/g++.dg/ext/complit17.C
new file mode 100644
index 0000000..acc8084
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ext/complit17.C
@@ -0,0 +1,4 @@
+// PR c++/53220
+// { dg-options "" }
+
+int* f() { return (int[]){42}; } // { dg-warning "pointer to temporary" }
diff --git a/gcc/testsuite/g++.dg/ext/has-feature.C b/gcc/testsuite/g++.dg/ext/has-feature.C
index 52191b7..bcfe824 100644
--- a/gcc/testsuite/g++.dg/ext/has-feature.C
+++ b/gcc/testsuite/g++.dg/ext/has-feature.C
@@ -5,7 +5,11 @@
#define CXX11 (__cplusplus >= 201103L)
#define CXX14 (__cplusplus >= 201402L)
-#if !FEAT(cxx_exceptions) || !FEAT(cxx_rtti)
+#if FEAT(cxx_exceptions) != !!__cpp_exceptions
+#error
+#endif
+
+#if FEAT(cxx_rtti) != !!__cpp_rtti
#error
#endif
diff --git a/gcc/testsuite/g++.dg/ext/unroll-2.C b/gcc/testsuite/g++.dg/ext/unroll-2.C
index f9ec892..dfbe4ef 100644
--- a/gcc/testsuite/g++.dg/ext/unroll-2.C
+++ b/gcc/testsuite/g++.dg/ext/unroll-2.C
@@ -1,6 +1,5 @@
-// { dg-do compile }
+// { dg-do compile { target c++11 } }
// { dg-options "-O2 -fdump-tree-cunrolli-details" }
-// { dg-skip-if "range for" { *-*-* } { "-std=gnu++98" } { "" } }
void
foo (int (&a)[8], int *b, int *c)
diff --git a/gcc/testsuite/g++.dg/ext/unroll-3.C b/gcc/testsuite/g++.dg/ext/unroll-3.C
index dda94c5..007a5b2 100644
--- a/gcc/testsuite/g++.dg/ext/unroll-3.C
+++ b/gcc/testsuite/g++.dg/ext/unroll-3.C
@@ -1,6 +1,5 @@
-// { dg-do compile }
+// { dg-do compile { target c++11 } }
// { dg-options "-O2 -fdump-tree-cunrolli-details" }
-// { dg-skip-if "range for" { *-*-* } { "-std=gnu++98" } { "" } }
template <typename T>
void
diff --git a/gcc/testsuite/g++.dg/ext/unroll-5.C b/gcc/testsuite/g++.dg/ext/unroll-5.C
new file mode 100644
index 0000000..aa19192
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ext/unroll-5.C
@@ -0,0 +1,36 @@
+// PR c++/112795
+// { dg-do compile { target c++11 } }
+// { dg-options "-O2 -fdump-tree-cunrolli-details" }
+
+void baz (int);
+constexpr int n = 3;
+
+template <int N>
+void
+foo ()
+{
+#pragma GCC unroll(n)
+ for (int i = 0; i != n; ++i)
+ baz (i);
+}
+
+template <int N>
+void
+bar ()
+{
+#pragma GCC unroll(N)
+ for (int i = 0; i != N; ++i)
+ baz (i);
+}
+
+void
+qux ()
+{
+ foo <2> ();
+ bar <6> ();
+ bar <10> ();
+}
+
+// { dg-final { scan-tree-dump "loop with 3 iterations completely unrolled" "cunrolli" } }
+// { dg-final { scan-tree-dump "loop with 6 iterations completely unrolled" "cunrolli" } }
+// { dg-final { scan-tree-dump "loop with 10 iterations completely unrolled" "cunrolli" } }
diff --git a/gcc/testsuite/g++.dg/ext/unroll-6.C b/gcc/testsuite/g++.dg/ext/unroll-6.C
new file mode 100644
index 0000000..e81f47d
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ext/unroll-6.C
@@ -0,0 +1,85 @@
+// PR c++/112795
+// { dg-do compile { target c++11 } }
+
+void
+foo ()
+{
+ #pragma GCC unroll 1.0f // { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (int i = 0; i < 2; ++i)
+ ;
+ #pragma GCC unroll 0xffffffffffffffffULL // { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (int i = 0; i < 2; ++i)
+ ;
+ #pragma GCC unroll -42 // { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (int i = 0; i < 2; ++i)
+ ;
+}
+
+template <int N>
+void
+bar ()
+{
+ #pragma GCC unroll 1.0f // { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (int i = 0; i < 2; ++i)
+ ;
+ #pragma GCC unroll 0xffffffffffffffffULL // { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (int i = 0; i < 2; ++i)
+ ;
+ #pragma GCC unroll -42 // { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (int i = 0; i < 2; ++i)
+ ;
+}
+
+template <typename T, int N>
+void
+baz ()
+{
+ #pragma GCC unroll (N + 1.0f) // { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (int i = 0; i < 2; ++i)
+ ;
+ #pragma GCC unroll (N + 0xffffffffffffffffULL)
+ for (int i = 0; i < 2; ++i)
+ ;
+ #pragma GCC unroll (N - 42)
+ for (int i = 0; i < 2; ++i)
+ ;
+ #pragma GCC unroll ((T) 1.0f)
+ for (int i = 0; i < 2; ++i)
+ ;
+ #pragma GCC unroll ((T) 0xffffffffffffffffULL)
+ for (int i = 0; i < 2; ++i)
+ ;
+ #pragma GCC unroll ((T) -42)
+ for (int i = 0; i < 2; ++i)
+ ;
+}
+
+template <typename T, int N>
+void
+qux ()
+{
+ #pragma GCC unroll (N + 1.0f) // { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (int i = 0; i < 2; ++i)
+ ;
+ #pragma GCC unroll (N + 0xffffffffffffffffULL)// { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (int i = 0; i < 2; ++i)
+ ;
+ #pragma GCC unroll (N - 42) // { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (int i = 0; i < 2; ++i)
+ ;
+ #pragma GCC unroll ((T) 1.0f) // { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (int i = 0; i < 2; ++i)
+ ;
+ #pragma GCC unroll ((T) 0xffffffffffffffffULL)// { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (int i = 0; i < 2; ++i)
+ ;
+ #pragma GCC unroll ((T) -42) // { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (int i = 0; i < 2; ++i)
+ ;
+}
+
+void
+corge ()
+{
+ qux <float, 0> ();
+}
diff --git a/gcc/testsuite/g++.dg/ext/unroll-7.C b/gcc/testsuite/g++.dg/ext/unroll-7.C
new file mode 100644
index 0000000..d063010
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ext/unroll-7.C
@@ -0,0 +1,45 @@
+// PR c++/112795
+// { dg-do compile { target c++11 } }
+// { dg-options "-O2 -fdump-tree-cunrolli-details" }
+
+void baz (int);
+constexpr int n = 3;
+constexpr int m = 7;
+
+template <typename T>
+void
+foo (int (&a)[3], T b)
+{
+#pragma GCC unroll(n)
+ for (auto i : a)
+ baz (i);
+#pragma GCC unroll(m)
+ for (auto i : b)
+ baz (i);
+}
+
+template <int N>
+void
+bar (int (&a)[N])
+{
+#pragma GCC unroll(N)
+ for (auto i : a)
+ baz (i);
+}
+
+void
+qux ()
+{
+ int a[3] = { 1, 2, 3 };
+ int b[7] = { 4, 5, 6, 7, 8, 9, 10 };
+ int c[6] = { 11, 12, 13, 14, 15, 16 };
+ int d[10] = { 17, 18, 19, 20, 21, 22, 23, 24, 25, 26 };
+ foo <int (&)[7]> (a, b);
+ bar <6> (c);
+ bar <10> (d);
+}
+
+// { dg-final { scan-tree-dump "loop with 3 iterations completely unrolled" "cunrolli" } }
+// { dg-final { scan-tree-dump "loop with 6 iterations completely unrolled" "cunrolli" } }
+// { dg-final { scan-tree-dump "loop with 7 iterations completely unrolled" "cunrolli" } }
+// { dg-final { scan-tree-dump "loop with 10 iterations completely unrolled" "cunrolli" } }
diff --git a/gcc/testsuite/g++.dg/ext/unroll-8.C b/gcc/testsuite/g++.dg/ext/unroll-8.C
new file mode 100644
index 0000000..935ada2
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ext/unroll-8.C
@@ -0,0 +1,86 @@
+// PR c++/112795
+// { dg-do compile { target c++11 } }
+
+void
+foo (int (&a)[3])
+{
+ #pragma GCC unroll 1.0f // { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (auto i : a)
+ ;
+ #pragma GCC unroll 0xffffffffffffffffULL // { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (auto i : a)
+ ;
+ #pragma GCC unroll -42 // { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (auto i : a)
+ ;
+}
+
+template <int N, typename U>
+void
+bar (U a)
+{
+ #pragma GCC unroll 1.0f // { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (auto i : a)
+ ;
+ #pragma GCC unroll 0xffffffffffffffffULL // { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (auto i : a)
+ ;
+ #pragma GCC unroll -42 // { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (auto i : a)
+ ;
+}
+
+template <typename T, int N, typename U>
+void
+baz (U a)
+{
+ #pragma GCC unroll (N + 1.0f) // { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (auto i : a)
+ ;
+ #pragma GCC unroll (N + 0xffffffffffffffffULL)
+ for (auto i : a)
+ ;
+ #pragma GCC unroll (N - 42)
+ for (auto i : a)
+ ;
+ #pragma GCC unroll ((T) 1.0f)
+ for (auto i : a)
+ ;
+ #pragma GCC unroll ((T) 0xffffffffffffffffULL)
+ for (auto i : a)
+ ;
+ #pragma GCC unroll ((T) -42)
+ for (auto i : a)
+ ;
+}
+
+template <typename T, int N, typename U>
+void
+qux (U a)
+{
+ #pragma GCC unroll (N + 1.0f) // { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (auto i : a)
+ ;
+ #pragma GCC unroll (N + 0xffffffffffffffffULL)// { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (auto i : a)
+ ;
+ #pragma GCC unroll (N - 42) // { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (auto i : a)
+ ;
+ #pragma GCC unroll ((T) 1.0f) // { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (auto i : a)
+ ;
+ #pragma GCC unroll ((T) 0xffffffffffffffffULL)// { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (auto i : a)
+ ;
+ #pragma GCC unroll ((T) -42) // { dg-error "'#pragma GCC unroll' requires an assignment-expression that evaluates to a non-negative integral constant less than" }
+ for (auto i : a)
+ ;
+}
+
+void
+corge ()
+{
+ int a[3] = { 1, 2, 3 };
+ qux <float, 0, int (&)[3]> (a);
+}
diff --git a/gcc/testsuite/g++.dg/gomp/attrs-11.C b/gcc/testsuite/g++.dg/gomp/attrs-11.C
index 44e025e..6cc02d4 100644
--- a/gcc/testsuite/g++.dg/gomp/attrs-11.C
+++ b/gcc/testsuite/g++.dg/gomp/attrs-11.C
@@ -7,9 +7,9 @@ namespace O { typedef int T; };
void
foo ()
{
- [[omp::directive (parallel)]] asm (""); // { dg-error "expected" }
+ [[omp::directive (parallel)]] asm ("");
[[omp::directive (parallel)]] __extension__ asm (""); // { dg-error "expected" }
- __extension__ [[omp::directive (parallel)]] asm (""); // { dg-error "expected" }
+ __extension__ [[omp::directive (parallel)]] asm ("");
[[omp::directive (parallel)]] namespace M = ::N; // { dg-error "expected" }
[[omp::directive (parallel)]] using namespace N; // { dg-error "not allowed to be specified in this context" }
[[omp::directive (parallel)]] using O::T; // { dg-error "expected" }
diff --git a/gcc/testsuite/g++.dg/lookup/scoped11.C b/gcc/testsuite/g++.dg/lookup/scoped11.C
new file mode 100644
index 0000000..be74352
--- /dev/null
+++ b/gcc/testsuite/g++.dg/lookup/scoped11.C
@@ -0,0 +1,14 @@
+// PR c++/112744
+// { dg-do compile }
+
+struct A { const static int a = 0; };
+struct B : A {};
+struct C : A {};
+struct D : B, C {};
+
+int main()
+{
+ D d;
+ (void) d.a;
+ (void) d.A::a;
+}
diff --git a/gcc/testsuite/g++.dg/lookup/scoped12.C b/gcc/testsuite/g++.dg/lookup/scoped12.C
new file mode 100644
index 0000000..ffa1455
--- /dev/null
+++ b/gcc/testsuite/g++.dg/lookup/scoped12.C
@@ -0,0 +1,14 @@
+// PR c++/112744
+// { dg-do compile }
+
+class A { const static int a = 0; };
+struct B : A {};
+struct C : A {};
+struct D : B, C {};
+
+int main()
+{
+ D d;
+ (void) d.a; // { dg-error "private" }
+ (void) d.A::a; // { dg-error "private" }
+}
diff --git a/gcc/testsuite/g++.dg/lookup/scoped13.C b/gcc/testsuite/g++.dg/lookup/scoped13.C
new file mode 100644
index 0000000..970e1aa
--- /dev/null
+++ b/gcc/testsuite/g++.dg/lookup/scoped13.C
@@ -0,0 +1,14 @@
+// PR c++/112744
+// { dg-do compile }
+
+struct A { const static int a = 0; };
+struct B : A {};
+struct C : A {};
+struct D : B, C {};
+
+int main()
+{
+ D d;
+ (void) d.x; // { dg-error ".struct D. has no member named .x." }
+ (void) d.A::x; // { dg-error ".struct A. has no member named .x." }
+}
diff --git a/gcc/testsuite/g++.dg/lookup/scoped14.C b/gcc/testsuite/g++.dg/lookup/scoped14.C
new file mode 100644
index 0000000..141aa0d
--- /dev/null
+++ b/gcc/testsuite/g++.dg/lookup/scoped14.C
@@ -0,0 +1,14 @@
+// PR c++/112744
+// { dg-do compile { target c++11 } }
+
+struct A { int a = 0; };
+struct B : A {};
+struct C : A {};
+struct D : B, C {};
+
+int main()
+{
+ D d;
+ (void) d.a; // { dg-error "request for member .a. is ambiguous" }
+ (void) d.A::a; // { dg-error ".A. is an ambiguous base of .D." }
+}
diff --git a/gcc/testsuite/g++.dg/lookup/scoped15.C b/gcc/testsuite/g++.dg/lookup/scoped15.C
new file mode 100644
index 0000000..2cc4eb5
--- /dev/null
+++ b/gcc/testsuite/g++.dg/lookup/scoped15.C
@@ -0,0 +1,21 @@
+// PR c++/112744
+// { dg-do compile { target c++11 } }
+
+struct A { constexpr static int a = 0; };
+struct D : private A {};
+
+// The injected-class-name of A is private when named in D, but if A is named
+// some other way, there is no requirement in [class.access.base] for static data
+// members that it be an accessible base.
+
+void f() {
+ D{}.A::a; // { dg-error "inaccessible" }
+ D{}.::A::a;
+}
+
+template<class T>
+void g() {
+ D{}.T::a;
+}
+
+template void g<A>();
diff --git a/gcc/testsuite/g++.dg/opt/devirt2.C b/gcc/testsuite/g++.dg/opt/devirt2.C
index cf4842b..d71bdaa 100644
--- a/gcc/testsuite/g++.dg/opt/devirt2.C
+++ b/gcc/testsuite/g++.dg/opt/devirt2.C
@@ -17,9 +17,7 @@
// { dg-final { scan-assembler-times "jsr\[^\n\]*xyzzy" 2 { target alpha*-*-* } } }
// Unless the assembler supports -relax, the 32-bit SPARC compiler generates
// sethi/jmp instead of just call, so the scans need to be more specific.
-// With subexpressions, Tcl regexp -inline -all returns both the complete
-// match and the subexpressions, so double the count.
-// { dg-final { scan-assembler-times "\(jmp|call\)\[^\n\]*xyzzy" 4 { target sparc*-*-* } } }
+// { dg-final { scan-assembler-times "\(jmp|call\)\[^\n\]*xyzzy" 2 { target sparc*-*-* } } }
struct S { S(); virtual void xyzzy(); };
struct R { int a; S s; R(); };
diff --git a/gcc/testsuite/g++.dg/opt/pr111601.C b/gcc/testsuite/g++.dg/opt/pr111601.C
new file mode 100644
index 0000000..a5019e9
--- /dev/null
+++ b/gcc/testsuite/g++.dg/opt/pr111601.C
@@ -0,0 +1,86 @@
+// PR bootstrap/111601
+// { dg-do run { target c++11 } }
+// { dg-options "-O2 -fno-exceptions -fno-rtti -fprofile-generate" }
+// { dg-require-profiling "-fprofile-generate" }
+// { dg-final { cleanup-coverage-files } }
+
+struct tree_base
+{
+ int code:16;
+};
+struct saved_scope
+{
+ void *pad[14];
+ int x_processing_template_decl;
+};
+struct saved_scope *scope_chain;
+struct z_candidate
+{
+ tree_base *fn;
+ void *pad[11];
+ z_candidate *next;
+ int viable;
+ int flags;
+};
+
+__attribute__((noipa)) struct z_candidate *
+splice_viable (struct z_candidate *cands, bool strict_p, bool *any_viable_p)
+{
+ struct z_candidate *viable;
+ struct z_candidate **last_viable;
+ struct z_candidate **cand;
+ bool found_strictly_viable = false;
+ if (scope_chain->x_processing_template_decl)
+ strict_p = true;
+ viable = (z_candidate *) 0;
+ last_viable = &viable;
+ *any_viable_p = false;
+ cand = &cands;
+ while (*cand)
+ {
+ struct z_candidate *c = *cand;
+ if (!strict_p && (c->viable == 1 || ((int) (c->fn)->code) == 273))
+ {
+ strict_p = true;
+ if (viable && !found_strictly_viable)
+ {
+ *any_viable_p = false;
+ *last_viable = cands;
+ cands = viable;
+ viable = (z_candidate *) 0;
+ last_viable = &viable;
+ }
+ }
+ if (strict_p ? c->viable == 1 : c->viable)
+ {
+ *last_viable = c;
+ *cand = c->next;
+ c->next = (z_candidate *) 0;
+ last_viable = &c->next;
+ *any_viable_p = true;
+ if (c->viable == 1)
+ found_strictly_viable = true;
+ }
+ else
+ cand = &c->next;
+ }
+ return viable ? viable : cands;
+}
+
+int
+main ()
+{
+ saved_scope s{};
+ scope_chain = &s;
+ z_candidate z[4] = {};
+ z[0].next = &z[1];
+ z[1].viable = 1;
+ z[1].next = &z[2];
+ z[2].viable = 1;
+ z[2].next = &z[3];
+ bool b;
+ z_candidate *c = splice_viable (&z[0], true, &b);
+ if (c != &z[1] || z[1].next != &z[2] || z[2].next)
+ __builtin_abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/g++.dg/strub-run1.C b/gcc/testsuite/g++.dg/strub-run1.C
new file mode 100644
index 0000000..0d367fb
--- /dev/null
+++ b/gcc/testsuite/g++.dg/strub-run1.C
@@ -0,0 +1,19 @@
+// { dg-do run }
+// { dg-options "-fstrub=internal" }
+
+// Check that we don't get extra copies.
+
+struct T {
+ T &self;
+ void check () const { if (&self != this) __builtin_abort (); }
+ T() : self (*this) { check (); }
+ T(const T& ck) : self (*this) { ck.check (); check (); }
+ ~T() { check (); }
+};
+
+T foo (T q) { q.check (); return T(); }
+T bar (T p) { p.check (); return foo (p); }
+
+int main () {
+ bar (T()).check ();
+}
diff --git a/gcc/testsuite/g++.dg/template/partial-order4.C b/gcc/testsuite/g++.dg/template/partial-order4.C
new file mode 100644
index 0000000..89555ab
--- /dev/null
+++ b/gcc/testsuite/g++.dg/template/partial-order4.C
@@ -0,0 +1,17 @@
+// DR 532
+// PR c++/53499
+// [temp.func.order] says that we do ordering on the first parameter.
+
+struct A
+{
+ template <class T>
+ bool operator==(T);
+};
+
+template <class T, class U>
+bool operator==(T, U);
+
+int main()
+{
+ A() == A();
+}
diff --git a/gcc/testsuite/g++.dg/template/spec26.C b/gcc/testsuite/g++.dg/template/spec26.C
index fad8e3e..253d421 100644
--- a/gcc/testsuite/g++.dg/template/spec26.C
+++ b/gcc/testsuite/g++.dg/template/spec26.C
@@ -1,13 +1,15 @@
-// { dg-do run }
+// { dg-do compile { target c++11 } }
// Copyright (C) 2005 Free Software Foundation, Inc.
// Contributed by Nathan Sidwell 16 Sep 2005 <nathan@codesourcery.com>
// PR 23519 template specialization ordering (DR214)
// Origin: Maxim Yegorushkin <maxim.yegorushkin@gmail.com>
+// DR532 clarified that the * expression is ambiguous.
+
struct A
{
- template<class T> int operator+(T&) { return 1;}
+ template<class T> int operator+(T&) = delete;
};
template<class T> struct B
@@ -16,7 +18,7 @@ template<class T> struct B
template<typename R> int operator*(R&) {return 3;}
};
-template <typename T, typename R> int operator-(B<T>, R&) {return 4;}
+template <typename T, typename R> int operator-(B<T>, R&) = delete;
template<class T> int operator+(A&, B<T>&) { return 5;}
template <typename T> int operator*(T &, A&){return 6;}
@@ -30,6 +32,6 @@ int main()
if ((b - a) != 2)
return 2;
- if ((b * a) != 6)
+ if ((b * a) != 6) // { dg-error "ambiguous" }
return 3;
}
diff --git a/gcc/testsuite/g++.dg/torture/strub-init1.C b/gcc/testsuite/g++.dg/torture/strub-init1.C
new file mode 100644
index 0000000..c226ab1
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/strub-init1.C
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict -fdump-ipa-strub" } */
+
+extern int __attribute__((__strub__)) initializer ();
+
+int f() {
+ static int x = initializer ();
+ return x;
+}
+
+/* { dg-final { scan-ipa-dump "strub_enter" "strub" } } */
+/* { dg-final { scan-ipa-dump "strub_leave" "strub" } } */
+/* { dg-final { scan-ipa-dump-not "strub_update" "strub" } } */
diff --git a/gcc/testsuite/g++.dg/torture/strub-init2.C b/gcc/testsuite/g++.dg/torture/strub-init2.C
new file mode 100644
index 0000000..a7911f1
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/strub-init2.C
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict -fdump-ipa-strub" } */
+
+extern int __attribute__((__strub__)) initializer ();
+
+static int x = initializer ();
+
+int f() {
+ return x;
+}
+
+/* { dg-final { scan-ipa-dump "strub_enter" "strub" } } */
+/* { dg-final { scan-ipa-dump "strub_leave" "strub" } } */
+/* { dg-final { scan-ipa-dump-not "strub_update" "strub" } } */
diff --git a/gcc/testsuite/g++.dg/torture/strub-init3.C b/gcc/testsuite/g++.dg/torture/strub-init3.C
new file mode 100644
index 0000000..6ebebcd
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/strub-init3.C
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrub=strict -fdump-ipa-strub" } */
+
+extern int __attribute__((__strub__)) initializer ();
+
+int f() {
+ int x = initializer ();
+ return x;
+}
+
+/* { dg-final { scan-ipa-dump "strub_enter" "strub" } } */
+/* { dg-final { scan-ipa-dump "strub_leave" "strub" } } */
+/* { dg-final { scan-ipa-dump-not "strub_update" "strub" } } */
diff --git a/gcc/testsuite/g++.dg/torture/uninit-pr112766.C b/gcc/testsuite/g++.dg/torture/uninit-pr112766.C
new file mode 100644
index 0000000..028056e
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/uninit-pr112766.C
@@ -0,0 +1,17 @@
+// { dg-do compile }
+// { dg-additional-options "-Wuninitialized" }
+
+void* operator new[](__SIZE_TYPE__, void* __p) ;
+
+class Result
+{
+public:
+ Result();
+ ~Result();
+};
+
+void *foo(long nElements, void *p)
+{
+ return p ? new((int*)p) Result[nElements] : new Result[nElements]; // { dg-bogus "uninitialized" }
+}
+
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr112711.C b/gcc/testsuite/g++.dg/tree-ssa/pr112711.C
new file mode 100644
index 0000000..13bc48d
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tree-ssa/pr112711.C
@@ -0,0 +1,31 @@
+/* { dg-do run { target i?86-*-* x86_64-*-* } } */
+/* { dg-options "-O1" } */
+
+typedef int i32;
+typedef unsigned int u32;
+
+static inline void write_i32(void *memory, i32 value) {
+ // swap i32 bytes as if it was u32:
+ u32 u_value = value;
+ value = __builtin_bswap32(u_value);
+
+ // llvm infers '1' alignment from destination type
+ __builtin_memcpy(__builtin_assume_aligned(memory, 1), &value, sizeof(value));
+}
+
+__attribute__((noipa))
+static void bug (void) {
+ #define assert_eq(lhs, rhs) if (lhs != rhs) __builtin_trap()
+
+ unsigned char data[5];
+ write_i32(data, -1362446643);
+ assert_eq(data[0], 0xAE);
+ assert_eq(data[1], 0xCA);
+ write_i32(data + 1, -1362446643);
+ assert_eq(data[1], 0xAE);
+}
+
+int main() {
+ bug();
+ return 0;
+}
diff --git a/gcc/testsuite/g++.dg/warn/Wparentheses-33.C b/gcc/testsuite/g++.dg/warn/Wparentheses-33.C
new file mode 100644
index 0000000..daa2208
--- /dev/null
+++ b/gcc/testsuite/g++.dg/warn/Wparentheses-33.C
@@ -0,0 +1,25 @@
+// PR c++/112765
+// { dg-additional-options "-Wparentheses" }
+
+struct A {
+ A& operator=(const A&);
+ operator bool() const;
+};
+
+template<class T>
+void f(A a1, A a2) {
+ if ((a2 = a1)) // { dg-bogus "parentheses" }
+ return;
+ bool b = (a2 = a1); // { dg-bogus "parentheses" }
+}
+
+template void f<int>(A, A);
+
+template<class T>
+void g(T a1, T a2) {
+ if ((a2 = a1)) // { dg-bogus "parentheses" }
+ return;
+ bool b = (a2 = a1); // { dg-bogus "parentheses" }
+}
+
+template void g<A>(A, A);
diff --git a/gcc/testsuite/g++.dg/warn/Wuse-after-free3.C b/gcc/testsuite/g++.dg/warn/Wuse-after-free3.C
index e5b1578..8ef8202 100644
--- a/gcc/testsuite/g++.dg/warn/Wuse-after-free3.C
+++ b/gcc/testsuite/g++.dg/warn/Wuse-after-free3.C
@@ -11,5 +11,7 @@ struct A
A::~A ()
{
operator delete (this);
- f (); // { dg-warning "used after" }
+ f (); // { dg-warning "used after" "" { xfail arm_eabi } }
+ // arm_eabi's cdtors return this, which disables -Wuse-after-free
+ // warnings for cdtors' "this".
}
diff --git a/gcc/testsuite/g++.target/aarch64/sme/aarch64-sme-acle-asm.exp b/gcc/testsuite/g++.target/aarch64/sme/aarch64-sme-acle-asm.exp
new file mode 100644
index 0000000..a9ed3a1
--- /dev/null
+++ b/gcc/testsuite/g++.target/aarch64/sme/aarch64-sme-acle-asm.exp
@@ -0,0 +1,82 @@
+# Assembly-based regression-test driver for the SME ACLE.
+# Copyright (C) 2009-2023 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>. */
+
+# GCC testsuite that uses the `dg.exp' driver.
+
+# Exit immediately if this isn't an AArch64 target.
+if {![istarget aarch64*-*-*] } {
+ return
+}
+
+# Load support procs.
+load_lib g++-dg.exp
+
+# Initialize `dg'.
+dg-init
+
+# Force SME if we're not testing it already.
+if { [check_effective_target_aarch64_sme] } {
+ set sme_flags ""
+} else {
+ set sme_flags "-march=armv9-a+sme"
+}
+
+# Turn off any codegen tweaks by default that may affect expected assembly.
+# Tests relying on those should turn them on explicitly.
+set sme_flags "$sme_flags -mtune=generic -moverride=tune=none"
+
+global gcc_runtest_parallelize_limit_minor
+if { [info exists gcc_runtest_parallelize_limit_minor] } {
+ set old_limit_minor $gcc_runtest_parallelize_limit_minor
+ set gcc_runtest_parallelize_limit_minor 1
+}
+
+torture-init
+set-torture-options {
+ "-std=c++11 -O0 -g"
+ "-std=c++14 -O1 -g"
+ "-std=c++17 -Og -g"
+ "-std=c++23 -Os -g"
+ "-std=gnu++11 -O2 -fno-schedule-insns -fno-schedule-insns2 -DCHECK_ASM --save-temps"
+ "-std=gnu++23 -Ofast -g"
+} {
+ "-DTEST_FULL"
+ "-DTEST_OVERLOADS"
+}
+
+# Main loop.
+set gcc_subdir [string replace $subdir 0 2 gcc]
+set files [glob -nocomplain $srcdir/$gcc_subdir/acle-asm/*.c]
+set save-dg-do-what-default ${dg-do-what-default}
+if { [check_effective_target_aarch64_asm_sme-i16i64_ok] } {
+ set dg-do-what-default assemble
+} else {
+ set dg-do-what-default compile
+}
+gcc-dg-runtest [lsort $files] "" "$sme_flags -fno-ipa-icf"
+set dg-do-what-default ${save-dg-do-what-default}
+
+torture-finish
+
+if { [info exists gcc_runtest_parallelize_limit_minor] } {
+ set gcc_runtest_parallelize_limit_minor $old_limit_minor
+}
+
+# All done.
+dg-finish
diff --git a/gcc/testsuite/g++.target/aarch64/sme/aarch64-sme.exp b/gcc/testsuite/g++.target/aarch64/sme/aarch64-sme.exp
new file mode 100644
index 0000000..1c3e69c
--- /dev/null
+++ b/gcc/testsuite/g++.target/aarch64/sme/aarch64-sme.exp
@@ -0,0 +1,46 @@
+# Specific regression driver for AArch64 SME.
+# Copyright (C) 2009-2023 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>. */
+
+# GCC testsuite that uses the `dg.exp' driver.
+
+# Exit immediately if this isn't an AArch64 target.
+if {![istarget aarch64*-*-*] } {
+ return
+}
+
+# Load support procs.
+load_lib g++-dg.exp
+
+# Initialize `dg'.
+dg-init
+
+if { [check_effective_target_aarch64_sme] } {
+ set sme_flags ""
+} else {
+ set sme_flags "-march=armv9-a+sme"
+}
+
+aarch64-with-arch-dg-options $sme_flags {
+ # Main loop.
+ dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cCS\]]] \
+ "" $sme_flags
+}
+
+# All done.
+dg-finish
diff --git a/gcc/testsuite/g++.target/aarch64/sme/exceptions_1.C b/gcc/testsuite/g++.target/aarch64/sme/exceptions_1.C
new file mode 100644
index 0000000..a245546
--- /dev/null
+++ b/gcc/testsuite/g++.target/aarch64/sme/exceptions_1.C
@@ -0,0 +1,189 @@
+// { dg-options "-O -fno-optimize-sibling-calls" }
+// { dg-final { check-function-bodies "**" "" } }
+
+void callee_inout() __arm_inout("za");
+void callee_in() noexcept __arm_in("za");
+void callee_out() noexcept __arm_out("za");
+void callee_normal();
+
+/*
+** _Z5test1v:
+** ...
+** bl __arm_tpidr2_save
+** ...
+** bl __cxa_begin_catch
+** bl __cxa_end_catch
+** mov w0, #?2
+** ...
+*/
+__arm_new("za") int
+test1 ()
+{
+ try
+ {
+ callee_inout();
+ return 1;
+ }
+ catch (...)
+ {
+ return 2;
+ }
+}
+
+/*
+** _Z5test2v:
+** ...
+** bl __arm_tpidr2_save
+** ...
+** bl __cxa_begin_catch
+** smstart za
+** bl _Z10callee_outv
+** bl _Z9callee_inv
+** smstop za
+** bl __cxa_end_catch
+** mov w0, #?2
+** ...
+*/
+__arm_new("za") int
+test2 ()
+{
+ try
+ {
+ callee_inout();
+ return 1;
+ }
+ catch (...)
+ {
+ callee_out();
+ callee_in();
+ return 2;
+ }
+}
+
+/*
+** _Z5test3v:
+** ...
+** bl __arm_tpidr2_save
+** ...
+** smstop za
+** ...
+** bl _Z13callee_normalv
+** ...
+** bl __cxa_begin_catch
+** smstart za
+** bl _Z10callee_outv
+** bl _Z9callee_inv
+** smstop za
+** bl __cxa_end_catch
+** mov w0, #?2
+** ...
+*/
+__arm_new("za") int
+test3 ()
+{
+ try
+ {
+ callee_normal();
+ return 1;
+ }
+ catch (...)
+ {
+ callee_out();
+ callee_in();
+ return 2;
+ }
+}
+
+__arm_new("za") int
+test4 ()
+{
+ try
+ {
+ // No lazy save set up because this is a shared-ZA function.
+ callee_inout();
+ return 1;
+ }
+ catch (...)
+ {
+ callee_inout();
+ return 2;
+ }
+}
+// { dg-final { scan-assembler {_Z5test4v:(?:(?!msr\ttpidr2_el0, x[0-9]+).)*\tret} } }
+
+/*
+** _Z5test5v:
+** ...
+** bl __arm_tpidr2_save
+** ...
+** smstart za
+** ...
+** bl _Z12callee_inoutv
+** add (x[0-9]+), [^\n]+
+** msr tpidr2_el0, \1
+** bl _Z13callee_normalv
+** msr tpidr2_el0, xzr
+** smstop za
+** ...
+** bl __cxa_begin_catch
+** ...
+** mrs x[0-9]+, tpidr2_el0
+** ...
+** smstart za
+** ...
+** bl __arm_tpidr2_restore
+** msr tpidr2_el0, xzr
+** bl _Z12callee_inoutv
+** smstop za
+** bl __cxa_end_catch
+** mov w0, #?2
+** ...
+*/
+__arm_new("za") int
+test5 ()
+{
+ try
+ {
+ callee_inout();
+ callee_normal();
+ return 1;
+ }
+ catch (...)
+ {
+ callee_inout();
+ return 2;
+ }
+}
+
+/*
+** _Z5test6v:
+** ...
+** msr tpidr2_el0, x[0-9]+
+** bl _Z13callee_normalv
+** msr tpidr2_el0, xzr
+** ...
+** bl __cxa_begin_catch
+** bl __cxa_end_catch
+** ...
+** mrs x[0-9]+, tpidr2_el0
+** ...
+** smstart za
+** ...
+** bl __arm_tpidr2_restore
+** msr tpidr2_el0, xzr
+** ...
+*/
+int
+test6 () __arm_inout("za")
+{
+ try
+ {
+ callee_normal();
+ callee_out();
+ return 1;
+ }
+ catch (...)
+ {
+ return 2;
+ }
+}
diff --git a/gcc/testsuite/g++.target/aarch64/sme/exceptions_2.C b/gcc/testsuite/g++.target/aarch64/sme/exceptions_2.C
new file mode 100644
index 0000000..f791b6e
--- /dev/null
+++ b/gcc/testsuite/g++.target/aarch64/sme/exceptions_2.C
@@ -0,0 +1,148 @@
+// { dg-options "-O -fno-optimize-sibling-calls" }
+// { dg-final { check-function-bodies "**" "" } }
+
+void n_callee();
+void s_callee() __arm_streaming;
+void sc_callee() __arm_streaming_compatible;
+
+void n_callee_ne() noexcept;
+void s_callee_ne() noexcept __arm_streaming;
+void sc_callee_ne() noexcept __arm_streaming_compatible;
+
+void n_caller1()
+{
+ try
+ {
+ n_callee();
+ sc_callee();
+ }
+ catch (...)
+ {
+ n_callee_ne();
+ sc_callee_ne();
+ }
+}
+// { dg-final { scan-assembler {_Z9n_caller1v:(?:(?!smstart|smstop).)*\tret} } }
+
+/*
+** _Z9n_caller2v:
+** ...
+** cntd (x[0-9]+)
+** str \1, [^\n]+
+** ...
+** bl __cxa_begin_catch
+** smstart sm
+** bl _Z11s_callee_nev
+** smstop sm
+** bl __cxa_end_catch
+** ...
+*/
+void n_caller2()
+{
+ try
+ {
+ n_callee();
+ sc_callee();
+ }
+ catch (...)
+ {
+ s_callee_ne();
+ }
+}
+
+/*
+** _Z9s_caller1v:
+** ...
+** bl __cxa_end_catch
+** smstart sm
+** ...
+*/
+int s_caller1() __arm_streaming
+{
+ try
+ {
+ s_callee();
+ return 1;
+ }
+ catch (...)
+ {
+ return 2;
+ }
+}
+
+/*
+** _Z9s_caller2v:
+** ...
+** bl __cxa_begin_catch
+** smstart sm
+** bl _Z11s_callee_nev
+** smstop sm
+** bl __cxa_end_catch
+** smstart sm
+** ...
+*/
+int s_caller2() __arm_streaming
+{
+ try
+ {
+ n_callee();
+ return 1;
+ }
+ catch (...)
+ {
+ s_callee_ne();
+ return 2;
+ }
+}
+
+/*
+** _Z10sc_caller1v:
+** ...
+** cntd (x[0-9]+)
+** str \1, [^\n]+
+** mrs (x[0-9]+), svcr
+** str \2, ([^\n]+)
+** ...
+** bl __cxa_end_catch
+** ldr (x[0-9]+), \3
+** tbz \4, 0, [^\n]+
+** smstart sm
+** ...
+*/
+int sc_caller1() __arm_streaming_compatible
+{
+ try
+ {
+ sc_callee();
+ return 1;
+ }
+ catch (...)
+ {
+ return 2;
+ }
+}
+
+/*
+** _Z10ls_caller1v:
+** ...
+** cntd (x[0-9]+)
+** str \1, [^\n]+
+** ...
+** bl __cxa_begin_catch
+** smstart sm
+** bl _Z12sc_callee_nev
+** smstop sm
+** bl __cxa_end_catch
+** ...
+*/
+__arm_locally_streaming void ls_caller1()
+{
+ try
+ {
+ sc_callee();
+ }
+ catch (...)
+ {
+ sc_callee_ne();
+ }
+}
diff --git a/gcc/testsuite/g++.target/aarch64/sme/keyword_macros_1.C b/gcc/testsuite/g++.target/aarch64/sme/keyword_macros_1.C
new file mode 100644
index 0000000..dc5c097
--- /dev/null
+++ b/gcc/testsuite/g++.target/aarch64/sme/keyword_macros_1.C
@@ -0,0 +1,10 @@
+/* { dg-options "-std=c++11 -pedantic-errors" } */
+
+void f1 () __arm_streaming;
+void f2 () __arm_streaming_compatible;
+void f3 () __arm_in("za");
+void f4 () __arm_out("za");
+void f5 () __arm_inout("za");
+void f6 () __arm_preserves("za");
+__arm_new("za") void f7 () {}
+__arm_locally_streaming void f8 () {}
diff --git a/gcc/testsuite/g++.target/aarch64/sme/streaming_mode_1.C b/gcc/testsuite/g++.target/aarch64/sme/streaming_mode_1.C
new file mode 100644
index 0000000..c3de726
--- /dev/null
+++ b/gcc/testsuite/g++.target/aarch64/sme/streaming_mode_1.C
@@ -0,0 +1,142 @@
+// { dg-options "" }
+
+void sc_a () [[arm::streaming_compatible]];
+void sc_a (); // { dg-error "ambiguating new declaration" "" { xfail *-*-* } }
+
+void sc_b ();
+void sc_b () [[arm::streaming_compatible]]; // { dg-error "ambiguating new declaration" }
+
+void sc_c () [[arm::streaming_compatible]];
+void sc_c () {} // Inherits attribute from declaration (confusingly).
+
+void sc_d ();
+void sc_d () [[arm::streaming_compatible]] {} // { dg-error "ambiguating new declaration" }
+
+void sc_e () [[arm::streaming_compatible]] {}
+void sc_e (); // { dg-error "ambiguating new declaration" "" { xfail *-*-* } }
+
+void sc_f () {}
+void sc_f () [[arm::streaming_compatible]]; // { dg-error "ambiguating new declaration" }
+
+extern void (*sc_g) ();
+extern void (*sc_g) () [[arm::streaming_compatible]]; // { dg-error "conflicting declaration" }
+
+extern void (*sc_h) () [[arm::streaming_compatible]];
+extern void (*sc_h) (); // { dg-error "conflicting declaration" }
+
+//----------------------------------------------------------------------------
+
+void s_a () [[arm::streaming]];
+void s_a (); // { dg-error "ambiguating new declaration" "" { xfail *-*-* } }
+
+void s_b ();
+void s_b () [[arm::streaming]]; // { dg-error "ambiguating new declaration" }
+
+void s_c () [[arm::streaming]];
+void s_c () {} // Inherits attribute from declaration (confusingly).
+
+void s_d ();
+void s_d () [[arm::streaming]] {} // { dg-error "ambiguating new declaration" }
+
+void s_e () [[arm::streaming]] {}
+void s_e (); // { dg-error "ambiguating new declaration" "" { xfail *-*-* } }
+
+void s_f () {}
+void s_f () [[arm::streaming]]; // { dg-error "ambiguating new declaration" }
+
+extern void (*s_g) ();
+extern void (*s_g) () [[arm::streaming]]; // { dg-error "conflicting declaration" }
+
+extern void (*s_h) () [[arm::streaming]];
+extern void (*s_h) (); // { dg-error "conflicting declaration" }
+
+//----------------------------------------------------------------------------
+
+void mixed_a () [[arm::streaming]];
+void mixed_a () [[arm::streaming_compatible]]; // { dg-error "ambiguating new declaration" }
+
+void mixed_b () [[arm::streaming_compatible]];
+void mixed_b () [[arm::streaming]]; // { dg-error "ambiguating new declaration" }
+
+void mixed_c () [[arm::streaming]];
+void mixed_c () [[arm::streaming_compatible]] {} // { dg-error "ambiguating new declaration" }
+
+void mixed_d () [[arm::streaming_compatible]];
+void mixed_d () [[arm::streaming]] {} // { dg-error "ambiguating new declaration" }
+
+void mixed_e () [[arm::streaming]] {}
+void mixed_e () [[arm::streaming_compatible]]; // { dg-error "ambiguating new declaration" }
+
+void mixed_f () [[arm::streaming_compatible]] {}
+void mixed_f () [[arm::streaming]]; // { dg-error "ambiguating new declaration" }
+
+extern void (*mixed_g) () [[arm::streaming_compatible]];
+extern void (*mixed_g) () [[arm::streaming]]; // { dg-error "conflicting declaration" }
+
+extern void (*mixed_h) () [[arm::streaming]];
+extern void (*mixed_h) () [[arm::streaming_compatible]]; // { dg-error "conflicting declaration" }
+
+//----------------------------------------------------------------------------
+
+void contradiction_1 () [[arm::streaming, arm::streaming_compatible]]; // { dg-warning "conflicts with attribute" }
+void contradiction_2 () [[arm::streaming_compatible, arm::streaming]]; // { dg-warning "conflicts with attribute" }
+
+int [[arm::streaming_compatible]] int_attr; // { dg-warning "attribute ignored" }
+void [[arm::streaming_compatible]] ret_attr (); // { dg-warning "attribute ignored" }
+void *[[arm::streaming]] ptr_attr; // { dg-warning "only applies to function types" }
+
+typedef void s_callback () [[arm::streaming]];
+typedef void sc_callback () [[arm::streaming_compatible]];
+
+typedef void contradiction_callback_1 () [[arm::streaming, arm::streaming_compatible]]; // { dg-warning "conflicts with attribute" }
+typedef void contradiction_callback_2 () [[arm::streaming_compatible, arm::streaming]]; // { dg-warning "conflicts with attribute" }
+
+void (*contradiction_callback_ptr_1) () [[arm::streaming, arm::streaming_compatible]]; // { dg-warning "conflicts with attribute" }
+void (*contradiction_callback_ptr_2) () [[arm::streaming_compatible, arm::streaming]]; // { dg-warning "conflicts with attribute" }
+
+struct s {
+ void (*contradiction_callback_ptr_1) () [[arm::streaming, arm::streaming_compatible]]; // { dg-warning "conflicts with attribute" }
+ void (*contradiction_callback_ptr_2) () [[arm::streaming_compatible, arm::streaming]]; // { dg-warning "conflicts with attribute" }
+};
+
+//----------------------------------------------------------------------------
+
+void keyword_ok_1 () __arm_streaming;
+void keyword_ok_1 () __arm_streaming;
+
+void keyword_ok_2 () __arm_streaming;
+void keyword_ok_2 () [[arm::streaming]];
+
+void keyword_ok_3 () [[arm::streaming]];
+void keyword_ok_3 () __arm_streaming;
+
+void keyword_ok_4 () __arm_streaming [[arm::streaming]];
+
+void keyword_ok_5 () __arm_streaming_compatible;
+void keyword_ok_5 () [[arm::streaming_compatible]];
+
+//----------------------------------------------------------------------------
+
+void keyword_contradiction_1 () __arm_streaming;
+void keyword_contradiction_1 (); // { dg-error "ambiguating new declaration" "" { xfail *-*-* } }
+
+void keyword_contradiction_2 ();
+void keyword_contradiction_2 () __arm_streaming; // { dg-error "ambiguating new declaration" }
+
+void keyword_contradiction_3 () __arm_streaming;
+void keyword_contradiction_3 () [[arm::streaming_compatible]]; // { dg-error "ambiguating new declaration" }
+
+void keyword_contradiction_4 () [[arm::streaming_compatible]];
+void keyword_contradiction_4 () __arm_streaming; // { dg-error "ambiguating new declaration" }
+
+//----------------------------------------------------------------------------
+
+struct s1
+{
+ virtual void f () [[arm::streaming]];
+};
+
+struct s2 : public s1
+{
+ void f () override; // { dg-error "conflicting type attributes" }
+};
diff --git a/gcc/testsuite/g++.target/aarch64/sme/streaming_mode_2.C b/gcc/testsuite/g++.target/aarch64/sme/streaming_mode_2.C
new file mode 100644
index 0000000..f2dd2db
--- /dev/null
+++ b/gcc/testsuite/g++.target/aarch64/sme/streaming_mode_2.C
@@ -0,0 +1,25 @@
+// { dg-options "" }
+
+void sc_fn () [[arm::streaming_compatible]];
+void s_fn () [[arm::streaming]];
+void ns_fn ();
+
+void (*sc_fn_ptr) () [[arm::streaming_compatible]];
+void (*s_fn_ptr) () [[arm::streaming]];
+void (*ns_fn_ptr) ();
+
+void
+f ()
+{
+ sc_fn_ptr = sc_fn;
+ sc_fn_ptr = s_fn; // { dg-error "invalid conversion" }
+ sc_fn_ptr = ns_fn; // { dg-error "invalid conversion" }
+
+ s_fn_ptr = sc_fn; // { dg-error "invalid conversion" }
+ s_fn_ptr = s_fn;
+ s_fn_ptr = ns_fn; // { dg-error "invalid conversion" }
+
+ ns_fn_ptr = sc_fn; // { dg-error "invalid conversion" }
+ ns_fn_ptr = s_fn; // { dg-error "invalid conversion" }
+ ns_fn_ptr = ns_fn;
+}
diff --git a/gcc/testsuite/g++.target/aarch64/sme2/aarch64-sme2-acle-asm.exp b/gcc/testsuite/g++.target/aarch64/sme2/aarch64-sme2-acle-asm.exp
new file mode 100644
index 0000000..46c8836
--- /dev/null
+++ b/gcc/testsuite/g++.target/aarch64/sme2/aarch64-sme2-acle-asm.exp
@@ -0,0 +1,82 @@
+# Assembly-based regression-test driver for the SME2 ACLE.
+# Copyright (C) 2009-2023 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>. */
+
+# GCC testsuite that uses the `dg.exp' driver.
+
+# Exit immediately if this isn't an AArch64 target.
+if {![istarget aarch64*-*-*] } {
+ return
+}
+
+# Load support procs.
+load_lib g++-dg.exp
+
+# Initialize `dg'.
+dg-init
+
+# Force SME2 if we're not testing it already.
+if { [check_effective_target_aarch64_sme2] } {
+ set sme2_flags ""
+} else {
+ set sme2_flags "-march=armv9-a+sme2"
+}
+
+# Turn off any codegen tweaks by default that may affect expected assembly.
+# Tests relying on those should turn them on explicitly.
+set sme2_flags "$sme2_flags -mtune=generic -moverride=tune=none"
+
+global gcc_runtest_parallelize_limit_minor
+if { [info exists gcc_runtest_parallelize_limit_minor] } {
+ set old_limit_minor $gcc_runtest_parallelize_limit_minor
+ set gcc_runtest_parallelize_limit_minor 1
+}
+
+torture-init
+set-torture-options {
+ "-std=c++11 -O0 -g"
+ "-std=c++14 -O1 -g"
+ "-std=c++17 -Og -g"
+ "-std=c++23 -Os -g"
+ "-std=gnu++11 -O2 -fno-schedule-insns -fno-schedule-insns2 -DCHECK_ASM --save-temps"
+ "-std=gnu++23 -Ofast -g"
+} {
+ "-DTEST_FULL"
+ "-DTEST_OVERLOADS"
+}
+
+# Main loop.
+set gcc_subdir [string replace $subdir 0 2 gcc]
+set files [glob -nocomplain $srcdir/$gcc_subdir/acle-asm/*.c]
+set save-dg-do-what-default ${dg-do-what-default}
+if { [check_effective_target_aarch64_asm_sme2_ok] } {
+ set dg-do-what-default assemble
+} else {
+ set dg-do-what-default compile
+}
+gcc-dg-runtest [lsort $files] "" "$sme2_flags -fno-ipa-icf"
+set dg-do-what-default ${save-dg-do-what-default}
+
+torture-finish
+
+if { [info exists gcc_runtest_parallelize_limit_minor] } {
+ set gcc_runtest_parallelize_limit_minor $old_limit_minor
+}
+
+# All done.
+dg-finish
diff --git a/gcc/testsuite/g++.target/aarch64/sve/aarch64-ssve.exp b/gcc/testsuite/g++.target/aarch64/sve/aarch64-ssve.exp
new file mode 100644
index 0000000..d6a5a56
--- /dev/null
+++ b/gcc/testsuite/g++.target/aarch64/sve/aarch64-ssve.exp
@@ -0,0 +1,308 @@
+# Specific regression driver for AArch64 SME.
+# Copyright (C) 2009-2023 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>. */
+
+# Test whether certain SVE instructions are accepted or rejected in
+# SME streaming mode.
+
+# Exit immediately if this isn't an AArch64 target.
+if {![istarget aarch64*-*-*] } {
+ return
+}
+
+load_lib gcc-defs.exp
+
+gcc_parallel_test_enable 0
+
+# Code shared by all tests.
+set preamble {
+#include <arm_sve.h>
+
+#pragma GCC target "+i8mm+f32mm+f64mm+sve2+sve2-bitperm+sve2-sm4+sve2-aes+sve2-sha3+sme"
+
+extern svbool_t &pred;
+
+extern svint8_t &s8;
+extern svint32_t &s32;
+
+extern svuint8_t &u8;
+extern svuint16_t &u16;
+extern svuint32_t &u32;
+extern svuint64_t &u64;
+
+extern svbfloat16_t &bf16;
+extern svfloat32_t &f32;
+
+extern void *void_ptr;
+
+extern int8_t *s8_ptr;
+extern int16_t *s16_ptr;
+extern int32_t *s32_ptr;
+
+extern uint8_t *u8_ptr;
+extern uint16_t *u16_ptr;
+extern uint32_t *u32_ptr;
+extern uint64_t *u64_ptr;
+
+extern uint64_t indx;
+}
+
+# Wrap a standalone call in a streaming-compatible function.
+set sc_harness {
+void
+foo () [[arm::streaming_compatible]]
+{
+ $CALL;
+}
+}
+
+# HARNESS is some source code that should be appended to the preamble
+# variable defined above. It includes the string "$CALL", which should be
+# replaced by the function call in CALL. The result after both steps is
+# a complete C++ translation unit.
+#
+# Try compiling the C++ code and see what output GCC produces.
+# The expected output is either:
+#
+# - empty, if SHOULD_PASS is true
+# - a message rejecting CALL in streaming mode, if SHOULD_PASS is false
+#
+# CALL is simple enough that it can be used in test names.
+proc check_ssve_call { harness name call should_pass } {
+ global preamble
+
+ set filename test-[pid]
+ set fd [open $filename.cc w]
+ puts $fd $preamble
+ puts -nonewline $fd [string map [list {$CALL} $call] $harness]
+ close $fd
+ remote_download host $filename.cc
+
+ set test "streaming SVE call $name"
+
+ set gcc_output [g++_target_compile $filename.cc $filename.s assembly ""]
+ remote_file build delete $filename.cc $filename.s
+
+ if { [string equal $gcc_output ""] } {
+ if { $should_pass } {
+ pass $test
+ } else {
+ fail $test
+ }
+ return
+ }
+
+ set lines [split $gcc_output "\n"]
+ set error_text "cannot be called when SME streaming mode is enabled"
+ if { [llength $lines] == 3
+ && [string first "In function" [lindex $lines 0]] >= 0
+ && [string first $error_text [lindex $lines 1]] >= 0
+ && [string equal [lindex $lines 2] ""] } {
+ if { $should_pass } {
+ fail $test
+ } else {
+ pass $test
+ }
+ return
+ }
+
+ verbose -log "$test: unexpected output"
+ fail $test
+}
+
+# Apply check_ssve_call to each line in CALLS. The other arguments are
+# as for check_ssve_call.
+proc check_ssve_calls { harness calls should_pass } {
+ foreach line [split $calls "\n"] {
+ set call [string trim $line]
+ if { [string equal $call ""] } {
+ continue
+ }
+ check_ssve_call $harness "$call" $call $should_pass
+ }
+}
+
+# A small selection of things that are valid in streaming mode.
+set streaming_ok {
+ s8 = svadd_x (pred, s8, s8)
+ s8 = svld1 (pred, s8_ptr)
+}
+
+# This order follows the list in the SME manual.
+set nonstreaming_only {
+ u32 = svadrb_offset (u32, u32)
+ u64 = svadrb_offset (u64, u64)
+ u32 = svadrh_index (u32, u32)
+ u64 = svadrh_index (u64, u64)
+ u32 = svadrw_index (u32, u32)
+ u64 = svadrw_index (u64, u64)
+ u32 = svadrd_index (u32, u32)
+ u64 = svadrd_index (u64, u64)
+ u8 = svaesd (u8, u8)
+ u8 = svaese (u8, u8)
+ u8 = svaesimc (u8)
+ u8 = svaesmc (u8)
+ u8 = svbdep (u8, u8)
+ u8 = svbext (u8, u8)
+ f32 = svbfmmla (f32, bf16, bf16)
+ u8 = svbgrp (u8, u8)
+ u32 = svcompact (pred, u32)
+ f32 = svadda (pred, 1.0f, f32)
+ f32 = svexpa (u32)
+ f32 = svmmla (f32, f32, f32)
+ f32 = svtmad (f32, f32, 0)
+ f32 = svtsmul (f32, u32)
+ f32 = svtssel (f32, u32)
+ u32 = svhistcnt_z (pred, u32, u32)
+ u8 = svhistseg (u8, u8)
+ u32 = svld1ub_gather_offset_u32 (pred, u8_ptr, u32)
+ u32 = svld1ub_gather_offset_u32 (pred, u32, 1)
+ u64 = svld1_gather_index (pred, u64_ptr, u64)
+ u64 = svld1_gather_index_u64 (pred, u64, 1)
+ u32 = svld1uh_gather_index_u32 (pred, u16_ptr, u32)
+ u32 = svld1uh_gather_index_u32 (pred, u32, 1)
+ u8 = svld1ro (pred, u8_ptr + indx)
+ u8 = svld1ro (pred, u8_ptr + 1)
+ u16 = svld1ro (pred, u16_ptr + indx)
+ u16 = svld1ro (pred, u16_ptr + 1)
+ u32 = svld1ro (pred, u32_ptr + indx)
+ u32 = svld1ro (pred, u32_ptr + 1)
+ u64 = svld1ro (pred, u64_ptr + indx)
+ u64 = svld1ro (pred, u64_ptr + 1)
+ u32 = svld1sb_gather_offset_u32 (pred, s8_ptr, u32)
+ u32 = svld1sb_gather_offset_u32 (pred, u32, 1)
+ u32 = svld1sh_gather_index_u32 (pred, s16_ptr, u32)
+ u32 = svld1sh_gather_index_u32 (pred, u32, 1)
+ u64 = svld1sw_gather_index_u64 (pred, s32_ptr, u64)
+ u64 = svld1sw_gather_index_u64 (pred, u64, 1)
+ u64 = svld1uw_gather_index_u64 (pred, u32_ptr, u64)
+ u64 = svld1uw_gather_index_u64 (pred, u64, 1)
+ u32 = svld1_gather_index (pred, u32_ptr, u32)
+ u32 = svld1_gather_index_u32 (pred, u32, 1)
+ u8 = svldff1(pred, u8_ptr)
+ u16 = svldff1ub_u16(pred, u8_ptr)
+ u32 = svldff1ub_u32(pred, u8_ptr)
+ u64 = svldff1ub_u64(pred, u8_ptr)
+ u32 = svldff1ub_gather_offset_u32 (pred, u8_ptr, u32)
+ u32 = svldff1ub_gather_offset_u32 (pred, u32, 1)
+ u64 = svldff1(pred, u64_ptr)
+ u64 = svldff1_gather_index (pred, u64_ptr, u64)
+ u64 = svldff1_gather_index_u64 (pred, u64, 1)
+ u16 = svldff1(pred, u16_ptr)
+ u32 = svldff1uh_u32(pred, u16_ptr)
+ u64 = svldff1uh_u64(pred, u16_ptr)
+ u32 = svldff1uh_gather_offset_u32 (pred, u16_ptr, u32)
+ u32 = svldff1uh_gather_offset_u32 (pred, u32, 1)
+ u16 = svldff1sb_u16(pred, s8_ptr)
+ u32 = svldff1sb_u32(pred, s8_ptr)
+ u64 = svldff1sb_u64(pred, s8_ptr)
+ u32 = svldff1sb_gather_offset_u32 (pred, s8_ptr, u32)
+ u32 = svldff1sb_gather_offset_u32 (pred, u32, 1)
+ u32 = svldff1sh_u32(pred, s16_ptr)
+ u64 = svldff1sh_u64(pred, s16_ptr)
+ u32 = svldff1sh_gather_offset_u32 (pred, s16_ptr, u32)
+ u32 = svldff1sh_gather_offset_u32 (pred, u32, 1)
+ u64 = svldff1sw_u64(pred, s32_ptr)
+ u64 = svldff1sw_gather_offset_u64 (pred, s32_ptr, u64)
+ u64 = svldff1sw_gather_offset_u64 (pred, u64, 1)
+ u32 = svldff1(pred, u32_ptr)
+ u32 = svldff1_gather_index (pred, u32_ptr, u32)
+ u32 = svldff1_gather_index_u32 (pred, u32, 1)
+ u64 = svldff1uw_u64(pred, u32_ptr)
+ u64 = svldff1uw_gather_offset_u64 (pred, u32_ptr, u64)
+ u64 = svldff1uw_gather_offset_u64 (pred, u64, 1)
+ u8 = svldnf1(pred, u8_ptr)
+ u16 = svldnf1ub_u16(pred, u8_ptr)
+ u32 = svldnf1ub_u32(pred, u8_ptr)
+ u64 = svldnf1ub_u64(pred, u8_ptr)
+ u64 = svldnf1(pred, u64_ptr)
+ u16 = svldnf1(pred, u16_ptr)
+ u32 = svldnf1uh_u32(pred, u16_ptr)
+ u64 = svldnf1uh_u64(pred, u16_ptr)
+ u16 = svldnf1sb_u16(pred, s8_ptr)
+ u32 = svldnf1sb_u32(pred, s8_ptr)
+ u64 = svldnf1sb_u64(pred, s8_ptr)
+ u32 = svldnf1sh_u32(pred, s16_ptr)
+ u64 = svldnf1sh_u64(pred, s16_ptr)
+ u64 = svldnf1sw_u64(pred, s32_ptr)
+ u32 = svldnf1(pred, u32_ptr)
+ u64 = svldnf1uw_u64(pred, u32_ptr)
+ u32 = svldnt1ub_gather_offset_u32 (pred, u8_ptr, u32)
+ u32 = svldnt1ub_gather_offset_u32 (pred, u32, 1)
+ u64 = svldnt1_gather_index (pred, u64_ptr, u64)
+ u64 = svldnt1_gather_index_u64 (pred, u64, 1)
+ u32 = svldnt1uh_gather_offset_u32 (pred, u16_ptr, u32)
+ u32 = svldnt1uh_gather_offset_u32 (pred, u32, 1)
+ u32 = svldnt1sb_gather_offset_u32 (pred, s8_ptr, u32)
+ u32 = svldnt1sb_gather_offset_u32 (pred, u32, 1)
+ u32 = svldnt1sh_gather_offset_u32 (pred, s16_ptr, u32)
+ u32 = svldnt1sh_gather_offset_u32 (pred, u32, 1)
+ u64 = svldnt1sw_gather_offset_u64 (pred, s32_ptr, u64)
+ u64 = svldnt1sw_gather_offset_u64 (pred, u64, 1)
+ u64 = svldnt1uw_gather_offset_u64 (pred, u32_ptr, u64)
+ u64 = svldnt1uw_gather_offset_u64 (pred, u64, 1)
+ u32 = svldnt1_gather_offset (pred, u32_ptr, u32)
+ u32 = svldnt1_gather_offset_u32 (pred, u32, 1)
+ pred = svmatch (pred, u8, u8)
+ pred = svnmatch (pred, u8, u8)
+ u64 = svpmullb_pair (u64, u64)
+ u64 = svpmullt_pair (u64, u64)
+ svprfb_gather_offset (pred, void_ptr, u64, SV_PLDL1KEEP)
+ svprfb_gather_offset (pred, u64, 1, SV_PLDL1KEEP)
+ svprfd_gather_index (pred, void_ptr, u64, SV_PLDL1KEEP)
+ svprfd_gather_index (pred, u64, 1, SV_PLDL1KEEP)
+ svprfh_gather_index (pred, void_ptr, u64, SV_PLDL1KEEP)
+ svprfh_gather_index (pred, u64, 1, SV_PLDL1KEEP)
+ svprfw_gather_index (pred, void_ptr, u64, SV_PLDL1KEEP)
+ svprfw_gather_index (pred, u64, 1, SV_PLDL1KEEP)
+ u64 = svrax1 (u64, u64)
+ pred = svrdffr ()
+ pred = svrdffr_z (pred)
+ svsetffr ()
+ u32 = svsm4e (u32, u32)
+ u32 = svsm4ekey (u32, u32)
+ s32 = svmmla (s32, s8, s8)
+ svst1b_scatter_offset (pred, u8_ptr, u32, u32)
+ svst1b_scatter_offset (pred, u32, 1, u32)
+ svst1_scatter_index (pred, u64_ptr, u64, u64)
+ svst1_scatter_index (pred, u64, 1, u64)
+ svst1h_scatter_index (pred, u16_ptr, u32, u32)
+ svst1h_scatter_index (pred, u32, 1, u32)
+ svst1w_scatter_index (pred, u32_ptr, u64, u64)
+ svst1w_scatter_index (pred, u64, 1, u64)
+ svst1_scatter_index (pred, u32_ptr, u32, u32)
+ svst1_scatter_index (pred, u32, 1, u32)
+ svstnt1b_scatter_offset (pred, u8_ptr, u32, u32)
+ svstnt1b_scatter_offset (pred, u32, 1, u32)
+ svstnt1_scatter_offset (pred, u64_ptr, u64, u64)
+ svstnt1_scatter_offset (pred, u64, 1, u64)
+ svstnt1h_scatter_offset (pred, u16_ptr, u32, u32)
+ svstnt1h_scatter_offset (pred, u32, 1, u32)
+ svstnt1w_scatter_offset (pred, u32_ptr, u64, u64)
+ svstnt1w_scatter_offset (pred, u64, 1, u64)
+ svstnt1_scatter_offset (pred, u32_ptr, u32, u32)
+ svstnt1_scatter_offset (pred, u32, 1, u32)
+ u32 = svmmla (u32, u8, u8)
+ s32 = svusmmla (s32, u8, s8)
+ svwrffr (pred)
+}
+
+check_ssve_calls $sc_harness $streaming_ok 1
+check_ssve_calls $sc_harness $nonstreaming_only 0
+
+gcc_parallel_test_enable 1
diff --git a/gcc/testsuite/g++.target/aarch64/sve/acle/aarch64-sve-acle-asm.exp b/gcc/testsuite/g++.target/aarch64/sve/acle/aarch64-sve-acle-asm.exp
index 5b40d0d..4b4ee10 100644
--- a/gcc/testsuite/g++.target/aarch64/sve/acle/aarch64-sve-acle-asm.exp
+++ b/gcc/testsuite/g++.target/aarch64/sve/acle/aarch64-sve-acle-asm.exp
@@ -50,6 +50,7 @@ if { [info exists gcc_runtest_parallelize_limit_minor] } {
torture-init
set-torture-options {
"-std=c++98 -O0 -g"
+ "-std=c++11 -O0 -DSTREAMING_COMPATIBLE"
"-std=c++98 -O1 -g"
"-std=c++11 -O2 -g"
"-std=c++14 -O3 -g"
diff --git a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/func_redef_4.c b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/func_redef_4.c
index 9591e3d..f2f922d 100644
--- a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/func_redef_4.c
+++ b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/func_redef_4.c
@@ -4,6 +4,7 @@
to be diagnosed. Any attempt to call the function before including
arm_sve.h will lead to a link failure. (Same for taking its address,
etc.) */
-extern __SVUint8_t svadd_u8_x (__SVBool_t, __SVUint8_t, __SVUint8_t);
+extern __SVUint8_t svadd_u8_x (__SVBool_t, __SVUint8_t, __SVUint8_t)
+ __arm_streaming_compatible;
#pragma GCC aarch64 "arm_sve.h"
diff --git a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/func_redef_5.c b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/func_redef_5.c
index f872019..f24ef00 100644
--- a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/func_redef_5.c
+++ b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/func_redef_5.c
@@ -2,6 +2,7 @@
__SVUint8_t
svadd_u8_x (__SVBool_t pg, __SVUint8_t x, __SVUint8_t y)
+ __arm_streaming_compatible
{
return x;
}
diff --git a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/func_redef_7.c b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/func_redef_7.c
index 1f2e4bf..6752ea1 100644
--- a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/func_redef_7.c
+++ b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/func_redef_7.c
@@ -2,6 +2,7 @@
__SVUint8_t
svadd_x (__SVBool_t pg, __SVUint8_t x, __SVUint8_t y)
+ __arm_streaming_compatible
{
return x;
}
diff --git a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_1.C b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_1.C
index 36dab3c..2ad0c7f 100644
--- a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_1.C
+++ b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_1.C
@@ -15,6 +15,7 @@ void f10(svfloat16_t) {}
void f11(svfloat32_t) {}
void f12(svfloat64_t) {}
void f13(svbfloat16_t) {}
+void f14(svcount_t) {}
/* { dg-final { scan-assembler "_Z2f1u10__SVBool_t:" } } */
/* { dg-final { scan-assembler "_Z2f2u10__SVInt8_t:" } } */
@@ -29,3 +30,4 @@ void f13(svbfloat16_t) {}
/* { dg-final { scan-assembler "_Z3f11u13__SVFloat32_t:" } } */
/* { dg-final { scan-assembler "_Z3f12u13__SVFloat64_t:" } } */
/* { dg-final { scan-assembler "_Z3f13u14__SVBfloat16_t:" } } */
+/* { dg-final { scan-assembler "_Z3f14u11__SVCount_t:" } } */
diff --git a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_2.C b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_2.C
index ad4aaee..c8bfcc5 100644
--- a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_2.C
+++ b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_2.C
@@ -13,6 +13,7 @@ void f10(__SVFloat16_t) {}
void f11(__SVFloat32_t) {}
void f12(__SVFloat64_t) {}
void f13(__SVBfloat16_t) {}
+void f14(__SVCount_t) {}
/* { dg-final { scan-assembler "_Z2f1u10__SVBool_t:" } } */
/* { dg-final { scan-assembler "_Z2f2u10__SVInt8_t:" } } */
@@ -27,3 +28,4 @@ void f13(__SVBfloat16_t) {}
/* { dg-final { scan-assembler "_Z3f11u13__SVFloat32_t:" } } */
/* { dg-final { scan-assembler "_Z3f12u13__SVFloat64_t:" } } */
/* { dg-final { scan-assembler "_Z3f13u14__SVBfloat16_t:" } } */
+/* { dg-final { scan-assembler "_Z3f14u11__SVCount_t:" } } */
diff --git a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/svcount_1.C b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/svcount_1.C
new file mode 100644
index 0000000..9eac65a
--- /dev/null
+++ b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/svcount_1.C
@@ -0,0 +1,10 @@
+#include <arm_sve.h>
+
+svbool_t f1 (svcount_t x) { return x; } // { dg-error {cannot convert 'svcount_t' to 'svbool_t' in return} }
+svcount_t f2 (svbool_t x) { return x; } // { dg-error {cannot convert 'svbool_t' to 'svcount_t' in return} }
+void f3 (svbool_t *p, svcount_t x) { *p = x; } // { dg-error {cannot convert 'svcount_t' to 'svbool_t' in assignment} }
+void f4 (svcount_t *p, svbool_t x) { *p = x; } // { dg-error {cannot convert 'svbool_t' to 'svcount_t' in assignment} }
+svbool_t *f5 (svcount_t *p) { return p; } // { dg-error {cannot convert} }
+svcount_t *f6 (svbool_t *p) { return p; } // { dg-error {cannot convert} }
+svbool_t f7 (svcount_t x) { return (svbool_t) x; } // { dg-error {invalid cast from type 'svcount_t' to type 'svbool_t'} }
+svcount_t f8 (svbool_t x) { return (svcount_t) x; } // { dg-error {invalid cast from type 'svbool_t' to type 'svcount_t'} }
diff --git a/gcc/testsuite/g++.target/aarch64/sve2/acle/aarch64-sve2-acle-asm.exp b/gcc/testsuite/g++.target/aarch64/sve2/acle/aarch64-sve2-acle-asm.exp
index b605da8..9cd2efd 100644
--- a/gcc/testsuite/g++.target/aarch64/sve2/acle/aarch64-sve2-acle-asm.exp
+++ b/gcc/testsuite/g++.target/aarch64/sve2/acle/aarch64-sve2-acle-asm.exp
@@ -53,6 +53,7 @@ if { [info exists gcc_runtest_parallelize_limit_minor] } {
torture-init
set-torture-options {
"-std=c++98 -O0 -g"
+ "-std=c++11 -O0 -DSTREAMING_COMPATIBLE"
"-std=c++98 -O1 -g"
"-std=c++11 -O2 -g"
"-std=c++14 -O3 -g"
diff --git a/gcc/testsuite/g++.target/riscv/rvv/autovec/bug-01.C b/gcc/testsuite/g++.target/riscv/rvv/autovec/bug-1.C
index fd10009..d142a0a 100644
--- a/gcc/testsuite/g++.target/riscv/rvv/autovec/bug-01.C
+++ b/gcc/testsuite/g++.target/riscv/rvv/autovec/bug-1.C
@@ -5,6 +5,7 @@ public:
int e();
void j();
};
+
float *d;
class k {
int f;
@@ -21,6 +22,7 @@ public:
}
}
};
+
c l;
void o() {
int b = l.e();
diff --git a/gcc/testsuite/g++.target/riscv/rvv/autovec/bug-2.C b/gcc/testsuite/g++.target/riscv/rvv/autovec/bug-2.C
new file mode 100644
index 0000000..53bc4a3
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/autovec/bug-2.C
@@ -0,0 +1,26 @@
+/* { dg-options "-march=rv64gc_zve32f -mabi=lp64d -O3 --param=riscv-autovec-lmul=m4" } */
+
+int max(int __b) {
+ if (0 < __b)
+ return __b;
+ return 0;
+}
+struct Plane {
+ Plane(int, int);
+ int *Row();
+};
+float *ConvolveXSampleAndTranspose_rowp;
+int ConvolveXSampleAndTranspose_res, ConvolveXSampleAndTranspose_r;
+void ConvolveXSampleAndTranspose() {
+ Plane out(0, ConvolveXSampleAndTranspose_res);
+ for (int y;;) {
+ float sum;
+ for (int i = ConvolveXSampleAndTranspose_r; i; ++i)
+ sum += i;
+ for (; ConvolveXSampleAndTranspose_r; ++ConvolveXSampleAndTranspose_r)
+ sum +=
+ ConvolveXSampleAndTranspose_rowp[max(ConvolveXSampleAndTranspose_r)] *
+ ConvolveXSampleAndTranspose_r;
+ out.Row()[y] = sum;
+ }
+}
diff --git a/gcc/testsuite/gcc.c-torture/execute/comp-goto-1.c b/gcc/testsuite/gcc.c-torture/execute/comp-goto-1.c
index 4379fe7..6be63c0 100644
--- a/gcc/testsuite/gcc.c-torture/execute/comp-goto-1.c
+++ b/gcc/testsuite/gcc.c-torture/execute/comp-goto-1.c
@@ -163,5 +163,5 @@ main ()
exit (0);
}
#else
-main(){ exit (0); }
+int main(){ exit (0); }
#endif
diff --git a/gcc/testsuite/gcc.c-torture/execute/pr65369.c b/gcc/testsuite/gcc.c-torture/execute/pr65369.c
index 017fe1b..548b48f 100644
--- a/gcc/testsuite/gcc.c-torture/execute/pr65369.c
+++ b/gcc/testsuite/gcc.c-torture/execute/pr65369.c
@@ -6,7 +6,7 @@ static const char data[] =
"123456789012345678901234567890";
__attribute__ ((noinline))
-static void foo (const unsigned int *buf)
+static void foo (const uint32_t *buf)
{
if (__builtin_memcmp (buf, data, 64))
__builtin_abort ();
diff --git a/gcc/testsuite/gcc.dg/20030906-1.c b/gcc/testsuite/gcc.dg/20030906-1.c
index c416f55..6ba5b3d 100644
--- a/gcc/testsuite/gcc.dg/20030906-1.c
+++ b/gcc/testsuite/gcc.dg/20030906-1.c
@@ -2,7 +2,7 @@
Copyright (C) 2003 Free Software Foundation Inc. */
/* { dg-do compile } */
-/* { dg-options "-O -finline-functions -Wreturn-type" } */
+/* { dg-options "-fpermissive -O -finline-functions -Wreturn-type" } */
extern int i;
extern int foo (void);
diff --git a/gcc/testsuite/gcc.dg/20030906-1a.c b/gcc/testsuite/gcc.dg/20030906-1a.c
new file mode 100644
index 0000000..46ca177
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/20030906-1a.c
@@ -0,0 +1,21 @@
+/* Bug 9862 -- Spurious warnings with -finline-functions.
+ Copyright (C) 2003 Free Software Foundation Inc. */
+
+/* { dg-do compile } */
+/* { dg-options "-O -finline-functions -Wreturn-type" } */
+
+extern int i;
+extern int foo (void);
+extern int bar (void);
+
+int foo (void)
+{
+ if( i ) return 0;
+ else return 1;
+}
+
+int bar (void)
+{
+ if( i ) return; /* { dg-error "'return' with no value, in function returning non-void" } */
+ else return 1;
+}
diff --git a/gcc/testsuite/gcc.dg/20030906-2.c b/gcc/testsuite/gcc.dg/20030906-2.c
index 1191133..a85d91f 100644
--- a/gcc/testsuite/gcc.dg/20030906-2.c
+++ b/gcc/testsuite/gcc.dg/20030906-2.c
@@ -2,7 +2,7 @@
Copyright (C) 2003 Free Software Foundation Inc. */
/* { dg-do compile } */
-/* { dg-options "-O -finline-functions -Wreturn-type" } */
+/* { dg-options "-fpermissive -O -finline-functions -Wreturn-type" } */
extern int i;
extern int foo (void);
diff --git a/gcc/testsuite/gcc.dg/20030906-2a.c b/gcc/testsuite/gcc.dg/20030906-2a.c
new file mode 100644
index 0000000..a6ffbac
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/20030906-2a.c
@@ -0,0 +1,21 @@
+/* Bug 9862 -- Spurious warnings with -finline-functions.
+ Copyright (C) 2003 Free Software Foundation Inc. */
+
+/* { dg-do compile } */
+/* { dg-options "-O -finline-functions -Wreturn-type" } */
+
+extern int i;
+extern int foo (void);
+extern int bar (void);
+
+int foo (void)
+{
+ if( i ) return; /* { dg-error "'return' with no value, in function returning non-void" } */
+ else return 1;
+}
+
+int bar (void)
+{
+ if( i ) return 0;
+ else return 1;
+}
diff --git a/gcc/testsuite/gcc.dg/Wimplicit-function-declaration-c99-2.c b/gcc/testsuite/gcc.dg/Wimplicit-function-declaration-c99-2.c
new file mode 100644
index 0000000..d65abc9
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/Wimplicit-function-declaration-c99-2.c
@@ -0,0 +1,7 @@
+/* { dg-do compile } */
+/* { dg-options "-std=c99" } */
+
+void f(void)
+{
+ puts("Hello"); /* { dg-error "implicit declaration of function" } */
+}
diff --git a/gcc/testsuite/gcc.dg/Wimplicit-function-declaration-c99.c b/gcc/testsuite/gcc.dg/Wimplicit-function-declaration-c99.c
index 254f7e7..abea8a5 100644
--- a/gcc/testsuite/gcc.dg/Wimplicit-function-declaration-c99.c
+++ b/gcc/testsuite/gcc.dg/Wimplicit-function-declaration-c99.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-std=c99" } */
+/* { dg-options "-fpermissive -std=c99" } */
void f(void)
{
diff --git a/gcc/testsuite/gcc.dg/Wimplicit-int-1.c b/gcc/testsuite/gcc.dg/Wimplicit-int-1.c
index 4a96e8f..fc7726c 100644
--- a/gcc/testsuite/gcc.dg/Wimplicit-int-1.c
+++ b/gcc/testsuite/gcc.dg/Wimplicit-int-1.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "" } */
+/* { dg-options "-fpermissive" } */
static l; /* { dg-warning "type defaults to" } */
diff --git a/gcc/testsuite/gcc.dg/Wimplicit-int-1a.c b/gcc/testsuite/gcc.dg/Wimplicit-int-1a.c
new file mode 100644
index 0000000..ef1835e
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/Wimplicit-int-1a.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "" } */
+
+static l; /* { dg-error "type defaults to" } */
+
+foo (a) /* { dg-error "return type defaults to" } */
+/* { dg-error "type of .a. defaults to .int." "type" { target *-*-* } .-1 } */
+{
+ auto p; /* { dg-error "type defaults to" } */
+ typedef bar; /* { dg-error "type defaults to" } */
+}
diff --git a/gcc/testsuite/gcc.dg/Wimplicit-int-4.c b/gcc/testsuite/gcc.dg/Wimplicit-int-4.c
index c9c6e8e..99c61a7 100644
--- a/gcc/testsuite/gcc.dg/Wimplicit-int-4.c
+++ b/gcc/testsuite/gcc.dg/Wimplicit-int-4.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-Wno-implicit -Wimplicit-int" } */
+/* { dg-options "-fpermissive -Wno-implicit -Wimplicit-int" } */
static l; /* { dg-warning "type defaults to" } */
diff --git a/gcc/testsuite/gcc.dg/Wimplicit-int-4a.c b/gcc/testsuite/gcc.dg/Wimplicit-int-4a.c
new file mode 100644
index 0000000..920a088
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/Wimplicit-int-4a.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-Wno-implicit -Wimplicit-int" } */
+
+static l; /* { dg-error "type defaults to" } */
+
+foo (a) /* { dg-error "return type defaults to" } */
+/* { dg-error "type of .a. defaults to .int." "type" { target *-*-* } .-1 } */
+{
+ auto p; /* { dg-error "type defaults to" } */
+ typedef bar; /* { dg-error "type defaults to" } */
+}
diff --git a/gcc/testsuite/gcc.dg/Wincompatible-pointer-types-2.c b/gcc/testsuite/gcc.dg/Wincompatible-pointer-types-2.c
index 19276bb..bbf983c 100644
--- a/gcc/testsuite/gcc.dg/Wincompatible-pointer-types-2.c
+++ b/gcc/testsuite/gcc.dg/Wincompatible-pointer-types-2.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "" } */
+/* { dg-options "-fpermissive" } */
void *
f1 (int flag, int *a, long *b)
diff --git a/gcc/testsuite/gcc.dg/Wincompatible-pointer-types-5.c b/gcc/testsuite/gcc.dg/Wincompatible-pointer-types-5.c
new file mode 100644
index 0000000..dcbfa47
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/Wincompatible-pointer-types-5.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "" } */
+
+void *
+f1 (int flag, int *a, long *b)
+{
+ return flag ? a : b; /* { dg-error "pointer type mismatch in conditional expression \\\[-Wincompatible-pointer-types\\\]" } */
+ /* { dg-note "first expression has type 'int \\*'" "" { target *-*-* } .-1 } */
+ /* { dg-note "second expression has type 'long int \\*'" "" { target *-*-* } .-2 } */
+}
diff --git a/gcc/testsuite/gcc.dg/Wint-conversion-2.c b/gcc/testsuite/gcc.dg/Wint-conversion-2.c
index bf590a7..101e792 100644
--- a/gcc/testsuite/gcc.dg/Wint-conversion-2.c
+++ b/gcc/testsuite/gcc.dg/Wint-conversion-2.c
@@ -1,7 +1,7 @@
/* PR middle-end/86202 - ICE in get_range_info calling an invalid memcpy()
declaration */
/* { dg-do compile } */
-/* { dg-options "-Wint-conversion" } */
+/* { dg-options "-fpermissive -Wint-conversion" } */
void *memcpy (void *, void *, __SIZE_TYPE__ *); /* { dg-warning "conflicting types for built-in function .memcpy." } */
void *a, *b;
diff --git a/gcc/testsuite/gcc.dg/Wint-conversion-3.c b/gcc/testsuite/gcc.dg/Wint-conversion-3.c
index 4e51476..4614c01 100644
--- a/gcc/testsuite/gcc.dg/Wint-conversion-3.c
+++ b/gcc/testsuite/gcc.dg/Wint-conversion-3.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "" } */
+/* { dg-options "-fpermissive" } */
const char *
f1 (int flag)
diff --git a/gcc/testsuite/gcc.dg/Wint-conversion-4.c b/gcc/testsuite/gcc.dg/Wint-conversion-4.c
new file mode 100644
index 0000000..6ded61a
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/Wint-conversion-4.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "" } */
+
+const char *
+f1 (int flag)
+{
+ return flag ? "" : 1; /* { dg-error "pointer/integer type mismatch in conditional expression \\\[-Wint-conversion\\\]" } */
+}
+
+const char *
+f2 (int flag)
+{
+ return flag ? 1 : ""; /* { dg-error "pointer/integer type mismatch in conditional expression \\\[-Wint-conversion\\\]" } */
+}
diff --git a/gcc/testsuite/gcc.dg/Wnonnull-4.c b/gcc/testsuite/gcc.dg/Wnonnull-4.c
index 1f14fbb..d63e76d 100644
--- a/gcc/testsuite/gcc.dg/Wnonnull-4.c
+++ b/gcc/testsuite/gcc.dg/Wnonnull-4.c
@@ -142,6 +142,7 @@ void test_fda_n_5 (int r_m1)
T ( 1); // { dg-bogus "argument 2 of variable length array 'double\\\[n]\\\[5]' is null but the corresponding bound argument 1 value is 1" }
T ( 9); // { dg-bogus "argument 2 of variable length array 'double\\\[n]\\\[5]' is null but the corresponding bound argument 1 value is 9" }
T (max); // { dg-bogus "argument 2 of variable length array 'double\\\[n]\\\[5]' is null but the corresponding bound argument 1 value is \\d+" }
+// { dg-warning "size 4294967256 exceeds maximum object size" "" { target ilp32 } .-1 }
}
diff --git a/gcc/testsuite/gcc.dg/Wreturn-mismatch-1.c b/gcc/testsuite/gcc.dg/Wreturn-mismatch-1.c
index 3bad847..aef6782 100644
--- a/gcc/testsuite/gcc.dg/Wreturn-mismatch-1.c
+++ b/gcc/testsuite/gcc.dg/Wreturn-mismatch-1.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "" } */
+/* { dg-options "-fpermissive" } */
void f1 (void);
diff --git a/gcc/testsuite/gcc.dg/Wreturn-mismatch-1a.c b/gcc/testsuite/gcc.dg/Wreturn-mismatch-1a.c
new file mode 100644
index 0000000..70c7c9d
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/Wreturn-mismatch-1a.c
@@ -0,0 +1,40 @@
+/* { dg-do compile } */
+/* { dg-options "" } */
+
+void f1 (void);
+
+int
+f2 (void)
+{
+ f1 ();
+}
+
+static inline int
+f3 (void)
+{
+ f1 ();
+}
+
+void
+f4 (void)
+{
+ return 1; /* { dg-error "'return' with a value\[^\n\r\]*-Wreturn-mismatch" } */
+}
+
+void
+f5 (void)
+{
+ return f1 (); /* { dg-bogus "ISO C" } */
+}
+
+int
+f6 (void)
+{
+ return; /* { dg-error "'return' with no value\[^\n\r\]*-Wreturn-mismatch" } */
+}
+
+int
+f7 (void)
+{
+ return f1 (); /* { dg-error "void value not ignored as it ought to be" } */
+}
diff --git a/gcc/testsuite/gcc.dg/Wreturn-mismatch-2.c b/gcc/testsuite/gcc.dg/Wreturn-mismatch-2.c
index 49eb5a5..0881102 100644
--- a/gcc/testsuite/gcc.dg/Wreturn-mismatch-2.c
+++ b/gcc/testsuite/gcc.dg/Wreturn-mismatch-2.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-Wall" } */
+/* { dg-options "-fpermissive -Wall" } */
void f1 (void);
diff --git a/gcc/testsuite/gcc.dg/Wreturn-mismatch-2a.c b/gcc/testsuite/gcc.dg/Wreturn-mismatch-2a.c
new file mode 100644
index 0000000..836651e
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/Wreturn-mismatch-2a.c
@@ -0,0 +1,41 @@
+/* { dg-do compile } */
+/* { dg-options "-Wall" } */
+
+void f1 (void);
+
+int
+f2 (void)
+{
+ f1 ();
+} /* { dg-warning "control reaches end of non-void\[^\n\r\]*-Wreturn-type" } */
+
+static inline int
+f3 (void)
+{
+ f1 ();
+} /* { dg-warning "no return statement in function\[^\n\r\]*-Wreturn-type" } */
+
+void
+f4 (void)
+{
+ return 1; /* { dg-error "with a value,\[^\n\r\]*-Wreturn-mismatch" } */
+}
+
+void
+f5 (void)
+{
+ return f1 ();
+}
+
+int
+f6 (void)
+{
+ return; /* { dg-error "with no value,\[^\n\r\]*Wreturn-mismatch" } */
+}
+
+int
+f7 (void)
+{
+ return f1 (); /* { dg-error "void value not ignored as it ought to be" } */
+} /* { dg-warning "control reaches end of non-void\[^\n\r\]*-Wreturn-type" } */
+
diff --git a/gcc/testsuite/gcc.dg/analyzer/fd-accept.c b/gcc/testsuite/gcc.dg/analyzer/fd-accept.c
index cce9555..d07ab15 100644
--- a/gcc/testsuite/gcc.dg/analyzer/fd-accept.c
+++ b/gcc/testsuite/gcc.dg/analyzer/fd-accept.c
@@ -65,7 +65,7 @@ int test_accept_on_accept (int fd_a)
if (fd_b == -1)
return -1;
- int fd_c = accept (fd_b, NULL, 0); /* { dg-warning "'accept' on file descriptor 'fd_b' in wrong phase \\\[-Wanalyzer-fd-phase-mismatch\\\]" "warning" } */
+ int fd_c = accept (fd_b, NULL, 0); /* { dg-warning "'accept' on file descriptor 'fd_b' in wrong phase \\\[CWE-666\\\] \\\[-Wanalyzer-fd-phase-mismatch\\\]" "warning" } */
/* { dg-message "'accept' expects a listening stream socket file descriptor but 'fd_b' is connected" "final event" { target *-*-* } .-1 } */
return fd_b;
diff --git a/gcc/testsuite/gcc.dg/analyzer/fd-bind.c b/gcc/testsuite/gcc.dg/analyzer/fd-bind.c
index 2a5cee5..2f69841 100644
--- a/gcc/testsuite/gcc.dg/analyzer/fd-bind.c
+++ b/gcc/testsuite/gcc.dg/analyzer/fd-bind.c
@@ -35,7 +35,7 @@ void test_double_bind (int fd, const char *sockname)
addr.sun_family = AF_UNIX;
strncpy (addr.sun_path, sockname, sizeof(addr.sun_path) - 1);
bind (fd, (struct sockaddr *)&addr, sizeof (addr));
- bind (fd, (struct sockaddr *)&addr, sizeof (addr)); /* { dg-warning "'bind' on file descriptor 'fd' in wrong phase \\\[-Wanalyzer-fd-phase-mismatch\\\]" "warning" } */
+ bind (fd, (struct sockaddr *)&addr, sizeof (addr)); /* { dg-warning "'bind' on file descriptor 'fd' in wrong phase \\\[CWE-666\\\] \\\[-Wanalyzer-fd-phase-mismatch\\\]" "warning" } */
/* { dg-message "'bind' expects a new socket file descriptor but 'fd' has already been bound" "final event" { target *-*-* } .-1 } */
}
@@ -71,7 +71,7 @@ void test_bind_after_accept (int fd, const char *sockname)
memset (&addr, 0, sizeof (addr));
addr.sun_family = AF_UNIX;
strncpy (addr.sun_path, sockname, sizeof(addr.sun_path) - 1);
- bind (afd, (struct sockaddr *)&addr, sizeof (addr)); /* { dg-warning "'bind' on file descriptor 'afd' in wrong phase \\\[-Wanalyzer-fd-phase-mismatch\\\]" "warning" } */
+ bind (afd, (struct sockaddr *)&addr, sizeof (addr)); /* { dg-warning "'bind' on file descriptor 'afd' in wrong phase \\\[CWE-666\\\] \\\[-Wanalyzer-fd-phase-mismatch\\\]" "warning" } */
/* { dg-message "'bind' expects a new socket file descriptor but 'afd' is already connected" "final event" { target *-*-* } .-1 } */
close (afd);
diff --git a/gcc/testsuite/gcc.dg/analyzer/fd-socket-misuse.c b/gcc/testsuite/gcc.dg/analyzer/fd-socket-misuse.c
index 87e8967..9149486 100644
--- a/gcc/testsuite/gcc.dg/analyzer/fd-socket-misuse.c
+++ b/gcc/testsuite/gcc.dg/analyzer/fd-socket-misuse.c
@@ -18,7 +18,7 @@ void test_read_on_new_socket (void *buf)
int fd = socket (AF_UNIX, SOCK_STREAM, 0); /* { dg-message "stream socket created here" } */
if (fd == -1)
return;
- read (fd, buf, 1); /* { dg-warning "'read' on file descriptor 'fd' in wrong phase \\\[-Wanalyzer-fd-phase-mismatch\\\]" "warning" } */
+ read (fd, buf, 1); /* { dg-warning "'read' on file descriptor 'fd' in wrong phase \\\[CWE-666\\\] \\\[-Wanalyzer-fd-phase-mismatch\\\]" "warning" } */
/* { dg-message "'read' expects a stream socket to be connected via 'accept' but 'fd' has not yet been bound" "final event" { target *-*-* } .-1 } */
close (fd);
}
diff --git a/gcc/testsuite/gcc.dg/anon-struct-11.c b/gcc/testsuite/gcc.dg/anon-struct-11.c
index c2f85fc..622fb7c 100644
--- a/gcc/testsuite/gcc.dg/anon-struct-11.c
+++ b/gcc/testsuite/gcc.dg/anon-struct-11.c
@@ -1,8 +1,7 @@
/* { dg-do compile } */
-/* No special options--in particular, turn off the default
- -pedantic-errors option. */
-/* { dg-options "" } */
+/* Also turn off the default -pedantic-errors option. */
+/* { dg-options "-fpermissive" } */
/* When not using -fplan9-extensions, we don't support automatic
conversion of pointer types, and we don't support referring to a
diff --git a/gcc/testsuite/gcc.dg/anon-struct-11a.c b/gcc/testsuite/gcc.dg/anon-struct-11a.c
new file mode 100644
index 0000000..9ee3bb6
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/anon-struct-11a.c
@@ -0,0 +1,111 @@
+/* { dg-do compile } */
+
+/* No special options--in particular, turn off the default
+ -pedantic-errors option. */
+/* { dg-options "" } */
+
+/* When not using -fplan9-extensions, we don't support automatic
+ conversion of pointer types, and we don't support referring to a
+ typedef name directly. */
+
+extern void exit (int);
+extern void abort (void);
+
+struct A { char a; };
+
+struct B {
+ char b;
+ struct A; /* { dg-warning "does not declare anything" } */
+ char c;
+};
+
+void
+f1 (struct A *p) /* { dg-message "expected" } */
+{
+ p->a = 1;
+}
+
+void
+test1 (void)
+{
+ struct B b;
+ struct A *p;
+
+ b.b = 2;
+ b.c = 3;
+ f1 (&b); /* { dg-error "incompatible pointer type" } */
+ if (b.a != 1) /* { dg-error "no member" } */
+ abort ();
+ if (b.b != 2 || b.c != 3)
+ abort ();
+ p = &b; /* { dg-error "incompatible pointer type" } */
+ if (p->a != 1)
+ abort ();
+}
+
+typedef struct { char d; } D;
+
+struct E {
+ char b;
+ struct F { char f; }; /* { dg-warning "does not declare anything" } */
+ char c;
+ union {
+ D; /* { dg-warning "does not declare anything" } */
+ };
+ char e;
+};
+
+void
+f2 (struct F *p) /* { dg-message "expected" } */
+{
+ p->f = 6;
+}
+
+void
+f3 (D *p) /* { dg-message "expected" } */
+{
+ p->d = 4;
+}
+
+void
+f4 (D d)
+{
+}
+
+void
+test2 (void)
+{
+ struct E e;
+ struct F *pf;
+ D *pd;
+ D d;
+
+ e.b = 2;
+ e.c = 3;
+ e.e = 5;
+ f2 (&e); /* { dg-error "incompatible pointer type" } */
+ f3 (&e); /* { dg-error "incompatible pointer type" } */
+ if (e.d != 4) /* { dg-error "no member" } */
+ abort ();
+ if (e.f != 6) /* { dg-error "no member" } */
+ abort ();
+ if (e.b != 2 || e.c != 3 || e.e != 5)
+ abort ();
+ pf = &e; /* { dg-error "incompatible pointer type" } */
+ if (pf->f != 6)
+ abort ();
+ pd = &e; /* { dg-error "incompatible pointer type" } */
+ if (pd->d != 4)
+ abort ();
+ d = e.D; /* { dg-error "no member" } */
+ f3 (&e.D); /* { dg-error "no member" } */
+ f4 (e.D); /* { dg-error "no member" } */
+}
+
+int
+main ()
+{
+ test1 ();
+ test2 ();
+ exit (0);
+}
diff --git a/gcc/testsuite/gcc.dg/anon-struct-13.c b/gcc/testsuite/gcc.dg/anon-struct-13.c
index 6a50814..de478b9 100644
--- a/gcc/testsuite/gcc.dg/anon-struct-13.c
+++ b/gcc/testsuite/gcc.dg/anon-struct-13.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-fplan9-extensions" } */
+/* { dg-options "-fpermissive -fplan9-extensions" } */
/* Test for ambiguity when using the Plan 9 extensions. */
diff --git a/gcc/testsuite/gcc.dg/anon-struct-13a.c b/gcc/testsuite/gcc.dg/anon-struct-13a.c
new file mode 100644
index 0000000..9e6c5d9
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/anon-struct-13a.c
@@ -0,0 +1,76 @@
+/* { dg-do compile } */
+/* { dg-options "-fplan9-extensions" } */
+
+/* Test for ambiguity when using the Plan 9 extensions. */
+
+struct A {
+ char a; /* { dg-error "duplicate member" } */
+};
+
+struct B
+{
+ struct A;
+ struct A;
+};
+
+char
+f1 (struct B *p)
+{
+ return p->a; /* { dg-error "no member" } */
+}
+
+void
+f2 (struct A *p) /* { dg-message "expected" } */
+{
+}
+
+void
+f3 (struct B *p)
+{
+ f2 (p); /* { dg-error "incompatible pointer type" } */
+}
+
+struct C
+{
+ char c; /* { dg-error "duplicate member" } */
+};
+
+struct D
+{
+ struct C;
+};
+
+struct E
+{
+ struct C;
+ struct D;
+};
+
+char
+f4 (struct E *p)
+{
+ return p->c; /* { dg-error "no member" } */
+}
+
+void
+f6 (struct C *p) /* { dg-message "expected" } */
+{
+}
+
+void
+f7 (struct E *p)
+{
+ f6 (p); /* { dg-error "incompatible pointer type" } */
+}
+
+struct A
+f8 (struct B *p)
+{
+ return p->A; /* { dg-error "no member" } */
+}
+
+struct C
+f9 (struct E *p)
+{
+ return p->C; /* { dg-error "no member" } */
+}
diff --git a/gcc/testsuite/gcc.dg/assign-warn-1.c b/gcc/testsuite/gcc.dg/assign-warn-1.c
index 3650257..c483276 100644
--- a/gcc/testsuite/gcc.dg/assign-warn-1.c
+++ b/gcc/testsuite/gcc.dg/assign-warn-1.c
@@ -1,7 +1,7 @@
/* Test diagnostics for bad implicit type conversions. */
/* Origin: Joseph Myers <jsm@polyomino.org.uk> */
/* { dg-do compile } */
-/* { dg-options "-pedantic -ftrack-macro-expansion=0" } */
+/* { dg-options "-pedantic -fpermissive -ftrack-macro-expansion=0" } */
#define TESTARG(ID, TL, TR) void ID##F(TL); void ID##F2(TR x) { ID##F(x); } extern int dummy
#define TESTARP(ID, TL, TR) struct { void (*x)(TL); } ID##Fp; void ID##F2(TR x) { ID##Fp.x(x); } extern int dummy
diff --git a/gcc/testsuite/gcc.dg/assign-warn-4.c b/gcc/testsuite/gcc.dg/assign-warn-4.c
new file mode 100644
index 0000000..da834f7
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/assign-warn-4.c
@@ -0,0 +1,21 @@
+/* Test diagnostics for bad implicit type conversions. Error variant. */
+/* { dg-do compile } */
+/* { dg-options "-ftrack-macro-expansion=0" } */
+
+#define TESTARG(ID, TL, TR) void ID##F(TL); void ID##F2(TR x) { ID##F(x); } extern int dummy
+#define TESTARP(ID, TL, TR) struct { void (*x)(TL); } ID##Fp; void ID##F2(TR x) { ID##Fp.x(x); } extern int dummy
+#define TESTASS(ID, TL, TR) void ID##F(TR x) { TL y; y = x; } extern int dummy
+#define TESTINI(ID, TL, TR) void ID##F(TR x) { TL y = x; } extern int dummy
+#define TESTRET(ID, TL, TR) TR ID##V; TL ID##F(void) { return ID##V; } extern int dummy
+
+TESTARG(ciia, char *, int); /* { dg-error "passing argument 1 of 'ciiaF' makes pointer from integer without a cast" } */
+TESTARP(ciib, char *, int); /* { dg-error "passing argument 1 of 'ciibFp.x' makes pointer from integer without a cast" } */
+TESTASS(ciic, char *, int); /* { dg-error "assignment to 'char \\*' from 'int' makes pointer from integer without a cast" } */
+TESTINI(ciid, char *, int); /* { dg-error "initialization of 'char \\*' from 'int' makes pointer from integer without a cast" } */
+TESTRET(ciie, char *, int); /* { dg-error "returning 'int' from a function with return type 'char \\*' makes pointer from integer without a cast" } */
+
+TESTARG(iica, int, char *); /* { dg-error "passing argument 1 of 'iicaF' makes integer from pointer without a cast" } */
+TESTARP(iicb, int, char *); /* { dg-error "passing argument 1 of 'iicbFp.x' makes integer from pointer without a cast" } */
+TESTASS(iicc, int, char *); /* { dg-error "assignment to 'int' from 'char \\*' makes integer from pointer without a cast" } */
+TESTINI(iicd, int, char *); /* { dg-error "initialization of 'int' from 'char \\*' makes integer from pointer without a cast" } */
+TESTRET(iice, int, char *); /* { dg-error "returning 'char \\*' from a function with return type 'int' makes integer from pointer without a cast" } */
diff --git a/gcc/testsuite/gcc.dg/bitint-41.c b/gcc/testsuite/gcc.dg/bitint-41.c
index d87ea08..f97f03c 100644
--- a/gcc/testsuite/gcc.dg/bitint-41.c
+++ b/gcc/testsuite/gcc.dg/bitint-41.c
@@ -1,6 +1,6 @@
/* PR middle-end/112336 */
/* { dg-do compile { target bitint } } */
-/* { dg-options "-std=c2x" } */
+/* { dg-options "-std=c23" } */
unsigned _BitInt(1) v1;
unsigned _BitInt(1) *p1 = &v1;
diff --git a/gcc/testsuite/gcc.dg/bitint-43.c b/gcc/testsuite/gcc.dg/bitint-43.c
new file mode 100644
index 0000000..3a506b3
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/bitint-43.c
@@ -0,0 +1,19 @@
+/* PR tree-optimization/112719 */
+/* { dg-do compile { target bitint } } */
+/* { dg-options "-O2" } */
+
+#if __BITINT_MAXWIDTH__ >= 252
+int
+foo (unsigned _BitInt(239) x, unsigned _BitInt(252) y)
+{
+ x &= 0x2aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaauwb;
+ y &= 0x555555555555555555555555555555555555555555555555555555555555555uwb;
+ return __builtin_popcountg (x) + __builtin_popcountg (y);
+}
+
+int
+bar (unsigned _BitInt(239) x, unsigned _BitInt(252) y)
+{
+ return __builtin_parityg (x) ^ __builtin_parityg (y);
+}
+#endif
diff --git a/gcc/testsuite/gcc.dg/bitint-44.c b/gcc/testsuite/gcc.dg/bitint-44.c
new file mode 100644
index 0000000..d1f34d0
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/bitint-44.c
@@ -0,0 +1,10 @@
+/* PR middle-end/112771 */
+/* { dg-do compile { target bitint575 } } */
+/* { dg-options "-std=c23" } */
+
+_BitInt(575)
+foo (_BitInt(575) a)
+{
+ a /= 0; /* { dg-warning "division by zero" } */
+ return a;
+}
diff --git a/gcc/testsuite/gcc.dg/bitint-45.c b/gcc/testsuite/gcc.dg/bitint-45.c
new file mode 100644
index 0000000..4dfb92d
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/bitint-45.c
@@ -0,0 +1,11 @@
+/* PR middle-end/112770 */
+/* { dg-do compile { target bitint128 } } */
+/* { dg-options "-std=c23 -fnon-call-exceptions" } */
+
+void
+foo (void)
+{
+ _BitInt(128) a = 0;
+ a /= 0; /* { dg-warning "division by zero" } */
+ &a;
+}
diff --git a/gcc/testsuite/gcc.dg/bitint-46.c b/gcc/testsuite/gcc.dg/bitint-46.c
new file mode 100644
index 0000000..4e50337
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/bitint-46.c
@@ -0,0 +1,32 @@
+/* PR middle-end/112807 */
+/* { dg-do compile { target bitint } } */
+/* { dg-options "-std=gnu23 -O2" } */
+
+#if __BITINT_MAXWIDTH__ >= 256
+__attribute__((noipa)) int
+foo (_BitInt (256) a, _BitInt (2) b)
+{
+ if (a < 0 || a > ~0U)
+ return -1;
+ return __builtin_sub_overflow_p (a, b, 0);
+}
+#endif
+
+int
+main ()
+{
+#if __BITINT_MAXWIDTH__ >= 256
+ if (foo (-5wb, 1wb) != -1
+ || foo (1 + (_BitInt (256)) ~0U, -2) != -1
+ || foo (0, 0) != 0
+ || foo (0, 1) != 0
+ || foo (0, -1) != 0
+ || foo (~0U, 0) != 1
+ || foo (__INT_MAX__, 0) != 0
+ || foo (__INT_MAX__, -1) != 1
+ || foo (1 + (_BitInt (256)) __INT_MAX__, 0) != 1
+ || foo (1 + (_BitInt (256)) __INT_MAX__, 1) != 0
+ || foo (1 + (_BitInt (256)) __INT_MAX__, -2) != 1)
+ __builtin_abort ();
+#endif
+}
diff --git a/gcc/testsuite/gcc.dg/bitint-47.c b/gcc/testsuite/gcc.dg/bitint-47.c
new file mode 100644
index 0000000..d5082ba
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/bitint-47.c
@@ -0,0 +1,13 @@
+/* PR tree-optimization/112843 */
+/* { dg-do compile { target bitint } } */
+/* { dg-options "-O2" } */
+
+#if __BITINT_MAXWIDTH__ >= 256
+_BitInt (256)
+foo (_BitInt (128) x, _BitInt (256) y)
+{
+ return x * 5 * y;
+}
+#else
+int x;
+#endif
diff --git a/gcc/testsuite/gcc.dg/bitint-48.c b/gcc/testsuite/gcc.dg/bitint-48.c
new file mode 100644
index 0000000..8701ebb
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/bitint-48.c
@@ -0,0 +1,23 @@
+/* PR tree-optimization/112809 */
+/* { dg-do compile { target bitint } } */
+/* { dg-options "-O2" } */
+
+#if __BITINT_MAXWIDTH__ >= 512
+_BitInt (512) a;
+_BitInt (256) b;
+_BitInt (256) c;
+
+int
+foo (void)
+{
+ return a == (b | c);
+}
+
+void
+bar (void)
+{
+ a /= b - 2;
+}
+#else
+int i;
+#endif
diff --git a/gcc/testsuite/gcc.dg/builtin-arith-overflow-4.c b/gcc/testsuite/gcc.dg/builtin-arith-overflow-4.c
index ab7d82a..4c2c89f 100644
--- a/gcc/testsuite/gcc.dg/builtin-arith-overflow-4.c
+++ b/gcc/testsuite/gcc.dg/builtin-arith-overflow-4.c
@@ -1,6 +1,6 @@
/* PR c/90628 */
/* { dg-do compile } */
-/* { dg-options "" } */
+/* { dg-options "-fpermissive" } */
_Atomic int a = 1, b = 2, c = 3;
_Atomic long d = 4, e = 5, f = 6;
diff --git a/gcc/testsuite/gcc.dg/builtin-arith-overflow-4a.c b/gcc/testsuite/gcc.dg/builtin-arith-overflow-4a.c
new file mode 100644
index 0000000..c021cab
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/builtin-arith-overflow-4a.c
@@ -0,0 +1,43 @@
+/* PR c/90628 */
+/* { dg-do compile } */
+/* { dg-options "" } */
+
+_Atomic int a = 1, b = 2, c = 3;
+_Atomic long d = 4, e = 5, f = 6;
+_Atomic long long g = 7, h = 8, i = 9;
+
+void
+f1 ()
+{
+ __builtin_add_overflow (a, b, &c); /* { dg-error "argument 3 in call to function '__builtin_add_overflow' has pointer to '_Atomic' type" } */
+}
+
+void
+f2 ()
+{
+ __builtin_sub_overflow (d, e, &f); /* { dg-error "argument 3 in call to function '__builtin_sub_overflow' has pointer to '_Atomic' type" } */
+}
+
+void
+f3 ()
+{
+ __builtin_mul_overflow (g, h, &i); /* { dg-error "argument 3 in call to function '__builtin_mul_overflow' has pointer to '_Atomic' type" } */
+}
+
+void
+f4 ()
+{
+ __builtin_sadd_overflow (a, b, &c); /* { dg-error "passing argument 3 of '__builtin_sadd_overflow' from incompatible pointer type" } */
+}
+
+void
+f5 ()
+{
+ __builtin_ssubl_overflow (d, e, &f); /* { dg-error "passing argument 3 of '__builtin_ssubl_overflow' from incompatible pointer type" } */
+}
+
+void
+f6 ()
+{
+ __builtin_smulll_overflow (g, h, &i); /* { dg-error "passing argument 3 of '__builtin_smulll_overflow' from incompatible pointer type" } */
+}
diff --git a/gcc/testsuite/gcc.dg/c23-qual-4.c b/gcc/testsuite/gcc.dg/c23-qual-4.c
index 8a7a9f2..a8538de 100644
--- a/gcc/testsuite/gcc.dg/c23-qual-4.c
+++ b/gcc/testsuite/gcc.dg/c23-qual-4.c
@@ -83,9 +83,9 @@ void test(void)
(void)(1 ? x0 : z0);
(void)(1 ? x1 : z1);
(void)(1 ? x2 : z2);
- (void)(1 ? x0 : x1); /* { dg-warning "pointer type mismatch in conditional expression" } */
- (void)(1 ? x1 : x2); /* { dg-warning "pointer type mismatch in conditional expression" } */
- (void)(1 ? x2 : x0); /* { dg-warning "pointer type mismatch in conditional expression" } */
+ (void)(1 ? x0 : x1); /* { dg-error "pointer type mismatch in conditional expression" } */
+ (void)(1 ? x1 : x2); /* { dg-error "pointer type mismatch in conditional expression" } */
+ (void)(1 ? x2 : x0); /* { dg-error "pointer type mismatch in conditional expression" } */
v0p = (1 ? z0 : v0p); /* { dg-warning "assignment discards 'const' qualifier from pointer target type" } */
v1p = (1 ? z1 : v1p); /* { dg-warning "assignment discards 'const' qualifier from pointer target type" } */
v2p = (1 ? z2 : v2p); /* { dg-warning "assignment discards 'const' qualifier from pointer target type" } */
diff --git a/gcc/testsuite/gcc.dg/cpp/expr.c b/gcc/testsuite/gcc.dg/cpp/expr.c
index 532bd68..055e17a 100644
--- a/gcc/testsuite/gcc.dg/cpp/expr.c
+++ b/gcc/testsuite/gcc.dg/cpp/expr.c
@@ -1,6 +1,7 @@
/* Copyright (C) 2000, 2001 Free Software Foundation, Inc. */
/* { dg-do preprocess } */
+/* { dg-additional-options "-Wall" } */
/* Test we get signedness of ?: operator correct. We would skip
evaluation of one argument, and might therefore not transfer its
@@ -8,10 +9,27 @@
/* Neil Booth, 19 Jul 2002. */
-#if (1 ? -2: 0 + 1U) < 0
+#if (1 ? -2: 0 + 1U) < 0 /* { dg-warning {the left operand of ":" changes sign} } */
#error /* { dg-bogus "error" } */
#endif
-#if (0 ? 0 + 1U: -2) < 0
+#if (0 ? 0 + 1U: -2) < 0 /* { dg-warning {the right operand of ":" changes sign} } */
#error /* { dg-bogus "error" } */
#endif
+
+/* PR preprocessor/112701 */
+#if (0 ? 0/0u : -1) < 0 /* { dg-warning {the right operand of ":" changes sign} } */
+#error /* { dg-bogus "error" } */
+#endif
+
+#if (0 ? 0u/0 : -1) < 0 /* { dg-warning {the right operand of ":" changes sign} } */
+#error /* { dg-bogus "error" } */
+#endif
+
+#if (1 ? -1 : 0/0u) < 0 /* { dg-warning {the left operand of ":" changes sign} } */
+#error /* { dg-bogus "error" } */
+#endif
+
+#if (1 ? -1 : 0u/0) < 0 /* { dg-warning {the left operand of ":" changes sign} } */
+#error /* { dg-bogus "error" } */
+#endif
diff --git a/gcc/testsuite/gcc.dg/debug/btf/btf-datasec-3.c b/gcc/testsuite/gcc.dg/debug/btf/btf-datasec-3.c
new file mode 100644
index 0000000..297340c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/debug/btf/btf-datasec-3.c
@@ -0,0 +1,28 @@
+/* PR debug/112849
+ Test that we do not incorrectly create BTF_KIND_DATASEC entries for
+ extern decls with no known section. */
+
+/* { dg-do compile } */
+/* { dg-options "-O0 -gbtf -dA" } */
+
+extern int VERSION __attribute__((section (".version")));
+
+extern int test_bss1;
+extern int test_data1;
+
+int test_bss2;
+int test_data2 = 2;
+
+int
+foo (void)
+{
+ test_bss2 = VERSION;
+ return test_bss1 + test_data1 + test_data2;
+}
+
+/* There should be 3 DATASEC entries total. Of the extern decls, only VERSION
+ has a known section; entries are not created for the other two. */
+/* { dg-final { scan-assembler-times "bts_type" 3 } } */
+/* { dg-final { scan-assembler-times "bts_type: \\(BTF_KIND_VAR 'test_data2'\\)" 1 } } */
+/* { dg-final { scan-assembler-times "bts_type: \\(BTF_KIND_VAR 'test_bss2'\\)" 1 } } */
+/* { dg-final { scan-assembler-times "bts_type: \\(BTF_KIND_VAR 'VERSION'\\)" 1 } } */
diff --git a/gcc/testsuite/gcc.dg/debug/btf/btf-enum-small.c b/gcc/testsuite/gcc.dg/debug/btf/btf-enum-small.c
new file mode 100644
index 0000000..eb8a1bd
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/debug/btf/btf-enum-small.c
@@ -0,0 +1,28 @@
+/* Test BTF generation for small enums. */
+
+/* { dg-do compile } */
+/* { dg-options "-O2 -gbtf -dA" } */
+
+/* { dg-final { scan-assembler-not "bte_value_lo32" } } */
+/* { dg-final { scan-assembler-not "bte_value_hi32" } } */
+/* { dg-final { scan-assembler-times "\[\t \]0x6000002\[\t \]+\[^\n\]*btt_info" 1 } } */
+/* { dg-final { scan-assembler-times " ENUM_CONST 'eSMALL' idx=0" 1 } } */
+/* { dg-final { scan-assembler-times " ENUM_CONST 'eSMALLY' idx=1" 1 } } */
+/* { dg-final { scan-assembler-times "ascii \"eSMALL.0\"\[\t \]+\[^\n\]*btf_string" 1 } } */
+/* { dg-final { scan-assembler-times "ascii \"eSMALLY.0\"\[\t \]+\[^\n\]*btf_string" 1 } } */
+/* { dg-final { scan-assembler-times "bte_value" 2 } } */
+
+enum smalled_enum
+{
+ eSMALL,
+ eSMALLY,
+} __attribute__((mode(byte)));
+
+struct root_struct {
+ enum smalled_enum esmall;
+};
+
+enum smalled_enum
+foo(struct root_struct *root) {
+ return root->esmall;
+}
diff --git a/gcc/testsuite/gcc.dg/debug/btf/btf-function-6.c b/gcc/testsuite/gcc.dg/debug/btf/btf-function-6.c
index e014d99..802bc6d 100644
--- a/gcc/testsuite/gcc.dg/debug/btf/btf-function-6.c
+++ b/gcc/testsuite/gcc.dg/debug/btf/btf-function-6.c
@@ -6,8 +6,8 @@
/* { dg-do compile } */
/* { dg-options "-O0 -gbtf -dA" } */
-/* { dg-final { scan-assembler-times " BTF_KIND_FUNC\[^\\r\\n\]*\[\\r\\n\]+\[^\\r\\n\]*linkage=2\[\\r\\n\]+\[^\\r\\n\]*\\(BTF_KIND_FUNC_PROTO 'extfunc'" 1 } } */
-/* { dg-final { scan-assembler-times " BTF_KIND_FUNC\[^\\r\\n\]*\[\\r\\n\]+\[^\\r\\n\]*linkage=1\[\\r\\n\]+\[^\\r\\n\]*\\(BTF_KIND_FUNC_PROTO 'foo'" 1 } } */
+/* { dg-final { scan-assembler-times " BTF_KIND_FUNC\[^\\r\\n\]*\[\\r\\n\]+\[^\\r\\n\]*linkage=2\[\\r\\n\]+\[^\\r\\n\]*\\(BTF_KIND_FUNC_PROTO ''" 1 } } */
+/* { dg-final { scan-assembler-times " BTF_KIND_FUNC\[^\\r\\n\]*\[\\r\\n\]+\[^\\r\\n\]*linkage=1\[\\r\\n\]+\[^\\r\\n\]*\\(BTF_KIND_FUNC_PROTO ''" 1 } } */
extern int extfunc(int a, int b);
diff --git a/gcc/testsuite/gcc.dg/debug/btf/btf-function-7.c b/gcc/testsuite/gcc.dg/debug/btf/btf-function-7.c
new file mode 100644
index 0000000..b560dc7
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/debug/btf/btf-function-7.c
@@ -0,0 +1,19 @@
+/* Test BTF for inlined functions.
+
+ See PR/112656 - btf: function prototypes generated with name
+ BTF_KIND_FUNC_PROTO must be anonymous. */
+
+/* { dg-do compile } */
+/* { dg-options "-O2 -gbtf -dA" } */
+
+/* { dg-final { scan-assembler-times "BTF_KIND_FUNC_PROTO ''\\(\[0-9a-z\]*\\)'" 0 } } */
+
+static int log_event(const char *event_name, void *dev_ptr)
+{
+ return 666;
+}
+
+int foo ()
+{
+ return log_event ("foobar", ((void *)0));
+}
diff --git a/gcc/testsuite/gcc.dg/dfp/composite-type-2.c b/gcc/testsuite/gcc.dg/dfp/composite-type-2.c
new file mode 100644
index 0000000..5c6d95c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/dfp/composite-type-2.c
@@ -0,0 +1,58 @@
+/* { dg-do compile } */
+/* { dg-options "-O -Wall -ftrack-macro-expansion=0" } */
+
+/* C99 6.2.7: Compatible type and composite type. */
+
+#define DECIMAL_COMPOSITE_DECL(TYPE) \
+ _Decimal##TYPE g1_##TYPE(); \
+ _Decimal##TYPE g2_##TYPE(); \
+ _Decimal##TYPE (*h1_##TYPE)[2]; \
+ _Decimal##TYPE (*h2_##TYPE)[3]; \
+ _Decimal##TYPE (*h3_##TYPE)[4]; \
+ _Decimal##TYPE f1_##TYPE(_Decimal##TYPE(*)()); \
+ _Decimal##TYPE f1_##TYPE(_Decimal##TYPE(*)(_Decimal##TYPE*)); \
+ _Decimal##TYPE f1_##TYPE (_Decimal##TYPE(*g)(_Decimal##TYPE*)) \
+ { \
+ _Decimal##TYPE d##TYPE; \
+ d##TYPE = ((_Decimal##TYPE (*) (_Decimal##TYPE*)) g)(&d##TYPE); \
+ d##TYPE = ((_Decimal##TYPE (*) ()) g); \
+ return d##TYPE; \
+ } \
+ _Decimal##TYPE f2_##TYPE(_Decimal##TYPE(*)[]); \
+ _Decimal##TYPE f2_##TYPE(_Decimal##TYPE(*)[3]);
+
+#define DECIMAL_COMPOSITE_TEST(TYPE) \
+do \
+{ \
+ _Decimal##TYPE d##TYPE; \
+ d##TYPE = f1_##TYPE(g1_##TYPE); \
+ d##TYPE = f1_##TYPE(g2_##TYPE); \
+ d##TYPE = f2_##TYPE(h1_##TYPE); \
+ d##TYPE = f2_##TYPE(h2_##TYPE); \
+ d##TYPE = f2_##TYPE(h3_##TYPE); \
+ (void) d##TYPE; \
+} while(0)
+
+DECIMAL_COMPOSITE_DECL(32); /* { dg-error "incompatible types when assigning to type '\[^\n\]*' from type '\[^\n\]*'" } */
+/* { dg-message "note: expected '\[^'\n\]*' but argument is of type '\[^'\n\]*'" "note: expected" { target *-*-* } .-1 } */
+
+
+DECIMAL_COMPOSITE_DECL(64); /* { dg-error "incompatible types when assigning to type '\[^\n\]*' from type '\[^\n\]*'" } */
+/* { dg-message "note: expected '\[^'\n\]*' but argument is of type '\[^'\n\]*'" "note: expected" { target *-*-* } .-1 } */
+
+
+DECIMAL_COMPOSITE_DECL(128); /* { dg-error "incompatible types when assigning to type '\[^\n\]*' from type '\[^\n\]*'" } */
+/* { dg-message "note: expected '\[^'\n\]*' but argument is of type '\[^'\n\]*'" "note: expected" { target *-*-* } .-1 } */
+
+
+int main()
+{
+ DECIMAL_COMPOSITE_TEST(32); /* { dg-error "incompatible pointer type" } */
+ DECIMAL_COMPOSITE_TEST(64); /* { dg-error "incompatible pointer type" } */
+ DECIMAL_COMPOSITE_TEST(128); /* { dg-error "incompatible pointer type" } */
+
+ return 0;
+}
+
+/* The invalid function redeclarations might also trigger:
+ { dg-prune-output "-Warray-parameter" } */
diff --git a/gcc/testsuite/gcc.dg/dfp/composite-type.c b/gcc/testsuite/gcc.dg/dfp/composite-type.c
index ce7d5c1..2eb6014 100644
--- a/gcc/testsuite/gcc.dg/dfp/composite-type.c
+++ b/gcc/testsuite/gcc.dg/dfp/composite-type.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O -Wall -ftrack-macro-expansion=0" } */
+/* { dg-options "-fpermissive -O -Wall -ftrack-macro-expansion=0" } */
/* C99 6.2.7: Compatible type and composite type. */
diff --git a/gcc/testsuite/gcc.dg/diag-aka-1.c b/gcc/testsuite/gcc.dg/diag-aka-1.c
index 3383c1c..485a8a5 100644
--- a/gcc/testsuite/gcc.dg/diag-aka-1.c
+++ b/gcc/testsuite/gcc.dg/diag-aka-1.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-Wc++-compat" } */
+/* { dg-options "-fpermissive -Wc++-compat" } */
typedef struct A { int i; } B;
typedef struct T { int i; } *T; /* { dg-warning "using 'T' as both a typedef and a tag is invalid" } */
diff --git a/gcc/testsuite/gcc.dg/diag-aka-1a.c b/gcc/testsuite/gcc.dg/diag-aka-1a.c
new file mode 100644
index 0000000..d161b78
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/diag-aka-1a.c
@@ -0,0 +1,29 @@
+/* { dg-do compile } */
+/* { dg-options "-Wc++-compat" } */
+
+typedef struct A { int i; } B;
+typedef struct T { int i; } *T; /* { dg-warning "using 'T' as both a typedef and a tag is invalid" } */
+typedef const float TFA;
+typedef TFA TFB;
+typedef TFB TFC;
+typedef int IA[];
+typedef IA *IAP;
+extern IAP arr[];
+
+void fn1 (B *); /* { dg-message "expected 'B \\*' {aka 'struct A \\*'} but argument is of type 'struct B \\*'" } */
+void fn2 (TFC *);
+
+void
+bar (B *b, int *i)
+{
+ fn1 ((struct B *) b); /* { dg-error "passing argument" } */
+ fn2 (i); /* { dg-error "passing argument" } */
+ sizeof (arr); /* { dg-error "invalid application of .sizeof. to incomplete type .int \\(\\*\\\[\\\]\\)\\\[\\\]." } */
+}
+
+int
+foo (void *a)
+{
+ T t = a; /* { dg-warning "request for implicit conversion from 'void \\*' to 'T' {aka 'struct T \\*'} not" } */
+ return t->i;
+}
diff --git a/gcc/testsuite/gcc.dg/diagnostic-range-bad-return-2.c b/gcc/testsuite/gcc.dg/diagnostic-range-bad-return-2.c
new file mode 100644
index 0000000..2fe8d34
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/diagnostic-range-bad-return-2.c
@@ -0,0 +1,52 @@
+/* { dg-options "-fdiagnostics-show-caret -Wreturn-local-addr" } */
+
+int *address_of_local (void)
+{
+ int some_local;
+ return &some_local; /* { dg-warning "function returns address of local variable" } */
+/* { dg-begin-multiline-output "" }
+ return &some_local;
+ ^~~~~~~~~~~
+ { dg-end-multiline-output "" } */
+}
+
+void surplus_return_when_void_1 (void)
+{
+ return 500; /* { dg-error "'return' with a value, in function returning void" } */
+/* { dg-begin-multiline-output "" }
+ return 500;
+ ^~~
+ { dg-end-multiline-output "" } */
+/* { dg-begin-multiline-output "" }
+ void surplus_return_when_void_1 (void)
+ ^~~~~~~~~~~~~~~~~~~~~~~~~~
+ { dg-end-multiline-output "" } */
+}
+
+void surplus_return_when_void_2 (int i, int j)
+{
+ return i * j; /* { dg-error "'return' with a value, in function returning void" } */
+/* { dg-begin-multiline-output "" }
+ return i * j;
+ ~~^~~
+ { dg-end-multiline-output "" } */
+/* { dg-begin-multiline-output "" }
+ void surplus_return_when_void_2 (int i, int j)
+ ^~~~~~~~~~~~~~~~~~~~~~~~~~
+ { dg-end-multiline-output "" } */
+}
+
+int missing_return_value (void)
+{
+ return; /* { dg-error "'return' with no value, in function returning non-void" } */
+/* { dg-begin-multiline-output "" }
+ return;
+ ^~~~~~
+ { dg-end-multiline-output "" } */
+/* { dg-begin-multiline-output "" }
+ int missing_return_value (void)
+ ^~~~~~~~~~~~~~~~~~~~
+ { dg-end-multiline-output "" } */
+/* TODO: ideally we'd underline the return type i.e. "int", but that
+ location isn't captured. */
+}
diff --git a/gcc/testsuite/gcc.dg/diagnostic-range-bad-return.c b/gcc/testsuite/gcc.dg/diagnostic-range-bad-return.c
index 063fdf1..b74481b 100644
--- a/gcc/testsuite/gcc.dg/diagnostic-range-bad-return.c
+++ b/gcc/testsuite/gcc.dg/diagnostic-range-bad-return.c
@@ -1,4 +1,4 @@
-/* { dg-options "-fdiagnostics-show-caret -Wreturn-local-addr" } */
+/* { dg-options "-fpermissive -fdiagnostics-show-caret -Wreturn-local-addr" } */
int *address_of_local (void)
{
diff --git a/gcc/testsuite/gcc.dg/diagnostic-types-1.c b/gcc/testsuite/gcc.dg/diagnostic-types-1.c
index fc4b104..94b67c6 100644
--- a/gcc/testsuite/gcc.dg/diagnostic-types-1.c
+++ b/gcc/testsuite/gcc.dg/diagnostic-types-1.c
@@ -1,6 +1,6 @@
/* PR c/81233 */
/* { dg-do compile } */
-/* { dg-options "-Wc++-compat -Wpedantic" } */
+/* { dg-options "-fpermissive -Wc++-compat -Wpedantic" } */
/* Test we're printing the types, like the good compiler we are. */
enum E1 { A } e;
diff --git a/gcc/testsuite/gcc.dg/diagnostic-types-2.c b/gcc/testsuite/gcc.dg/diagnostic-types-2.c
new file mode 100644
index 0000000..e6d284d
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/diagnostic-types-2.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-options "-Wpedantic" } */
+/* Test we're printing the types, like the good compiler we are. */
+
+extern void foo2 (int *); /* { dg-message "expected 'int \\*' but argument is of type 'int'" } */
+extern void foo3 (int); /* { dg-message "expected 'int' but argument is of type 'int \\*'" } */
+
+int *
+fn1 (int *p)
+{
+ p = 1; /* { dg-error "assignment to 'int \\*' from 'int' makes pointer from integer without a cast" } */
+ int *q = 1; /* { dg-error "initialization of 'int \\*' from 'int' makes pointer from integer without a cast" } */
+ foo2 (1); /* { dg-error "passing argument 1 of 'foo2' makes pointer from integer without a cast" } */
+ return 1; /* { dg-error "returning 'int' from a function with return type 'int \\*' makes pointer from integer without a cast" } */
+}
+
+int
+fn2 (int i, int *p)
+{
+ i = p; /* { dg-error "assignment to 'int' from 'int \\*' makes integer from pointer without a cast" } */
+ int j = p; /* { dg-error "initialization of 'int' from 'int \\*' makes integer from pointer without a cast" } */
+ foo3 (p); /* { dg-error "passing argument 1 of 'foo3' makes integer from pointer without a cast" } */
+ return p; /* { dg-error "returning 'int \\*' from a function with return type 'int' makes integer from pointer without a cast" } */
+}
diff --git a/gcc/testsuite/gcc.dg/enum-compat-1.c b/gcc/testsuite/gcc.dg/enum-compat-1.c
index 5fb150c..b7352f6 100644
--- a/gcc/testsuite/gcc.dg/enum-compat-1.c
+++ b/gcc/testsuite/gcc.dg/enum-compat-1.c
@@ -3,7 +3,7 @@
/* Origin: Joseph Myers <jsm@polyomino.org.uk>, based on
PR c/6024 from Richard Earnshaw <rearnsha@arm.com> */
/* { dg-do compile } */
-/* { dg-options "" } */
+/* { dg-options "-fpermissive" } */
/* Original test from PR c/6024. */
enum e1 {a, b};
diff --git a/gcc/testsuite/gcc.dg/enum-compat-2.c b/gcc/testsuite/gcc.dg/enum-compat-2.c
new file mode 100644
index 0000000..6950901
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/enum-compat-2.c
@@ -0,0 +1,32 @@
+/* Test that enumerated types are only considered compatible when they
+ are the same type. PR c/6024. */
+/* Origin: Joseph Myers <jsm@polyomino.org.uk>, based on
+ PR c/6024 from Richard Earnshaw <rearnsha@arm.com> */
+/* { dg-do compile } */
+/* { dg-options "" } */
+
+/* Original test from PR c/6024. */
+enum e1 {a, b};
+enum e2 {c, d};
+
+void f(enum e1); /* { dg-error "prototype" "error at decl" } */
+
+void f(x)
+ enum e2 x; /* { dg-error "doesn't match prototype" } */
+{
+ return;
+}
+
+/* Other compatibility tests. */
+enum e3 { A };
+enum e4 { B };
+
+enum e3 v3;
+enum e4 *p = &v3; /* { dg-error "incompatible" "incompatible pointer" } */
+enum e3 *q = &v3;
+
+void g(enum e3); /* { dg-message "note: previous declaration" "error at first decl" } */
+void g(enum e4); /* { dg-error "conflicting types" "error at second decl" } */
+
+void h(enum e3);
+void h(enum e3);
diff --git a/gcc/testsuite/gcc.dg/func-ptr-conv-1.c b/gcc/testsuite/gcc.dg/func-ptr-conv-1.c
index 5c8a101..7c2876c 100644
--- a/gcc/testsuite/gcc.dg/func-ptr-conv-1.c
+++ b/gcc/testsuite/gcc.dg/func-ptr-conv-1.c
@@ -5,7 +5,7 @@
are not permitted. PR c/11234. */
/* Origin: Joseph Myers <jsm@polyomino.org.uk> */
/* { dg-do compile } */
-/* { dg-options "-pedantic" } */
+/* { dg-options "-fpermissive -pedantic" } */
void f(void);
diff --git a/gcc/testsuite/gcc.dg/func-ptr-conv-2.c b/gcc/testsuite/gcc.dg/func-ptr-conv-2.c
new file mode 100644
index 0000000..a9884a6
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/func-ptr-conv-2.c
@@ -0,0 +1,56 @@
+/* Conversions between function and object pointers are not permitted
+ in any version of ISO C, even with casts, except for the special
+ case of converting a null pointer constant to function pointer
+ type. Likewise, comparisons between function and object pointers
+ are not permitted. PR c/11234. */
+/* Origin: Joseph Myers <jsm@polyomino.org.uk> */
+/* { dg-do compile } */
+/* { dg-options "-pedantic" } */
+
+void f(void);
+
+void *v1 = f; /* { dg-warning "12:pointer" "bad conversion" } */
+void *v2 = &f; /* { dg-warning "12:pointer" "bad conversion" } */
+void *v3 = (void *)f; /* { dg-warning "12:pointer" "bad conversion" } */
+void *v4 = (void *)&f; /* { dg-warning "12:pointer" "bad conversion" } */
+void *v5;
+char *c1 = f; /* { dg-error "12:pointer" "bad conversion" } */
+char *c2 = &f; /* { dg-error "12:pointer" "bad conversion" } */
+char *c3 = (char *)f; /* { dg-warning "12:pointer" "bad conversion" } */
+char *c4 = (char *)&f; /* { dg-warning "12:pointer" "bad conversion" } */
+char *c5;
+void (*fp)(void);
+int a;
+
+void
+g(void)
+{
+ v5 = f; /* { dg-warning "6:pointer" "bad conversion" } */
+ v5 = &f; /* { dg-warning "6:pointer" "bad conversion" } */
+ v5 = (void *)f; /* { dg-warning "8:pointer" "bad conversion" } */
+ v5 = (void *)&f; /* { dg-warning "8:pointer" "bad conversion" } */
+ c5 = f; /* { dg-error "6:pointer" "bad conversion" } */
+ c5 = &f; /* { dg-error "6:pointer" "bad conversion" } */
+ c5 = (char *)f; /* { dg-warning "8:pointer" "bad conversion" } */
+ c5 = (char *)&f; /* { dg-warning "8:pointer" "bad conversion" } */
+ fp = v5; /* { dg-warning "6:pointer" "bad conversion" } */
+ fp = c5; /* { dg-error "6:pointer" "bad conversion" } */
+ fp = (void (*)(void))v5; /* { dg-warning "8:pointer" "bad conversion" } */
+ fp = (void (*)(void))c5; /* { dg-warning "8:pointer" "bad conversion" } */
+ (a ? f : v3); /* { dg-warning "10:pointer" "bad conversion" } */
+ (a ? v2 : fp); /* { dg-warning "11:pointer" "bad conversion" } */
+ /* The following are OK. */
+ fp = 0;
+ fp = (void *)0;
+ fp = 0L;
+ fp = (void (*)(void))0;
+ fp = (void (*)(void))(void *)0;
+ (a ? f : 0);
+ (a ? f : (void *)0);
+ (a ? (void *)0 : fp);
+ (a ? 0 : fp);
+}
+
+/* The following are OK. */
+void (*fp2)(void) = 0;
+void (*fp3)(void) = (void *)0;
diff --git a/gcc/testsuite/gcc.dg/gnu23-attr-syntax-2.c b/gcc/testsuite/gcc.dg/gnu23-attr-syntax-2.c
index ba60f7a..8943534 100644
--- a/gcc/testsuite/gcc.dg/gnu23-attr-syntax-2.c
+++ b/gcc/testsuite/gcc.dg/gnu23-attr-syntax-2.c
@@ -1,7 +1,7 @@
/* Test C23 attribute syntax. Invalid uses of attributes with GNU C
features. */
/* { dg-do compile } */
-/* { dg-options "-std=gnu23 -w" } */
+/* { dg-options "-fpermissive -std=gnu23 -w" } */
/* Attributes cannot be used as prefix attributes on old-style
parameter declarations or on function declarators with identifier
diff --git a/gcc/testsuite/gcc.dg/gnu23-attr-syntax-3.c b/gcc/testsuite/gcc.dg/gnu23-attr-syntax-3.c
new file mode 100644
index 0000000..d73d6a3
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/gnu23-attr-syntax-3.c
@@ -0,0 +1,17 @@
+/* Test C23 attribute syntax. Invalid uses of attributes with GNU C
+ features. Non-permissive variant. */
+/* { dg-do compile } */
+/* { dg-options "-std=gnu23 -w" } */
+
+/* Attributes cannot be used as prefix attributes on old-style
+ parameter declarations or on function declarators with identifier
+ lists (removed from C23). */
+
+void (*f(a, b) [[]])() int a, b; { } /* { dg-error "expected" } */
+
+void f(x, y) int x; [[]] int y; { } /* { dg-error "expected" } */
+/* { dg-error "type of 'y' defaults to 'int'" "" { target *-*-* } .-1 } */
+
+/* Nonempty attributes cannot be used as postfix attributes with
+ __auto_type. */
+__auto_type [[gnu::no_such_attr]] x = 1; /* { dg-error "'__auto_type' followed by" } */
diff --git a/gcc/testsuite/gcc.dg/gnu23-builtins-no-dfp-1.c b/gcc/testsuite/gcc.dg/gnu23-builtins-no-dfp-1.c
index 9fa25f0..7cb200f 100644
--- a/gcc/testsuite/gcc.dg/gnu23-builtins-no-dfp-1.c
+++ b/gcc/testsuite/gcc.dg/gnu23-builtins-no-dfp-1.c
@@ -10,9 +10,9 @@ int nand32 (void);
int nand64 (void);
int nand128 (void);
-__typeof__ (__builtin_fabsd32 (0)) d32; /* { dg-warning "implicit" } */
-__typeof__ (__builtin_fabsd64 (0)) d64; /* { dg-warning "implicit" } */
-__typeof__ (__builtin_fabsd128 (0)) d128; /* { dg-warning "implicit" } */
-__typeof__ (__builtin_nand32 (0)) d32n; /* { dg-warning "implicit" } */
-__typeof__ (__builtin_nand64 (0)) d64n; /* { dg-warning "implicit" } */
-__typeof__ (__builtin_nand128 (0)) d128n; /* { dg-warning "implicit" } */
+__typeof__ (__builtin_fabsd32 (0)) d32; /* { dg-error "implicit" } */
+__typeof__ (__builtin_fabsd64 (0)) d64; /* { dg-error "implicit" } */
+__typeof__ (__builtin_fabsd128 (0)) d128; /* { dg-error "implicit" } */
+__typeof__ (__builtin_nand32 (0)) d32n; /* { dg-error "implicit" } */
+__typeof__ (__builtin_nand64 (0)) d64n; /* { dg-error "implicit" } */
+__typeof__ (__builtin_nand128 (0)) d128n; /* { dg-error "implicit" } */
diff --git a/gcc/testsuite/gcc.dg/gomp/pr35738-2.c b/gcc/testsuite/gcc.dg/gomp/pr35738-2.c
new file mode 100644
index 0000000..846afe7
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/gomp/pr35738-2.c
@@ -0,0 +1,18 @@
+/* PR c/35738 */
+/* { dg-do compile } */
+/* { dg-options "-fopenmp" } */
+
+void foo (void);
+
+void
+bar (void *p)
+{
+ int i = 0;
+ char q[10];
+#pragma omp atomic
+ i += q; /* { dg-error "makes integer from pointer without a cast" } */
+#pragma omp atomic
+ i += foo; /* { dg-error "makes integer from pointer without a cast" } */
+#pragma omp atomic
+ i += p; /* { dg-error "makes integer from pointer without a cast" } */
+}
diff --git a/gcc/testsuite/gcc.dg/gomp/pr35738.c b/gcc/testsuite/gcc.dg/gomp/pr35738.c
index 0b3866e..954cfa4 100644
--- a/gcc/testsuite/gcc.dg/gomp/pr35738.c
+++ b/gcc/testsuite/gcc.dg/gomp/pr35738.c
@@ -1,6 +1,6 @@
/* PR c/35738 */
/* { dg-do compile } */
-/* { dg-options "-fopenmp" } */
+/* { dg-options "-fpermissive -fopenmp" } */
void foo (void);
diff --git a/gcc/testsuite/gcc.dg/graphite/pr83126.c b/gcc/testsuite/gcc.dg/graphite/pr83126.c
index 36bf5d5..a69dbd5 100644
--- a/gcc/testsuite/gcc.dg/graphite/pr83126.c
+++ b/gcc/testsuite/gcc.dg/graphite/pr83126.c
@@ -12,7 +12,7 @@ ew (unsigned short int c9, int stuff)
int *fd = &stuff;
*fd = c9;
- fd = *fd;
+ fd = (int *) (__INTPTR_TYPE__) *fd;
if (*fd != 0)
for (*by = 0; *by < 2; ++*by)
c9 *= e1;
diff --git a/gcc/testsuite/gcc.dg/graphite/pr83255.c b/gcc/testsuite/gcc.dg/graphite/pr83255.c
index cb376fa..0753263 100644
--- a/gcc/testsuite/gcc.dg/graphite/pr83255.c
+++ b/gcc/testsuite/gcc.dg/graphite/pr83255.c
@@ -1,5 +1,5 @@
/* { dg-do run } */
-/* { dg-options "-O -floop-nest-optimize -fdump-tree-graphite-details" } */
+/* { dg-options "-O -floop-nest-optimize -fno-tree-scev-cprop -fdump-tree-graphite-details" } */
int rx, in;
diff --git a/gcc/testsuite/gcc.dg/hardbool-err.c b/gcc/testsuite/gcc.dg/hardbool-err.c
new file mode 100644
index 0000000..e5bf58e
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/hardbool-err.c
@@ -0,0 +1,31 @@
+/* { dg-do compile } */
+/* { dg-options "" } */
+
+typedef _Bool __attribute__ ((__hardbool__))
+hbbl; /* { dg-error "integral types" } */
+
+typedef double __attribute__ ((__hardbool__))
+hbdbl; /* { dg-error "integral types" } */
+
+typedef _Complex int __attribute__ ((__hardbool__))
+hbcplx; /* { dg-error "integral types" } */
+
+enum x;
+typedef enum x __attribute__ ((__hardbool__))
+hbenum; /* { dg-error "integral types" } */
+
+struct s;
+typedef struct s __attribute__ ((__hardbool__))
+hbstruct; /* { dg-error "integral types" } */
+
+typedef int __attribute__ ((__hardbool__ (0, 0)))
+hb00; /* { dg-error "different values" } */
+
+typedef int __attribute__ ((__hardbool__ (4, 16))) hb4x;
+struct s {
+ hb4x m:2;
+}; /* { dg-error "is a GCC extension|different values" } */
+/* { dg-warning "changes value" "warning" { target *-*-* } .-1 } */
+
+hb4x __attribute__ ((vector_size (4 * sizeof (hb4x))))
+vvar; /* { dg-error "invalid vector type" } */
diff --git a/gcc/testsuite/gcc.dg/hardbool-trap.c b/gcc/testsuite/gcc.dg/hardbool-trap.c
new file mode 100644
index 0000000..2eebd0e
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/hardbool-trap.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fdump-tree-optimized" } */
+
+typedef char __attribute__ ((__hardbool__ (1))) hbool;
+
+hbool var;
+
+int main () {
+ __builtin_memset (&var, 0, sizeof (var));
+ (void)var;
+}
+
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "optimized" } } */
diff --git a/gcc/testsuite/gcc.dg/init-bad-7.c b/gcc/testsuite/gcc.dg/init-bad-7.c
index de5e570..caa8c78 100644
--- a/gcc/testsuite/gcc.dg/init-bad-7.c
+++ b/gcc/testsuite/gcc.dg/init-bad-7.c
@@ -1,6 +1,6 @@
/* PR c/37724 */
/* { dg-do compile } */
-/* { dg-options "-std=gnu99 -pedantic" } */
+/* { dg-options "-fpermissive -std=gnu99 -pedantic" } */
struct f
{
diff --git a/gcc/testsuite/gcc.dg/init-bad-7a.c b/gcc/testsuite/gcc.dg/init-bad-7a.c
new file mode 100644
index 0000000..9ead290
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/init-bad-7a.c
@@ -0,0 +1,12 @@
+/* PR c/37724 */
+/* { dg-do compile } */
+/* { dg-options "" } */
+
+struct f
+{
+ int *a;
+};
+
+char b[10];
+struct f g = {b}; /* { dg-error "initialization of 'int \\*' from incompatible pointer type" } */
+/* { dg-note "near initialization for" "" { target *-*-* } .-1 } */
diff --git a/gcc/testsuite/gcc.dg/init-excess-3.c b/gcc/testsuite/gcc.dg/init-excess-3.c
index c03a984..6ea7858 100644
--- a/gcc/testsuite/gcc.dg/init-excess-3.c
+++ b/gcc/testsuite/gcc.dg/init-excess-3.c
@@ -7,9 +7,9 @@
char s0[] = {"abc",1}; /* { dg-error "'char..' initializer|near init" } */
char s1[] = {"abc","a"}; /* { dg-error "'char..' initializer|near init" } */
char s2[] = {1,"abc"}; /* { dg-error "'char..' initializer|near init|computable at load time" } */
-/* { dg-warning "integer from pointer without a cast" "" { target *-*-* } .-1 } */
+/* { dg-error "integer from pointer without a cast" "" { target *-*-* } .-1 } */
char s3[5] = {"abc",1}; /* { dg-error "'char.5.' initializer|near init" } */
char s4[5] = {"abc","a"}; /* { dg-error "'char.5.' initializer|near init" } */
char s5[5] = {1,"abc"}; /* { dg-error "'char.5.' initializer|near init|computable at load time" } */
-/* { dg-warning "integer from pointer without a cast" "" { target *-*-* } .-1 } */
+/* { dg-error "integer from pointer without a cast" "" { target *-*-* } .-1 } */
diff --git a/gcc/testsuite/gcc.dg/missing-header-fixit-1.c b/gcc/testsuite/gcc.dg/missing-header-fixit-1.c
index 2b28357..eb33d9b 100644
--- a/gcc/testsuite/gcc.dg/missing-header-fixit-1.c
+++ b/gcc/testsuite/gcc.dg/missing-header-fixit-1.c
@@ -5,7 +5,7 @@
/* This is padding (to avoid the generated patch containing DejaGnu
directives). */
-/* { dg-options "-fdiagnostics-generate-patch" } */
+/* { dg-options "-fpermissive -fdiagnostics-generate-patch" } */
void test (int i, int j)
{
diff --git a/gcc/testsuite/gcc.dg/missing-header-fixit-1a.c b/gcc/testsuite/gcc.dg/missing-header-fixit-1a.c
new file mode 100644
index 0000000..e47236b
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/missing-header-fixit-1a.c
@@ -0,0 +1,37 @@
+/* Example of a fix-it hint that adds a #include directive,
+ adding them to the top of the file, given that there is no
+ pre-existing #include. */
+
+/* This is padding (to avoid the generated patch containing DejaGnu
+ directives). */
+
+/* { dg-options "-fdiagnostics-generate-patch" } */
+
+void test (int i, int j)
+{
+ printf ("%i of %i\n", i, j); /* { dg-error "implicit declaration" } */
+ /* { dg-message "include '<stdio.h>' or provide a declaration of 'printf'" "" { target *-*-* } .-1 } */
+ /* { dg-warning "incompatible implicit declaration of built-in function 'printf'" "" { target *-*-* } .-2 } */
+}
+
+/* Verify the output from -fdiagnostics-generate-patch.
+ We expect the patch to begin with a header, containing this
+ source filename, via an absolute path.
+ Given the path, we can only capture it via regexps. */
+/* { dg-regexp "\\-\\-\\- .*" } */
+/* { dg-regexp "\\+\\+\\+ .*" } */
+/* Use #if 0/#endif rather than comments, to allow the text to contain
+ a comment. */
+#if 0
+{ dg-begin-multiline-output "" }
+@@ -1,3 +1,4 @@
++#include <stdio.h>
+ /* Example of a fix-it hint that adds a #include directive,
+ adding them to the top of the file, given that there is no
+ pre-existing #include. */
+{ dg-end-multiline-output "" }
+#endif
+
+/* FIXME: should we attempt to skip leading comments when determining the
+ insertion location?
+ Similarly, should we attempt to be within single-inclusion guards, etc? */
diff --git a/gcc/testsuite/gcc.dg/missing-header-fixit-2.c b/gcc/testsuite/gcc.dg/missing-header-fixit-2.c
index 5d5f874..38d36a6 100644
--- a/gcc/testsuite/gcc.dg/missing-header-fixit-2.c
+++ b/gcc/testsuite/gcc.dg/missing-header-fixit-2.c
@@ -9,7 +9,7 @@
/* { dg-warning "implicit declaration of function 'printf'" "" { target *-*-* } 6 } */
/* { dg-warning "incompatible implicit declaration of built-in function 'printf'" "" { target *-*-* } 6 } */
-/* { dg-options "-fdiagnostics-generate-patch" } */
+/* { dg-options "-fpermissive -fdiagnostics-generate-patch" } */
/* Verify the output from -fdiagnostics-generate-patch.
We expect the patch to begin with a header, containing the
diff --git a/gcc/testsuite/gcc.dg/missing-header-fixit-2a.c b/gcc/testsuite/gcc.dg/missing-header-fixit-2a.c
new file mode 100644
index 0000000..4ae617b
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/missing-header-fixit-2a.c
@@ -0,0 +1,31 @@
+/* Verify that when we suggest adding #include directives that they
+ are added to the affected file. */
+
+/* The following header file is missing a "#include <stdio.h>". */
+
+#include "missing-header-fixit-2.h"
+
+/* These directives actually apply to the header. */
+/* { dg-error "implicit declaration of function 'printf'" "" { target *-*-* } 6 } */
+/* { dg-warning "incompatible implicit declaration of built-in function 'printf'" "" { target *-*-* } 6 } */
+
+/* { dg-options "-fdiagnostics-generate-patch" } */
+
+/* Verify the output from -fdiagnostics-generate-patch.
+ We expect the patch to begin with a header, containing the
+ filename of the header, via an absolute path.
+ Given the path, we can only capture it via regexps. */
+/* { dg-regexp "\\-\\-\\- .*" } */
+/* { dg-regexp "\\+\\+\\+ .*" } */
+/* Use #if 0/#endif rather than comments, to allow the text to contain
+ a comment.
+ We expect the *header* to have been patched, adding the missing include. */
+#if 0
+{ dg-begin-multiline-output "" }
+@@ -1,3 +1,4 @@
++#include <stdio.h>
+ /* This is missing-header-fixit-2.h, for use by
+ missing-header-fixit-2.c */
+
+{ dg-end-multiline-output "" }
+#endif
diff --git a/gcc/testsuite/gcc.dg/missing-header-fixit-4.c b/gcc/testsuite/gcc.dg/missing-header-fixit-4.c
index b668056..8e4e48d 100644
--- a/gcc/testsuite/gcc.dg/missing-header-fixit-4.c
+++ b/gcc/testsuite/gcc.dg/missing-header-fixit-4.c
@@ -3,7 +3,7 @@
#include "empty.h"
int the_next_line;
-/* { dg-options "-fdiagnostics-show-caret -fdiagnostics-show-line-numbers" } */
+/* { dg-options "-fpermissive -fdiagnostics-show-caret -fdiagnostics-show-line-numbers" } */
void test (int i, int j)
{
diff --git a/gcc/testsuite/gcc.dg/missing-header-fixit-4a.c b/gcc/testsuite/gcc.dg/missing-header-fixit-4a.c
new file mode 100644
index 0000000..b93061f
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/missing-header-fixit-4a.c
@@ -0,0 +1,27 @@
+/* Example of a fix-it hint that adds a #include directive,
+ adding them after a pre-existing #include directive. */
+#include "empty.h"
+int the_next_line;
+
+/* { dg-options "-fdiagnostics-show-caret -fdiagnostics-show-line-numbers" } */
+
+void test (int i, int j)
+{
+ printf ("%i of %i\n", i, j); /* { dg-line printf } */
+ /* { dg-error "implicit declaration of function" "" { target *-*-* } printf } */
+ /* { dg-begin-multiline-output "" }
+ 10 | printf ("%i of %i\n", i, j);
+ | ^~~~~~
+ { dg-end-multiline-output "" } */
+ /* { dg-warning "incompatible implicit declaration" "" { target *-*-* } printf } */
+ /* { dg-begin-multiline-output "" }
+ 10 | printf ("%i of %i\n", i, j);
+ | ^~~~~~
+ { dg-end-multiline-output "" } */
+ /* { dg-message "include '<stdio.h>' or provide a declaration of 'printf'" "" { target *-*-* } 4 } */
+ /* { dg-begin-multiline-output "" }
+ 3 | #include "empty.h"
+ +++ |+#include <stdio.h>
+ 4 | int the_next_line;
+ { dg-end-multiline-output "" } */
+}
diff --git a/gcc/testsuite/gcc.dg/missing-header-fixit-5.c b/gcc/testsuite/gcc.dg/missing-header-fixit-5.c
index bf44feb..c34a47dc 100644
--- a/gcc/testsuite/gcc.dg/missing-header-fixit-5.c
+++ b/gcc/testsuite/gcc.dg/missing-header-fixit-5.c
@@ -3,7 +3,7 @@
Rely on -Wimplicit-function-declaration for fixit hints, not on
-Wbuiltin-declaration-mismatch (which misses abs, isdigit, putchar). */
-/* { dg-options "-fdiagnostics-show-caret -fdiagnostics-show-line-numbers -Wimplicit-function-declaration -Wno-builtin-declaration-mismatch" } */
+/* { dg-options "-fpermissive -fdiagnostics-show-caret -fdiagnostics-show-line-numbers -Wimplicit-function-declaration -Wno-builtin-declaration-mismatch" } */
int
foo (char *m, int i)
diff --git a/gcc/testsuite/gcc.dg/missing-header-fixit-5a.c b/gcc/testsuite/gcc.dg/missing-header-fixit-5a.c
new file mode 100644
index 0000000..420cbf7
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/missing-header-fixit-5a.c
@@ -0,0 +1,42 @@
+
+/* Forget to include any standard headers, all for built-in functions.
+ Rely on -Wimplicit-function-declaration for fixit hints, not on
+ -Wbuiltin-declaration-mismatch (which misses abs, isdigit, putchar). */
+
+/* { dg-options "-fdiagnostics-show-caret -fdiagnostics-show-line-numbers -Wimplicit-function-declaration -Wno-builtin-declaration-mismatch" } */
+
+int
+foo (char *m, int i)
+{
+ if (isdigit (m[0])) /* { dg-error "implicit declaration of function" } */
+ /* { dg-begin-multiline-output "" }
+ 11 | if (isdigit (m[0]))
+ | ^~~~~~~
+ { dg-end-multiline-output "" } */
+ /* { dg-begin-multiline-output "" }
+ +++ |+#include <ctype.h>
+ 1 |
+ { dg-end-multiline-output "" } */
+ {
+ return abs (i); /* { dg-error "implicit declaration of function" } */
+ /* { dg-begin-multiline-output "" }
+ 21 | return abs (i);
+ | ^~~
+ { dg-end-multiline-output "" } */
+ /* { dg-begin-multiline-output "" }
+ +++ |+#include <stdlib.h>
+ 1 |
+ { dg-end-multiline-output "" } */
+ }
+ else
+ putchar (m[0]); /* { dg-error "implicit declaration of function" } */
+ /* { dg-begin-multiline-output "" }
+ 32 | putchar (m[0]);
+ | ^~~~~~~
+ { dg-end-multiline-output "" } */
+ /* { dg-begin-multiline-output "" }
+ +++ |+#include <stdio.h>
+ 1 |
+ { dg-end-multiline-output "" } */
+ return i;
+}
diff --git a/gcc/testsuite/gcc.dg/noncompile/incomplete-3.c b/gcc/testsuite/gcc.dg/noncompile/incomplete-3.c
index 0618b4d..b783052 100644
--- a/gcc/testsuite/gcc.dg/noncompile/incomplete-3.c
+++ b/gcc/testsuite/gcc.dg/noncompile/incomplete-3.c
@@ -4,6 +4,6 @@ typedef struct { int a; } b_t;
int foo (void)
{
b_t d;
- struct b_t *c = &d; /* { dg-warning "incompatible pointer type" } */
+ struct b_t *c = &d; /* { dg-error "incompatible pointer type" } */
c->a; /* { dg-error "invalid use of undefined type" } */
}
diff --git a/gcc/testsuite/gcc.dg/noncompile/pr79758-2.c b/gcc/testsuite/gcc.dg/noncompile/pr79758-2.c
new file mode 100644
index 0000000..e6a27f9
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/noncompile/pr79758-2.c
@@ -0,0 +1,6 @@
+/* PR c/79758 */
+/* { dg-do compile } */
+
+void fn1 (int[a]) { }; /* { dg-error "undeclared here" } */
+void fn1 (b) { }; /* { dg-error "redefinition" } */
+/* { dg-error "defaults to 'int'" "" { target *-*-* } .-1 } */
diff --git a/gcc/testsuite/gcc.dg/noncompile/pr79758.c b/gcc/testsuite/gcc.dg/noncompile/pr79758.c
index a312160..e42a443 100644
--- a/gcc/testsuite/gcc.dg/noncompile/pr79758.c
+++ b/gcc/testsuite/gcc.dg/noncompile/pr79758.c
@@ -1,5 +1,6 @@
/* PR c/79758 */
/* { dg-do compile } */
+/* { dg-additional-options "-fpermissive" } */
void fn1 (int[a]) { }; /* { dg-error "undeclared here" } */
void fn1 (b) { }; /* { dg-error "redefinition" } */
diff --git a/gcc/testsuite/gcc.dg/overflow-warn-1.c b/gcc/testsuite/gcc.dg/overflow-warn-1.c
index a9d9fba..90eb43b 100644
--- a/gcc/testsuite/gcc.dg/overflow-warn-1.c
+++ b/gcc/testsuite/gcc.dg/overflow-warn-1.c
@@ -47,10 +47,10 @@ static int sc = INT_MAX + 1; /* { dg-warning "25:integer overflow in expression"
constants. The third has the overflow in an unevaluated
subexpression, so is a null pointer constant. */
void *p = 0 * (INT_MAX + 1); /* { dg-warning "integer overflow in expression" } */
-/* { dg-warning "initialization of 'void \\*' from 'int' makes pointer from integer without a cast" "null" { target *-*-* } .-1 } */
+/* { dg-error "initialization of 'void \\*' from 'int' makes pointer from integer without a cast" "null" { target *-*-* } .-1 } */
void *q = 0 * (1 / 0); /* { dg-warning "division by zero" } */
/* { dg-error "initializer element is not constant" "constant" { target *-*-* } .-1 } */
-/* { dg-warning "initialization of 'void \\*' from 'int' makes pointer from integer without a cast" "null" { target *-*-* } .-2 } */
+/* { dg-error "initialization of 'void \\*' from 'int' makes pointer from integer without a cast" "null" { target *-*-* } .-2 } */
void *r = (1 ? 0 : INT_MAX+1);
void
diff --git a/gcc/testsuite/gcc.dg/overflow-warn-3.c b/gcc/testsuite/gcc.dg/overflow-warn-3.c
index f640477..a2ead83 100644
--- a/gcc/testsuite/gcc.dg/overflow-warn-3.c
+++ b/gcc/testsuite/gcc.dg/overflow-warn-3.c
@@ -53,10 +53,10 @@ static int sc = INT_MAX + 1; /* { dg-warning "integer overflow in expression" }
subexpression, so is a null pointer constant. */
void *p = 0 * (INT_MAX + 1); /* { dg-warning "integer overflow in expression" } */
/* { dg-warning "overflow in constant expression" "constant" { target *-*-* } .-1 } */
-/* { dg-warning "initialization of 'void \\*' from 'int' makes pointer from integer without a cast" "null" { target *-*-* } .-2 } */
+/* { dg-error "initialization of 'void \\*' from 'int' makes pointer from integer without a cast" "null" { target *-*-* } .-2 } */
void *q = 0 * (1 / 0); /* { dg-warning "division by zero" } */
/* { dg-error "initializer element is not constant" "constant" { target *-*-* } .-1 } */
-/* { dg-warning "initialization of 'void \\*' from 'int' makes pointer from integer without a cast" "null" { target *-*-* } .-2 } */
+/* { dg-error "initialization of 'void \\*' from 'int' makes pointer from integer without a cast" "null" { target *-*-* } .-2 } */
void *r = (1 ? 0 : INT_MAX+1);
void
diff --git a/gcc/testsuite/gcc.dg/param-type-mismatch-2.c b/gcc/testsuite/gcc.dg/param-type-mismatch-2.c
new file mode 100644
index 0000000..83bc360
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/param-type-mismatch-2.c
@@ -0,0 +1,187 @@
+/* { dg-options "-fdiagnostics-show-caret -Wpointer-sign" } */
+
+/* A collection of calls where argument 2 is of the wrong type.
+ Like param-type-mismatch.c, but expecting errors. */
+
+/* decl, with argname. */
+
+extern int callee_1 (int one, const char *two, float three); /* { dg-line callee_1 } */
+
+int test_1 (int first, int second, float third)
+{
+ return callee_1 (first, second, third); /* { dg-error "passing argument 2 of 'callee_1' makes pointer from integer without a cast" } */
+ /* { dg-begin-multiline-output "" }
+ return callee_1 (first, second, third);
+ ^~~~~~
+ |
+ int
+ { dg-end-multiline-output "" } */
+ /* { dg-message "expected 'const char \\*' but argument is of type 'int'" "" { target *-*-* } callee_1 } */
+ /* { dg-begin-multiline-output "" }
+ extern int callee_1 (int one, const char *two, float three);
+ ~~~~~~~~~~~~^~~
+ { dg-end-multiline-output "" } */
+}
+
+/* decl, without argname. */
+
+extern int callee_2 (int, const char *, float); /* { dg-line callee_2 } */
+
+int test_2 (int first, int second, float third)
+{
+ return callee_2 (first, second, third); /* { dg-error "passing argument 2 of 'callee_2' makes pointer from integer without a cast" } */
+ /* { dg-begin-multiline-output "" }
+ return callee_2 (first, second, third);
+ ^~~~~~
+ |
+ int
+ { dg-end-multiline-output "" } */
+ /* { dg-message "expected 'const char \\*' but argument is of type 'int'" "" { target *-*-* } callee_2 } */
+ /* { dg-begin-multiline-output "" }
+ extern int callee_2 (int, const char *, float);
+ ^~~~~~~~~~~~
+ { dg-end-multiline-output "" } */
+}
+
+/* defn, with argname. */
+
+static int callee_3 (int one, const char *two, float three) /* { dg-line callee_3 } */
+{
+ return callee_2 (one, two, three);
+}
+
+int test_3 (int first, int second, float third)
+{
+ return callee_3 (first, second, third); // { dg-error "passing argument 2 of 'callee_3' makes pointer from integer without a cast" }
+ /* { dg-begin-multiline-output "" }
+ return callee_3 (first, second, third);
+ ^~~~~~
+ |
+ int
+ { dg-end-multiline-output "" } */
+ /* { dg-message "expected 'const char \\*' but argument is of type 'int'" "" { target *-*-* } callee_3 } */
+ /* { dg-begin-multiline-output "" }
+ static int callee_3 (int one, const char *two, float three)
+ ~~~~~~~~~~~~^~~
+ { dg-end-multiline-output "" } */
+}
+
+/* Trivial decl, with argname. */
+
+extern int callee_4 (int one, float two, float three); /* { dg-line callee_4 } */
+
+int test_4 (int first, const char *second, float third)
+{
+ return callee_4 (first, second, third); /* { dg-error "incompatible type for argument 2 of 'callee_4'" } */
+ /* { dg-begin-multiline-output "" }
+ return callee_4 (first, second, third);
+ ^~~~~~
+ |
+ const char *
+ { dg-end-multiline-output "" } */
+ /* { dg-message "expected 'float' but argument is of type 'const char \\*'" "" { target *-*-* } callee_4 } */
+ /* { dg-begin-multiline-output "" }
+ extern int callee_4 (int one, float two, float three);
+ ~~~~~~^~~
+ { dg-end-multiline-output "" } */
+}
+
+/* Trivial decl, without argname. */
+
+extern int callee_5 (int, float, float); /* { dg-line callee_5 } */
+
+int test_5 (int first, const char *second, float third)
+{
+ return callee_5 (first, second, third); /* { dg-error "incompatible type for argument 2 of 'callee_5'" } */
+ /* { dg-begin-multiline-output "" }
+ return callee_5 (first, second, third);
+ ^~~~~~
+ |
+ const char *
+ { dg-end-multiline-output "" } */
+ /* { dg-message "expected 'float' but argument is of type 'const char \\*'" "" { target *-*-* } callee_5 } */
+ /* { dg-begin-multiline-output "" }
+ extern int callee_5 (int, float, float);
+ ^~~~~
+ { dg-end-multiline-output "" } */
+}
+
+/* Callback with name. */
+
+extern int callee_6 (int one, int (*two)(int, int), float three); /* { dg-line callee_6 } */
+
+int test_6 (int first, int second, float third)
+{
+ return callee_6 (first, second, third); /* { dg-error "passing argument 2 of 'callee_6' makes pointer from integer without a cast" } */
+ /* { dg-begin-multiline-output "" }
+ return callee_6 (first, second, third);
+ ^~~~~~
+ |
+ int
+ { dg-end-multiline-output "" } */
+ /* { dg-message " expected 'int \\(\\*\\)\\(int, int\\)' but argument is of type 'int'" "" { target *-*-* } callee_6 } */
+ /* { dg-begin-multiline-output "" }
+ extern int callee_6 (int one, int (*two)(int, int), float three);
+ ~~~~~~^~~~~~~~~~~~~~
+ { dg-end-multiline-output "" } */
+}
+
+/* Callback without name. */
+
+extern int callee_7 (int one, int (*)(int, int), float three); /* { dg-line callee_7 } */
+
+int test_7 (int first, int second, float third)
+{
+ return callee_7 (first, second, third); /* { dg-error "passing argument 2 of 'callee_7' makes pointer from integer without a cast" } */
+ /* { dg-begin-multiline-output "" }
+ return callee_7 (first, second, third);
+ ^~~~~~
+ |
+ int
+ { dg-end-multiline-output "" } */
+ /* { dg-message " expected 'int \\(\\*\\)\\(int, int\\)' but argument is of type 'int'" "" { target *-*-* } callee_7 } */
+ /* { dg-begin-multiline-output "" }
+ extern int callee_7 (int one, int (*)(int, int), float three);
+ ^~~~~~~~~~~~~~~~~
+ { dg-end-multiline-output "" } */
+}
+
+/* -Wincompatible-pointer-types for a parameter. */
+
+extern int callee_8 (int one, float *two, float (three)); /* { dg-line callee_8 } */
+
+int test_8 (int first, int *second, float third)
+{
+ return callee_8 (first, second, third); /* { dg-error "passing argument 2 of 'callee_8' from incompatible pointer type" } */
+ /* { dg-begin-multiline-output "" }
+ return callee_8 (first, second, third);
+ ^~~~~~
+ |
+ int *
+ { dg-end-multiline-output "" } */
+ /* { dg-message "expected 'float \\*' but argument is of type 'int \\*'" "" { target *-*-* } callee_8 } */
+ /* { dg-begin-multiline-output "" }
+ extern int callee_8 (int one, float *two, float (three));
+ ~~~~~~~^~~
+ { dg-end-multiline-output "" } */
+}
+
+/* -Wpointer-sign for a parameter. */
+
+extern int callee_9 (int one, int *two, float (three)); /* { dg-line callee_9 } */
+
+int test_9 (int first, unsigned int *second, float third)
+{
+ return callee_9 (first, second, third); /* { dg-warning "pointer targets in passing argument 2 of 'callee_9' differ in signedness" } */
+ /* { dg-begin-multiline-output "" }
+ return callee_9 (first, second, third);
+ ^~~~~~
+ |
+ unsigned int *
+ { dg-end-multiline-output "" } */
+ /* { dg-message "expected 'int \\*' but argument is of type 'unsigned int \\*'" "" { target *-*-* } callee_9 } */
+ /* { dg-begin-multiline-output "" }
+ extern int callee_9 (int one, int *two, float (three));
+ ~~~~~^~~
+ { dg-end-multiline-output "" } */
+}
diff --git a/gcc/testsuite/gcc.dg/param-type-mismatch.c b/gcc/testsuite/gcc.dg/param-type-mismatch.c
index 9e654a9..f6d6874 100644
--- a/gcc/testsuite/gcc.dg/param-type-mismatch.c
+++ b/gcc/testsuite/gcc.dg/param-type-mismatch.c
@@ -1,4 +1,4 @@
-/* { dg-options "-fdiagnostics-show-caret -Wpointer-sign" } */
+/* { dg-options "-fpermissive -fdiagnostics-show-caret -Wpointer-sign" } */
/* A collection of calls where argument 2 is of the wrong type. */
diff --git a/gcc/testsuite/gcc.dg/pch/rwsr-pch.c b/gcc/testsuite/gcc.dg/pch/rwsr-pch.c
new file mode 100644
index 0000000..f49d276
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pch/rwsr-pch.c
@@ -0,0 +1,7 @@
+#include "rwsr-pch.h"
+extern int printf (const char *, ...);
+int main (void) {
+ long long val = rwsr ();
+ printf ("%lld\n", val);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/pch/rwsr-pch.hs b/gcc/testsuite/gcc.dg/pch/rwsr-pch.hs
new file mode 100644
index 0000000..79b37544
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pch/rwsr-pch.hs
@@ -0,0 +1,10 @@
+/* { dg-skip-if "" { ! aarch64*-*-* } } */
+static inline long long
+rwsr (void)
+{
+ long long a = __builtin_aarch64_rsr64 ("trcseqstr");
+ __builtin_aarch64_wsr64 ("trcseqstr", a + 1);
+ a = __builtin_aarch64_rsr64 ("trcseqstr");
+ return a;
+}
+
diff --git a/gcc/testsuite/gcc.dg/permerror-default.c b/gcc/testsuite/gcc.dg/permerror-default.c
new file mode 100644
index 0000000..c674d68
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/permerror-default.c
@@ -0,0 +1,85 @@
+/* { dg-options "" } */
+
+/* Overview test for C permerrors. This test should be kept in sync with the
+ other permerror-* tests. If new permerrors are added, test cases should be
+ added to this and the other files. */
+
+void
+implicit_function_declaration (void)
+{
+ f1 (); /* { dg-error "'f1' \\\[-Wimplicit-function-declaration\\\]" } */
+}
+
+extern implicit_int_1; /* { dg-error "'implicit_int_1' \\\[-Wimplicit-int\\\]" } */
+typedef implicit_int_2; /* { dg-error "'implicit_int_2' \\\[-Wimplicit-int\\\]" } */
+extern implicit_int_3 (void); /* { dg-error "'implicit_int_3' \\\[-Wimplicit-int\\]" } */
+implicit_int_4 (i) /* { dg-error "return type defaults to 'int' \\\[-Wimplicit-int\\\]" } */
+/* { dg-error "type of 'i' defaults to 'int' \\\[-Wimplicit-int\\\]" "" { target *-*-*} .-1 } */
+{
+ (const) 0; /* { dg-error "type defaults to 'int' in type name \\\[-Wimplicit-int\\\]" } */
+}
+
+extern int missing_parameter_type (i); /* { dg-error "parameter names \\\(without types\\\) in function declaration \\\[-Wdeclaration-missing-parameter-type\\\]" } */
+
+
+int *
+int_conversion_1 (int flag)
+{
+ void f2 (int *);
+ flag ? "1" : 1; /* { dg-error "pointer/integer type mismatch in conditional expression \\\[-Wint-conversion\\\]" } */
+ flag ? 1 : "1"; /* { dg-error "pointer/integer type mismatch in conditional expression \\\[-Wint-conversion\\\]" } */
+ f2 (flag); /* { dg-error "passing argument 1 of 'f2' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+ {
+ int i1 = &flag; /* { dg-error "initialization of 'int' from 'int \\\*' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+ i1 = &flag; /* { dg-error "assignment to 'int' from 'int \\\*' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+ }
+ return flag; /* { dg-error "returning 'int' from a function with return type 'int \\\*' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+}
+
+int
+int_conversion_2 (int flag)
+{
+ void f3 (int);
+ f3 (&flag); /* { dg-error "passing argument 1 of 'f3' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+ {
+ int *i1 = flag; /* { dg-error "initialization of 'int \\\*' from 'int' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+ i1 = flag; /* { dg-error "assignment to 'int \\\*' from 'int' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+ }
+ return &flag; /* { dg-error "returning 'int \\\*' from a function with return type 'int' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+}
+
+int *
+incompatible_pointer_types (int flag)
+{
+ void f4 (int *);
+ flag ? __builtin_abs : __builtin_labs; /* { dg-error "pointer type mismatch in conditional expression \\\[-Wincompatible-pointer-types\\\]" } */
+ {
+ int *p1 = __builtin_abs; /* { dg-error "initialization of 'int \\\*' from pointer to '__builtin_abs' with incompatible type 'int \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ p1 = __builtin_abs; /* { dg-error "assignment to 'int \\\*' from pointer to '__builtin_abs' with incompatible type 'int \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ }
+ {
+ int *p2 = incompatible_pointer_types; /* { dg-error "initialization of 'int \\\*' from incompatible pointer type 'int \\\* \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ p2 = incompatible_pointer_types; /* { dg-error "assignment to 'int \\\*' from incompatible pointer type 'int \\\* \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ {
+ int *p3 = &p2; /* { dg-error "initialization of 'int \\\*' from incompatible pointer type 'int \\\*\\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+ p3 = &p2; /* { dg-error "assignment to 'int \\\*' from incompatible pointer type 'int \\\*\\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+ }
+ f4 (&p2); /* { dg-error "passing argument 1 of 'f4' from incompatible pointer type \\\[-Wincompatible-pointer-types\\\]" } */
+ }
+ if (flag)
+ return __builtin_abs; /* { dg-error "returning pointer to '__builtin_abs' of type 'int \\\(\\\*\\\)\\\(int\\\)' from a function with incompatible type 'int \\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+ else
+ return incompatible_pointer_types; /* { dg-error "returning 'int \\\* \\\(\\\*\\\)\\\(int\\\)' from a function with incompatible return type 'int \\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+}
+
+void
+return_mismatch_1 (void)
+{
+ return 0; /* { dg-error "'return' with a value, in function returning void \\\[-Wreturn-mismatch\\\]" } */
+}
+
+int
+return_mismatch_2 (void)
+{
+ return; /* { dg-error "return' with no value, in function returning non-void \\\[-Wreturn-mismatch\\\]" } */
+}
diff --git a/gcc/testsuite/gcc.dg/permerror-fpermissive-nowarning.c b/gcc/testsuite/gcc.dg/permerror-fpermissive-nowarning.c
new file mode 100644
index 0000000..d07c863
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/permerror-fpermissive-nowarning.c
@@ -0,0 +1,6 @@
+/* { dg-options "-fpermissive -Wno-implicit-function-declaration -Wno-implicit-int -Wno-int-conversion -Wno-incompatible-pointer-types -Wno-return-mismatch -Wno-declaration-missing-parameter-type" } */
+
+/* This test checks that permerrors can be disabled using -Wno-* options even
+ if -fpermissive is used. */
+
+#include "permerror-default.c"
diff --git a/gcc/testsuite/gcc.dg/permerror-fpermissive.c b/gcc/testsuite/gcc.dg/permerror-fpermissive.c
new file mode 100644
index 0000000..fd3020d
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/permerror-fpermissive.c
@@ -0,0 +1,85 @@
+/* { dg-options "-fpermissive" } */
+
+/* Overview test for C permerrors. This test should be kept in sync with the
+ other permerror-* tests. If new permerrors are added, test cases should be
+ added to this and the other files. */
+
+void
+implicit_function_declaration (void)
+{
+ f1 (); /* { dg-warning "'f1' \\\[-Wimplicit-function-declaration\\\]" } */
+}
+
+extern implicit_int_1; /* { dg-warning "'implicit_int_1' \\\[-Wimplicit-int\\\]" } */
+typedef implicit_int_2; /* { dg-warning "'implicit_int_2' \\\[-Wimplicit-int\\\]" } */
+extern implicit_int_3 (void); /* { dg-warning "'implicit_int_3' \\\[-Wimplicit-int\\]" } */
+implicit_int_4 (i) /* { dg-warning "return type defaults to 'int' \\\[-Wimplicit-int\\\]" } */
+/* { dg-warning "type of 'i' defaults to 'int' \\\[-Wimplicit-int\\\]" "" { target *-*-*} .-1 } */
+{
+ (const) 0; /* { dg-warning "type defaults to 'int' in type name \\\[-Wimplicit-int\\\]" } */
+}
+
+extern int missing_parameter_type (i); /* { dg-warning "parameter names \\\(without types\\\) in function declaration \\\[-Wdeclaration-missing-parameter-type\\\]" } */
+
+
+int *
+int_conversion_1 (int flag)
+{
+ void f2 (int *);
+ flag ? "1" : 1; /* { dg-warning "pointer/integer type mismatch in conditional expression \\\[-Wint-conversion\\\]" } */
+ flag ? 1 : "1"; /* { dg-warning "pointer/integer type mismatch in conditional expression \\\[-Wint-conversion\\\]" } */
+ f2 (flag); /* { dg-warning "passing argument 1 of 'f2' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+ {
+ int i1 = &flag; /* { dg-warning "initialization of 'int' from 'int \\\*' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+ i1 = &flag; /* { dg-warning "assignment to 'int' from 'int \\\*' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+ }
+ return flag; /* { dg-warning "returning 'int' from a function with return type 'int \\\*' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+}
+
+int
+int_conversion_2 (int flag)
+{
+ void f3 (int);
+ f3 (&flag); /* { dg-warning "passing argument 1 of 'f3' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+ {
+ int *i1 = flag; /* { dg-warning "initialization of 'int \\\*' from 'int' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+ i1 = flag; /* { dg-warning "assignment to 'int \\\*' from 'int' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+ }
+ return &flag; /* { dg-warning "returning 'int \\\*' from a function with return type 'int' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+}
+
+int *
+incompatible_pointer_types (int flag)
+{
+ void f4 (int *);
+ flag ? __builtin_abs : __builtin_labs; /* { dg-warning "pointer type mismatch in conditional expression \\\[-Wincompatible-pointer-types\\\]" } */
+ {
+ int *p1 = __builtin_abs; /* { dg-warning "initialization of 'int \\\*' from pointer to '__builtin_abs' with incompatible type 'int \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ p1 = __builtin_abs; /* { dg-warning "assignment to 'int \\\*' from pointer to '__builtin_abs' with incompatible type 'int \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ }
+ {
+ int *p2 = incompatible_pointer_types; /* { dg-warning "initialization of 'int \\\*' from incompatible pointer type 'int \\\* \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ p2 = incompatible_pointer_types; /* { dg-warning "assignment to 'int \\\*' from incompatible pointer type 'int \\\* \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ {
+ int *p3 = &p2; /* { dg-warning "initialization of 'int \\\*' from incompatible pointer type 'int \\\*\\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+ p3 = &p2; /* { dg-warning "assignment to 'int \\\*' from incompatible pointer type 'int \\\*\\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+ }
+ f4 (&p2); /* { dg-warning "passing argument 1 of 'f4' from incompatible pointer type \\\[-Wincompatible-pointer-types\\\]" } */
+ }
+ if (flag)
+ return __builtin_abs; /* { dg-warning "returning pointer to '__builtin_abs' of type 'int \\\(\\\*\\\)\\\(int\\\)' from a function with incompatible type 'int \\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+ else
+ return incompatible_pointer_types; /* { dg-warning "returning 'int \\\* \\\(\\\*\\\)\\\(int\\\)' from a function with incompatible return type 'int \\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+}
+
+void
+return_mismatch_1 (void)
+{
+ return 0; /* { dg-warning "'return' with a value, in function returning void \\\[-Wreturn-mismatch\\\]" } */
+}
+
+int
+return_mismatch_2 (void)
+{
+ return; /* { dg-warning "return' with no value, in function returning non-void \\\[-Wreturn-mismatch\\\]" } */
+}
diff --git a/gcc/testsuite/gcc.dg/permerror-gnu89-nopermissive.c b/gcc/testsuite/gcc.dg/permerror-gnu89-nopermissive.c
new file mode 100644
index 0000000..aba4b24
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/permerror-gnu89-nopermissive.c
@@ -0,0 +1,85 @@
+/* { dg-options "-std=gnu89 -fno-permissive" } */
+
+/* Test for C89 dialect mode, with new permerrors enabled. In most
+ cases where the compiler warnings in C89 mode, it should issue a
+ permerror with -fno-permissive. */
+
+void
+implicit_function_declaration (void)
+{
+ f1 (); /* { dg-bogus "-Wimplicit-function-declaration" } */
+}
+
+extern implicit_int_1; /* { dg-bogus "-Wimplicit-int" } */
+typedef implicit_int_2; /* { dg-bogus "-Wimplicit-int" } */
+extern implicit_int_3 (void); /* { dg-bogus "-Wimplicit-int" } */
+implicit_int_4 (i) /* { dg-bogus "-Wimplicit-int" } */
+/* Directive here in the other files. */
+{
+ (const) 0; /* { dg-bogus "-Wimplicit-int" } */
+}
+
+extern int missing_parameter_type (i); /* { dg-error "parameter names \\\(without types\\\) in function declaration \\\[-Wdeclaration-missing-parameter-type\\\]" } */
+
+
+int *
+int_conversion_1 (int flag)
+{
+ void f2 (int *);
+ flag ? "1" : 1; /* { dg-error "pointer/integer type mismatch in conditional expression \\\[-Wint-conversion\\\]" } */
+ flag ? 1 : "1"; /* { dg-error "pointer/integer type mismatch in conditional expression \\\[-Wint-conversion\\\]" } */
+ f2 (flag); /* { dg-error "passing argument 1 of 'f2' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+ {
+ int i1 = &flag; /* { dg-error "initialization of 'int' from 'int \\\*' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+ i1 = &flag; /* { dg-error "assignment to 'int' from 'int \\\*' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+ }
+ return flag; /* { dg-error "returning 'int' from a function with return type 'int \\\*' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+}
+
+int
+int_conversion_2 (int flag)
+{
+ void f3 (int);
+ f3 (&flag); /* { dg-error "passing argument 1 of 'f3' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+ {
+ int *i1 = flag; /* { dg-error "initialization of 'int \\\*' from 'int' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+ i1 = flag; /* { dg-error "assignment to 'int \\\*' from 'int' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+ }
+ return &flag; /* { dg-error "returning 'int \\\*' from a function with return type 'int' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+}
+
+int *
+incompatible_pointer_types (int flag)
+{
+ void f4 (int *);
+ flag ? __builtin_abs : __builtin_labs; /* { dg-warning "pointer type mismatch in conditional expression \\\[-Wincompatible-pointer-types\\\]" } */
+ {
+ int *p1 = __builtin_abs; /* { dg-error "initialization of 'int \\\*' from pointer to '__builtin_abs' with incompatible type 'int \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ p1 = __builtin_abs; /* { dg-error "assignment to 'int \\\*' from pointer to '__builtin_abs' with incompatible type 'int \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ }
+ {
+ int *p2 = incompatible_pointer_types; /* { dg-error "initialization of 'int \\\*' from incompatible pointer type 'int \\\* \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ p2 = incompatible_pointer_types; /* { dg-error "assignment to 'int \\\*' from incompatible pointer type 'int \\\* \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ {
+ int *p3 = &p2; /* { dg-error "initialization of 'int \\\*' from incompatible pointer type 'int \\\*\\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+ p3 = &p2; /* { dg-error "assignment to 'int \\\*' from incompatible pointer type 'int \\\*\\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+ }
+ f4 (&p2); /* { dg-error "passing argument 1 of 'f4' from incompatible pointer type \\\[-Wincompatible-pointer-types\\\]" } */
+ }
+ if (flag)
+ return __builtin_abs; /* { dg-error "returning pointer to '__builtin_abs' of type 'int \\\(\\\*\\\)\\\(int\\\)' from a function with incompatible type 'int \\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+ else
+ return incompatible_pointer_types; /* { dg-error "returning 'int \\\* \\\(\\\*\\\)\\\(int\\\)' from a function with incompatible return type 'int \\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+}
+
+void
+return_mismatch_1 (void)
+{
+ return 0; /* { dg-error "'return' with a value, in function returning void \\\[-Wreturn-mismatch\\\]" } */
+}
+
+int
+return_mismatch_2 (void)
+{
+ return; /* { dg-bogus "-Wreturn-mismatch" } */
+}
diff --git a/gcc/testsuite/gcc.dg/permerror-gnu89-pedantic.c b/gcc/testsuite/gcc.dg/permerror-gnu89-pedantic.c
new file mode 100644
index 0000000..ef4dbfc
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/permerror-gnu89-pedantic.c
@@ -0,0 +1,85 @@
+/* { dg-options "-std=gnu89 -pedantic-errors" } */
+
+/* Overview test for C permerrors. This test should be kept in sync with the
+ other permerror-* tests. If new permerrors are added, test cases should be
+ added to this and the other files. */
+
+void
+implicit_function_declaration (void)
+{
+ f1 (); /* { dg-bogus "-Wimplicit-function-declaration" } */
+}
+
+extern implicit_int_1; /* { dg-bogus "-Wimplicit-int" } */
+typedef implicit_int_2; /* { dg-bogus "-Wimplicit-int" } */
+extern implicit_int_3 (void); /* { dg-bogus "-Wimplicit-int" } */
+implicit_int_4 (i) /* { dg-bogus "-Wimplicit-int" } */
+/* Directive here in the other files. */
+{
+ (const) 0; /* { dg-bogus "-Wimplicit-int" } */
+}
+
+extern int missing_parameter_type (i); /* { dg-error "parameter names \\\(without types\\\) in function declaration \\\[-Wdeclaration-missing-parameter-type\\\]" } */
+
+
+int *
+int_conversion_1 (int flag)
+{
+ void f2 (int *);
+ flag ? "1" : 1; /* { dg-error "pointer/integer type mismatch in conditional expression \\\[-Wint-conversion\\\]" } */
+ flag ? 1 : "1"; /* { dg-error "pointer/integer type mismatch in conditional expression \\\[-Wint-conversion\\\]" } */
+ f2 (flag); /* { dg-error "passing argument 1 of 'f2' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+ {
+ int i1 = &flag; /* { dg-error "initialization of 'int' from 'int \\\*' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+ i1 = &flag; /* { dg-error "assignment to 'int' from 'int \\\*' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+ }
+ return flag; /* { dg-error "returning 'int' from a function with return type 'int \\\*' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+}
+
+int
+int_conversion_2 (int flag)
+{
+ void f3 (int);
+ f3 (&flag); /* { dg-error "passing argument 1 of 'f3' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+ {
+ int *i1 = flag; /* { dg-error "initialization of 'int \\\*' from 'int' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+ i1 = flag; /* { dg-error "assignment to 'int \\\*' from 'int' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+ }
+ return &flag; /* { dg-error "returning 'int \\\*' from a function with return type 'int' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+}
+
+int *
+incompatible_pointer_types (int flag)
+{
+ void f4 (int *);
+ flag ? __builtin_abs : __builtin_labs; /* { dg-warning "pointer type mismatch in conditional expression \\\[-Wincompatible-pointer-types\\\]" } */
+ {
+ int *p1 = __builtin_abs; /* { dg-error "initialization of 'int \\\*' from pointer to '__builtin_abs' with incompatible type 'int \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ p1 = __builtin_abs; /* { dg-error "assignment to 'int \\\*' from pointer to '__builtin_abs' with incompatible type 'int \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ }
+ {
+ int *p2 = incompatible_pointer_types; /* { dg-error "initialization of 'int \\\*' from incompatible pointer type 'int \\\* \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ p2 = incompatible_pointer_types; /* { dg-error "assignment to 'int \\\*' from incompatible pointer type 'int \\\* \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ {
+ int *p3 = &p2; /* { dg-error "initialization of 'int \\\*' from incompatible pointer type 'int \\\*\\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+ p3 = &p2; /* { dg-error "assignment to 'int \\\*' from incompatible pointer type 'int \\\*\\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+ }
+ f4 (&p2); /* { dg-error "passing argument 1 of 'f4' from incompatible pointer type \\\[-Wincompatible-pointer-types\\\]" } */
+ }
+ if (flag)
+ return __builtin_abs; /* { dg-error "returning pointer to '__builtin_abs' of type 'int \\\(\\\*\\\)\\\(int\\\)' from a function with incompatible type 'int \\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+ else
+ return incompatible_pointer_types; /* { dg-error "returning 'int \\\* \\\(\\\*\\\)\\\(int\\\)' from a function with incompatible return type 'int \\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+}
+
+void
+return_mismatch_1 (void)
+{
+ return 0; /* { dg-error "'return' with a value, in function returning void \\\[-Wreturn-mismatch\\\]" } */
+}
+
+int
+return_mismatch_2 (void)
+{
+ return; /* { dg-bogus "-Wreturn-mismatch" } */
+}
diff --git a/gcc/testsuite/gcc.dg/permerror-gnu89.c b/gcc/testsuite/gcc.dg/permerror-gnu89.c
new file mode 100644
index 0000000..83792ec
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/permerror-gnu89.c
@@ -0,0 +1,85 @@
+/* { dg-options "-std=gnu89" } */
+
+/* Overview test for C permerrors. This test should be kept in sync with the
+ other permerror-* tests. If new permerrors are added, test cases should be
+ added to this and the other files. */
+
+void
+implicit_function_declaration (void)
+{
+ f1 (); /* { dg-bogus "-Wimplicit-function-declaration" } */
+}
+
+extern implicit_int_1; /* { dg-bogus "-Wimplicit-int" } */
+typedef implicit_int_2; /* { dg-bogus "-Wimplicit-int" } */
+extern implicit_int_3 (void); /* { dg-bogus "-Wimplicit-int" } */
+implicit_int_4 (i) /* { dg-bogus "-Wimplicit-int" } */
+/* Directive here in the other files. */
+{
+ (const) 0; /* { dg-bogus "-Wimplicit-int" } */
+}
+
+extern int missing_parameter_type (i); /* { dg-warning "parameter names \\\(without types\\\) in function declaration \\\[-Wdeclaration-missing-parameter-type\\\]" } */
+
+
+int *
+int_conversion_1 (int flag)
+{
+ void f2 (int *);
+ flag ? "1" : 1; /* { dg-warning "pointer/integer type mismatch in conditional expression \\\[-Wint-conversion\\\]" } */
+ flag ? 1 : "1"; /* { dg-warning "pointer/integer type mismatch in conditional expression \\\[-Wint-conversion\\\]" } */
+ f2 (flag); /* { dg-warning "passing argument 1 of 'f2' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+ {
+ int i1 = &flag; /* { dg-warning "initialization of 'int' from 'int \\\*' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+ i1 = &flag; /* { dg-warning "assignment to 'int' from 'int \\\*' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+ }
+ return flag; /* { dg-warning "returning 'int' from a function with return type 'int \\\*' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+}
+
+int
+int_conversion_2 (int flag)
+{
+ void f3 (int);
+ f3 (&flag); /* { dg-warning "passing argument 1 of 'f3' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+ {
+ int *i1 = flag; /* { dg-warning "initialization of 'int \\\*' from 'int' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+ i1 = flag; /* { dg-warning "assignment to 'int \\\*' from 'int' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+ }
+ return &flag; /* { dg-warning "returning 'int \\\*' from a function with return type 'int' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+}
+
+int *
+incompatible_pointer_types (int flag)
+{
+ void f4 (int *);
+ flag ? __builtin_abs : __builtin_labs; /* { dg-warning "pointer type mismatch in conditional expression \\\[-Wincompatible-pointer-types\\\]" } */
+ {
+ int *p1 = __builtin_abs; /* { dg-warning "initialization of 'int \\\*' from pointer to '__builtin_abs' with incompatible type 'int \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ p1 = __builtin_abs; /* { dg-warning "assignment to 'int \\\*' from pointer to '__builtin_abs' with incompatible type 'int \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ }
+ {
+ int *p2 = incompatible_pointer_types; /* { dg-warning "initialization of 'int \\\*' from incompatible pointer type 'int \\\* \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ p2 = incompatible_pointer_types; /* { dg-warning "assignment to 'int \\\*' from incompatible pointer type 'int \\\* \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ {
+ int *p3 = &p2; /* { dg-warning "initialization of 'int \\\*' from incompatible pointer type 'int \\\*\\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+ p3 = &p2; /* { dg-warning "assignment to 'int \\\*' from incompatible pointer type 'int \\\*\\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+ }
+ f4 (&p2); /* { dg-warning "passing argument 1 of 'f4' from incompatible pointer type \\\[-Wincompatible-pointer-types\\\]" } */
+ }
+ if (flag)
+ return __builtin_abs; /* { dg-warning "returning pointer to '__builtin_abs' of type 'int \\\(\\\*\\\)\\\(int\\\)' from a function with incompatible type 'int \\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+ else
+ return incompatible_pointer_types; /* { dg-warning "returning 'int \\\* \\\(\\\*\\\)\\\(int\\\)' from a function with incompatible return type 'int \\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+}
+
+void
+return_mismatch_1 (void)
+{
+ return 0; /* { dg-warning "'return' with a value, in function returning void \\\[-Wreturn-mismatch\\\]" } */
+}
+
+int
+return_mismatch_2 (void)
+{
+ return; /* { dg-bogus "-Wreturn-mismatch" } */
+}
diff --git a/gcc/testsuite/gcc.dg/permerror-noerror.c b/gcc/testsuite/gcc.dg/permerror-noerror.c
new file mode 100644
index 0000000..fc68dfa
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/permerror-noerror.c
@@ -0,0 +1,85 @@
+/* { dg-options "-Wno-error=implicit-function-declaration -Wno-error=implicit-int -Wno-error=int-conversion -Wno-error=incompatible-pointer-types -Wno-error=return-mismatch -Wno-error=declaration-missing-parameter-type" } */
+
+/* This test should emulate the effect of -fpermissive by adding all the
+ -Wno-error= options that are implied by -fpermissive. It needs to be
+ kept in sync with the other permerror-* tests. */
+
+void
+implicit_function_declaration (void)
+{
+ f1 (); /* { dg-warning "'f1' \\\[-Wimplicit-function-declaration\\\]" } */
+}
+
+extern implicit_int_1; /* { dg-warning "'implicit_int_1' \\\[-Wimplicit-int\\\]" } */
+typedef implicit_int_2; /* { dg-warning "'implicit_int_2' \\\[-Wimplicit-int\\\]" } */
+extern implicit_int_3 (void); /* { dg-warning "'implicit_int_3' \\\[-Wimplicit-int\\]" } */
+implicit_int_4 (i) /* { dg-warning "return type defaults to 'int' \\\[-Wimplicit-int\\\]" } */
+/* { dg-warning "type of 'i' defaults to 'int' \\\[-Wimplicit-int\\\]" "" { target *-*-*} .-1 } */
+{
+ (const) 0; /* { dg-warning "type defaults to 'int' in type name \\\[-Wimplicit-int\\\]" } */
+}
+
+extern int missing_parameter_type (i); /* { dg-warning "parameter names \\\(without types\\\) in function declaration \\\[-Wdeclaration-missing-parameter-type\\\]" } */
+
+
+int *
+int_conversion_1 (int flag)
+{
+ void f2 (int *);
+ flag ? "1" : 1; /* { dg-warning "pointer/integer type mismatch in conditional expression \\\[-Wint-conversion\\\]" } */
+ flag ? 1 : "1"; /* { dg-warning "pointer/integer type mismatch in conditional expression \\\[-Wint-conversion\\\]" } */
+ f2 (flag); /* { dg-warning "passing argument 1 of 'f2' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+ {
+ int i1 = &flag; /* { dg-warning "initialization of 'int' from 'int \\\*' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+ i1 = &flag; /* { dg-warning "assignment to 'int' from 'int \\\*' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+ }
+ return flag; /* { dg-warning "returning 'int' from a function with return type 'int \\\*' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+}
+
+int
+int_conversion_2 (int flag)
+{
+ void f3 (int);
+ f3 (&flag); /* { dg-warning "passing argument 1 of 'f3' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+ {
+ int *i1 = flag; /* { dg-warning "initialization of 'int \\\*' from 'int' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+ i1 = flag; /* { dg-warning "assignment to 'int \\\*' from 'int' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+ }
+ return &flag; /* { dg-warning "returning 'int \\\*' from a function with return type 'int' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+}
+
+int *
+incompatible_pointer_types (int flag)
+{
+ void f4 (int *);
+ flag ? __builtin_abs : __builtin_labs; /* { dg-warning "pointer type mismatch in conditional expression \\\[-Wincompatible-pointer-types\\\]" } */
+ {
+ int *p1 = __builtin_abs; /* { dg-warning "initialization of 'int \\\*' from pointer to '__builtin_abs' with incompatible type 'int \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ p1 = __builtin_abs; /* { dg-warning "assignment to 'int \\\*' from pointer to '__builtin_abs' with incompatible type 'int \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ }
+ {
+ int *p2 = incompatible_pointer_types; /* { dg-warning "initialization of 'int \\\*' from incompatible pointer type 'int \\\* \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ p2 = incompatible_pointer_types; /* { dg-warning "assignment to 'int \\\*' from incompatible pointer type 'int \\\* \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ {
+ int *p3 = &p2; /* { dg-warning "initialization of 'int \\\*' from incompatible pointer type 'int \\\*\\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+ p3 = &p2; /* { dg-warning "assignment to 'int \\\*' from incompatible pointer type 'int \\\*\\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+ }
+ f4 (&p2); /* { dg-warning "passing argument 1 of 'f4' from incompatible pointer type \\\[-Wincompatible-pointer-types\\\]" } */
+ }
+ if (flag)
+ return __builtin_abs; /* { dg-warning "returning pointer to '__builtin_abs' of type 'int \\\(\\\*\\\)\\\(int\\\)' from a function with incompatible type 'int \\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+ else
+ return incompatible_pointer_types; /* { dg-warning "returning 'int \\\* \\\(\\\*\\\)\\\(int\\\)' from a function with incompatible return type 'int \\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+}
+
+void
+return_mismatch_1 (void)
+{
+ return 0; /* { dg-warning "'return' with a value, in function returning void \\\[-Wreturn-mismatch\\\]" } */
+}
+
+int
+return_mismatch_2 (void)
+{
+ return; /* { dg-warning "return' with no value, in function returning non-void \\\[-Wreturn-mismatch\\\]" } */
+}
diff --git a/gcc/testsuite/gcc.dg/permerror-nowarning.c b/gcc/testsuite/gcc.dg/permerror-nowarning.c
new file mode 100644
index 0000000..b1cf799
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/permerror-nowarning.c
@@ -0,0 +1,5 @@
+/* { dg-options "-Wno-implicit-function-declaration -Wno-implicit-int -Wno-int-conversion -Wno-incompatible-pointer-types -Wno-return-mismatch -Wno-declaration-missing-parameter-type" } */
+
+/* This test checks that permerrors can be disabled using -Wno-* options. */
+
+#include "permerror-default.c"
diff --git a/gcc/testsuite/gcc.dg/permerror-pedantic.c b/gcc/testsuite/gcc.dg/permerror-pedantic.c
new file mode 100644
index 0000000..2380bb2
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/permerror-pedantic.c
@@ -0,0 +1,85 @@
+/* { dg-options "-pedantic-errors" } */
+
+/* Overview test for C permerrors. This test should be kept in sync with the
+ other permerror-* tests. If new permerrors are added, test cases should be
+ added to this and the other files. */
+
+void
+implicit_function_declaration (void)
+{
+ f1 (); /* { dg-error "'f1' \\\[-Wimplicit-function-declaration\\\]" } */
+}
+
+extern implicit_int_1; /* { dg-error "'implicit_int_1' \\\[-Wimplicit-int\\\]" } */
+typedef implicit_int_2; /* { dg-error "'implicit_int_2' \\\[-Wimplicit-int\\\]" } */
+extern implicit_int_3 (void); /* { dg-error "'implicit_int_3' \\\[-Wimplicit-int\\]" } */
+implicit_int_4 (i) /* { dg-error "return type defaults to 'int' \\\[-Wimplicit-int\\\]" } */
+/* { dg-error "type of 'i' defaults to 'int' \\\[-Wimplicit-int\\\]" "" { target *-*-*} .-1 } */
+{
+ (const) 0; /* { dg-error "type defaults to 'int' in type name \\\[-Wimplicit-int\\\]" } */
+}
+
+extern int missing_parameter_type (i); /* { dg-error "parameter names \\\(without types\\\) in function declaration \\\[-Wdeclaration-missing-parameter-type\\\]" } */
+
+
+int *
+int_conversion_1 (int flag)
+{
+ void f2 (int *);
+ flag ? "1" : 1; /* { dg-error "pointer/integer type mismatch in conditional expression \\\[-Wint-conversion\\\]" } */
+ flag ? 1 : "1"; /* { dg-error "pointer/integer type mismatch in conditional expression \\\[-Wint-conversion\\\]" } */
+ f2 (flag); /* { dg-error "passing argument 1 of 'f2' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+ {
+ int i1 = &flag; /* { dg-error "initialization of 'int' from 'int \\\*' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+ i1 = &flag; /* { dg-error "assignment to 'int' from 'int \\\*' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+ }
+ return flag; /* { dg-error "returning 'int' from a function with return type 'int \\\*' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+}
+
+int
+int_conversion_2 (int flag)
+{
+ void f3 (int);
+ f3 (&flag); /* { dg-error "passing argument 1 of 'f3' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+ {
+ int *i1 = flag; /* { dg-error "initialization of 'int \\\*' from 'int' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+ i1 = flag; /* { dg-error "assignment to 'int \\\*' from 'int' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" } */
+ }
+ return &flag; /* { dg-error "returning 'int \\\*' from a function with return type 'int' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" } */
+}
+
+int *
+incompatible_pointer_types (int flag)
+{
+ void f4 (int *);
+ flag ? __builtin_abs : __builtin_labs; /* { dg-error "pointer type mismatch in conditional expression \\\[-Wincompatible-pointer-types\\\]" } */
+ {
+ int *p1 = __builtin_abs; /* { dg-error "initialization of 'int \\\*' from pointer to '__builtin_abs' with incompatible type 'int \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ p1 = __builtin_abs; /* { dg-error "assignment to 'int \\\*' from pointer to '__builtin_abs' with incompatible type 'int \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ }
+ {
+ int *p2 = incompatible_pointer_types; /* { dg-error "initialization of 'int \\\*' from incompatible pointer type 'int \\\* \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ p2 = incompatible_pointer_types; /* { dg-error "assignment to 'int \\\*' from incompatible pointer type 'int \\\* \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" } */
+ {
+ int *p3 = &p2; /* { dg-error "initialization of 'int \\\*' from incompatible pointer type 'int \\\*\\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+ p3 = &p2; /* { dg-error "assignment to 'int \\\*' from incompatible pointer type 'int \\\*\\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+ }
+ f4 (&p2); /* { dg-error "passing argument 1 of 'f4' from incompatible pointer type \\\[-Wincompatible-pointer-types\\\]" } */
+ }
+ if (flag)
+ return __builtin_abs; /* { dg-error "returning pointer to '__builtin_abs' of type 'int \\\(\\\*\\\)\\\(int\\\)' from a function with incompatible type 'int \\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+ else
+ return incompatible_pointer_types; /* { dg-error "returning 'int \\\* \\\(\\\*\\\)\\\(int\\\)' from a function with incompatible return type 'int \\\*' \\\[-Wincompatible-pointer-types\\\]" } */
+}
+
+void
+return_mismatch_1 (void)
+{
+ return 0; /* { dg-error "'return' with a value, in function returning void \\\[-Wreturn-mismatch\\\]" } */
+}
+
+int
+return_mismatch_2 (void)
+{
+ return; /* { dg-error "return' with no value, in function returning non-void \\\[-Wreturn-mismatch\\\]" } */
+}
diff --git a/gcc/testsuite/gcc.dg/permerror-system.c b/gcc/testsuite/gcc.dg/permerror-system.c
new file mode 100644
index 0000000..790e4f0
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/permerror-system.c
@@ -0,0 +1,45 @@
+/* { dg-options "-isystem ${srcdir}" } */
+
+/* Test that permerrors appear in system headers. */
+
+/* The dg-* directives in the header file are ignored. No warnings are
+ expected. */
+#include <gcc.dg/permerror-default.c>
+
+/* These errors come from permerror-default.c. */
+
+/* { dg-error "'f1' \\\[-Wimplicit-function-declaration\\\]" "" { target *-*-* } 10 } */
+
+/* { dg-error "'implicit_int_1' \\\[-Wimplicit-int\\\]" "" { target *-*-* } 13 } */
+/* { dg-error "'implicit_int_2' \\\[-Wimplicit-int\\\]" "" { target *-*-* } 14 } */
+/* { dg-error "'implicit_int_3' \\\[-Wimplicit-int\\]" "" { target *-*-* } 15 } */
+/* { dg-error "return type defaults to 'int' \\\[-Wimplicit-int\\\]" "" { target *-*-* } 16 } */
+/* { dg-error "type of 'i' defaults to 'int' \\\[-Wimplicit-int\\\]" "" { target *-*-*} 16 } */
+/* { dg-error "type defaults to 'int' in type name \\\[-Wimplicit-int\\\]" "" { target *-*-* } 19 } */
+
+/* { dg-error "parameter names \\\(without types\\\) in function declaration \\\[-Wdeclaration-missing-parameter-type\\\]" "" { target *-*-* } 22 } */
+
+/* { dg-error "pointer/integer type mismatch in conditional expression \\\[-Wint-conversion\\\]" "" { target *-*-* } 29 } */
+/* { dg-error "pointer/integer type mismatch in conditional expression \\\[-Wint-conversion\\\]" "" { target *-*-* } 30 } */
+/* { dg-error "passing argument 1 of 'f2' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" "" { target *-*-* } 31 } */
+/* { dg-error "initialization of 'int' from 'int \\\*' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" "" { target *-*-* } 33 } */
+/* { dg-error "assignment to 'int' from 'int \\\*' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" "" { target *-*-* } 34 } */
+/* { dg-error "returning 'int' from a function with return type 'int \\\*' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" "" { target *-*-* } 36 } */
+/* { dg-error "passing argument 1 of 'f3' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" "" { target *-*-* } 43 } */
+/* { dg-error "initialization of 'int \\\*' from 'int' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" "" { target *-*-* } 45 } */
+/* { dg-error "assignment to 'int \\\*' from 'int' makes pointer from integer without a cast \\\[-Wint-conversion\\\]" "" { target *-*-* } 46 } */
+/* { dg-error "returning 'int \\\*' from a function with return type 'int' makes integer from pointer without a cast \\\[-Wint-conversion\\\]" "" { target *-*-* } 48 } */
+
+/* { dg-error "pointer type mismatch in conditional expression \\\[-Wincompatible-pointer-types\\\]" "" { target *-*-* } 55 } */
+/* { dg-error "initialization of 'int \\\*' from pointer to '__builtin_abs' with incompatible type 'int \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" "" { target *-*-* } 57 } */
+/* { dg-error "assignment to 'int \\\*' from pointer to '__builtin_abs' with incompatible type 'int \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" "" { target *-*-* } 58 } */
+/* { dg-error "initialization of 'int \\\*' from incompatible pointer type 'int \\\* \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" "" { target *-*-* } 61 } */
+/* { dg-error "assignment to 'int \\\*' from incompatible pointer type 'int \\\* \\\(\\\*\\\)\\\(int\\\)' \\\[-Wincompatible-pointer-types\\\]" "" { target *-*-* } 62 } */
+/* { dg-error "initialization of 'int \\\*' from incompatible pointer type 'int \\\*\\\*' \\\[-Wincompatible-pointer-types\\\]" "" { target *-*-* } 64 } */
+/* { dg-error "assignment to 'int \\\*' from incompatible pointer type 'int \\\*\\\*' \\\[-Wincompatible-pointer-types\\\]" "" { target *-*-* } 65 } */
+/* { dg-error "passing argument 1 of 'f4' from incompatible pointer type \\\[-Wincompatible-pointer-types\\\]" "" { target *-*-* } 67 } */
+/* { dg-error "returning pointer to '__builtin_abs' of type 'int \\\(\\\*\\\)\\\(int\\\)' from a function with incompatible type 'int \\\*' \\\[-Wincompatible-pointer-types\\\]" "" { target *-*-* } 70 } */
+/* { dg-error "returning 'int \\\* \\\(\\\*\\\)\\\(int\\\)' from a function with incompatible return type 'int \\\*' \\\[-Wincompatible-pointer-types\\\]" "" { target *-*-* } 72 } */
+
+/* { dg-error "'return' with a value, in function returning void \\\[-Wreturn-mismatch\\\]" "" { target *-*-* } 78 } */
+/* { dg-error "return' with no value, in function returning non-void \\\[-Wreturn-mismatch\\\]" "" { target *-*-* } 84 } */
diff --git a/gcc/testsuite/gcc.dg/plugin/analyzer_cpython_plugin.c b/gcc/testsuite/gcc.dg/plugin/analyzer_cpython_plugin.c
index a364c8a..b5814dd 100644
--- a/gcc/testsuite/gcc.dg/plugin/analyzer_cpython_plugin.c
+++ b/gcc/testsuite/gcc.dg/plugin/analyzer_cpython_plugin.c
@@ -310,18 +310,16 @@ public:
}
bool
- emit (rich_location *rich_loc, logger *) final override
+ emit (diagnostic_emission_context &ctxt) final override
{
- diagnostic_metadata m;
bool warned;
// just assuming constants for now
auto actual_refcnt
= m_actual_refcnt->dyn_cast_constant_svalue ()->get_constant ();
auto ob_refcnt = m_ob_refcnt->dyn_cast_constant_svalue ()->get_constant ();
- warned = warning_meta (rich_loc, m, get_controlling_option (),
- "expected %qE to have "
- "reference count: %qE but ob_refcnt field is: %qE",
- m_reg_tree, actual_refcnt, ob_refcnt);
+ warned = ctxt.warn ("expected %qE to have "
+ "reference count: %qE but ob_refcnt field is: %qE",
+ m_reg_tree, actual_refcnt, ob_refcnt);
// location_t loc = rich_loc->get_loc ();
// foo (loc);
diff --git a/gcc/testsuite/gcc.dg/plugin/analyzer_gil_plugin.c b/gcc/testsuite/gcc.dg/plugin/analyzer_gil_plugin.c
index e0fc9cd..6ea6c03 100644
--- a/gcc/testsuite/gcc.dg/plugin/analyzer_gil_plugin.c
+++ b/gcc/testsuite/gcc.dg/plugin/analyzer_gil_plugin.c
@@ -155,10 +155,9 @@ class double_save_thread : public gil_diagnostic
return m_call == sub_other.m_call;
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- return warning_at (rich_loc, get_controlling_option (),
- "nested usage of %qs", "Py_BEGIN_ALLOW_THREADS");
+ return ctxt.warn ("nested usage of %qs", "Py_BEGIN_ALLOW_THREADS");
}
label_text describe_final_event (const evdesc::final_event &ev) final override
@@ -194,19 +193,16 @@ class fncall_without_gil : public gil_diagnostic
&& m_arg_idx == sub_other.m_arg_idx);
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- auto_diagnostic_group d;
if (m_callee_fndecl)
- return warning_at (rich_loc, get_controlling_option (),
- "use of PyObject as argument %i of %qE"
- " without the GIL",
- m_arg_idx + 1, m_callee_fndecl);
+ return ctxt.warn ("use of PyObject as argument %i of %qE"
+ " without the GIL",
+ m_arg_idx + 1, m_callee_fndecl);
else
- return warning_at (rich_loc, get_controlling_option (),
- "use of PyObject as argument %i of call"
- " without the GIL",
- m_arg_idx + 1, m_callee_fndecl);
+ return ctxt.warn ("use of PyObject as argument %i of call"
+ " without the GIL",
+ m_arg_idx + 1, m_callee_fndecl);
}
label_text describe_final_event (const evdesc::final_event &ev) final override
@@ -245,11 +241,9 @@ class pyobject_usage_without_gil : public gil_diagnostic
((const pyobject_usage_without_gil&)base_other).m_expr);
}
- bool emit (rich_location *rich_loc, logger *) final override
+ bool emit (diagnostic_emission_context &ctxt) final override
{
- auto_diagnostic_group d;
- return warning_at (rich_loc, get_controlling_option (),
- "use of PyObject %qE without the GIL", m_expr);
+ return ctxt.warn ("use of PyObject %qE without the GIL", m_expr);
}
label_text describe_final_event (const evdesc::final_event &ev) final override
diff --git a/gcc/testsuite/gcc.dg/pointer-array-atomic-2.c b/gcc/testsuite/gcc.dg/pointer-array-atomic-2.c
new file mode 100644
index 0000000..de63ff1
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pointer-array-atomic-2.c
@@ -0,0 +1,60 @@
+/* { dg-do compile } */
+/* { dg-options "-std=c11" } */
+/* Origin: Martin Uecker <uecker@eecs.berkeley.edu> */
+void tvoid(void* x);
+void transpose0(double* out, _Atomic double* in) { }
+void transpose1(double out[2][2], _Atomic double in[2][2]) { }
+void transpose2(double out[2][2][2], _Atomic double in[2][2][2]) { }
+// return
+int (*x2(_Atomic int x[3][3]))[3] { return x; } /* { dg-error "returning '_Atomic int \\(\\*\\)\\\[3\\\]' from a function with incompatible return type" } */
+_Atomic int (*x3(int x[3][3]))[3] { return x; } /* { dg-error "returning 'int \\(\\*\\)\\\[3\\\]' from a function with incompatible return type" } */
+void test(void)
+{
+ double x0[2];
+ double y0[2];
+ _Atomic double z0[4];
+ double x1[2][2];
+ double y1[2][2];
+ double o1[2][3];
+ _Atomic double z1[2][2];
+ double x2[2][2][2];
+ double y2[2][2][2];
+ double o2[2][2][3];
+ _Atomic double z2[2][2][2];
+ tvoid(z0);
+ tvoid(z1);
+ tvoid(z2);
+ // passing as arguments
+ transpose0(y0, x0); /* { dg-error "passing argument 2 of 'transpose0' from incompatible pointer type" } */
+ transpose1(y1, o1); /* { dg-error "passing argument 2 of 'transpose1' from incompatible pointer type" } */
+ transpose1(y1, x1); /* { dg-error "passing argument 2 of 'transpose1' from incompatible pointer type" } */
+ transpose2(y2, o2); /* { dg-error "passing argument 2 of 'transpose2' from incompatible pointer type" } */
+ transpose2(y2, x2); /* { dg-error "passing argument 2 of 'transpose2' from incompatible pointer type" } */
+ // initialization
+ _Atomic double (*x0p) = x0; /* { dg-error "initialization of '_Atomic double \\*' from incompatible pointer type" } */
+ _Atomic double (*x1p)[2] = x1; /* { dg-error "initialization of '_Atomic double \\(\\*\\)\\\[2\\\]' from incompatible pointer type" } */
+ _Atomic double (*x2p)[2][2] = x2; /* { dg-error "initialization of '_Atomic double \\(\\*\\)\\\[2\\\]\\\[2\\\]' from incompatible pointer type" } */
+ // assignment
+ x0p = x0; /* { dg-error "assignment to '_Atomic double \\*' from incompatible pointer type" } */
+ x1p = x1; /* { dg-error "assignment to '_Atomic double \\(\\*\\)\\\[2\\\]' from incompatible pointer type" } */
+ x2p = x2; /* { dg-error "assignment to '_Atomic double \\(\\*\\)\\\[2\\\]\\\[2\\\]' from incompatible pointer type" } */
+ // subtraction
+ &(x0[1]) - &(z0[0]); /* { dg-error "invalid operands to binary" } */
+ &(x1[1]) - &(z1[0]); /* { dg-error "invalid operands to binary" } */
+ &(x2[1]) - &(z2[0]); /* { dg-error "invalid operands to binary" } */
+ // comparison
+ x0 == z0; /* { dg-warning "comparison of distinct pointer types lacks a cast" } */
+ x1 == z1; /* { dg-warning "comparison of distinct pointer types lacks a cast" } */
+ x2 == z2; /* { dg-warning "comparison of distinct pointer types lacks a cast" } */
+ x0 > z0; /* { dg-warning "comparison of distinct pointer types lacks a cast" } */
+ x1 > z1; /* { dg-warning "comparison of distinct pointer types lacks a cast" } */
+ x2 > z2; /* { dg-warning "comparison of distinct pointer types lacks a cast" } */
+ x0 < z0; /* { dg-warning "comparison of distinct pointer types lacks a cast" } */
+ x1 < z1; /* { dg-warning "comparison of distinct pointer types lacks a cast" } */
+ x2 < z2; /* { dg-warning "comparison of distinct pointer types lacks a cast" } */
+ // conditional expressions
+ (void)(1 ? x0 : z0); /* { dg-error "pointer type mismatch in conditional expression" } */
+ (void)(1 ? x1 : z1); /* { dg-error "pointer type mismatch in conditional expression" } */
+ (void)(1 ? x2 : z2); /* { dg-error "pointer type mismatch in conditional expression" } */
+}
+
diff --git a/gcc/testsuite/gcc.dg/pointer-array-atomic.c b/gcc/testsuite/gcc.dg/pointer-array-atomic.c
index bb63797..87d177a 100644
--- a/gcc/testsuite/gcc.dg/pointer-array-atomic.c
+++ b/gcc/testsuite/gcc.dg/pointer-array-atomic.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-std=c11" } */
+/* { dg-options "-fpermissive -std=c11" } */
/* Origin: Martin Uecker <uecker@eecs.berkeley.edu> */
void tvoid(void* x);
void transpose0(double* out, _Atomic double* in) { }
diff --git a/gcc/testsuite/gcc.dg/pointer-array-quals-1.c b/gcc/testsuite/gcc.dg/pointer-array-quals-1.c
index 498ab22..be80912 100644
--- a/gcc/testsuite/gcc.dg/pointer-array-quals-1.c
+++ b/gcc/testsuite/gcc.dg/pointer-array-quals-1.c
@@ -84,9 +84,9 @@ void test(void)
(void)(1 ? x0 : z0);
(void)(1 ? x1 : z1);
(void)(1 ? x2 : z2);
- (void)(1 ? x0 : x1); /* { dg-warning "pointer type mismatch in conditional expression" } */
- (void)(1 ? x1 : x2); /* { dg-warning "pointer type mismatch in conditional expression" } */
- (void)(1 ? x2 : x0); /* { dg-warning "pointer type mismatch in conditional expression" } */
+ (void)(1 ? x0 : x1); /* { dg-error "pointer type mismatch in conditional expression" } */
+ (void)(1 ? x1 : x2); /* { dg-error "pointer type mismatch in conditional expression" } */
+ (void)(1 ? x2 : x0); /* { dg-error "pointer type mismatch in conditional expression" } */
v0p = (1 ? z0 : v0p); /* { dg-warning "assignment discards 'const' qualifier from pointer target type" } */
v1p = (1 ? z1 : v1p); /* { dg-warning "assignment discards 'const' qualifier from pointer target type" } */
v2p = (1 ? z2 : v2p); /* { dg-warning "assignment discards 'const' qualifier from pointer target type" } */
diff --git a/gcc/testsuite/gcc.dg/pr105635-2.c b/gcc/testsuite/gcc.dg/pr105635-2.c
new file mode 100644
index 0000000..019dbc7
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr105635-2.c
@@ -0,0 +1,11 @@
+/* PR c/105635 */
+/* { dg-do compile } */
+/* { dg-options "-Wall" } */
+
+void foo (int, int[*]); /* { dg-message "previous declaration of 'foo' with type" } */
+
+foo (int x, int y) /* { dg-error "return type defaults to 'int'" } */
+{ /* { dg-warning "conflicting types for 'foo'" "" { target *-*-* } .-1 } */
+ /* { dg-message "declared here" "" { target *-*-* } .-2 } */
+ return (x >= 0) != (y < 0); /* { dg-error "'return' with a value, in function returning void" } */
+}
diff --git a/gcc/testsuite/gcc.dg/pr105635.c b/gcc/testsuite/gcc.dg/pr105635.c
index aa02f59..b98ea1b 100644
--- a/gcc/testsuite/gcc.dg/pr105635.c
+++ b/gcc/testsuite/gcc.dg/pr105635.c
@@ -1,6 +1,6 @@
/* PR c/105635 */
/* { dg-do compile } */
-/* { dg-options "-Wall" } */
+/* { dg-options "-fpermissive -Wall" } */
void foo (int, int[*]); /* { dg-message "previous declaration of 'foo' with type" } */
diff --git a/gcc/testsuite/gcc.dg/pr111409.c b/gcc/testsuite/gcc.dg/pr111409.c
index 1a79d81..82912c4 100644
--- a/gcc/testsuite/gcc.dg/pr111409.c
+++ b/gcc/testsuite/gcc.dg/pr111409.c
@@ -1,7 +1,7 @@
/* { dg-do compile } */
/* { dg-skip-if "split DWARF unsupported" { *-*-darwin* } } */
/* { dg-options "-gsplit-dwarf -g3 -dA" } */
-/* { dg-final { scan-assembler-times {.section\s+.debug_macro} 1 } } */
-/* { dg-final { scan-assembler-not {.byte\s+0x7\s*#\s*Import} } } */
+/* { dg-final { scan-assembler-times {\.section\t"?\.debug_macro} 1 } } */
+/* { dg-final { scan-assembler-not {\.byte\s+0x7\s*#\s*Import} } } */
#define foo 1
diff --git a/gcc/testsuite/gcc.dg/pr111922.c b/gcc/testsuite/gcc.dg/pr111922.c
new file mode 100644
index 0000000..4f429d7
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr111922.c
@@ -0,0 +1,29 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fno-tree-fre" } */
+
+void f2 (void);
+void f4 (int, int, int);
+struct A { int a; };
+struct B { struct A *b; int c; } v;
+
+static int
+f1 (x, y)
+ struct C *x;
+ struct A *y;
+{
+ (v.c = v.b->a) || (v.c = v.b->a);
+ f2 ();
+}
+
+static void
+f3 (int x, int y)
+{
+ int b = f1 (0, ~x);
+ f4 (0, 0, v.c);
+}
+
+void
+f5 (void)
+{
+ f3 (0, 0);
+}
diff --git a/gcc/testsuite/gcc.dg/pr112719.c b/gcc/testsuite/gcc.dg/pr112719.c
new file mode 100644
index 0000000..c69a7c1
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr112719.c
@@ -0,0 +1,18 @@
+/* PR tree-optimization/112719 */
+/* { dg-do compile } */
+/* { dg-options "-O" } */
+/* { dg-additional-options "-msse4" { target i?86-*-* x86_64-*-* } } */
+
+int
+foo (unsigned int a, unsigned short b)
+{
+ return __builtin_popcountl (a) + __builtin_popcountl (b);
+}
+
+int
+bar (unsigned int a, unsigned short b)
+{
+ a &= 0xaaaaaaaaUL;
+ b &= 0x5555;
+ return __builtin_popcountll (a) + __builtin_popcountll (b);
+}
diff --git a/gcc/testsuite/gcc.dg/pr112733.c b/gcc/testsuite/gcc.dg/pr112733.c
new file mode 100644
index 0000000..d6f99f7
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr112733.c
@@ -0,0 +1,16 @@
+/* PR middle-end/112733 */
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+signed char a, c;
+short b;
+
+void
+foo (void)
+{
+ signed char *e = &a;
+ c = foo != 0;
+ *e &= c;
+ for (; b; --b)
+ *e &= -128;
+}
diff --git a/gcc/testsuite/gcc.dg/pr112760.c b/gcc/testsuite/gcc.dg/pr112760.c
new file mode 100644
index 0000000..b4ec70e
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr112760.c
@@ -0,0 +1,22 @@
+/* PR rtl-optimization/112760 */
+/* { dg-do run } */
+/* { dg-options "-O2 -fno-dce -fno-guess-branch-probability --param=max-cse-insns=0" } */
+/* { dg-additional-options "-m8bit-idiv -mavx" { target i?86-*-* x86_64-*-* } } */
+
+unsigned g;
+
+__attribute__((__noipa__)) unsigned short
+foo (unsigned short a, unsigned short b)
+{
+ unsigned short x = __builtin_add_overflow_p (a, g, (unsigned short) 0);
+ g -= g / b;
+ return x;
+}
+
+int
+main ()
+{
+ unsigned short x = foo (40, 6);
+ if (x != 0)
+ __builtin_abort ();
+}
diff --git a/gcc/testsuite/gcc.dg/pr112837.c b/gcc/testsuite/gcc.dg/pr112837.c
new file mode 100644
index 0000000..2de43f5
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr112837.c
@@ -0,0 +1,11 @@
+/* PR target/112837 */
+/* { dg-do compile } */
+/* { dg-options "-fcompare-elim -fprofile" } */
+/* { dg-additional-options "-fpie" { target pie } } */
+/* { dg-require-profiling "-fprofile" } */
+
+void
+foo (int i)
+{
+ foo (i);
+}
diff --git a/gcc/testsuite/gcc.dg/pr112845.c b/gcc/testsuite/gcc.dg/pr112845.c
new file mode 100644
index 0000000..ece6f45
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr112845.c
@@ -0,0 +1,9 @@
+/* PR target/112845 */
+/* { dg-do compile { target cet } } */
+/* { dg-options "-Os -fcf-protection" } */
+
+unsigned long long
+foo (void)
+{
+ return 0xfa1e0ff3ULL << 3;
+}
diff --git a/gcc/testsuite/gcc.dg/pr23075-2.c b/gcc/testsuite/gcc.dg/pr23075-2.c
new file mode 100644
index 0000000..0702ddf
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr23075-2.c
@@ -0,0 +1,14 @@
+/* PR c/23075 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -Wreturn-type" } */
+
+int
+foo (void)
+{
+ return; /* { dg-error "with no value" } */
+} /* { dg-bogus "control reaches end" } */
+
+int
+bar (void)
+{
+} /* { dg-warning "control reaches end" } */
diff --git a/gcc/testsuite/gcc.dg/pr23075.c b/gcc/testsuite/gcc.dg/pr23075.c
index 2d85fb0..28baf41 100644
--- a/gcc/testsuite/gcc.dg/pr23075.c
+++ b/gcc/testsuite/gcc.dg/pr23075.c
@@ -1,6 +1,6 @@
/* PR c/23075 */
/* { dg-do compile } */
-/* { dg-options "-O2 -Wreturn-type" } */
+/* { dg-options "-O2 -fpermissive -Wreturn-type" } */
int
foo (void)
diff --git a/gcc/testsuite/gcc.dg/pr29521-a.c b/gcc/testsuite/gcc.dg/pr29521-a.c
new file mode 100644
index 0000000..2c6a48b
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr29521-a.c
@@ -0,0 +1,15 @@
+/* PR 29521 : warning for return with expression in function returning void */
+/* { dg-do compile } */
+/* { dg-options "" } */
+
+void func (void) { }
+
+void func2 (void)
+{
+ return func ();
+}
+
+void func3 (void)
+{
+ return 1; /* { dg-error "'return' with a value" } */
+}
diff --git a/gcc/testsuite/gcc.dg/pr29521.c b/gcc/testsuite/gcc.dg/pr29521.c
index b6fb535..cd43151 100644
--- a/gcc/testsuite/gcc.dg/pr29521.c
+++ b/gcc/testsuite/gcc.dg/pr29521.c
@@ -1,6 +1,6 @@
/* PR 29521 : warning for return with expression in function returning void */
/* { dg-do compile } */
-/* { dg-options "" } */
+/* { dg-options "-fpermissive" } */
void func (void) { }
diff --git a/gcc/testsuite/gcc.dg/pr61162-2.c b/gcc/testsuite/gcc.dg/pr61162-2.c
index 4aa8493..a7d0b45 100644
--- a/gcc/testsuite/gcc.dg/pr61162-2.c
+++ b/gcc/testsuite/gcc.dg/pr61162-2.c
@@ -1,6 +1,6 @@
/* PR c/61162 */
/* { dg-do compile } */
-/* { dg-options "-Wc++-compat -Wpointer-sign -Wpedantic" } */
+/* { dg-options "-fpermissive -Wc++-compat -Wpointer-sign -Wpedantic" } */
enum e { A };
struct s { int a; };
diff --git a/gcc/testsuite/gcc.dg/pr61162-3.c b/gcc/testsuite/gcc.dg/pr61162-3.c
new file mode 100644
index 0000000..c486257
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr61162-3.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+
+int
+fn4 (int *a)
+{
+ return a; /* { dg-error "10:returning 'int \\*' from a function with return type 'int' makes integer from pointer without a cast" } */
+}
+
+int *
+fn5 (int a)
+{
+ return a; /* { dg-error "10:returning 'int' from a function with return type 'int \\*' makes pointer from integer without a cast" } */
+}
diff --git a/gcc/testsuite/gcc.dg/pr61852.c b/gcc/testsuite/gcc.dg/pr61852.c
index f488aca..e669d3c 100644
--- a/gcc/testsuite/gcc.dg/pr61852.c
+++ b/gcc/testsuite/gcc.dg/pr61852.c
@@ -1,10 +1,10 @@
/* PR c/61852 */
/* { dg-do compile } */
-/* { dg-options "-Wimplicit-function-declaration" } */
+/* { dg-options "" } */
int
f (int a)
{
- int b = a + a + a + ff (a); /* { dg-warning "23:implicit declaration of function" } */
+ int b = a + a + a + ff (a); /* { dg-error "23:implicit declaration of function" } */
return b;
}
diff --git a/gcc/testsuite/gcc.dg/pr67730-a.c b/gcc/testsuite/gcc.dg/pr67730-a.c
new file mode 100644
index 0000000..08737cc
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr67730-a.c
@@ -0,0 +1,11 @@
+/* PR c/67730 */
+/* { dg-do compile } */
+/* { dg-options "" } */
+
+#include <stddef.h>
+
+void
+fn1 (void)
+{
+ return NULL; /* { dg-error "10:.return. with a value" } */
+}
diff --git a/gcc/testsuite/gcc.dg/pr67730.c b/gcc/testsuite/gcc.dg/pr67730.c
index 54d73a6..cc51858 100644
--- a/gcc/testsuite/gcc.dg/pr67730.c
+++ b/gcc/testsuite/gcc.dg/pr67730.c
@@ -1,6 +1,6 @@
/* PR c/67730 */
/* { dg-do compile } */
-/* { dg-options "" } */
+/* { dg-options "-fpermissive" } */
#include <stddef.h>
diff --git a/gcc/testsuite/gcc.dg/spec-barrier-3.c b/gcc/testsuite/gcc.dg/spec-barrier-3.c
index 3ed4d39..0940a21 100644
--- a/gcc/testsuite/gcc.dg/spec-barrier-3.c
+++ b/gcc/testsuite/gcc.dg/spec-barrier-3.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-Wpedantic" } */
+/* { dg-options "-fpermissive -Wpedantic" } */
/* __builtin_speculation_safe_value returns a value with the same type
as its first argument. There should be a warning if that isn't
diff --git a/gcc/testsuite/gcc.dg/spec-barrier-3a.c b/gcc/testsuite/gcc.dg/spec-barrier-3a.c
new file mode 100644
index 0000000..ee98ad0
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/spec-barrier-3a.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-Wpedantic" } */
+
+/* __builtin_speculation_safe_value returns a value with the same type
+ as its first argument. There should be an error if that isn't
+ type-compatible with the use. */
+int *
+f (int x)
+{
+ return __builtin_speculation_safe_value (x); /* { dg-error "returning 'int' from a function with return type 'int \\*' makes pointer from integer without a cast" } */
+}
+
+/* { dg-prune-output "this target does not define a speculation barrier;" } */
diff --git a/gcc/testsuite/gcc.dg/spellcheck-identifiers-1a.c b/gcc/testsuite/gcc.dg/spellcheck-identifiers-1a.c
new file mode 100644
index 0000000..f50c52b
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/spellcheck-identifiers-1a.c
@@ -0,0 +1,136 @@
+/* { dg-do compile } */
+/* { dg-options "-fdiagnostics-show-caret" } */
+
+typedef struct GtkWidget { int dummy; } GtkWidget;
+
+extern void gtk_widget_show_all (GtkWidget *w);
+
+void
+test_1 (GtkWidget *w)
+{
+ gtk_widget_showall (w); /* { dg-error "3: implicit declaration of function .gtk_widget_showall.; did you mean .gtk_widget_show_all.?" } */
+ /* { dg-begin-multiline-output "" }
+ gtk_widget_showall (w);
+ ^~~~~~~~~~~~~~~~~~
+ gtk_widget_show_all
+ { dg-end-multiline-output "" } */
+
+ /* Ensure we don't try to suggest "gtk_widget_showall" for subsequent
+ corrections. */
+ gtk_widget_showall_ (w); /* { dg-error "3: implicit declaration of function .gtk_widget_showall_.; did you mean .gtk_widget_show_all.?" } */
+ /* { dg-begin-multiline-output "" }
+ gtk_widget_showall_ (w);
+ ^~~~~~~~~~~~~~~~~~~
+ gtk_widget_show_all
+ { dg-end-multiline-output "" } */
+
+ GtkWidgetShowAll (w); /* { dg-error "3: implicit declaration of function .GtkWidgetShowAll.; did you mean .gtk_widget_show_all.?" } */
+ /* { dg-begin-multiline-output "" }
+ GtkWidgetShowAll (w);
+ ^~~~~~~~~~~~~~~~
+ gtk_widget_show_all
+ { dg-end-multiline-output "" } */
+}
+
+int
+test_2 (int param)
+{
+ return parma * parma; /* { dg-error "10: .parma. undeclared .first use in this function.; did you mean .param." } */
+ /* { dg-begin-multiline-output "" }
+ return parma * parma;
+ ^~~~~
+ param
+ { dg-end-multiline-output "" } */
+}
+
+#define MACRO(X) ((X))
+
+int
+test_3 (int i)
+{
+ return MACRAME (i); /* { dg-error "10: implicit declaration of function .MACRAME.; did you mean .MACRO.?" } */
+ /* { dg-begin-multiline-output "" }
+ return MACRAME (i);
+ ^~~~~~~
+ MACRO
+ { dg-end-multiline-output "" } */
+}
+
+#define IDENTIFIER_POINTER(X) ((X))
+
+int
+test_4 (int node)
+{
+ return IDENTIFIER_PTR (node); /* { dg-error "10: implicit declaration of function .IDENTIFIER_PTR.; did you mean .IDENTIFIER_POINTER.?" } */
+ /* { dg-begin-multiline-output "" }
+ return IDENTIFIER_PTR (node);
+ ^~~~~~~~~~~~~~
+ IDENTIFIER_POINTER
+ { dg-end-multiline-output "" } */
+}
+
+
+int
+test_5 (void)
+{
+ return __LINE_; /* { dg-error "10: .__LINE_. undeclared .first use in this function.; did you mean .__LINE__." } */
+ /* { dg-begin-multiline-output "" }
+ return __LINE_;
+ ^~~~~~~
+ __LINE__
+ { dg-end-multiline-output "" } */
+}
+
+#define MAX_ITEMS 100
+int array[MAX_ITEM]; /* { dg-error "11: .MAX_ITEM. undeclared here .not in a function.; did you mean .MAX_ITEMS." } */
+ /* { dg-begin-multiline-output "" }
+ int array[MAX_ITEM];
+ ^~~~~~~~
+ MAX_ITEMS
+ { dg-end-multiline-output "" } */
+
+
+enum foo {
+ FOO_FIRST,
+ FOO_SECOND
+};
+
+int
+test_6 (enum foo f)
+{
+ switch (f)
+ {
+ case FOO_FURST: /* { dg-error "10: .FOO_FURST. undeclared .first use in this function.; did you mean .FOO_FIRST." } */
+ break;
+ /* { dg-begin-multiline-output "" }
+ case FOO_FURST:
+ ^~~~~~~~~
+ FOO_FIRST
+ { dg-end-multiline-output "" } */
+
+ case FOO_SECCOND: /* { dg-error "10: .FOO_SECCOND. undeclared .first use in this function.; did you mean .FOO_SECOND." } */
+ break;
+ /* { dg-begin-multiline-output "" }
+ case FOO_SECCOND:
+ ^~~~~~~~~~~
+ FOO_SECOND
+ { dg-end-multiline-output "" } */
+
+ default:
+ break;
+ }
+}
+
+int snprintf (char *, __SIZE_TYPE__, const char *, ...);
+
+void
+test_7 (int i, int j)
+{
+ int buffer[100];
+ snprint (buffer, 100, "%i of %i", i, j); /* { dg-error "3: implicit declaration of function .snprint.; did you mean .snprintf.." } */
+ /* { dg-begin-multiline-output "" }
+ snprint (buffer, 100, "%i of %i", i, j);
+ ^~~~~~~
+ snprintf
+ { dg-end-multiline-output "" } */
+}
diff --git a/gcc/testsuite/gcc.dg/spellcheck-identifiers-2.c b/gcc/testsuite/gcc.dg/spellcheck-identifiers-2.c
index ad6e9d3..b49709d 100644
--- a/gcc/testsuite/gcc.dg/spellcheck-identifiers-2.c
+++ b/gcc/testsuite/gcc.dg/spellcheck-identifiers-2.c
@@ -1,7 +1,7 @@
/* PR c/71858 */
/* Make sure anticipated builtins are not considered before they are declared. */
/* { dg-do compile } */
-/* { dg-options "-Wimplicit-function-declaration -fdiagnostics-show-caret" } */
+/* { dg-options "-fpermissive -Wimplicit-function-declaration -fdiagnostics-show-caret" } */
int sscafn (const char *, const char *, ...);
diff --git a/gcc/testsuite/gcc.dg/spellcheck-identifiers-2a.c b/gcc/testsuite/gcc.dg/spellcheck-identifiers-2a.c
new file mode 100644
index 0000000..1a3e68c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/spellcheck-identifiers-2a.c
@@ -0,0 +1,33 @@
+/* PR c/71858 */
+/* Make sure anticipated builtins are not considered before they are declared. */
+/* { dg-do compile } */
+/* { dg-options "-fdiagnostics-show-caret" } */
+
+int sscafn (const char *, const char *, ...);
+
+int
+test_1 (const char *p)
+{
+ int i;
+ return ssacnf (p, "%d", &i); /* { dg-error "10: implicit declaration of function .ssacnf.; did you mean .sscafn.?" } */
+ /* { dg-begin-multiline-output "" }
+ return ssacnf (p, "%d", &i);
+ ^~~~~~
+ sscafn
+ { dg-end-multiline-output "" } */
+}
+
+int scafn (const char *, ...);
+int scanf (const char *, ...);
+
+int
+test_2 (void)
+{
+ int i;
+ return sacnf ("%d", &i); /* { dg-error "10: implicit declaration of function .sacnf.; did you mean .scanf.?" } */
+ /* { dg-begin-multiline-output "" }
+ return sacnf ("%d", &i);
+ ^~~~~
+ scanf
+ { dg-end-multiline-output "" } */
+}
diff --git a/gcc/testsuite/gcc.dg/spellcheck-identifiers-3.c b/gcc/testsuite/gcc.dg/spellcheck-identifiers-3.c
index 94f4078..0b5982f 100644
--- a/gcc/testsuite/gcc.dg/spellcheck-identifiers-3.c
+++ b/gcc/testsuite/gcc.dg/spellcheck-identifiers-3.c
@@ -1,7 +1,7 @@
/* PR c/71858 */
/* Only consider function names, function pointers and macros for implicit function declarations. */
/* { dg-do compile } */
-/* { dg-options "-Wimplicit-function-declaration -fdiagnostics-show-caret" } */
+/* { dg-options "-fpermissive -Wimplicit-function-declaration -fdiagnostics-show-caret" } */
void fn1abcd (void);
diff --git a/gcc/testsuite/gcc.dg/spellcheck-identifiers-3a.c b/gcc/testsuite/gcc.dg/spellcheck-identifiers-3a.c
new file mode 100644
index 0000000..0c0a19e
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/spellcheck-identifiers-3a.c
@@ -0,0 +1,45 @@
+/* PR c/71858 */
+/* Only consider function names, function pointers and macros for implicit function declarations. */
+/* { dg-do compile } */
+/* { dg-options "-fdiagnostics-show-caret" } */
+
+void fn1abcd (void);
+
+void
+test_1 (int fn1bacd)
+{
+ fn1badc (); /* { dg-error "3: implicit declaration of function .fn1badc.; did you mean .fn1abcd.?" } */
+ /* { dg-begin-multiline-output "" }
+ fn1badc ();
+ ^~~~~~~
+ fn1abcd
+ { dg-end-multiline-output "" } */
+}
+
+void fn2efgh (void);
+void (*fn2efhg) (void);
+
+void
+test_2 (void)
+{
+ fn2fehg (); /* { dg-error "3: implicit declaration of function .fn2fehg.; did you mean .fn2efhg.?" } */
+ /* { dg-begin-multiline-output "" }
+ fn2fehg ();
+ ^~~~~~~
+ fn2efhg
+ { dg-end-multiline-output "" } */
+}
+
+void fn3ijkl (void);
+typedef int fn3ijlk;
+
+void
+test_3 (void)
+{
+ fn3jilk (); /* { dg-error "3: implicit declaration of function .fn3jilk.; did you mean .fn3ijkl.?" } */
+ /* { dg-begin-multiline-output "" }
+ fn3jilk ();
+ ^~~~~~~
+ fn3ijkl
+ { dg-end-multiline-output "" } */
+}
diff --git a/gcc/testsuite/gcc.dg/spellcheck-identifiers-4.c b/gcc/testsuite/gcc.dg/spellcheck-identifiers-4.c
index f9b7d8d..f24d863 100644
--- a/gcc/testsuite/gcc.dg/spellcheck-identifiers-4.c
+++ b/gcc/testsuite/gcc.dg/spellcheck-identifiers-4.c
@@ -1,4 +1,4 @@
-/* { dg-options "-Wimplicit-function-declaration" } */
+/* { dg-options "-fpermissive -Wimplicit-function-declaration" } */
extern double sqrt (double);
diff --git a/gcc/testsuite/gcc.dg/spellcheck-identifiers-4a.c b/gcc/testsuite/gcc.dg/spellcheck-identifiers-4a.c
new file mode 100644
index 0000000..33a6b42
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/spellcheck-identifiers-4a.c
@@ -0,0 +1,10 @@
+/* { dg-options "" } */
+
+extern double sqrt (double);
+
+void test (float pf, float inff)
+{
+ assert (pf == inff); /* { dg-bogus "sqrt" } */
+ /* { dg-error "implicit declaration of function 'assert'" "" { target *-*-* } .-1 } */
+ /* { dg-message "header '<assert.h>'" "" { target *-*-* } .-2 } */
+}
diff --git a/gcc/testsuite/gcc.dg/spellcheck-identifiers.c b/gcc/testsuite/gcc.dg/spellcheck-identifiers.c
index 063e3f9..cd632c6 100644
--- a/gcc/testsuite/gcc.dg/spellcheck-identifiers.c
+++ b/gcc/testsuite/gcc.dg/spellcheck-identifiers.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-Wimplicit-function-declaration -fdiagnostics-show-caret" } */
+/* { dg-options "-fpermissive -fdiagnostics-show-caret" } */
typedef struct GtkWidget { int dummy; } GtkWidget;
diff --git a/gcc/testsuite/gcc.dg/torture/bitint-43.c b/gcc/testsuite/gcc.dg/torture/bitint-43.c
index 4265bff..a45f5c6 100644
--- a/gcc/testsuite/gcc.dg/torture/bitint-43.c
+++ b/gcc/testsuite/gcc.dg/torture/bitint-43.c
@@ -1,6 +1,6 @@
/* PR c/111309 */
/* { dg-do run { target bitint } } */
-/* { dg-options "-std=c2x -pedantic-errors" } */
+/* { dg-options "-std=c23 -pedantic-errors" } */
/* { dg-skip-if "" { ! run_expensive_tests } { "*" } { "-O0" "-O2" } } */
/* { dg-skip-if "" { ! run_expensive_tests } { "-flto" } { "" } } */
diff --git a/gcc/testsuite/gcc.dg/torture/bitint-44.c b/gcc/testsuite/gcc.dg/torture/bitint-44.c
index 938c0e9..fe5c168 100644
--- a/gcc/testsuite/gcc.dg/torture/bitint-44.c
+++ b/gcc/testsuite/gcc.dg/torture/bitint-44.c
@@ -1,6 +1,6 @@
/* PR c/111309 */
/* { dg-do run { target bitint } } */
-/* { dg-options "-std=c2x -pedantic-errors" } */
+/* { dg-options "-std=c23 -pedantic-errors" } */
/* { dg-skip-if "" { ! run_expensive_tests } { "*" } { "-O0" "-O2" } } */
/* { dg-skip-if "" { ! run_expensive_tests } { "-flto" } { "" } } */
diff --git a/gcc/testsuite/gcc.dg/torture/bitint-45.c b/gcc/testsuite/gcc.dg/torture/bitint-45.c
new file mode 100644
index 0000000..50c622d
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/bitint-45.c
@@ -0,0 +1,32 @@
+/* PR middle-end/112750 */
+/* { dg-do run { target bitint } } */
+/* { dg-options "-std=c23 -pedantic-errors" } */
+/* { dg-skip-if "" { ! run_expensive_tests } { "*" } { "-O0" "-O2" } } */
+/* { dg-skip-if "" { ! run_expensive_tests } { "-flto" } { "" } } */
+
+#if __BITINT_MAXWIDTH__ >= 256
+_BitInt(256) a = __INT_MAX__ + (_BitInt(256)) 1;
+_BitInt(256) b = __INT_MAX__;
+#endif
+#if __BITINT_MAXWIDTH__ >= 512
+_BitInt(512) c = 0x7fffffffffffffffffffffffffffffffffffffffwb + (_BitInt(512)) 1;
+_BitInt(512) d = 0x7fffffffffffffffffffffffffffffffffffffffwb;
+#endif
+
+int
+main ()
+{
+#if __BITINT_MAXWIDTH__ >= 256
+ if (!__builtin_sub_overflow_p (a, 0, 0))
+ __builtin_abort ();
+ if (!__builtin_sub_overflow_p (b, -1, 0))
+ __builtin_abort ();
+#endif
+#if __BITINT_MAXWIDTH__ >= 512
+ if (!__builtin_sub_overflow_p (c, 0, (_BitInt(160)) 0))
+ __builtin_abort ();
+ if (!__builtin_sub_overflow_p (d, -1, 0))
+ __builtin_abort ();
+#endif
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/torture/hardbool-5a.c b/gcc/testsuite/gcc.dg/torture/hardbool-5a.c
new file mode 100644
index 0000000..a03887c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/hardbool-5a.c
@@ -0,0 +1,6 @@
+/* { dg-do run } */
+/* { dg-options "-w" } */
+
+#define falseval 0x5a
+
+#include "hardbool.c"
diff --git a/gcc/testsuite/gcc.dg/torture/hardbool-i-5a.c b/gcc/testsuite/gcc.dg/torture/hardbool-i-5a.c
new file mode 100644
index 0000000..c0ba2a8
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/hardbool-i-5a.c
@@ -0,0 +1,6 @@
+/* { dg-do run } */
+/* { dg-options "-w" } */
+
+#define falseval 0xa53cc35a
+
+#include "hardbool-i.c"
diff --git a/gcc/testsuite/gcc.dg/torture/hardbool-i.c b/gcc/testsuite/gcc.dg/torture/hardbool-i.c
new file mode 100644
index 0000000..39214d2
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/hardbool-i.c
@@ -0,0 +1,5 @@
+/* { dg-do run } */
+
+#define basetype int
+
+#include "hardbool.c"
diff --git a/gcc/testsuite/gcc.dg/torture/hardbool-ll-5a.c b/gcc/testsuite/gcc.dg/torture/hardbool-ll-5a.c
new file mode 100644
index 0000000..14438c5
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/hardbool-ll-5a.c
@@ -0,0 +1,6 @@
+/* { dg-do run } */
+/* { dg-options "-w" } */
+
+#define falseval 0x781ee187a53cc35all
+
+#include "hardbool-ll.c"
diff --git a/gcc/testsuite/gcc.dg/torture/hardbool-ll.c b/gcc/testsuite/gcc.dg/torture/hardbool-ll.c
new file mode 100644
index 0000000..d4d498c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/hardbool-ll.c
@@ -0,0 +1,5 @@
+/* { dg-do run } */
+
+#define basetype long long
+
+#include "hardbool.c"
diff --git a/gcc/testsuite/gcc.dg/torture/hardbool-s-5a.c b/gcc/testsuite/gcc.dg/torture/hardbool-s-5a.c
new file mode 100644
index 0000000..e38a56b
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/hardbool-s-5a.c
@@ -0,0 +1,6 @@
+/* { dg-do run } */
+/* { dg-options "-w" } */
+
+#define falseval 0x5aa5
+
+#include "hardbool-s.c"
diff --git a/gcc/testsuite/gcc.dg/torture/hardbool-s.c b/gcc/testsuite/gcc.dg/torture/hardbool-s.c
new file mode 100644
index 0000000..942300b
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/hardbool-s.c
@@ -0,0 +1,5 @@
+/* { dg-do run } */
+
+#define basetype short
+
+#include "hardbool.c"
diff --git a/gcc/testsuite/gcc.dg/torture/hardbool-ul-5a.c b/gcc/testsuite/gcc.dg/torture/hardbool-ul-5a.c
new file mode 100644
index 0000000..7beec57
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/hardbool-ul-5a.c
@@ -0,0 +1,6 @@
+/* { dg-do run } */
+/* { dg-options "-w" } */
+
+#define falseval 0xa53cc35a
+
+#include "hardbool-ul.c"
diff --git a/gcc/testsuite/gcc.dg/torture/hardbool-ul.c b/gcc/testsuite/gcc.dg/torture/hardbool-ul.c
new file mode 100644
index 0000000..841c1d4
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/hardbool-ul.c
@@ -0,0 +1,5 @@
+/* { dg-do run } */
+
+#define basetype unsigned long
+
+#include "hardbool.c"
diff --git a/gcc/testsuite/gcc.dg/torture/hardbool-us-5a.c b/gcc/testsuite/gcc.dg/torture/hardbool-us-5a.c
new file mode 100644
index 0000000..5bfc922
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/hardbool-us-5a.c
@@ -0,0 +1,6 @@
+/* { dg-do run } */
+/* { dg-options "-w" } */
+
+#define falseval 0xa55a
+
+#include "hardbool-us.c"
diff --git a/gcc/testsuite/gcc.dg/torture/hardbool-us.c b/gcc/testsuite/gcc.dg/torture/hardbool-us.c
new file mode 100644
index 0000000..e9feec6
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/hardbool-us.c
@@ -0,0 +1,5 @@
+/* { dg-do run } */
+
+#define basetype unsigned short
+
+#include "hardbool.c"
diff --git a/gcc/testsuite/gcc.dg/torture/hardbool.c b/gcc/testsuite/gcc.dg/torture/hardbool.c
new file mode 100644
index 0000000..0168495
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/hardbool.c
@@ -0,0 +1,118 @@
+/* { dg-do run } */
+
+#include <assert.h>
+
+#ifndef basetype
+#define basetype char
+#endif
+
+#ifndef falseval
+#define falseval 0
+#endif
+
+#ifndef trueval
+#define trueval ~falseval
+#endif
+
+/* hardbool may be #defined so as to drop parms in other tests. */
+typedef basetype __attribute__ ((hardbool (falseval, trueval))) hbool;
+
+typedef unsigned char __attribute__ ((__hardbool__ (1, 0))) zbool;
+
+struct hs {
+ hbool a[2];
+ hbool x:2;
+ hbool y:5;
+ zbool z:1;
+};
+
+hbool var = 0;
+
+struct hs x = { { 1, 0 }, 2, 0, 2 };
+
+int f(hbool v) {
+ return !v;
+}
+
+int g(int i) {
+ return f(i);
+}
+
+hbool h(hbool x) {
+ return x;
+}
+
+hbool h2(hbool x) {
+ return h(x);
+}
+
+int hsx(struct hs v) {
+ return v.x;
+}
+
+int ghs(hbool s) {
+ struct hs v = { {s, !s}, s, !s, s };
+ return hsx (v);
+}
+
+int t = (hbool)2;
+
+void check_pfalse (hbool *p)
+{
+ assert (!*p);
+ assert (*(basetype*)p == (basetype)falseval);
+ assert (!(int)(hbool)*p);
+}
+
+void check_ptrue (hbool *p)
+{
+ assert (*p);
+ assert (*(basetype*)p == (basetype)trueval);
+ assert ((int)(hbool)*p);
+}
+
+void check_vfalse (hbool v)
+{
+ check_pfalse (&v);
+}
+
+void check_vtrue (hbool v)
+{
+ check_ptrue (&v);
+}
+
+int main () {
+ check_pfalse (&var);
+ var = !(int)(hbool)(_Bool)var;
+ check_ptrue (&var);
+ var = (zbool)var;
+ check_ptrue (&var);
+
+ check_ptrue (&x.a[0]);
+ check_pfalse (&x.a[1]);
+ check_vtrue (x.x);
+ check_vfalse (x.y);
+ check_vtrue (x.z);
+
+ check_vtrue (t);
+
+ check_vtrue (var && t);
+ check_vfalse (!var || x.y);
+
+ check_vfalse (f (2));
+ check_vfalse (f (1));
+ check_vtrue (f (0));
+
+ check_vfalse (g (2));
+ check_vfalse (g (1));
+ check_vtrue (g (0));
+
+ check_vtrue (h (2));
+ check_vtrue (h (1));
+ check_vfalse (h (0));
+
+ check_vtrue (h2 (2));
+ check_vtrue (h2 (1));
+ check_vfalse (h2 (0));
+}
+
diff --git a/gcc/testsuite/gcc.dg/torture/inline-mem-cmp-1.c b/gcc/testsuite/gcc.dg/torture/inline-mem-cmp-1.c
new file mode 100644
index 0000000..a368f07
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/inline-mem-cmp-1.c
@@ -0,0 +1,7 @@
+/* { dg-do run } */
+/* { dg-options "-finline-stringops=memcmp -save-temps -g0 -fno-lto" } */
+
+#include "../memcmp-1.c"
+
+/* Check that no memcmp calls remain, but allow for lib_memcmp calls. */
+/* { dg-final { scan-assembler-not {(^|\*)\mmemcmp\M} } } */
diff --git a/gcc/testsuite/gcc.dg/torture/inline-mem-cpy-1.c b/gcc/testsuite/gcc.dg/torture/inline-mem-cpy-1.c
new file mode 100644
index 0000000..c98e903
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/inline-mem-cpy-1.c
@@ -0,0 +1,8 @@
+/* { dg-do run } */
+/* { dg-options "-finline-stringops=memcpy -save-temps -g0 -fno-lto" } */
+
+#include "../memcmp-1.c"
+/* Yeah, this memcmp test exercises plenty of memcpy, more than any of the
+ memcpy tests. */
+
+/* { dg-final { scan-assembler-not {\mmemcpy\M} } } */
diff --git a/gcc/testsuite/gcc.dg/torture/inline-mem-cpy-cmp-1.c b/gcc/testsuite/gcc.dg/torture/inline-mem-cpy-cmp-1.c
new file mode 100644
index 0000000..2cd2057
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/inline-mem-cpy-cmp-1.c
@@ -0,0 +1,11 @@
+/* { dg-do run } */
+/* { dg-options "-finline-stringops -save-temps -g0 -fno-lto" } */
+/* { dg-require-effective-target ptr32plus } */
+/* { dg-timeout-factor 2 } */
+
+#include "../memcmp-1.c"
+/* Yeah, this memcmp test exercises plenty of memcpy, more than any of the
+ memcpy tests. */
+
+/* { dg-final { scan-assembler-not {\mmemcpy\M} } } */
+/* { dg-final { scan-assembler-not {(^|\*)\mmemcmp\M} } } */
diff --git a/gcc/testsuite/gcc.dg/torture/inline-mem-move-1.c b/gcc/testsuite/gcc.dg/torture/inline-mem-move-1.c
new file mode 100644
index 0000000..c0eca5b
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/inline-mem-move-1.c
@@ -0,0 +1,8 @@
+/* { dg-do run } */
+/* { dg-options "-finline-stringops=memmove -save-temps -g0 -fno-lto" } */
+
+#include "../../gcc.c-torture/execute/builtins/memmove.c"
+
+#include "../../gcc.c-torture/execute/builtins/lib/main.c"
+
+/* { dg-final { scan-assembler-not {\mmemmove\M} } } */
diff --git a/gcc/testsuite/gcc.dg/torture/inline-mem-set-1.c b/gcc/testsuite/gcc.dg/torture/inline-mem-set-1.c
new file mode 100644
index 0000000..bdcf9bf
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/inline-mem-set-1.c
@@ -0,0 +1,84 @@
+/* { dg-do compile } */
+/* { dg-options "-finline-stringops -fno-lto" } */
+
+void *zero (unsigned long long (*p)[32], int n)
+{
+ return __builtin_memset (p, 0, n * sizeof (*p));
+}
+
+void *ones (char (*p)[128], int n)
+{
+ return __builtin_memset (p, -1, n * sizeof (*p));
+}
+
+void *opt2 (int *p, int i)
+{
+ return __builtin_memset (p, 0, (i ? 1024 : 2) * sizeof (*p));
+}
+
+void *opt8 (int *p, int i)
+{
+ return __builtin_memset (p, 0, (i ? 1024 : 8) * sizeof (*p));
+}
+
+void *opt32 (int *p, int i)
+{
+ return __builtin_memset (p, 0, (i ? 1024 : 32) * sizeof (*p));
+}
+
+void *opt128 (int *p, int i)
+{
+ return __builtin_memset (p, 0, (i ? 1024 : 128) * sizeof (*p));
+}
+
+void *opt512 (int *p, int i)
+{
+ return __builtin_memset (p, 0, (i ? 1024 : 512) * sizeof (*p));
+}
+
+void *opt_primes (int *p, int i)
+{
+ return __builtin_memset (p, 0, (i ? 509 : 7) * sizeof (*p));
+}
+
+void *opt_primes_blk (int *p, int i)
+{
+ return __builtin_memset (p, 0, (i ? 521 : 9) * sizeof (*p));
+}
+
+void *huge (long (*p)[16384])
+{
+ return __builtin_memset (p, 0, sizeof (*p));
+}
+
+void *hugep1 (long (*p)[16384+1])
+{
+ return __builtin_memset (p, 0, sizeof (*p));
+}
+
+void *hugep4 (long (*p)[16384+4])
+{
+ return __builtin_memset (p, 0, sizeof (*p));
+}
+
+void *hugep16 (long (*p)[16384+16])
+{
+ return __builtin_memset (p, 0, sizeof (*p));
+}
+
+void *hugep64 (long (*p)[16384+64])
+{
+ return __builtin_memset (p, 0, sizeof (*p));
+}
+
+void *hugep256 (long (*p)[16384+256])
+{
+ return __builtin_memset (p, 0, sizeof (*p));
+}
+
+void *hugep1024p256p64p16p4p1 (long (*p)[16384+1024+64+16+4+1])
+{
+ return __builtin_memset (p, 0, sizeof (*p));
+}
+
+/* { dg-final { scan-assembler-not {\mmemset\M} } } */
diff --git a/gcc/testsuite/gcc.dg/torture/pr109689.c b/gcc/testsuite/gcc.dg/torture/pr109689.c
new file mode 100644
index 0000000..5d2ce7e
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr109689.c
@@ -0,0 +1,34 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-ftree-vectorize" } */
+
+int a, b, c, d, e;
+int main() {
+ char f;
+ while (a) {
+ int g, h = 3;
+ if (b)
+ i:
+ if (d)
+ goto j;
+ k:
+ if (a) {
+ j:
+ if (!g)
+ goto k;
+ if (e) {
+ while (e)
+ e = f;
+ h = 0;
+ goto i;
+ }
+ if (!h)
+ for (; g < 1; g++)
+ ;
+ g = ~((~c & h & c) ^ ~g);
+ if (!g)
+ for (; a < 1; a++)
+ f++;
+ }
+ }
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/torture/pr112827-1.c b/gcc/testsuite/gcc.dg/torture/pr112827-1.c
new file mode 100644
index 0000000..6838cbb
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr112827-1.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+
+int a, b, c, d, e;
+int main() {
+ for (; c; c++) {
+ for (a = 0; a < 2; a++)
+ ;
+ for (; b; b++) {
+ e = d;
+ d = a;
+ }
+ }
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/torture/pr112827-2.c b/gcc/testsuite/gcc.dg/torture/pr112827-2.c
new file mode 100644
index 0000000..a7a2a70
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr112827-2.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+
+short a, b[1], f;
+char c, g;
+int d, e;
+int main() {
+ for (; f; f++) {
+ for (d = 0; d < 2; d++)
+ ;
+ if (a)
+ for (g = 0; g < 2; g++)
+ for (c = 0; c < 2; c += b[d+g])
+ ;
+ for (; e; e++)
+ ;
+ }
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/torture/pr112856.c b/gcc/testsuite/gcc.dg/torture/pr112856.c
new file mode 100644
index 0000000..67ab481
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr112856.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+
+double *SVD_A_0;
+int SVD_i, SVD_j, SVD_k, SVD_n;
+double SVD_f;
+void SVD() {
+ SVD_i = SVD_n - 1;
+ for (; SVD_i; SVD_i--) {
+ for (; SVD_j; SVD_j++) {
+ SVD_f = SVD_k = SVD_i;
+ for (; SVD_k < SVD_n; SVD_k++)
+ SVD_A_0[SVD_k] += SVD_f;
+ }
+ SVD_j = SVD_i;
+ for (; SVD_j < SVD_n; SVD_j++)
+ ;
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/transparent-union-1.c b/gcc/testsuite/gcc.dg/transparent-union-1.c
index 3f0c260..518adfb 100644
--- a/gcc/testsuite/gcc.dg/transparent-union-1.c
+++ b/gcc/testsuite/gcc.dg/transparent-union-1.c
@@ -1,6 +1,6 @@
/* PR c/20043 */
/* { dg-do compile } */
-/* { dg-options "-std=gnu99" } */
+/* { dg-options "-fpermissive -std=gnu99" } */
extern void f0 (int *);
extern void f0 (int *__restrict);
diff --git a/gcc/testsuite/gcc.dg/transparent-union-1a.c b/gcc/testsuite/gcc.dg/transparent-union-1a.c
new file mode 100644
index 0000000..9796cea
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/transparent-union-1a.c
@@ -0,0 +1,85 @@
+/* PR c/20043 */
+/* { dg-do compile } */
+/* { dg-options "-std=gnu99" } */
+
+extern void f0 (int *);
+extern void f0 (int *__restrict);
+
+extern void f1 (int *__restrict);
+extern void f1 (int *);
+
+typedef union { int *i; long *l; } U2
+ __attribute__((transparent_union));
+extern void f2 (U2);
+extern void f2 (int *);
+
+typedef union { int *__restrict i; long *__restrict l; } U3
+ __attribute__((transparent_union));
+extern void f3 (U3);
+extern void f3 (int *__restrict);
+
+extern void f4 (U3);
+extern void f4 (int *);
+
+extern void f5 (U2);
+extern void f5 (int *__restrict);
+
+typedef union { long *l; int *i; } U6
+ __attribute__((transparent_union));
+extern void f6 (U6);
+extern void f6 (int *);
+
+typedef union { long *__restrict l; int *__restrict i; } U7
+ __attribute__((transparent_union));
+extern void f7 (U7);
+extern void f7 (int *__restrict);
+
+extern void f8 (U7);
+extern void f8 (int *);
+
+extern void f9 (U6);
+extern void f9 (int *__restrict);
+
+extern void f10 (U2);
+extern void f11 (U3);
+extern void f12 (U6);
+extern void f13 (U7);
+
+int i;
+long l;
+
+int
+main (void)
+{
+ f0 (&i);
+ f0 (&l); /* { dg-error "passing argument 1 of 'f0' from incompatible pointer type" } */
+ f1 (&i);
+ f1 (&l); /* { dg-error "passing argument 1 of 'f1' from incompatible pointer type" } */
+ f2 (&i);
+ f2 (&l); /* { dg-error "passing argument 1 of 'f2' from incompatible pointer type" } */
+ f3 (&i);
+ f3 (&l); /* { dg-error "passing argument 1 of 'f3' from incompatible pointer type" } */
+ f4 (&i);
+ f4 (&l); /* { dg-error "passing argument 1 of 'f4' from incompatible pointer type" } */
+ f5 (&i);
+ f5 (&l); /* { dg-error "passing argument 1 of 'f5' from incompatible pointer type" } */
+ f6 (&i);
+ f6 (&l); /* { dg-error "passing argument 1 of 'f6' from incompatible pointer type" } */
+ f7 (&i);
+ f7 (&l); /* { dg-error "passing argument 1 of 'f7' from incompatible pointer type" } */
+ f8 (&i);
+ f8 (&l); /* { dg-error "passing argument 1 of 'f8' from incompatible pointer type" } */
+ f9 (&i);
+ f9 (&l); /* { dg-error "passing argument 1 of 'f9' from incompatible pointer type" } */
+ f10 (&i);
+ f10 (&l);
+ f11 (&i);
+ f11 (&l);
+ f12 (&i);
+ f12 (&l);
+ f13 (&i);
+ f13 (&l);
+ return 0;
+}
+
+/* { dg-message "note: expected '\[^\n'\]*' but argument is of type '\[^\n'\]*'" "note: expected" { target *-*-* } 0 } */
diff --git a/gcc/testsuite/gcc.dg/tree-prof/time-profiler-3.c b/gcc/testsuite/gcc.dg/tree-prof/time-profiler-3.c
index 69ce026..e54a06a 100644
--- a/gcc/testsuite/gcc.dg/tree-prof/time-profiler-3.c
+++ b/gcc/testsuite/gcc.dg/tree-prof/time-profiler-3.c
@@ -1,4 +1,4 @@
-/* { dg-options "-O2 -fdump-ipa-profile -fprofile-update=atomic" } */
+/* { dg-options "-O2 -fdump-ipa-profile -fprofile-update=atomic -fno-ipa-vrp" } */
/* { dg-require-effective-target profile_update_atomic } */
__attribute__ ((noinline))
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr112721.c b/gcc/testsuite/gcc.dg/tree-ssa/pr112721.c
new file mode 100644
index 0000000..adf6261
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr112721.c
@@ -0,0 +1,26 @@
+/* { dg-do run } */
+/* { dg-options "-O1" } */
+
+unsigned * volatile gv;
+
+struct a {
+ int b;
+};
+int c, e;
+long d;
+unsigned * __attribute__((noinline))
+f(unsigned *g) {
+ for (; c;)
+ e = d;
+ return gv ? gv : g;
+}
+int main() {
+ int *h;
+ struct a i = {8};
+ int *j = &i.b;
+ h = (unsigned *) f(j);
+ *h = 0;
+ if (i.b != 0)
+ __builtin_abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr112767.c b/gcc/testsuite/gcc.dg/tree-ssa/pr112767.c
new file mode 100644
index 0000000..3f9bc06
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr112767.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-options "-O3" } */
+
+double reg_dict[32];
+
+void foo(int);
+
+void initialize()
+{
+ int i=8;
+ for (int phi=0; phi<8; ++phi) {
+ reg_dict[i]=0; /* { dg-bogus "undefined behavior" } */
+ int sn = 0;
+ if (i < 16) sn = 20;
+ foo(sn);
+ ++i;
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/ubsan/pr112741.c b/gcc/testsuite/gcc.dg/ubsan/pr112741.c
new file mode 100644
index 0000000..13994f6
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/ubsan/pr112741.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-options "-fgimple -fsanitize=undefined" } */
+
+int __GIMPLE(ssa) foo(int j)
+{
+ int c[1][10][1];
+ int _1;
+
+__BB(2):
+ c[0][1][0] = 1;
+ c[0][1] = _Literal (int[1]) {};
+ _1 = c[0][j_2(D)][0];
+ return _1;
+}
+
+int main()
+{
+ if (foo (1) != 0)
+ __builtin_abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vect/pr111754.c b/gcc/testsuite/gcc.dg/vect/pr111754.c
new file mode 100644
index 0000000..014472f
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/pr111754.c
@@ -0,0 +1,14 @@
+/* PR middle-end/111754 */
+/* { dg-do compile } */
+/* { dg-additional-options "-O2 -fdump-tree-forwprop1 -Wno-psabi" } */
+
+typedef float __attribute__((__vector_size__ (16))) F;
+
+F foo (F a, F b)
+{
+ F v = (F) { 9 };
+ return __builtin_shufflevector (v, v, 1, 0, 1, 2);
+}
+
+/* { dg-final { scan-tree-dump-not "VEC_PERM_EXPR" "forwprop1" } } */
+/* { dg-final { scan-tree-dump "(return|<retval> =) \{ 0.0, 9.0e\\+0, 0.0, 0.0 \}" "forwprop1" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr112818.c b/gcc/testsuite/gcc.dg/vect/pr112818.c
new file mode 100644
index 0000000..61a30a5
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/pr112818.c
@@ -0,0 +1,34 @@
+/* { dg-do compile } */
+
+extern char tag_data[];
+struct pppoe_tag {
+ unsigned short tag_type;
+ unsigned short tag_len;
+};
+
+char code;
+int *add_tag_pack;
+void *add_tag_data;
+short e;
+long c, d;
+
+static int add_tag(int type, int len) {
+ short a, b;
+ struct pppoe_tag *tag = (struct pppoe_tag *)add_tag_pack;
+ if (e + len || len < 0)
+ return 1;
+ b = __builtin_bswap16(type);
+ tag->tag_type = b;
+ a = __builtin_bswap16(len);
+ tag->tag_len = a;
+ if (add_tag_data)
+ __builtin___memcpy_chk(tag_data, add_tag_data, len, c);
+ return 0;
+}
+void pppoe_serv_read() {
+ switch (code)
+ case 9: {
+ add_tag(2, d);
+ add_tag(0, 2);
+ }
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-1.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-1.c
index 5405e1e..7bd7757 100644
--- a/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-1.c
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-1.c
@@ -4,6 +4,7 @@
AAPCS64 \S 4.1. */
/* { dg-do run { target aarch64*-*-* } } */
+/* { dg-additional-options "-mbranch-protection=none" } */
/* { dg-additional-sources "abitest.S" } */
#ifndef IN_FRAMEWORK
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-2.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-2.c
index 6b171c4..85a822a 100644
--- a/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-2.c
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-2.c
@@ -4,6 +4,7 @@
Homogeneous floating-point aggregate types are covered in func-ret-3.c. */
/* { dg-do run { target aarch64*-*-* } } */
+/* { dg-additional-options "-mbranch-protection=none" } */
/* { dg-additional-sources "abitest.S" } */
#ifndef IN_FRAMEWORK
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-3.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-3.c
index ad312b6..1d35ebf 100644
--- a/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-3.c
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-3.c
@@ -4,6 +4,7 @@
in AAPCS64 \S 4.3.5. */
/* { dg-do run { target aarch64-*-* } } */
+/* { dg-additional-options "-mbranch-protection=none" } */
/* { dg-additional-sources "abitest.S" } */
/* { dg-require-effective-target aarch64_big_endian } */
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-4.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-4.c
index af05fbe..15e1408 100644
--- a/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-4.c
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-4.c
@@ -5,6 +5,7 @@
are treated as general composite types. */
/* { dg-do run { target aarch64*-*-* } } */
+/* { dg-additional-options "-mbranch-protection=none" } */
/* { dg-additional-sources "abitest.S" } */
/* { dg-require-effective-target aarch64_big_endian } */
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-64x1_1.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-64x1_1.c
index 05957e2..fe7bbb6 100644
--- a/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-64x1_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-64x1_1.c
@@ -3,6 +3,7 @@
Test 64-bit singleton vector types which should be in FP/SIMD registers. */
/* { dg-do run { target aarch64*-*-* } } */
+/* { dg-additional-options "-mbranch-protection=none" } */
/* { dg-additional-sources "abitest.S" } */
#ifndef IN_FRAMEWORK
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/ice_1.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/ice_1.c
index 906cceb..edc35db 100644
--- a/gcc/testsuite/gcc.target/aarch64/aapcs64/ice_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/ice_1.c
@@ -16,6 +16,6 @@ void unnamed (int, ...);
void foo ()
{
- name (0, aaaa);
+ named (0, aaaa);
unnamed (0, aaaa);
}
diff --git a/gcc/testsuite/gcc.target/aarch64/acle/memtag_2.c b/gcc/testsuite/gcc.target/aarch64/acle/memtag_2.c
index fcab05b..806e075 100644
--- a/gcc/testsuite/gcc.target/aarch64/acle/memtag_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/acle/memtag_2.c
@@ -2,7 +2,7 @@
/* { dg-do compile } */
/* { dg-require-effective-target lp64 } */
-/* { dg-options "-O3 -march=armv8.5-a+memtag" } */
+/* { dg-options "-fpermissive -O3 -march=armv8.5-a+memtag" } */
#include "arm_acle.h"
@@ -67,4 +67,4 @@ test_memtag_error_argument (void)
__arm_mte_ptrdiff(no_decl2, 0); /* { dg-error {} } */
__arm_mte_ptrdiff(0); /* { dg-error {} } */
__arm_mte_ptrdiff(); /* { dg-error {} } */
-} \ No newline at end of file
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/acle/memtag_2a.c b/gcc/testsuite/gcc.target/aarch64/acle/memtag_2a.c
new file mode 100644
index 0000000..9ae371d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/acle/memtag_2a.c
@@ -0,0 +1,71 @@
+/* Test the MEMTAG intrinsic qualifier warnings and argument errors. */
+
+/* { dg-do compile } */
+/* { dg-require-effective-target lp64 } */
+/* { dg-options "-O3 -march=armv8.5-a+memtag" } */
+
+#include "arm_acle.h"
+
+void
+test_memtag_warning_return_qualifier (void)
+{
+ const char *c;
+ volatile char *v;
+ char *n;
+ int *i;
+ int64_t d;
+
+ v = __arm_mte_get_tag(c); /* { dg-warning {assignment} } */
+ n = __arm_mte_get_tag(c); /* { dg-warning {assignment} } */
+ i = __arm_mte_get_tag(c); /* { dg-error {assignment} } */
+ c = __arm_mte_get_tag(v); /* { dg-warning {assignment} } */
+ n = __arm_mte_get_tag(v); /* { dg-warning {assignment} } */
+
+ i = __arm_mte_create_random_tag (c, 0); /* { dg-error {assignment} } */
+ i = __arm_mte_increment_tag (c, 0); /* { dg-error {assignment} } */
+
+ c = __arm_mte_get_tag(n); /* No warning. */
+ d = __arm_mte_ptrdiff(c, i); /* No warning. */
+}
+
+void
+test_memtag_warning_argument (void)
+{
+ const char *c;
+ __arm_mte_exclude_tag(0, 0); /* No warning. */
+ __arm_mte_create_random_tag (0, 0); /* No warning. */
+ __arm_mte_set_tag(0); /* No warning. */
+ __arm_mte_get_tag(0); /* No warning. */
+ __arm_mte_increment_tag (0, 15); /* No warning. */
+ __arm_mte_ptrdiff(c, 0); /* No warning. */
+ __arm_mte_ptrdiff(0, c); /* No warning. */
+}
+
+void
+test_memtag_error_argument (void)
+{
+ /* Produce errors properly for invalid arguments. */
+ __arm_mte_exclude_tag(no_decl, 0); /* { dg-error {} } */
+ __arm_mte_exclude_tag(); /* { dg-error {} } */
+ __arm_mte_ptrdiff(no_decl2, 0); /* { dg-error {} } */
+ __arm_mte_ptrdiff(0); /* { dg-error {} } */
+ __arm_mte_ptrdiff(); /* { dg-error {} } */
+
+ const char *c;
+ uint64_t i;
+ __arm_mte_exclude_tag(i, 0); /* { dg-error {argument} } */
+ __arm_mte_create_random_tag (i, 0); /* { dg-error {argument} } */
+ __arm_mte_set_tag(i); /* { dg-error {argument} } */
+ __arm_mte_get_tag(i); /* { dg-error {argument} } */
+ __arm_mte_increment_tag (i, 15); /* { dg-error {argument} } */
+ __arm_mte_ptrdiff(c, i); /* { dg-error {argument} } */
+ __arm_mte_ptrdiff(i, c); /* { dg-error {argument} } */
+
+ __arm_mte_exclude_tag(1, 0); /* { dg-error {argument} } */
+ __arm_mte_create_random_tag (1, 0); /* { dg-error {argument} } */
+ __arm_mte_set_tag(1); /* { dg-error {argument} } */
+ __arm_mte_get_tag(1); /* { dg-error {argument} } */
+ __arm_mte_increment_tag (1, 15); /* { dg-error {argument} } */
+ __arm_mte_ptrdiff(c, 1); /* { dg-error {argument} } */
+ __arm_mte_ptrdiff(1, c); /* { dg-error {argument} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/acle/rwsr-1.c b/gcc/testsuite/gcc.target/aarch64/acle/rwsr-1.c
new file mode 100644
index 0000000..c99805d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/acle/rwsr-1.c
@@ -0,0 +1,28 @@
+/* Test the __arm_[r,w]sr ACLE intrinsics family. */
+/* Ensure that illegal behavior is rejected by the compiler. */
+
+/* { dg-do compile } */
+/* { dg-options "-O3 -march=armv8-a" } */
+
+#include <arm_acle.h>
+
+/* Ensure that read/write-only register attributes are respected by the compiler. */
+void
+test_rwsr_read_write_only ()
+{
+ /* Attempt to write to read-only registers. */
+ long long a = __arm_rsr64 ("aidr_el1"); /* Read ok. */
+ __arm_wsr64 ("aidr_el1", a); /* { dg-error {invalid system register name 'aidr_el1'} } */
+
+ /* Attempt to read from write-only registers. */
+ __arm_wsr64 ("icc_asgi1r_el1", a); /* Write ok. */
+ long long b = __arm_rsr64 ("icc_asgi1r_el1"); /* { dg-error {invalid system register name 'icc_asgi1r_el1'} } */
+}
+
+/* Ensure that empty strings are rejected. */
+void
+test_empty_string ()
+{
+ long long c = __arm_rsr64 (""); /* { dg-error "invalid system register name ''" } */
+ __arm_wsr64 ("", c); /* { dg-error "invalid system register name ''" } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/acle/rwsr-2.c b/gcc/testsuite/gcc.target/aarch64/acle/rwsr-2.c
new file mode 100644
index 0000000..cca8892
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/acle/rwsr-2.c
@@ -0,0 +1,25 @@
+/* Test the __arm_[r,w]sr ACLE intrinsics family. */
+/* Ensure correctness of the system register encoding parser. */
+
+/* { dg-do compile } */
+/* { dg-options "-std=c2x -O3 -march=armv8-a" } */
+
+#include <arm_acle.h>
+
+void
+test_leading_zeros ()
+{
+ __uint64_t b = __arm_rsr64 ("S1_2_C03_C04_5"); /* { dg-error "invalid system register name 's1_2_c03_c04_5'" } */
+ __arm_wsr64 ("S1_2_C03_C04_5", b); /* { dg-error "invalid system register name 's1_2_c03_c04_5'" } */
+}
+
+void
+test_bounds ()
+{
+ __uint64_t b;
+ b = __arm_rsr64 ("s4_2_c3_c4_5"); /* { dg-error "invalid system register name 's4_2_c3_c4_5'" } */
+ b = __arm_rsr64 ("s1_8_c3_c4_5"); /* { dg-error "invalid system register name 's1_8_c3_c4_5'" } */
+ b = __arm_rsr64 ("s1_2_c16_c4_5"); /* { dg-error "invalid system register name 's1_2_c16_c4_5'" } */
+ b = __arm_rsr64 ("s1_2_c3_c16_5"); /* { dg-error "invalid system register name 's1_2_c3_c16_5'" } */
+ b = __arm_rsr64 ("s1_2_c3_c4_8"); /* { dg-error "invalid system register name 's1_2_c3_c4_8'" } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/acle/rwsr-3.c b/gcc/testsuite/gcc.target/aarch64/acle/rwsr-3.c
new file mode 100644
index 0000000..aadd04a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/acle/rwsr-3.c
@@ -0,0 +1,18 @@
+/* Test the __arm_[r,w]sr ACLE intrinsics family. */
+/* Ensure that illegal behavior is rejected by the compiler. */
+
+/* { dg-do compile } */
+/* { dg-options "-std=c2x -O3 -march=armv8-a" } */
+
+#include <arm_acle.h>
+
+void
+test_non_const_sysreg_name ()
+{
+ const char *regname = "trcseqstr";
+ long long a = __arm_rsr64 (regname); /* { dg-error "first argument to '__builtin_aarch64_rsr64' must be a string literal" } */
+ __arm_wsr64 (regname, a); /* { dg-error "first argument to '__builtin_aarch64_wsr64' must be a string literal" } */
+
+ long long b = __arm_rsr64 (nullptr); /* { dg-error "first argument to '__builtin_aarch64_rsr64' must be a string literal" } */
+ __arm_wsr64 (nullptr, b); /* { dg-error "first argument to '__builtin_aarch64_wsr64' must be a string literal" } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/acle/rwsr.c b/gcc/testsuite/gcc.target/aarch64/acle/rwsr.c
new file mode 100644
index 0000000..93c48c4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/acle/rwsr.c
@@ -0,0 +1,144 @@
+/* Test the __arm_[r,w]sr ACLE intrinsics family. */
+/* Check that function variants for different data types handle types correctly. */
+/* { dg-do compile } */
+/* { dg-options "-O1 -march=armv8-a" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include <arm_acle.h>
+
+/*
+** get_rsr:
+** ...
+** mrs x([0-9]+), s2_1_c0_c7_4
+** add w\1, w\1, 1
+** ...
+*/
+int
+get_rsr ()
+{
+ int a = __arm_rsr ("trcseqstr");
+ return a+1;
+}
+
+/*
+** get_rsrf:
+** mrs x([0-9]+), s2_1_c0_c7_4
+** fmov s[0-9]+, w\1
+** ...
+*/
+float
+get_rsrf ()
+{
+ return __arm_rsrf ("trcseqstr");
+}
+
+/*
+** get_rsrp:
+** mrs x0, s2_1_c0_c7_4
+** ret
+*/
+void *
+get_rsrp ()
+{
+ return __arm_rsrp ("trcseqstr");
+}
+
+/*
+** get_rsr64:
+** mrs x0, s2_1_c0_c7_4
+** ret
+*/
+long long
+get_rsr64 ()
+{
+ return __arm_rsr64 ("trcseqstr");
+}
+
+/*
+** get_rsrf64:
+** mrs x([0-9]+), s2_1_c0_c7_4
+** fmov d[0-9]+, x\1
+** ...
+*/
+double
+get_rsrf64 ()
+{
+ return __arm_rsrf64 ("trcseqstr");
+}
+
+/*
+** set_wsr32:
+** ...
+** add w([0-9]+), w\1, 1
+** msr s2_1_c0_c7_4, x\1
+** ...
+*/
+void
+set_wsr32 (int a)
+{
+ __arm_wsr ("trcseqstr", a+1);
+}
+
+/*
+** set_wsrp:
+** ...
+** msr s2_1_c0_c7_4, x[0-9]+
+** ...
+*/
+void
+set_wsrp (void *a)
+{
+ __arm_wsrp ("trcseqstr", a);
+}
+
+/*
+** set_wsr64:
+** ...
+** msr s2_1_c0_c7_4, x[0-9]+
+** ...
+*/
+void
+set_wsr64 (long long a)
+{
+ __arm_wsr64 ("trcseqstr", a);
+}
+
+/*
+** set_wsrf32:
+** ...
+** fmov w([0-9]+), s[0-9]+
+** msr s2_1_c0_c7_4, x\1
+** ...
+*/
+void
+set_wsrf32 (float a)
+{
+ __arm_wsrf ("trcseqstr", a);
+}
+
+/*
+** set_wsrf64:
+** ...
+** fmov x([0-9]+), d[0-9]+
+** msr s2_1_c0_c7_4, x\1
+** ...
+*/
+void
+set_wsrf64 (double a)
+{
+ __arm_wsrf64 ("trcseqstr", a);
+}
+
+/*
+** set_custom:
+** ...
+** mrs x0, s1_2_c3_c4_5
+** ...
+** msr s1_2_c3_c4_5, x0
+** ...
+*/
+void set_custom ()
+{
+ __uint64_t b = __arm_rsr64 ("S1_2_C3_C4_5");
+ __arm_wsr64 ("S1_2_C3_C4_5", b);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/auto-init-1.c b/gcc/testsuite/gcc.target/aarch64/auto-init-1.c
index 0fa4708..45bb025 100644
--- a/gcc/testsuite/gcc.target/aarch64/auto-init-1.c
+++ b/gcc/testsuite/gcc.target/aarch64/auto-init-1.c
@@ -29,4 +29,5 @@ void foo()
return;
}
-/* { dg-final { scan-rtl-dump-times "const_int 0" 11 "expand" } } */
+/* Includes 1 for the call instruction and 1 for a nop. */
+/* { dg-final { scan-rtl-dump-times "const_int 0" 10 "expand" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/csinc-3.c b/gcc/testsuite/gcc.target/aarch64/csinc-3.c
new file mode 100644
index 0000000..bde131a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/csinc-3.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fno-tree-vectorize" } */
+
+int f(int *a, int n, int *b, int d)
+{
+ for(int i = 0; i < n; i++)
+ b[i] = a[i] == 100 ? 1 : d;
+ /* { dg-final { scan-assembler "csinc\tw\[0-9\].*wzr" } } */
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/eh_return-2.c b/gcc/testsuite/gcc.target/aarch64/eh_return-2.c
new file mode 100644
index 0000000..4a9d124
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/eh_return-2.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-final { scan-assembler "add\tsp, sp, x5" } } */
+/* { dg-final { scan-assembler "br\tx6" } } */
+
+void
+foo (unsigned long off, void *handler)
+{
+ __builtin_eh_return (off, handler);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/eh_return-3.c b/gcc/testsuite/gcc.target/aarch64/eh_return-3.c
new file mode 100644
index 0000000..d180fa7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/eh_return-3.c
@@ -0,0 +1,32 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mbranch-protection=pac-ret+leaf -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+/*
+**foo:
+** hint 25 // paciasp
+** ...
+** cbz w2, .*
+** mov x4, 0
+** ...
+** cbz x4, .*
+** add sp, sp, x5
+** br x6
+** (
+** hint 29 // autiasp
+** ret
+** |
+** retaa
+** )
+** mov x5, x0
+** mov x4, 1
+** mov x6, x1
+** b .*
+*/
+void
+foo (unsigned long off, void *handler, int c)
+{
+ if (c)
+ return;
+ __builtin_eh_return (off, handler);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/movdf_2.c b/gcc/testsuite/gcc.target/aarch64/movdf_2.c
new file mode 100644
index 0000000..0d459d3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movdf_2.c
@@ -0,0 +1,51 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+/*
+** fpr_to_fpr:
+** fmov d0, d1
+** ret
+*/
+double
+fpr_to_fpr (double q0, double q1) [[arm::streaming_compatible]]
+{
+ return q1;
+}
+
+/*
+** gpr_to_fpr:
+** fmov d0, x0
+** ret
+*/
+double
+gpr_to_fpr () [[arm::streaming_compatible]]
+{
+ register double x0 asm ("x0");
+ asm volatile ("" : "=r" (x0));
+ return x0;
+}
+
+/*
+** zero_to_fpr:
+** fmov d0, xzr
+** ret
+*/
+double
+zero_to_fpr () [[arm::streaming_compatible]]
+{
+ return 0;
+}
+
+/*
+** fpr_to_gpr:
+** fmov x0, d0
+** ret
+*/
+void
+fpr_to_gpr (double q0) [[arm::streaming_compatible]]
+{
+ register double x0 asm ("x0");
+ x0 = q0;
+ asm volatile ("" :: "r" (x0));
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/movdi_3.c b/gcc/testsuite/gcc.target/aarch64/movdi_3.c
new file mode 100644
index 0000000..31b2cbb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movdi_3.c
@@ -0,0 +1,59 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#include <stdint.h>
+
+/*
+** fpr_to_fpr:
+** fmov d0, d1
+** ret
+*/
+void
+fpr_to_fpr (void) [[arm::streaming_compatible]]
+{
+ register uint64_t q0 asm ("q0");
+ register uint64_t q1 asm ("q1");
+ asm volatile ("" : "=w" (q1));
+ q0 = q1;
+ asm volatile ("" :: "w" (q0));
+}
+
+/*
+** gpr_to_fpr:
+** fmov d0, x0
+** ret
+*/
+void
+gpr_to_fpr (uint64_t x0) [[arm::streaming_compatible]]
+{
+ register uint64_t q0 asm ("q0");
+ q0 = x0;
+ asm volatile ("" :: "w" (q0));
+}
+
+/*
+** zero_to_fpr:
+** fmov d0, xzr
+** ret
+*/
+void
+zero_to_fpr () [[arm::streaming_compatible]]
+{
+ register uint64_t q0 asm ("q0");
+ q0 = 0;
+ asm volatile ("" :: "w" (q0));
+}
+
+/*
+** fpr_to_gpr:
+** fmov x0, d0
+** ret
+*/
+uint64_t
+fpr_to_gpr () [[arm::streaming_compatible]]
+{
+ register uint64_t q0 asm ("q0");
+ asm volatile ("" : "=w" (q0));
+ return q0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/movhf_2.c b/gcc/testsuite/gcc.target/aarch64/movhf_2.c
new file mode 100644
index 0000000..3292b0d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movhf_2.c
@@ -0,0 +1,53 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC target "+nothing+simd"
+
+/*
+** fpr_to_fpr:
+** fmov s0, s1
+** ret
+*/
+_Float16
+fpr_to_fpr (_Float16 q0, _Float16 q1) [[arm::streaming_compatible]]
+{
+ return q1;
+}
+
+/*
+** gpr_to_fpr:
+** fmov s0, w0
+** ret
+*/
+_Float16
+gpr_to_fpr () [[arm::streaming_compatible]]
+{
+ register _Float16 w0 asm ("w0");
+ asm volatile ("" : "=r" (w0));
+ return w0;
+}
+
+/*
+** zero_to_fpr:
+** fmov s0, wzr
+** ret
+*/
+_Float16
+zero_to_fpr () [[arm::streaming_compatible]]
+{
+ return 0;
+}
+
+/*
+** fpr_to_gpr:
+** fmov w0, s0
+** ret
+*/
+void
+fpr_to_gpr (_Float16 q0) [[arm::streaming_compatible]]
+{
+ register _Float16 w0 asm ("w0");
+ w0 = q0;
+ asm volatile ("" :: "r" (w0));
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/movhi_2.c b/gcc/testsuite/gcc.target/aarch64/movhi_2.c
new file mode 100644
index 0000000..dbbf348
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movhi_2.c
@@ -0,0 +1,61 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC target "+nothing+simd"
+
+#include <stdint.h>
+
+/*
+** fpr_to_fpr:
+** fmov s0, s1
+** ret
+*/
+void
+fpr_to_fpr (void) [[arm::streaming_compatible]]
+{
+ register uint16_t q0 asm ("q0");
+ register uint16_t q1 asm ("q1");
+ asm volatile ("" : "=w" (q1));
+ q0 = q1;
+ asm volatile ("" :: "w" (q0));
+}
+
+/*
+** gpr_to_fpr:
+** fmov s0, w0
+** ret
+*/
+void
+gpr_to_fpr (uint16_t w0) [[arm::streaming_compatible]]
+{
+ register uint16_t q0 asm ("q0");
+ q0 = w0;
+ asm volatile ("" :: "w" (q0));
+}
+
+/*
+** zero_to_fpr:
+** fmov s0, wzr
+** ret
+*/
+void
+zero_to_fpr () [[arm::streaming_compatible]]
+{
+ register uint16_t q0 asm ("q0");
+ q0 = 0;
+ asm volatile ("" :: "w" (q0));
+}
+
+/*
+** fpr_to_gpr:
+** umov w0, v0.h\[0\]
+** ret
+*/
+uint16_t
+fpr_to_gpr () [[arm::streaming_compatible]]
+{
+ register uint16_t q0 asm ("q0");
+ asm volatile ("" : "=w" (q0));
+ return q0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/movqi_2.c b/gcc/testsuite/gcc.target/aarch64/movqi_2.c
new file mode 100644
index 0000000..aec087e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movqi_2.c
@@ -0,0 +1,59 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#include <stdint.h>
+
+/*
+** fpr_to_fpr:
+** fmov s0, s1
+** ret
+*/
+void
+fpr_to_fpr (void) [[arm::streaming_compatible]]
+{
+ register uint8_t q0 asm ("q0");
+ register uint8_t q1 asm ("q1");
+ asm volatile ("" : "=w" (q1));
+ q0 = q1;
+ asm volatile ("" :: "w" (q0));
+}
+
+/*
+** gpr_to_fpr:
+** fmov s0, w0
+** ret
+*/
+void
+gpr_to_fpr (uint8_t w0) [[arm::streaming_compatible]]
+{
+ register uint8_t q0 asm ("q0");
+ q0 = w0;
+ asm volatile ("" :: "w" (q0));
+}
+
+/*
+** zero_to_fpr:
+** fmov s0, wzr
+** ret
+*/
+void
+zero_to_fpr () [[arm::streaming_compatible]]
+{
+ register uint8_t q0 asm ("q0");
+ q0 = 0;
+ asm volatile ("" :: "w" (q0));
+}
+
+/*
+** fpr_to_gpr:
+** umov w0, v0.b\[0\]
+** ret
+*/
+uint8_t
+fpr_to_gpr () [[arm::streaming_compatible]]
+{
+ register uint8_t q0 asm ("q0");
+ asm volatile ("" : "=w" (q0));
+ return q0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/movsf_2.c b/gcc/testsuite/gcc.target/aarch64/movsf_2.c
new file mode 100644
index 0000000..7fed4b2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movsf_2.c
@@ -0,0 +1,51 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+/*
+** fpr_to_fpr:
+** fmov s0, s1
+** ret
+*/
+float
+fpr_to_fpr (float q0, float q1) [[arm::streaming_compatible]]
+{
+ return q1;
+}
+
+/*
+** gpr_to_fpr:
+** fmov s0, w0
+** ret
+*/
+float
+gpr_to_fpr () [[arm::streaming_compatible]]
+{
+ register float w0 asm ("w0");
+ asm volatile ("" : "=r" (w0));
+ return w0;
+}
+
+/*
+** zero_to_fpr:
+** fmov s0, wzr
+** ret
+*/
+float
+zero_to_fpr () [[arm::streaming_compatible]]
+{
+ return 0;
+}
+
+/*
+** fpr_to_gpr:
+** fmov w0, s0
+** ret
+*/
+void
+fpr_to_gpr (float q0) [[arm::streaming_compatible]]
+{
+ register float w0 asm ("w0");
+ w0 = q0;
+ asm volatile ("" :: "r" (w0));
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/movsi_2.c b/gcc/testsuite/gcc.target/aarch64/movsi_2.c
new file mode 100644
index 0000000..c14d246
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movsi_2.c
@@ -0,0 +1,59 @@
+/* { dg-do assemble } */
+/* { dg-options "-O --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#include <stdint.h>
+
+/*
+** fpr_to_fpr:
+** fmov s0, s1
+** ret
+*/
+void
+fpr_to_fpr (void) [[arm::streaming_compatible]]
+{
+ register uint32_t q0 asm ("q0");
+ register uint32_t q1 asm ("q1");
+ asm volatile ("" : "=w" (q1));
+ q0 = q1;
+ asm volatile ("" :: "w" (q0));
+}
+
+/*
+** gpr_to_fpr:
+** fmov s0, w0
+** ret
+*/
+void
+gpr_to_fpr (uint32_t w0) [[arm::streaming_compatible]]
+{
+ register uint32_t q0 asm ("q0");
+ q0 = w0;
+ asm volatile ("" :: "w" (q0));
+}
+
+/*
+** zero_to_fpr:
+** fmov s0, wzr
+** ret
+*/
+void
+zero_to_fpr () [[arm::streaming_compatible]]
+{
+ register uint32_t q0 asm ("q0");
+ q0 = 0;
+ asm volatile ("" :: "w" (q0));
+}
+
+/*
+** fpr_to_gpr:
+** fmov w0, s0
+** ret
+*/
+uint32_t
+fpr_to_gpr () [[arm::streaming_compatible]]
+{
+ register uint32_t q0 asm ("q0");
+ asm volatile ("" : "=w" (q0));
+ return q0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/movtf_3.c b/gcc/testsuite/gcc.target/aarch64/movtf_3.c
new file mode 100644
index 0000000..dd164a4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movtf_3.c
@@ -0,0 +1,81 @@
+/* { dg-do assemble } */
+/* { dg-require-effective-target large_long_double } */
+/* { dg-options "-O -mtune=neoverse-v1 --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC target "+nosve"
+
+/*
+** fpr_to_fpr:
+** sub sp, sp, #16
+** str q1, \[sp\]
+** ldr q0, \[sp\]
+** add sp, sp, #?16
+** ret
+*/
+long double
+fpr_to_fpr (long double q0, long double q1) [[arm::streaming_compatible]]
+{
+ return q1;
+}
+
+/*
+** gpr_to_fpr: { target aarch64_little_endian }
+** fmov d0, x0
+** fmov v0.d\[1\], x1
+** ret
+*/
+/*
+** gpr_to_fpr: { target aarch64_big_endian }
+** fmov d0, x1
+** fmov v0.d\[1\], x0
+** ret
+*/
+long double
+gpr_to_fpr () [[arm::streaming_compatible]]
+{
+ register long double x0 asm ("x0");
+ asm volatile ("" : "=r" (x0));
+ return x0;
+}
+
+/*
+** zero_to_fpr:
+** fmov s0, wzr
+** ret
+*/
+long double
+zero_to_fpr () [[arm::streaming_compatible]]
+{
+ return 0;
+}
+
+/*
+** fpr_to_gpr: { target aarch64_little_endian }
+** (
+** fmov x0, d0
+** fmov x1, v0.d\[1\]
+** |
+** fmov x1, v0.d\[1\]
+** fmov x0, d0
+** )
+** ret
+*/
+/*
+** fpr_to_gpr: { target aarch64_big_endian }
+** (
+** fmov x1, d0
+** fmov x0, v0.d\[1\]
+** |
+** fmov x0, v0.d\[1\]
+** fmov x1, d0
+** )
+** ret
+*/
+void
+fpr_to_gpr (long double q0) [[arm::streaming_compatible]]
+{
+ register long double x0 asm ("x0");
+ x0 = q0;
+ asm volatile ("" :: "r" (x0));
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/movtf_4.c b/gcc/testsuite/gcc.target/aarch64/movtf_4.c
new file mode 100644
index 0000000..faf9703
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movtf_4.c
@@ -0,0 +1,78 @@
+/* { dg-do assemble } */
+/* { dg-require-effective-target large_long_double } */
+/* { dg-options "-O -mtune=neoverse-v1 --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC target "+sve"
+
+/*
+** fpr_to_fpr:
+** mov z0.d, z1.d
+** ret
+*/
+long double
+fpr_to_fpr (long double q0, long double q1) [[arm::streaming_compatible]]
+{
+ return q1;
+}
+
+/*
+** gpr_to_fpr: { target aarch64_little_endian }
+** fmov d0, x0
+** fmov v0.d\[1\], x1
+** ret
+*/
+/*
+** gpr_to_fpr: { target aarch64_big_endian }
+** fmov d0, x1
+** fmov v0.d\[1\], x0
+** ret
+*/
+long double
+gpr_to_fpr () [[arm::streaming_compatible]]
+{
+ register long double x0 asm ("x0");
+ asm volatile ("" : "=r" (x0));
+ return x0;
+}
+
+/*
+** zero_to_fpr:
+** fmov s0, wzr
+** ret
+*/
+long double
+zero_to_fpr () [[arm::streaming_compatible]]
+{
+ return 0;
+}
+
+/*
+** fpr_to_gpr: { target aarch64_little_endian }
+** (
+** fmov x0, d0
+** fmov x1, v0.d\[1\]
+** |
+** fmov x1, v0.d\[1\]
+** fmov x0, d0
+** )
+** ret
+*/
+/*
+** fpr_to_gpr: { target aarch64_big_endian }
+** (
+** fmov x1, d0
+** fmov x0, v0.d\[1\]
+** |
+** fmov x0, v0.d\[1\]
+** fmov x1, d0
+** )
+** ret
+*/
+void
+fpr_to_gpr (long double q0) [[arm::streaming_compatible]]
+{
+ register long double x0 asm ("x0");
+ x0 = q0;
+ asm volatile ("" :: "r" (x0));
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/movti_3.c b/gcc/testsuite/gcc.target/aarch64/movti_3.c
new file mode 100644
index 0000000..2431091
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movti_3.c
@@ -0,0 +1,86 @@
+/* { dg-do assemble } */
+/* { dg-options "-O -mtune=neoverse-v1 --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC target "+nosve"
+
+/*
+** fpr_to_fpr:
+** sub sp, sp, #16
+** str q1, \[sp\]
+** ldr q0, \[sp\]
+** add sp, sp, #?16
+** ret
+*/
+void
+fpr_to_fpr (void) [[arm::streaming_compatible]]
+{
+ register __int128_t q0 asm ("q0");
+ register __int128_t q1 asm ("q1");
+ asm volatile ("" : "=w" (q1));
+ q0 = q1;
+ asm volatile ("" :: "w" (q0));
+}
+
+/*
+** gpr_to_fpr: { target aarch64_little_endian }
+** fmov d0, x0
+** fmov v0.d\[1\], x1
+** ret
+*/
+/*
+** gpr_to_fpr: { target aarch64_big_endian }
+** fmov d0, x1
+** fmov v0.d\[1\], x0
+** ret
+*/
+void
+gpr_to_fpr (__int128_t x0) [[arm::streaming_compatible]]
+{
+ register __int128_t q0 asm ("q0");
+ q0 = x0;
+ asm volatile ("" :: "w" (q0));
+}
+
+/*
+** zero_to_fpr:
+** fmov d0, xzr
+** ret
+*/
+void
+zero_to_fpr () [[arm::streaming_compatible]]
+{
+ register __int128_t q0 asm ("q0");
+ q0 = 0;
+ asm volatile ("" :: "w" (q0));
+}
+
+/*
+** fpr_to_gpr: { target aarch64_little_endian }
+** (
+** fmov x0, d0
+** fmov x1, v0.d\[1\]
+** |
+** fmov x1, v0.d\[1\]
+** fmov x0, d0
+** )
+** ret
+*/
+/*
+** fpr_to_gpr: { target aarch64_big_endian }
+** (
+** fmov x1, d0
+** fmov x0, v0.d\[1\]
+** |
+** fmov x0, v0.d\[1\]
+** fmov x1, d0
+** )
+** ret
+*/
+__int128_t
+fpr_to_gpr () [[arm::streaming_compatible]]
+{
+ register __int128_t q0 asm ("q0");
+ asm volatile ("" : "=w" (q0));
+ return q0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/movti_4.c b/gcc/testsuite/gcc.target/aarch64/movti_4.c
new file mode 100644
index 0000000..a70fecc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movti_4.c
@@ -0,0 +1,83 @@
+/* { dg-do assemble } */
+/* { dg-options "-O -mtune=neoverse-v1 --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC target "+sve"
+
+/*
+** fpr_to_fpr:
+** mov z0\.d, z1\.d
+** ret
+*/
+void
+fpr_to_fpr (void) [[arm::streaming_compatible]]
+{
+ register __int128_t q0 asm ("q0");
+ register __int128_t q1 asm ("q1");
+ asm volatile ("" : "=w" (q1));
+ q0 = q1;
+ asm volatile ("" :: "w" (q0));
+}
+
+/*
+** gpr_to_fpr: { target aarch64_little_endian }
+** fmov d0, x0
+** fmov v0.d\[1\], x1
+** ret
+*/
+/*
+** gpr_to_fpr: { target aarch64_big_endian }
+** fmov d0, x1
+** fmov v0.d\[1\], x0
+** ret
+*/
+void
+gpr_to_fpr (__int128_t x0) [[arm::streaming_compatible]]
+{
+ register __int128_t q0 asm ("q0");
+ q0 = x0;
+ asm volatile ("" :: "w" (q0));
+}
+
+/*
+** zero_to_fpr:
+** fmov d0, xzr
+** ret
+*/
+void
+zero_to_fpr () [[arm::streaming_compatible]]
+{
+ register __int128_t q0 asm ("q0");
+ q0 = 0;
+ asm volatile ("" :: "w" (q0));
+}
+
+/*
+** fpr_to_gpr: { target aarch64_little_endian }
+** (
+** fmov x0, d0
+** fmov x1, v0.d\[1\]
+** |
+** fmov x1, v0.d\[1\]
+** fmov x0, d0
+** )
+** ret
+*/
+/*
+** fpr_to_gpr: { target aarch64_big_endian }
+** (
+** fmov x1, d0
+** fmov x0, v0.d\[1\]
+** |
+** fmov x0, v0.d\[1\]
+** fmov x1, d0
+** )
+** ret
+*/
+__int128_t
+fpr_to_gpr () [[arm::streaming_compatible]]
+{
+ register __int128_t q0 asm ("q0");
+ asm volatile ("" : "=w" (q0));
+ return q0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/movv16qi_4.c b/gcc/testsuite/gcc.target/aarch64/movv16qi_4.c
new file mode 100644
index 0000000..7bec888
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv16qi_4.c
@@ -0,0 +1,82 @@
+/* { dg-do assemble } */
+/* { dg-options "-O -mtune=neoverse-v1 --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC target "+nosve"
+
+typedef unsigned char v16qi __attribute__((vector_size(16)));
+
+/*
+** fpr_to_fpr:
+** sub sp, sp, #16
+** str q1, \[sp\]
+** ldr q0, \[sp\]
+** add sp, sp, #?16
+** ret
+*/
+v16qi
+fpr_to_fpr (v16qi q0, v16qi q1) [[arm::streaming_compatible]]
+{
+ return q1;
+}
+
+/*
+** gpr_to_fpr: { target aarch64_little_endian }
+** fmov d0, x0
+** fmov v0.d\[1\], x1
+** ret
+*/
+/*
+** gpr_to_fpr: { target aarch64_big_endian }
+** fmov d0, x1
+** fmov v0.d\[1\], x0
+** ret
+*/
+v16qi
+gpr_to_fpr () [[arm::streaming_compatible]]
+{
+ register v16qi x0 asm ("x0");
+ asm volatile ("" : "=r" (x0));
+ return x0;
+}
+
+/*
+** zero_to_fpr:
+** fmov d0, xzr
+** ret
+*/
+v16qi
+zero_to_fpr () [[arm::streaming_compatible]]
+{
+ return (v16qi) {};
+}
+
+/*
+** fpr_to_gpr: { target aarch64_little_endian }
+** (
+** umov x0, v0.d\[0\]
+** fmov x1, v0.d\[1\]
+** |
+** fmov x1, v0.d\[1\]
+** umov x0, v0.d\[0\]
+** )
+** ret
+*/
+/*
+** fpr_to_gpr: { target aarch64_big_endian }
+** (
+** umov x1, v0.d\[0\]
+** fmov x0, v0.d\[1\]
+** |
+** fmov x0, v0.d\[1\]
+** umov x1, v0.d\[0\]
+** )
+** ret
+*/
+void
+fpr_to_gpr (v16qi q0) [[arm::streaming_compatible]]
+{
+ register v16qi x0 asm ("x0");
+ x0 = q0;
+ asm volatile ("" :: "r" (x0));
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/movv16qi_5.c b/gcc/testsuite/gcc.target/aarch64/movv16qi_5.c
new file mode 100644
index 0000000..2d36342
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv16qi_5.c
@@ -0,0 +1,79 @@
+/* { dg-do assemble } */
+/* { dg-options "-O -mtune=neoverse-v1 --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC target "+sve"
+
+typedef unsigned char v16qi __attribute__((vector_size(16)));
+
+/*
+** fpr_to_fpr:
+** mov z0.d, z1.d
+** ret
+*/
+v16qi
+fpr_to_fpr (v16qi q0, v16qi q1) [[arm::streaming_compatible]]
+{
+ return q1;
+}
+
+/*
+** gpr_to_fpr: { target aarch64_little_endian }
+** fmov d0, x0
+** fmov v0.d\[1\], x1
+** ret
+*/
+/*
+** gpr_to_fpr: { target aarch64_big_endian }
+** fmov d0, x1
+** fmov v0.d\[1\], x0
+** ret
+*/
+v16qi
+gpr_to_fpr () [[arm::streaming_compatible]]
+{
+ register v16qi x0 asm ("x0");
+ asm volatile ("" : "=r" (x0));
+ return x0;
+}
+
+/*
+** zero_to_fpr:
+** fmov d0, xzr
+** ret
+*/
+v16qi
+zero_to_fpr () [[arm::streaming_compatible]]
+{
+ return (v16qi) {};
+}
+
+/*
+** fpr_to_gpr: { target aarch64_little_endian }
+** (
+** umov x0, v0.d\[0\]
+** fmov x1, v0.d\[1\]
+** |
+** fmov x1, v0.d\[1\]
+** umov x0, v0.d\[0\]
+** )
+** ret
+*/
+/*
+** fpr_to_gpr: { target aarch64_big_endian }
+** (
+** umov x1, v0.d\[0\]
+** fmov x0, v0.d\[1\]
+** |
+** fmov x0, v0.d\[1\]
+** umov x1, v0.d\[0\]
+** )
+** ret
+*/
+void
+fpr_to_gpr (v16qi q0) [[arm::streaming_compatible]]
+{
+ register v16qi x0 asm ("x0");
+ x0 = q0;
+ asm volatile ("" :: "r" (x0));
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/movv8qi_4.c b/gcc/testsuite/gcc.target/aarch64/movv8qi_4.c
new file mode 100644
index 0000000..12ae25a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/movv8qi_4.c
@@ -0,0 +1,55 @@
+/* { dg-do assemble } */
+/* { dg-options "-O -mtune=neoverse-v1 --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#pragma GCC target "+nosve"
+
+typedef unsigned char v8qi __attribute__((vector_size(8)));
+
+/*
+** fpr_to_fpr:
+** fmov d0, d1
+** ret
+*/
+v8qi
+fpr_to_fpr (v8qi q0, v8qi q1) [[arm::streaming_compatible]]
+{
+ return q1;
+}
+
+/*
+** gpr_to_fpr:
+** fmov d0, x0
+** ret
+*/
+v8qi
+gpr_to_fpr () [[arm::streaming_compatible]]
+{
+ register v8qi x0 asm ("x0");
+ asm volatile ("" : "=r" (x0));
+ return x0;
+}
+
+/*
+** zero_to_fpr:
+** fmov d0, xzr
+** ret
+*/
+v8qi
+zero_to_fpr () [[arm::streaming_compatible]]
+{
+ return (v8qi) {};
+}
+
+/*
+** fpr_to_gpr:
+** umov x0, v0\.d\[0\]
+** ret
+*/
+void
+fpr_to_gpr (v8qi q0) [[arm::streaming_compatible]]
+{
+ register v8qi x0 asm ("x0");
+ x0 = q0;
+ asm volatile ("" :: "r" (x0));
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/pr112406.c b/gcc/testsuite/gcc.target/aarch64/pr112406.c
index 46459c6..f418c05 100644
--- a/gcc/testsuite/gcc.target/aarch64/pr112406.c
+++ b/gcc/testsuite/gcc.target/aarch64/pr112406.c
@@ -2,10 +2,10 @@
/* { dg-options "-march=armv8-a+sve -w -Ofast" } */
typedef struct {
- int red
+ int red;
} MagickPixelPacket;
-GetImageChannelMoments_image, GetImageChannelMoments_image_0,
+int GetImageChannelMoments_image, GetImageChannelMoments_image_0,
GetImageChannelMoments___trans_tmp_1, GetImageChannelMoments_M11_0,
GetImageChannelMoments_pixel_3, GetImageChannelMoments_y,
GetImageChannelMoments_p;
@@ -15,10 +15,12 @@ double GetImageChannelMoments_M00_0, GetImageChannelMoments_M00_1,
MagickPixelPacket GetImageChannelMoments_pixel;
+void
SetMagickPixelPacket(int color, MagickPixelPacket *pixel) {
pixel->red = color;
}
+void
GetImageChannelMoments() {
for (; GetImageChannelMoments_y; GetImageChannelMoments_y++) {
SetMagickPixelPacket(GetImageChannelMoments_p,
@@ -33,5 +35,5 @@ GetImageChannelMoments() {
GetImageChannelMoments_M01_1 +=
GetImageChannelMoments_y * GetImageChannelMoments_p++;
}
- GetImageChannelMoments___trans_tmp_1 = atan(GetImageChannelMoments_M11_0);
+ GetImageChannelMoments___trans_tmp_1 = __builtin_atan(GetImageChannelMoments_M11_0);
}
diff --git a/gcc/testsuite/gcc.target/aarch64/pragma_cpp_predefs_4.c b/gcc/testsuite/gcc.target/aarch64/pragma_cpp_predefs_4.c
index 0e6461f..23ebe5e 100644
--- a/gcc/testsuite/gcc.target/aarch64/pragma_cpp_predefs_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/pragma_cpp_predefs_4.c
@@ -45,3 +45,41 @@
#ifdef __ARM_FEATURE_SVE2_SHA3
#error Foo
#endif
+
+#pragma GCC target "+sme"
+#ifndef __ARM_FEATURE_SME
+#error Foo
+#endif
+
+#pragma GCC target "+sme+nofp"
+#ifdef __ARM_FEATURE_SME
+#error Foo
+#endif
+
+#pragma GCC target "+sme+nosimd"
+#ifdef __ARM_FEATURE_SME
+#error Foo
+#endif
+
+#pragma GCC target "+sme+nobf16"
+#ifdef __ARM_FEATURE_SME
+#error Foo
+#endif
+
+#pragma GCC target "+nothing+sme"
+#ifdef __ARM_FEATURE_SME_I16I64
+#error Foo
+#endif
+#ifdef __ARM_FEATURE_SME_F64F64
+#error Foo
+#endif
+
+#pragma GCC target "+sme-i16i64"
+#ifndef __ARM_FEATURE_SME_I16I64
+#error Foo
+#endif
+
+#pragma GCC target "+sme-f64f64"
+#ifndef __ARM_FEATURE_SME_F64F64
+#error Foo
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/return_address_sign_1.c b/gcc/testsuite/gcc.target/aarch64/return_address_sign_1.c
index 232ba67..114a9da 100644
--- a/gcc/testsuite/gcc.target/aarch64/return_address_sign_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/return_address_sign_1.c
@@ -37,16 +37,5 @@ func3 (int a, int b, int c)
/* autiasp */
}
-/* eh_return. */
-void __attribute__ ((target ("arch=armv8.3-a")))
-func4 (long offset, void *handler, int *ptr, int imm1, int imm2)
-{
- /* no paciasp */
- *ptr = imm1 + foo (imm1) + imm2;
- __builtin_eh_return (offset, handler);
- /* no autiasp */
- return;
-}
-
-/* { dg-final { scan-assembler-times "autiasp" 3 } } */
/* { dg-final { scan-assembler-times "paciasp" 3 } } */
+/* { dg-final { scan-assembler-times "autiasp" 3 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/return_address_sign_2.c b/gcc/testsuite/gcc.target/aarch64/return_address_sign_2.c
index a4bc5b4..d93492c 100644
--- a/gcc/testsuite/gcc.target/aarch64/return_address_sign_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/return_address_sign_2.c
@@ -14,5 +14,18 @@ func1 (int a, int b, int c)
/* retaa */
}
-/* { dg-final { scan-assembler-times "paciasp" 1 } } */
-/* { dg-final { scan-assembler-times "retaa" 1 } } */
+/* eh_return. */
+void __attribute__ ((target ("arch=armv8.3-a")))
+func4 (long offset, void *handler, int *ptr, int imm1, int imm2)
+{
+ /* paciasp */
+ *ptr = imm1 + foo (imm1) + imm2;
+ if (handler)
+ /* br */
+ __builtin_eh_return (offset, handler);
+ /* retaa */
+ return;
+}
+
+/* { dg-final { scan-assembler-times "paciasp" 2 } } */
+/* { dg-final { scan-assembler-times "retaa" 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/return_address_sign_b_1.c b/gcc/testsuite/gcc.target/aarch64/return_address_sign_b_1.c
index 43e32ab..697fa30 100644
--- a/gcc/testsuite/gcc.target/aarch64/return_address_sign_b_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/return_address_sign_b_1.c
@@ -37,16 +37,5 @@ func3 (int a, int b, int c)
/* autibsp */
}
-/* eh_return. */
-void __attribute__ ((target ("arch=armv8.3-a")))
-func4 (long offset, void *handler, int *ptr, int imm1, int imm2)
-{
- /* no pacibsp */
- *ptr = imm1 + foo (imm1) + imm2;
- __builtin_eh_return (offset, handler);
- /* no autibsp */
- return;
-}
-
/* { dg-final { scan-assembler-times "pacibsp" 3 } } */
/* { dg-final { scan-assembler-times "autibsp" 3 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/return_address_sign_b_2.c b/gcc/testsuite/gcc.target/aarch64/return_address_sign_b_2.c
index 9ed64ce..452240b 100644
--- a/gcc/testsuite/gcc.target/aarch64/return_address_sign_b_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/return_address_sign_b_2.c
@@ -14,5 +14,18 @@ func1 (int a, int b, int c)
/* retab */
}
-/* { dg-final { scan-assembler-times "pacibsp" 1 } } */
-/* { dg-final { scan-assembler-times "retab" 1 } } */
+/* eh_return. */
+void __attribute__ ((target ("arch=armv8.3-a")))
+func4 (long offset, void *handler, int *ptr, int imm1, int imm2)
+{
+ /* pacibsp */
+ *ptr = imm1 + foo (imm1) + imm2;
+ if (handler)
+ /* br */
+ __builtin_eh_return (offset, handler);
+ /* retab */
+ return;
+}
+
+/* { dg-final { scan-assembler-times "pacibsp" 2 } } */
+/* { dg-final { scan-assembler-times "retab" 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/aarch64-sme-acle-asm.exp b/gcc/testsuite/gcc.target/aarch64/sme/aarch64-sme-acle-asm.exp
new file mode 100644
index 0000000..e2d002f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/aarch64-sme-acle-asm.exp
@@ -0,0 +1,81 @@
+# Assembly-based regression-test driver for the SME ACLE.
+# Copyright (C) 2009-2023 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>. */
+
+# GCC testsuite that uses the `dg.exp' driver.
+
+# Exit immediately if this isn't an AArch64 target.
+if {![istarget aarch64*-*-*] } {
+ return
+}
+
+# Load support procs.
+load_lib gcc-dg.exp
+
+# Initialize `dg'.
+dg-init
+
+# Force SME if we're not testing it already.
+if { [check_effective_target_aarch64_sme] } {
+ set sme_flags ""
+} else {
+ set sme_flags "-march=armv9-a+sme"
+}
+
+# Turn off any codegen tweaks by default that may affect expected assembly.
+# Tests relying on those should turn them on explicitly.
+set sme_flags "$sme_flags -mtune=generic -moverride=tune=none"
+
+global gcc_runtest_parallelize_limit_minor
+if { [info exists gcc_runtest_parallelize_limit_minor] } {
+ set old_limit_minor $gcc_runtest_parallelize_limit_minor
+ set gcc_runtest_parallelize_limit_minor 1
+}
+
+torture-init
+set-torture-options {
+ "-std=c90 -O0 -g"
+ "-std=c99 -Og -g"
+ "-std=c11 -Os -g"
+ "-std=c23 -O2 -fno-schedule-insns -fno-schedule-insns2 -DCHECK_ASM --save-temps"
+ "-std=gnu90 -O3 -g"
+ "-std=gnu23 -Ofast -g"
+} {
+ "-DTEST_FULL"
+ "-DTEST_OVERLOADS"
+}
+
+# Main loop.
+set files [glob -nocomplain $srcdir/$subdir/acle-asm/*.c]
+set save-dg-do-what-default ${dg-do-what-default}
+if { [check_effective_target_aarch64_asm_sme-i16i64_ok] } {
+ set dg-do-what-default assemble
+} else {
+ set dg-do-what-default compile
+}
+gcc-dg-runtest [lsort $files] "" "$sme_flags -fno-ipa-icf"
+set dg-do-what-default ${save-dg-do-what-default}
+
+torture-finish
+
+if { [info exists gcc_runtest_parallelize_limit_minor] } {
+ set gcc_runtest_parallelize_limit_minor $old_limit_minor
+}
+
+# All done.
+dg-finish
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/aarch64-sme.exp b/gcc/testsuite/gcc.target/aarch64/sme/aarch64-sme.exp
new file mode 100644
index 0000000..011310e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/aarch64-sme.exp
@@ -0,0 +1,46 @@
+# Specific regression driver for AArch64 SME.
+# Copyright (C) 2009-2023 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>. */
+
+# GCC testsuite that uses the `dg.exp' driver.
+
+# Exit immediately if this isn't an AArch64 target.
+if {![istarget aarch64*-*-*] } {
+ return
+}
+
+# Load support procs.
+load_lib gcc-dg.exp
+
+# Initialize `dg'.
+dg-init
+
+if { [check_effective_target_aarch64_sme] } {
+ set sme_flags ""
+} else {
+ set sme_flags "-march=armv9-a+sme"
+}
+
+aarch64-with-arch-dg-options $sme_flags {
+ # Main loop.
+ dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cCS\]]] \
+ "" $sme_flags
+}
+
+# All done.
+dg-finish
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/addha_za32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/addha_za32.c
new file mode 100644
index 0000000..8dee401
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/addha_za32.c
@@ -0,0 +1,48 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** addha_za32_s32_0_p0_p1_z0:
+** addha za0\.s, p0/m, p1/m, z0\.s
+** ret
+*/
+TEST_UNIFORM_ZA (addha_za32_s32_0_p0_p1_z0, svint32_t,
+ svaddha_za32_s32_m (0, p0, p1, z0),
+ svaddha_za32_m (0, p0, p1, z0))
+
+/*
+** addha_za32_s32_0_p1_p0_z1:
+** addha za0\.s, p1/m, p0/m, z1\.s
+** ret
+*/
+TEST_UNIFORM_ZA (addha_za32_s32_0_p1_p0_z1, svint32_t,
+ svaddha_za32_s32_m (0, p1, p0, z1),
+ svaddha_za32_m (0, p1, p0, z1))
+
+/*
+** addha_za32_s32_1_p0_p1_z0:
+** addha za1\.s, p0/m, p1/m, z0\.s
+** ret
+*/
+TEST_UNIFORM_ZA (addha_za32_s32_1_p0_p1_z0, svint32_t,
+ svaddha_za32_s32_m (1, p0, p1, z0),
+ svaddha_za32_m (1, p0, p1, z0))
+
+/*
+** addha_za32_s32_3_p0_p1_z0:
+** addha za3\.s, p0/m, p1/m, z0\.s
+** ret
+*/
+TEST_UNIFORM_ZA (addha_za32_s32_3_p0_p1_z0, svint32_t,
+ svaddha_za32_s32_m (3, p0, p1, z0),
+ svaddha_za32_m (3, p0, p1, z0))
+
+/*
+** addha_za32_u32_0_p0_p1_z0:
+** addha za0\.s, p0/m, p1/m, z0\.s
+** ret
+*/
+TEST_UNIFORM_ZA (addha_za32_u32_0_p0_p1_z0, svuint32_t,
+ svaddha_za32_u32_m (0, p0, p1, z0),
+ svaddha_za32_m (0, p0, p1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/addha_za64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/addha_za64.c
new file mode 100644
index 0000000..363ff1a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/addha_za64.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+#pragma GCC target "+sme-i16i64"
+
+/*
+** addha_za64_s64_0_p0_p1_z0:
+** addha za0\.d, p0/m, p1/m, z0\.d
+** ret
+*/
+TEST_UNIFORM_ZA (addha_za64_s64_0_p0_p1_z0, svint64_t,
+ svaddha_za64_s64_m (0, p0, p1, z0),
+ svaddha_za64_m (0, p0, p1, z0))
+
+/*
+** addha_za64_s64_0_p1_p0_z1:
+** addha za0\.d, p1/m, p0/m, z1\.d
+** ret
+*/
+TEST_UNIFORM_ZA (addha_za64_s64_0_p1_p0_z1, svint64_t,
+ svaddha_za64_s64_m (0, p1, p0, z1),
+ svaddha_za64_m (0, p1, p0, z1))
+
+/*
+** addha_za64_s64_1_p0_p1_z0:
+** addha za1\.d, p0/m, p1/m, z0\.d
+** ret
+*/
+TEST_UNIFORM_ZA (addha_za64_s64_1_p0_p1_z0, svint64_t,
+ svaddha_za64_s64_m (1, p0, p1, z0),
+ svaddha_za64_m (1, p0, p1, z0))
+
+/*
+** addha_za64_s64_7_p0_p1_z0:
+** addha za7\.d, p0/m, p1/m, z0\.d
+** ret
+*/
+TEST_UNIFORM_ZA (addha_za64_s64_7_p0_p1_z0, svint64_t,
+ svaddha_za64_s64_m (7, p0, p1, z0),
+ svaddha_za64_m (7, p0, p1, z0))
+
+/*
+** addha_za64_u64_0_p0_p1_z0:
+** addha za0\.d, p0/m, p1/m, z0\.d
+** ret
+*/
+TEST_UNIFORM_ZA (addha_za64_u64_0_p0_p1_z0, svuint64_t,
+ svaddha_za64_u64_m (0, p0, p1, z0),
+ svaddha_za64_m (0, p0, p1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/addva_za32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/addva_za32.c
new file mode 100644
index 0000000..0de019a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/addva_za32.c
@@ -0,0 +1,48 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** addva_za32_s32_0_p0_p1_z0:
+** addva za0\.s, p0/m, p1/m, z0\.s
+** ret
+*/
+TEST_UNIFORM_ZA (addva_za32_s32_0_p0_p1_z0, svint32_t,
+ svaddva_za32_s32_m (0, p0, p1, z0),
+ svaddva_za32_m (0, p0, p1, z0))
+
+/*
+** addva_za32_s32_0_p1_p0_z1:
+** addva za0\.s, p1/m, p0/m, z1\.s
+** ret
+*/
+TEST_UNIFORM_ZA (addva_za32_s32_0_p1_p0_z1, svint32_t,
+ svaddva_za32_s32_m (0, p1, p0, z1),
+ svaddva_za32_m (0, p1, p0, z1))
+
+/*
+** addva_za32_s32_1_p0_p1_z0:
+** addva za1\.s, p0/m, p1/m, z0\.s
+** ret
+*/
+TEST_UNIFORM_ZA (addva_za32_s32_1_p0_p1_z0, svint32_t,
+ svaddva_za32_s32_m (1, p0, p1, z0),
+ svaddva_za32_m (1, p0, p1, z0))
+
+/*
+** addva_za32_s32_3_p0_p1_z0:
+** addva za3\.s, p0/m, p1/m, z0\.s
+** ret
+*/
+TEST_UNIFORM_ZA (addva_za32_s32_3_p0_p1_z0, svint32_t,
+ svaddva_za32_s32_m (3, p0, p1, z0),
+ svaddva_za32_m (3, p0, p1, z0))
+
+/*
+** addva_za32_u32_0_p0_p1_z0:
+** addva za0\.s, p0/m, p1/m, z0\.s
+** ret
+*/
+TEST_UNIFORM_ZA (addva_za32_u32_0_p0_p1_z0, svuint32_t,
+ svaddva_za32_u32_m (0, p0, p1, z0),
+ svaddva_za32_m (0, p0, p1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/addva_za64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/addva_za64.c
new file mode 100644
index 0000000..d83d4e0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/addva_za64.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+#pragma GCC target "+sme-i16i64"
+
+/*
+** addva_za64_s64_0_p0_p1_z0:
+** addva za0\.d, p0/m, p1/m, z0\.d
+** ret
+*/
+TEST_UNIFORM_ZA (addva_za64_s64_0_p0_p1_z0, svint64_t,
+ svaddva_za64_s64_m (0, p0, p1, z0),
+ svaddva_za64_m (0, p0, p1, z0))
+
+/*
+** addva_za64_s64_0_p1_p0_z1:
+** addva za0\.d, p1/m, p0/m, z1\.d
+** ret
+*/
+TEST_UNIFORM_ZA (addva_za64_s64_0_p1_p0_z1, svint64_t,
+ svaddva_za64_s64_m (0, p1, p0, z1),
+ svaddva_za64_m (0, p1, p0, z1))
+
+/*
+** addva_za64_s64_1_p0_p1_z0:
+** addva za1\.d, p0/m, p1/m, z0\.d
+** ret
+*/
+TEST_UNIFORM_ZA (addva_za64_s64_1_p0_p1_z0, svint64_t,
+ svaddva_za64_s64_m (1, p0, p1, z0),
+ svaddva_za64_m (1, p0, p1, z0))
+
+/*
+** addva_za64_s64_7_p0_p1_z0:
+** addva za7\.d, p0/m, p1/m, z0\.d
+** ret
+*/
+TEST_UNIFORM_ZA (addva_za64_s64_7_p0_p1_z0, svint64_t,
+ svaddva_za64_s64_m (7, p0, p1, z0),
+ svaddva_za64_m (7, p0, p1, z0))
+
+/*
+** addva_za64_u64_0_p0_p1_z0:
+** addva za0\.d, p0/m, p1/m, z0\.d
+** ret
+*/
+TEST_UNIFORM_ZA (addva_za64_u64_0_p0_p1_z0, svuint64_t,
+ svaddva_za64_u64_m (0, p0, p1, z0),
+ svaddva_za64_m (0, p0, p1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/arm_has_sme_sc.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/arm_has_sme_sc.c
new file mode 100644
index 0000000..e37793f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/arm_has_sme_sc.c
@@ -0,0 +1,25 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#define STREAMING_COMPATIBLE
+#define NO_SHARED_ZA
+#include "test_sme_acle.h"
+
+#pragma GCC target "+nosme"
+
+/*
+** test_nosme:
+** ...
+** bl __arm_sme_state
+** lsr x0, x0, #?63
+** ...
+*/
+PROTO (test_nosme, int, ()) { return __arm_has_sme (); }
+
+#pragma GCC target "+sme"
+
+/*
+** test_sme:
+** mov w0, #?1
+** ret
+*/
+PROTO (test_sme, int, ()) { return __arm_has_sme (); }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/arm_in_streaming_mode_ns.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/arm_in_streaming_mode_ns.c
new file mode 100644
index 0000000..ba475d6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/arm_in_streaming_mode_ns.c
@@ -0,0 +1,11 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#define NON_STREAMING
+#include "test_sme_acle.h"
+
+/*
+** test_sme:
+** mov w0, #?0
+** ret
+*/
+PROTO (test_sme, int, ()) { return __arm_in_streaming_mode (); }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/arm_in_streaming_mode_s.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/arm_in_streaming_mode_s.c
new file mode 100644
index 0000000..b88d479
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/arm_in_streaming_mode_s.c
@@ -0,0 +1,11 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#define NO_SHARED_ZA
+#include "test_sme_acle.h"
+
+/*
+** test_sme:
+** mov w0, #?1
+** ret
+*/
+PROTO (test_sme, int, ()) { return __arm_in_streaming_mode (); }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/arm_in_streaming_mode_sc.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/arm_in_streaming_mode_sc.c
new file mode 100644
index 0000000..fb3588a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/arm_in_streaming_mode_sc.c
@@ -0,0 +1,26 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#define STREAMING_COMPATIBLE
+#define NO_SHARED_ZA
+#include "test_sme_acle.h"
+
+#pragma GCC target "+nosme"
+
+/*
+** test_nosme:
+** ...
+** bl __arm_sme_state
+** and w0, w0, #?1
+** ...
+*/
+PROTO (test_nosme, int, ()) { return __arm_in_streaming_mode (); }
+
+#pragma GCC target "+sme"
+
+/*
+** test_sme:
+** mrs x([0-9]+), svcr
+** and w0, w\1, #?1
+** ret
+*/
+PROTO (test_sme, int, ()) { return __arm_in_streaming_mode (); }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_s16.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_s16.c
new file mode 100644
index 0000000..8609b13
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_s16.c
@@ -0,0 +1,42 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** clamp_s16_tied1:
+** sclamp z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s16_tied1, svint16_t,
+ z0 = svclamp_s16 (z0, z1, z2),
+ z0 = svclamp (z0, z1, z2))
+
+/*
+** clamp_s16_tied2:
+** sclamp z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s16_tied2, svint16_t,
+ z0 = svclamp_s16 (z1, z0, z2),
+ z0 = svclamp (z1, z0, z2))
+
+/*
+** clamp_s16_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** sclamp z0\.h, z2\.h, \1\.h
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s16_tied3, svint16_t,
+ z0 = svclamp_s16 (z1, z2, z0),
+ z0 = svclamp (z1, z2, z0))
+
+/*
+** clamp_s16_untied:
+** movprfx z0, z1
+** sclamp z0\.h, z2\.h, z3\.h
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s16_untied, svint16_t,
+ z0 = svclamp_s16 (z1, z2, z3),
+ z0 = svclamp (z1, z2, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_s32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_s32.c
new file mode 100644
index 0000000..a8d4305
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_s32.c
@@ -0,0 +1,42 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** clamp_s32_tied1:
+** sclamp z0\.s, z1\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s32_tied1, svint32_t,
+ z0 = svclamp_s32 (z0, z1, z2),
+ z0 = svclamp (z0, z1, z2))
+
+/*
+** clamp_s32_tied2:
+** sclamp z0\.s, z1\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s32_tied2, svint32_t,
+ z0 = svclamp_s32 (z1, z0, z2),
+ z0 = svclamp (z1, z0, z2))
+
+/*
+** clamp_s32_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** sclamp z0\.s, z2\.s, \1\.s
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s32_tied3, svint32_t,
+ z0 = svclamp_s32 (z1, z2, z0),
+ z0 = svclamp (z1, z2, z0))
+
+/*
+** clamp_s32_untied:
+** movprfx z0, z1
+** sclamp z0\.s, z2\.s, z3\.s
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s32_untied, svint32_t,
+ z0 = svclamp_s32 (z1, z2, z3),
+ z0 = svclamp (z1, z2, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_s64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_s64.c
new file mode 100644
index 0000000..364d185
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_s64.c
@@ -0,0 +1,42 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** clamp_s64_tied1:
+** sclamp z0\.d, z1\.d, z2\.d
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s64_tied1, svint64_t,
+ z0 = svclamp_s64 (z0, z1, z2),
+ z0 = svclamp (z0, z1, z2))
+
+/*
+** clamp_s64_tied2:
+** sclamp z0\.d, z1\.d, z2\.d
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s64_tied2, svint64_t,
+ z0 = svclamp_s64 (z1, z0, z2),
+ z0 = svclamp (z1, z0, z2))
+
+/*
+** clamp_s64_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** sclamp z0\.d, z2\.d, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s64_tied3, svint64_t,
+ z0 = svclamp_s64 (z1, z2, z0),
+ z0 = svclamp (z1, z2, z0))
+
+/*
+** clamp_s64_untied:
+** movprfx z0, z1
+** sclamp z0\.d, z2\.d, z3\.d
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s64_untied, svint64_t,
+ z0 = svclamp_s64 (z1, z2, z3),
+ z0 = svclamp (z1, z2, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_s8.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_s8.c
new file mode 100644
index 0000000..cabb40b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_s8.c
@@ -0,0 +1,42 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** clamp_s8_tied1:
+** sclamp z0\.b, z1\.b, z2\.b
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s8_tied1, svint8_t,
+ z0 = svclamp_s8 (z0, z1, z2),
+ z0 = svclamp (z0, z1, z2))
+
+/*
+** clamp_s8_tied2:
+** sclamp z0\.b, z1\.b, z2\.b
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s8_tied2, svint8_t,
+ z0 = svclamp_s8 (z1, z0, z2),
+ z0 = svclamp (z1, z0, z2))
+
+/*
+** clamp_s8_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** sclamp z0\.b, z2\.b, \1\.b
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s8_tied3, svint8_t,
+ z0 = svclamp_s8 (z1, z2, z0),
+ z0 = svclamp (z1, z2, z0))
+
+/*
+** clamp_s8_untied:
+** movprfx z0, z1
+** sclamp z0\.b, z2\.b, z3\.b
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s8_untied, svint8_t,
+ z0 = svclamp_s8 (z1, z2, z3),
+ z0 = svclamp (z1, z2, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_u16.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_u16.c
new file mode 100644
index 0000000..af8c712
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_u16.c
@@ -0,0 +1,42 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** clamp_u16_tied1:
+** uclamp z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u16_tied1, svuint16_t,
+ z0 = svclamp_u16 (z0, z1, z2),
+ z0 = svclamp (z0, z1, z2))
+
+/*
+** clamp_u16_tied2:
+** uclamp z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u16_tied2, svuint16_t,
+ z0 = svclamp_u16 (z1, z0, z2),
+ z0 = svclamp (z1, z0, z2))
+
+/*
+** clamp_u16_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** uclamp z0\.h, z2\.h, \1\.h
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u16_tied3, svuint16_t,
+ z0 = svclamp_u16 (z1, z2, z0),
+ z0 = svclamp (z1, z2, z0))
+
+/*
+** clamp_u16_untied:
+** movprfx z0, z1
+** uclamp z0\.h, z2\.h, z3\.h
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u16_untied, svuint16_t,
+ z0 = svclamp_u16 (z1, z2, z3),
+ z0 = svclamp (z1, z2, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_u32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_u32.c
new file mode 100644
index 0000000..cca413a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_u32.c
@@ -0,0 +1,42 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** clamp_u32_tied1:
+** uclamp z0\.s, z1\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u32_tied1, svuint32_t,
+ z0 = svclamp_u32 (z0, z1, z2),
+ z0 = svclamp (z0, z1, z2))
+
+/*
+** clamp_u32_tied2:
+** uclamp z0\.s, z1\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u32_tied2, svuint32_t,
+ z0 = svclamp_u32 (z1, z0, z2),
+ z0 = svclamp (z1, z0, z2))
+
+/*
+** clamp_u32_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** uclamp z0\.s, z2\.s, \1\.s
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u32_tied3, svuint32_t,
+ z0 = svclamp_u32 (z1, z2, z0),
+ z0 = svclamp (z1, z2, z0))
+
+/*
+** clamp_u32_untied:
+** movprfx z0, z1
+** uclamp z0\.s, z2\.s, z3\.s
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u32_untied, svuint32_t,
+ z0 = svclamp_u32 (z1, z2, z3),
+ z0 = svclamp (z1, z2, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_u64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_u64.c
new file mode 100644
index 0000000..93d3757
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_u64.c
@@ -0,0 +1,42 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** clamp_u64_tied1:
+** uclamp z0\.d, z1\.d, z2\.d
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u64_tied1, svuint64_t,
+ z0 = svclamp_u64 (z0, z1, z2),
+ z0 = svclamp (z0, z1, z2))
+
+/*
+** clamp_u64_tied2:
+** uclamp z0\.d, z1\.d, z2\.d
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u64_tied2, svuint64_t,
+ z0 = svclamp_u64 (z1, z0, z2),
+ z0 = svclamp (z1, z0, z2))
+
+/*
+** clamp_u64_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** uclamp z0\.d, z2\.d, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u64_tied3, svuint64_t,
+ z0 = svclamp_u64 (z1, z2, z0),
+ z0 = svclamp (z1, z2, z0))
+
+/*
+** clamp_u64_untied:
+** movprfx z0, z1
+** uclamp z0\.d, z2\.d, z3\.d
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u64_untied, svuint64_t,
+ z0 = svclamp_u64 (z1, z2, z3),
+ z0 = svclamp (z1, z2, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_u8.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_u8.c
new file mode 100644
index 0000000..092b336
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/clamp_u8.c
@@ -0,0 +1,42 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** clamp_u8_tied1:
+** uclamp z0\.b, z1\.b, z2\.b
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u8_tied1, svuint8_t,
+ z0 = svclamp_u8 (z0, z1, z2),
+ z0 = svclamp (z0, z1, z2))
+
+/*
+** clamp_u8_tied2:
+** uclamp z0\.b, z1\.b, z2\.b
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u8_tied2, svuint8_t,
+ z0 = svclamp_u8 (z1, z0, z2),
+ z0 = svclamp (z1, z0, z2))
+
+/*
+** clamp_u8_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** uclamp z0\.b, z2\.b, \1\.b
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u8_tied3, svuint8_t,
+ z0 = svclamp_u8 (z1, z2, z0),
+ z0 = svclamp (z1, z2, z0))
+
+/*
+** clamp_u8_untied:
+** movprfx z0, z1
+** uclamp z0\.b, z2\.b, z3\.b
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u8_untied, svuint8_t,
+ z0 = svclamp_u8 (z1, z2, z3),
+ z0 = svclamp (z1, z2, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsb_s.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsb_s.c
new file mode 100644
index 0000000..0a8de45
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsb_s.c
@@ -0,0 +1,310 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#define NO_SHARED_ZA
+#include "test_sme_acle.h"
+
+/*
+** cntb_1:
+** cntb x0
+** ret
+*/
+PROTO (cntb_1, uint64_t, ()) { return svcntsb (); }
+
+/*
+** cntb_2:
+** cntb x0, all, mul #2
+** ret
+*/
+PROTO (cntb_2, uint64_t, ()) { return svcntsb () * 2; }
+
+/*
+** cntb_3:
+** cntb x0, all, mul #3
+** ret
+*/
+PROTO (cntb_3, uint64_t, ()) { return svcntsb () * 3; }
+
+/*
+** cntb_4:
+** cntb x0, all, mul #4
+** ret
+*/
+PROTO (cntb_4, uint64_t, ()) { return svcntsb () * 4; }
+
+/*
+** cntb_8:
+** cntb x0, all, mul #8
+** ret
+*/
+PROTO (cntb_8, uint64_t, ()) { return svcntsb () * 8; }
+
+/*
+** cntb_15:
+** cntb x0, all, mul #15
+** ret
+*/
+PROTO (cntb_15, uint64_t, ()) { return svcntsb () * 15; }
+
+/*
+** cntb_16:
+** cntb x0, all, mul #16
+** ret
+*/
+PROTO (cntb_16, uint64_t, ()) { return svcntsb () * 16; }
+
+/*
+** cntb_17:
+** rdvl x0, #17
+** ret
+*/
+PROTO (cntb_17, uint64_t, ()) { return svcntsb () * 17; }
+
+/*
+** cntb_31:
+** rdvl x0, #31
+** ret
+*/
+PROTO (cntb_31, uint64_t, ()) { return svcntsb () * 31; }
+
+/*
+** cntb_32:
+** cntb (x[0-9]+)
+** lsl x0, \1, 5
+** ret
+*/
+PROTO (cntb_32, uint64_t, ()) { return svcntsb () * 32; }
+
+/* Other sequences would be OK. */
+/*
+** cntb_33:
+** cntb (x[0-9]+)
+** lsl x0, \1, 5
+** incb x0
+** ret
+*/
+PROTO (cntb_33, uint64_t, ()) { return svcntsb () * 33; }
+
+/*
+** cntb_64:
+** cntb (x[0-9]+)
+** lsl x0, \1, 6
+** ret
+*/
+PROTO (cntb_64, uint64_t, ()) { return svcntsb () * 64; }
+
+/*
+** cntb_128:
+** cntb (x[0-9]+)
+** lsl x0, \1, 7
+** ret
+*/
+PROTO (cntb_128, uint64_t, ()) { return svcntsb () * 128; }
+
+/* Other sequences would be OK. */
+/*
+** cntb_129:
+** cntb (x[0-9]+)
+** lsl x0, \1, 7
+** incb x0
+** ret
+*/
+PROTO (cntb_129, uint64_t, ()) { return svcntsb () * 129; }
+
+/*
+** cntb_m1:
+** rdvl x0, #-1
+** ret
+*/
+PROTO (cntb_m1, uint64_t, ()) { return -svcntsb (); }
+
+/*
+** cntb_m13:
+** rdvl x0, #-13
+** ret
+*/
+PROTO (cntb_m13, uint64_t, ()) { return -svcntsb () * 13; }
+
+/*
+** cntb_m15:
+** rdvl x0, #-15
+** ret
+*/
+PROTO (cntb_m15, uint64_t, ()) { return -svcntsb () * 15; }
+
+/*
+** cntb_m16:
+** rdvl x0, #-16
+** ret
+*/
+PROTO (cntb_m16, uint64_t, ()) { return -svcntsb () * 16; }
+
+/*
+** cntb_m17:
+** rdvl x0, #-17
+** ret
+*/
+PROTO (cntb_m17, uint64_t, ()) { return -svcntsb () * 17; }
+
+/*
+** cntb_m32:
+** rdvl x0, #-32
+** ret
+*/
+PROTO (cntb_m32, uint64_t, ()) { return -svcntsb () * 32; }
+
+/*
+** cntb_m33:
+** rdvl x0, #-32
+** decb x0
+** ret
+*/
+PROTO (cntb_m33, uint64_t, ()) { return -svcntsb () * 33; }
+
+/*
+** cntb_m34:
+** rdvl (x[0-9]+), #-17
+** lsl x0, \1, #?1
+** ret
+*/
+PROTO (cntb_m34, uint64_t, ()) { return -svcntsb () * 34; }
+
+/*
+** cntb_m64:
+** rdvl (x[0-9]+), #-1
+** lsl x0, \1, #?6
+** ret
+*/
+PROTO (cntb_m64, uint64_t, ()) { return -svcntsb () * 64; }
+
+/*
+** incb_1:
+** incb x0
+** ret
+*/
+PROTO (incb_1, uint64_t, (uint64_t x0)) { return x0 + svcntsb (); }
+
+/*
+** incb_2:
+** incb x0, all, mul #2
+** ret
+*/
+PROTO (incb_2, uint64_t, (uint64_t x0)) { return x0 + svcntsb () * 2; }
+
+/*
+** incb_3:
+** incb x0, all, mul #3
+** ret
+*/
+PROTO (incb_3, uint64_t, (uint64_t x0)) { return x0 + svcntsb () * 3; }
+
+/*
+** incb_4:
+** incb x0, all, mul #4
+** ret
+*/
+PROTO (incb_4, uint64_t, (uint64_t x0)) { return x0 + svcntsb () * 4; }
+
+/*
+** incb_8:
+** incb x0, all, mul #8
+** ret
+*/
+PROTO (incb_8, uint64_t, (uint64_t x0)) { return x0 + svcntsb () * 8; }
+
+/*
+** incb_15:
+** incb x0, all, mul #15
+** ret
+*/
+PROTO (incb_15, uint64_t, (uint64_t x0)) { return x0 + svcntsb () * 15; }
+
+/*
+** incb_16:
+** incb x0, all, mul #16
+** ret
+*/
+PROTO (incb_16, uint64_t, (uint64_t x0)) { return x0 + svcntsb () * 16; }
+
+/*
+** incb_17:
+** addvl x0, x0, #17
+** ret
+*/
+PROTO (incb_17, uint64_t, (uint64_t x0)) { return x0 + svcntsb () * 17; }
+
+/*
+** incb_31:
+** addvl x0, x0, #31
+** ret
+*/
+PROTO (incb_31, uint64_t, (uint64_t x0)) { return x0 + svcntsb () * 31; }
+
+/*
+** decb_1:
+** decb x0
+** ret
+*/
+PROTO (decb_1, uint64_t, (uint64_t x0)) { return x0 - svcntsb (); }
+
+/*
+** decb_2:
+** decb x0, all, mul #2
+** ret
+*/
+PROTO (decb_2, uint64_t, (uint64_t x0)) { return x0 - svcntsb () * 2; }
+
+/*
+** decb_3:
+** decb x0, all, mul #3
+** ret
+*/
+PROTO (decb_3, uint64_t, (uint64_t x0)) { return x0 - svcntsb () * 3; }
+
+/*
+** decb_4:
+** decb x0, all, mul #4
+** ret
+*/
+PROTO (decb_4, uint64_t, (uint64_t x0)) { return x0 - svcntsb () * 4; }
+
+/*
+** decb_8:
+** decb x0, all, mul #8
+** ret
+*/
+PROTO (decb_8, uint64_t, (uint64_t x0)) { return x0 - svcntsb () * 8; }
+
+/*
+** decb_15:
+** decb x0, all, mul #15
+** ret
+*/
+PROTO (decb_15, uint64_t, (uint64_t x0)) { return x0 - svcntsb () * 15; }
+
+/*
+** decb_16:
+** decb x0, all, mul #16
+** ret
+*/
+PROTO (decb_16, uint64_t, (uint64_t x0)) { return x0 - svcntsb () * 16; }
+
+/*
+** decb_17:
+** addvl x0, x0, #-17
+** ret
+*/
+PROTO (decb_17, uint64_t, (uint64_t x0)) { return x0 - svcntsb () * 17; }
+
+/*
+** decb_31:
+** addvl x0, x0, #-31
+** ret
+*/
+PROTO (decb_31, uint64_t, (uint64_t x0)) { return x0 - svcntsb () * 31; }
+
+/*
+** decb_32:
+** addvl x0, x0, #-32
+** ret
+*/
+PROTO (decb_32, uint64_t, (uint64_t x0)) { return x0 - svcntsb () * 32; }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsb_sc.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsb_sc.c
new file mode 100644
index 0000000..9ee4c8a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsb_sc.c
@@ -0,0 +1,12 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#define STREAMING_COMPATIBLE
+#define NO_SHARED_ZA
+#include "test_sme_acle.h"
+
+/*
+** cntsb:
+** rdsvl x0, #1
+** ret
+*/
+PROTO (cntsb, uint64_t, ()) { return svcntsb (); }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsd_s.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsd_s.c
new file mode 100644
index 0000000..3bf9498
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsd_s.c
@@ -0,0 +1,277 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#define NO_SHARED_ZA
+#include "test_sme_acle.h"
+
+/*
+** cntd_1:
+** cntd x0
+** ret
+*/
+PROTO (cntd_1, uint64_t, ()) { return svcntsd (); }
+
+/*
+** cntd_2:
+** cntw x0
+** ret
+*/
+PROTO (cntd_2, uint64_t, ()) { return svcntsd () * 2; }
+
+/*
+** cntd_3:
+** cntd x0, all, mul #3
+** ret
+*/
+PROTO (cntd_3, uint64_t, ()) { return svcntsd () * 3; }
+
+/*
+** cntd_4:
+** cnth x0
+** ret
+*/
+PROTO (cntd_4, uint64_t, ()) { return svcntsd () * 4; }
+
+/*
+** cntd_8:
+** cntb x0
+** ret
+*/
+PROTO (cntd_8, uint64_t, ()) { return svcntsd () * 8; }
+
+/*
+** cntd_15:
+** cntd x0, all, mul #15
+** ret
+*/
+PROTO (cntd_15, uint64_t, ()) { return svcntsd () * 15; }
+
+/*
+** cntd_16:
+** cntb x0, all, mul #2
+** ret
+*/
+PROTO (cntd_16, uint64_t, ()) { return svcntsd () * 16; }
+
+/* Other sequences would be OK. */
+/*
+** cntd_17:
+** rdvl (x[0-9]+), #17
+** asr x0, \1, 3
+** ret
+*/
+PROTO (cntd_17, uint64_t, ()) { return svcntsd () * 17; }
+
+/*
+** cntd_32:
+** cntb x0, all, mul #4
+** ret
+*/
+PROTO (cntd_32, uint64_t, ()) { return svcntsd () * 32; }
+
+/*
+** cntd_64:
+** cntb x0, all, mul #8
+** ret
+*/
+PROTO (cntd_64, uint64_t, ()) { return svcntsd () * 64; }
+
+/*
+** cntd_128:
+** cntb x0, all, mul #16
+** ret
+*/
+PROTO (cntd_128, uint64_t, ()) { return svcntsd () * 128; }
+
+/*
+** cntd_m1:
+** cntd (x[0-9]+)
+** neg x0, \1
+** ret
+*/
+PROTO (cntd_m1, uint64_t, ()) { return -svcntsd (); }
+
+/*
+** cntd_m13:
+** cntd (x[0-9]+), all, mul #13
+** neg x0, \1
+** ret
+*/
+PROTO (cntd_m13, uint64_t, ()) { return -svcntsd () * 13; }
+
+/*
+** cntd_m15:
+** cntd (x[0-9]+), all, mul #15
+** neg x0, \1
+** ret
+*/
+PROTO (cntd_m15, uint64_t, ()) { return -svcntsd () * 15; }
+
+/*
+** cntd_m16:
+** rdvl x0, #-2
+** ret
+*/
+PROTO (cntd_m16, uint64_t, ()) { return -svcntsd () * 16; }
+
+/* Other sequences would be OK. */
+/*
+** cntd_m17:
+** rdvl (x[0-9]+), #-17
+** asr x0, \1, 3
+** ret
+*/
+PROTO (cntd_m17, uint64_t, ()) { return -svcntsd () * 17; }
+
+/*
+** incd_1:
+** incd x0
+** ret
+*/
+PROTO (incd_1, uint64_t, (uint64_t x0)) { return x0 + svcntsd (); }
+
+/*
+** incd_2:
+** incw x0
+** ret
+*/
+PROTO (incd_2, uint64_t, (uint64_t x0)) { return x0 + svcntsd () * 2; }
+
+/*
+** incd_3:
+** incd x0, all, mul #3
+** ret
+*/
+PROTO (incd_3, uint64_t, (uint64_t x0)) { return x0 + svcntsd () * 3; }
+
+/*
+** incd_4:
+** inch x0
+** ret
+*/
+PROTO (incd_4, uint64_t, (uint64_t x0)) { return x0 + svcntsd () * 4; }
+
+/*
+** incd_7:
+** incd x0, all, mul #7
+** ret
+*/
+PROTO (incd_7, uint64_t, (uint64_t x0)) { return x0 + svcntsd () * 7; }
+
+/*
+** incd_8:
+** incb x0
+** ret
+*/
+PROTO (incd_8, uint64_t, (uint64_t x0)) { return x0 + svcntsd () * 8; }
+
+/*
+** incd_9:
+** incd x0, all, mul #9
+** ret
+*/
+PROTO (incd_9, uint64_t, (uint64_t x0)) { return x0 + svcntsd () * 9; }
+
+/*
+** incd_15:
+** incd x0, all, mul #15
+** ret
+*/
+PROTO (incd_15, uint64_t, (uint64_t x0)) { return x0 + svcntsd () * 15; }
+
+/*
+** incd_16:
+** incb x0, all, mul #2
+** ret
+*/
+PROTO (incd_16, uint64_t, (uint64_t x0)) { return x0 + svcntsd () * 16; }
+
+/*
+** incd_18:
+** incw x0, all, mul #9
+** ret
+*/
+PROTO (incd_18, uint64_t, (uint64_t x0)) { return x0 + svcntsd () * 18; }
+
+/*
+** incd_30:
+** incw x0, all, mul #15
+** ret
+*/
+PROTO (incd_30, uint64_t, (uint64_t x0)) { return x0 + svcntsd () * 30; }
+
+/*
+** decd_1:
+** decd x0
+** ret
+*/
+PROTO (decd_1, uint64_t, (uint64_t x0)) { return x0 - svcntsd (); }
+
+/*
+** decd_2:
+** decw x0
+** ret
+*/
+PROTO (decd_2, uint64_t, (uint64_t x0)) { return x0 - svcntsd () * 2; }
+
+/*
+** decd_3:
+** decd x0, all, mul #3
+** ret
+*/
+PROTO (decd_3, uint64_t, (uint64_t x0)) { return x0 - svcntsd () * 3; }
+
+/*
+** decd_4:
+** dech x0
+** ret
+*/
+PROTO (decd_4, uint64_t, (uint64_t x0)) { return x0 - svcntsd () * 4; }
+
+/*
+** decd_7:
+** decd x0, all, mul #7
+** ret
+*/
+PROTO (decd_7, uint64_t, (uint64_t x0)) { return x0 - svcntsd () * 7; }
+
+/*
+** decd_8:
+** decb x0
+** ret
+*/
+PROTO (decd_8, uint64_t, (uint64_t x0)) { return x0 - svcntsd () * 8; }
+
+/*
+** decd_9:
+** decd x0, all, mul #9
+** ret
+*/
+PROTO (decd_9, uint64_t, (uint64_t x0)) { return x0 - svcntsd () * 9; }
+
+/*
+** decd_15:
+** decd x0, all, mul #15
+** ret
+*/
+PROTO (decd_15, uint64_t, (uint64_t x0)) { return x0 - svcntsd () * 15; }
+
+/*
+** decd_16:
+** decb x0, all, mul #2
+** ret
+*/
+PROTO (decd_16, uint64_t, (uint64_t x0)) { return x0 - svcntsd () * 16; }
+
+/*
+** decd_18:
+** decw x0, all, mul #9
+** ret
+*/
+PROTO (decd_18, uint64_t, (uint64_t x0)) { return x0 - svcntsd () * 18; }
+
+/*
+** decd_30:
+** decw x0, all, mul #15
+** ret
+*/
+PROTO (decd_30, uint64_t, (uint64_t x0)) { return x0 - svcntsd () * 30; }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsd_sc.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsd_sc.c
new file mode 100644
index 0000000..90fb374
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsd_sc.c
@@ -0,0 +1,13 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#define STREAMING_COMPATIBLE
+#define NO_SHARED_ZA
+#include "test_sme_acle.h"
+
+/*
+** cntsd:
+**	rdsvl	(x[0-9]+), #1
+** lsr x0, \1, #?3
+** ret
+*/
+PROTO (cntsd, uint64_t, ()) { return svcntsd (); }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsh_s.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsh_s.c
new file mode 100644
index 0000000..021c39a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsh_s.c
@@ -0,0 +1,279 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#define NO_SHARED_ZA
+#include "test_sme_acle.h"
+
+/*
+** cnth_1:
+** cnth x0
+** ret
+*/
+PROTO (cnth_1, uint64_t, ()) { return svcntsh (); }
+
+/*
+** cnth_2:
+** cntb x0
+** ret
+*/
+PROTO (cnth_2, uint64_t, ()) { return svcntsh () * 2; }
+
+/*
+** cnth_3:
+** cnth x0, all, mul #3
+** ret
+*/
+PROTO (cnth_3, uint64_t, ()) { return svcntsh () * 3; }
+
+/*
+** cnth_4:
+** cntb x0, all, mul #2
+** ret
+*/
+PROTO (cnth_4, uint64_t, ()) { return svcntsh () * 4; }
+
+/*
+** cnth_8:
+** cntb x0, all, mul #4
+** ret
+*/
+PROTO (cnth_8, uint64_t, ()) { return svcntsh () * 8; }
+
+/*
+** cnth_15:
+** cnth x0, all, mul #15
+** ret
+*/
+PROTO (cnth_15, uint64_t, ()) { return svcntsh () * 15; }
+
+/*
+** cnth_16:
+** cntb x0, all, mul #8
+** ret
+*/
+PROTO (cnth_16, uint64_t, ()) { return svcntsh () * 16; }
+
+/* Other sequences would be OK. */
+/*
+** cnth_17:
+** rdvl (x[0-9]+), #17
+** asr x0, \1, 1
+** ret
+*/
+PROTO (cnth_17, uint64_t, ()) { return svcntsh () * 17; }
+
+/*
+** cnth_32:
+** cntb x0, all, mul #16
+** ret
+*/
+PROTO (cnth_32, uint64_t, ()) { return svcntsh () * 32; }
+
+/*
+** cnth_64:
+** cntb (x[0-9]+)
+** lsl x0, \1, 5
+** ret
+*/
+PROTO (cnth_64, uint64_t, ()) { return svcntsh () * 64; }
+
+/*
+** cnth_128:
+** cntb (x[0-9]+)
+** lsl x0, \1, 6
+** ret
+*/
+PROTO (cnth_128, uint64_t, ()) { return svcntsh () * 128; }
+
+/*
+** cnth_m1:
+** cnth (x[0-9]+)
+** neg x0, \1
+** ret
+*/
+PROTO (cnth_m1, uint64_t, ()) { return -svcntsh (); }
+
+/*
+** cnth_m13:
+** cnth (x[0-9]+), all, mul #13
+** neg x0, \1
+** ret
+*/
+PROTO (cnth_m13, uint64_t, ()) { return -svcntsh () * 13; }
+
+/*
+** cnth_m15:
+** cnth (x[0-9]+), all, mul #15
+** neg x0, \1
+** ret
+*/
+PROTO (cnth_m15, uint64_t, ()) { return -svcntsh () * 15; }
+
+/*
+** cnth_m16:
+** rdvl x0, #-8
+** ret
+*/
+PROTO (cnth_m16, uint64_t, ()) { return -svcntsh () * 16; }
+
+/* Other sequences would be OK. */
+/*
+** cnth_m17:
+** rdvl (x[0-9]+), #-17
+** asr x0, \1, 1
+** ret
+*/
+PROTO (cnth_m17, uint64_t, ()) { return -svcntsh () * 17; }
+
+/*
+** inch_1:
+** inch x0
+** ret
+*/
+PROTO (inch_1, uint64_t, (uint64_t x0)) { return x0 + svcntsh (); }
+
+/*
+** inch_2:
+** incb x0
+** ret
+*/
+PROTO (inch_2, uint64_t, (uint64_t x0)) { return x0 + svcntsh () * 2; }
+
+/*
+** inch_3:
+** inch x0, all, mul #3
+** ret
+*/
+PROTO (inch_3, uint64_t, (uint64_t x0)) { return x0 + svcntsh () * 3; }
+
+/*
+** inch_4:
+** incb x0, all, mul #2
+** ret
+*/
+PROTO (inch_4, uint64_t, (uint64_t x0)) { return x0 + svcntsh () * 4; }
+
+/*
+** inch_7:
+** inch x0, all, mul #7
+** ret
+*/
+PROTO (inch_7, uint64_t, (uint64_t x0)) { return x0 + svcntsh () * 7; }
+
+/*
+** inch_8:
+** incb x0, all, mul #4
+** ret
+*/
+PROTO (inch_8, uint64_t, (uint64_t x0)) { return x0 + svcntsh () * 8; }
+
+/*
+** inch_9:
+** inch x0, all, mul #9
+** ret
+*/
+PROTO (inch_9, uint64_t, (uint64_t x0)) { return x0 + svcntsh () * 9; }
+
+/*
+** inch_15:
+** inch x0, all, mul #15
+** ret
+*/
+PROTO (inch_15, uint64_t, (uint64_t x0)) { return x0 + svcntsh () * 15; }
+
+/*
+** inch_16:
+** incb x0, all, mul #8
+** ret
+*/
+PROTO (inch_16, uint64_t, (uint64_t x0)) { return x0 + svcntsh () * 16; }
+
+/*
+** inch_18:
+** incb x0, all, mul #9
+** ret
+*/
+PROTO (inch_18, uint64_t, (uint64_t x0)) { return x0 + svcntsh () * 18; }
+
+/*
+** inch_30:
+** incb x0, all, mul #15
+** ret
+*/
+PROTO (inch_30, uint64_t, (uint64_t x0)) { return x0 + svcntsh () * 30; }
+
+/*
+** dech_1:
+** dech x0
+** ret
+*/
+PROTO (dech_1, uint64_t, (uint64_t x0)) { return x0 - svcntsh (); }
+
+/*
+** dech_2:
+** decb x0
+** ret
+*/
+PROTO (dech_2, uint64_t, (uint64_t x0)) { return x0 - svcntsh () * 2; }
+
+/*
+** dech_3:
+** dech x0, all, mul #3
+** ret
+*/
+PROTO (dech_3, uint64_t, (uint64_t x0)) { return x0 - svcntsh () * 3; }
+
+/*
+** dech_4:
+** decb x0, all, mul #2
+** ret
+*/
+PROTO (dech_4, uint64_t, (uint64_t x0)) { return x0 - svcntsh () * 4; }
+
+/*
+** dech_7:
+** dech x0, all, mul #7
+** ret
+*/
+PROTO (dech_7, uint64_t, (uint64_t x0)) { return x0 - svcntsh () * 7; }
+
+/*
+** dech_8:
+** decb x0, all, mul #4
+** ret
+*/
+PROTO (dech_8, uint64_t, (uint64_t x0)) { return x0 - svcntsh () * 8; }
+
+/*
+** dech_9:
+** dech x0, all, mul #9
+** ret
+*/
+PROTO (dech_9, uint64_t, (uint64_t x0)) { return x0 - svcntsh () * 9; }
+
+/*
+** dech_15:
+** dech x0, all, mul #15
+** ret
+*/
+PROTO (dech_15, uint64_t, (uint64_t x0)) { return x0 - svcntsh () * 15; }
+
+/*
+** dech_16:
+** decb x0, all, mul #8
+** ret
+*/
+PROTO (dech_16, uint64_t, (uint64_t x0)) { return x0 - svcntsh () * 16; }
+
+/*
+** dech_18:
+** decb x0, all, mul #9
+** ret
+*/
+PROTO (dech_18, uint64_t, (uint64_t x0)) { return x0 - svcntsh () * 18; }
+
+/*
+** dech_30:
+** decb x0, all, mul #15
+** ret
+*/
+PROTO (dech_30, uint64_t, (uint64_t x0)) { return x0 - svcntsh () * 30; }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsh_sc.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsh_sc.c
new file mode 100644
index 0000000..9f6c852
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsh_sc.c
@@ -0,0 +1,13 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#define STREAMING_COMPATIBLE
+#define NO_SHARED_ZA
+#include "test_sme_acle.h"
+
+/*
+** cntsh:
+**	rdsvl	(x[0-9]+), #1
+** lsr x0, \1, #?1
+** ret
+*/
+PROTO (cntsh, uint64_t, ()) { return svcntsh (); }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsw_s.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsw_s.c
new file mode 100644
index 0000000..c421e1b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsw_s.c
@@ -0,0 +1,278 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#define NO_SHARED_ZA
+#include "test_sme_acle.h"
+
+/*
+** cntw_1:
+** cntw x0
+** ret
+*/
+PROTO (cntw_1, uint64_t, ()) { return svcntsw (); }
+
+/*
+** cntw_2:
+** cnth x0
+** ret
+*/
+PROTO (cntw_2, uint64_t, ()) { return svcntsw () * 2; }
+
+/*
+** cntw_3:
+** cntw x0, all, mul #3
+** ret
+*/
+PROTO (cntw_3, uint64_t, ()) { return svcntsw () * 3; }
+
+/*
+** cntw_4:
+** cntb x0
+** ret
+*/
+PROTO (cntw_4, uint64_t, ()) { return svcntsw () * 4; }
+
+/*
+** cntw_8:
+** cntb x0, all, mul #2
+** ret
+*/
+PROTO (cntw_8, uint64_t, ()) { return svcntsw () * 8; }
+
+/*
+** cntw_15:
+** cntw x0, all, mul #15
+** ret
+*/
+PROTO (cntw_15, uint64_t, ()) { return svcntsw () * 15; }
+
+/*
+** cntw_16:
+** cntb x0, all, mul #4
+** ret
+*/
+PROTO (cntw_16, uint64_t, ()) { return svcntsw () * 16; }
+
+/* Other sequences would be OK. */
+/*
+** cntw_17:
+** rdvl (x[0-9]+), #17
+** asr x0, \1, 2
+** ret
+*/
+PROTO (cntw_17, uint64_t, ()) { return svcntsw () * 17; }
+
+/*
+** cntw_32:
+** cntb x0, all, mul #8
+** ret
+*/
+PROTO (cntw_32, uint64_t, ()) { return svcntsw () * 32; }
+
+/*
+** cntw_64:
+** cntb x0, all, mul #16
+** ret
+*/
+PROTO (cntw_64, uint64_t, ()) { return svcntsw () * 64; }
+
+/*
+** cntw_128:
+** cntb (x[0-9]+)
+** lsl x0, \1, 5
+** ret
+*/
+PROTO (cntw_128, uint64_t, ()) { return svcntsw () * 128; }
+
+/*
+** cntw_m1:
+** cntw (x[0-9]+)
+** neg x0, \1
+** ret
+*/
+PROTO (cntw_m1, uint64_t, ()) { return -svcntsw (); }
+
+/*
+** cntw_m13:
+** cntw (x[0-9]+), all, mul #13
+** neg x0, \1
+** ret
+*/
+PROTO (cntw_m13, uint64_t, ()) { return -svcntsw () * 13; }
+
+/*
+** cntw_m15:
+** cntw (x[0-9]+), all, mul #15
+** neg x0, \1
+** ret
+*/
+PROTO (cntw_m15, uint64_t, ()) { return -svcntsw () * 15; }
+
+/*
+** cntw_m16:
+**	rdvl	x0, #-4
+** ret
+*/
+PROTO (cntw_m16, uint64_t, ()) { return -svcntsw () * 16; }
+
+/* Other sequences would be OK. */
+/*
+** cntw_m17:
+** rdvl (x[0-9]+), #-17
+** asr x0, \1, 2
+** ret
+*/
+PROTO (cntw_m17, uint64_t, ()) { return -svcntsw () * 17; }
+
+/*
+** incw_1:
+** incw x0
+** ret
+*/
+PROTO (incw_1, uint64_t, (uint64_t x0)) { return x0 + svcntsw (); }
+
+/*
+** incw_2:
+** inch x0
+** ret
+*/
+PROTO (incw_2, uint64_t, (uint64_t x0)) { return x0 + svcntsw () * 2; }
+
+/*
+** incw_3:
+** incw x0, all, mul #3
+** ret
+*/
+PROTO (incw_3, uint64_t, (uint64_t x0)) { return x0 + svcntsw () * 3; }
+
+/*
+** incw_4:
+** incb x0
+** ret
+*/
+PROTO (incw_4, uint64_t, (uint64_t x0)) { return x0 + svcntsw () * 4; }
+
+/*
+** incw_7:
+** incw x0, all, mul #7
+** ret
+*/
+PROTO (incw_7, uint64_t, (uint64_t x0)) { return x0 + svcntsw () * 7; }
+
+/*
+** incw_8:
+** incb x0, all, mul #2
+** ret
+*/
+PROTO (incw_8, uint64_t, (uint64_t x0)) { return x0 + svcntsw () * 8; }
+
+/*
+** incw_9:
+** incw x0, all, mul #9
+** ret
+*/
+PROTO (incw_9, uint64_t, (uint64_t x0)) { return x0 + svcntsw () * 9; }
+
+/*
+** incw_15:
+** incw x0, all, mul #15
+** ret
+*/
+PROTO (incw_15, uint64_t, (uint64_t x0)) { return x0 + svcntsw () * 15; }
+
+/*
+** incw_16:
+** incb x0, all, mul #4
+** ret
+*/
+PROTO (incw_16, uint64_t, (uint64_t x0)) { return x0 + svcntsw () * 16; }
+
+/*
+** incw_18:
+** inch x0, all, mul #9
+** ret
+*/
+PROTO (incw_18, uint64_t, (uint64_t x0)) { return x0 + svcntsw () * 18; }
+
+/*
+** incw_30:
+** inch x0, all, mul #15
+** ret
+*/
+PROTO (incw_30, uint64_t, (uint64_t x0)) { return x0 + svcntsw () * 30; }
+
+/*
+** decw_1:
+** decw x0
+** ret
+*/
+PROTO (decw_1, uint64_t, (uint64_t x0)) { return x0 - svcntsw (); }
+
+/*
+** decw_2:
+** dech x0
+** ret
+*/
+PROTO (decw_2, uint64_t, (uint64_t x0)) { return x0 - svcntsw () * 2; }
+
+/*
+** decw_3:
+** decw x0, all, mul #3
+** ret
+*/
+PROTO (decw_3, uint64_t, (uint64_t x0)) { return x0 - svcntsw () * 3; }
+
+/*
+** decw_4:
+** decb x0
+** ret
+*/
+PROTO (decw_4, uint64_t, (uint64_t x0)) { return x0 - svcntsw () * 4; }
+
+/*
+** decw_7:
+** decw x0, all, mul #7
+** ret
+*/
+PROTO (decw_7, uint64_t, (uint64_t x0)) { return x0 - svcntsw () * 7; }
+
+/*
+** decw_8:
+** decb x0, all, mul #2
+** ret
+*/
+PROTO (decw_8, uint64_t, (uint64_t x0)) { return x0 - svcntsw () * 8; }
+
+/*
+** decw_9:
+** decw x0, all, mul #9
+** ret
+*/
+PROTO (decw_9, uint64_t, (uint64_t x0)) { return x0 - svcntsw () * 9; }
+
+/*
+** decw_15:
+** decw x0, all, mul #15
+** ret
+*/
+PROTO (decw_15, uint64_t, (uint64_t x0)) { return x0 - svcntsw () * 15; }
+
+/*
+** decw_16:
+** decb x0, all, mul #4
+** ret
+*/
+PROTO (decw_16, uint64_t, (uint64_t x0)) { return x0 - svcntsw () * 16; }
+
+/*
+** decw_18:
+** dech x0, all, mul #9
+** ret
+*/
+PROTO (decw_18, uint64_t, (uint64_t x0)) { return x0 - svcntsw () * 18; }
+
+/*
+** decw_30:
+** dech x0, all, mul #15
+** ret
+*/
+PROTO (decw_30, uint64_t, (uint64_t x0)) { return x0 - svcntsw () * 30; }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsw_sc.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsw_sc.c
new file mode 100644
index 0000000..75ca937
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/cntsw_sc.c
@@ -0,0 +1,13 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#define STREAMING_COMPATIBLE
+#define NO_SHARED_ZA
+#include "test_sme_acle.h"
+
+/*
+** cntsw:
+**	rdsvl	(x[0-9]+), #1
+** lsr x0, \1, #?2
+** ret
+*/
+PROTO (cntsw, uint64_t, ()) { return svcntsw (); }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za128.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za128.c
new file mode 100644
index 0000000..fbbeb4f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za128.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** ld1_vnum_za128_0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** ld1q { za0h\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za128_0_0_0,
+ svld1_hor_vnum_za128 (0, 0, p0, x1, 0),
+ svld1_hor_vnum_za128 (0, 0, p0, x1, 0))
+
+/*
+** ld1_vnum_za128_7_1_0:
+** mov (w1[2-5]), #?1
+** ld1q { za7h\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za128_7_1_0,
+ svld1_hor_vnum_za128 (7, 1, p0, x1, 0),
+ svld1_hor_vnum_za128 (7, 1, p0, x1, 0))
+
+/*
+** ld1_vnum_za128_11_1_5:
+** incb x1, all, mul #5
+** mov (w1[2-5]), #?6
+** ld1q { za11h\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za128_11_1_5,
+ svld1_hor_vnum_za128 (11, 1, p0, x1, 5),
+ svld1_hor_vnum_za128 (11, 1, p0, x1, 5))
+
+/*
+** ld1_vnum_za128_3_w0_0:
+** mov (w1[2-5]), w0
+** ld1q { za3h\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za128_3_w0_0,
+ svld1_hor_vnum_za128 (3, w0, p0, x1, 0),
+ svld1_hor_vnum_za128 (3, w0, p0, x1, 0))
+
+/*
+** ld1_vnum_za128_5_w0_0:
+** incb x1, all, mul #13
+** add (w1[2-5]), w0, #?13
+** ld1q { za5h\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za128_5_w0_0,
+ svld1_hor_vnum_za128 (5, w0, p0, x1, 13),
+ svld1_hor_vnum_za128 (5, w0, p0, x1, 13))
+
+/*
+** ld1_vnum_za128_11_w0_0:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:\1, x2|x2, \1), x1
+** add (w1[2-5]), (?:w0, w2|w2, w0)
+** ld1q { za11h\.q\[\3, 0\] }, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za128_11_w0_0,
+ svld1_hor_vnum_za128 (11, w0, p0, x1, x2),
+ svld1_hor_vnum_za128 (11, w0, p0, x1, x2))
+
+/*
+** ld1_vnum_za128_15_w0p1_0:
+** add (w1[2-5]), w0, #?1
+** ld1q { za15h\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za128_15_w0p1_0,
+ svld1_hor_vnum_za128 (15, w0 + 1, p0, x1, 0),
+ svld1_hor_vnum_za128 (15, w0 + 1, p0, x1, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za16.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za16.c
new file mode 100644
index 0000000..30e7a71
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za16.c
@@ -0,0 +1,123 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** ld1_vnum_za16_1_0_1:
+** incb x1
+** mov (w1[2-5]), (?:wzr|#?0)
+** ld1h { za1h\.h\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za16_1_0_1,
+ svld1_hor_vnum_za16 (1, 0, p0, x1, 1),
+ svld1_hor_vnum_za16 (1, 0, p0, x1, 1))
+
+/*
+** ld1_vnum_za16_1_1_1:
+** incb x1
+** mov (w1[2-5]), #?1
+** ld1h { za1h\.h\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za16_1_1_1,
+ svld1_hor_vnum_za16 (1, 1, p0, x1, 1),
+ svld1_hor_vnum_za16 (1, 1, p0, x1, 1))
+
+/*
+** ld1_vnum_za16_0_0_8:
+** incb x1, all, mul #8
+** mov (w1[2-5]), #?8
+** ld1h { za0h\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za16_0_0_8,
+ svld1_hor_vnum_za16 (0, 0, p0, x1, 8),
+ svld1_hor_vnum_za16 (0, 0, p0, x1, 8))
+
+/*
+** ld1_vnum_za16_0_1_8:
+** incb x1, all, mul #8
+** mov (w1[2-5]), #?9
+** ld1h { za0h\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za16_0_1_8,
+ svld1_hor_vnum_za16 (0, 1, p0, x1, 8),
+ svld1_hor_vnum_za16 (0, 1, p0, x1, 8))
+
+/*
+** ld1_vnum_za16_0_w0_0:
+** mov (w1[2-5]), w0
+** ld1h { za0h\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za16_0_w0_0,
+ svld1_hor_vnum_za16 (0, w0, p0, x1, 0),
+ svld1_hor_vnum_za16 (0, w0, p0, x1, 0))
+
+/*
+** ld1_vnum_za16_0_w0_1:
+** incb x1
+** mov (w1[2-5]), w0
+** ld1h { za0h\.h\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za16_0_w0_1,
+ svld1_hor_vnum_za16 (0, w0, p0, x1, 1),
+ svld1_hor_vnum_za16 (0, w0, p0, x1, 1))
+
+/*
+** ld1_vnum_za16_0_w0_7:
+** incb x1, all, mul #7
+** mov (w1[2-5]), w0
+** ld1h { za0h\.h\[\1, 7\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za16_0_w0_7,
+ svld1_hor_vnum_za16 (0, w0, p0, x1, 7),
+ svld1_hor_vnum_za16 (0, w0, p0, x1, 7))
+
+/*
+** ld1_vnum_za16_1_w0_8:
+** incb x1, all, mul #8
+** add (w1[2-5]), w0, #?8
+** ld1h { za1h\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za16_1_w0_8,
+ svld1_hor_vnum_za16 (1, w0, p0, x1, 8),
+ svld1_hor_vnum_za16 (1, w0, p0, x1, 8))
+
+/*
+** ld1_vnum_za16_1_w0_13:
+** incb x1, all, mul #13
+** add (w1[2-5]), w0, #?13
+** ld1h { za1h\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za16_1_w0_13,
+ svld1_hor_vnum_za16 (1, w0, p0, x1, 13),
+ svld1_hor_vnum_za16 (1, w0, p0, x1, 13))
+
+/*
+** ld1_vnum_za16_0_w0_x2:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:\1, x2|x2, \1), x1
+** add (w1[2-5]), (?:w0, w2|w2, w0)
+** ld1h { za0h\.h\[\3, 0\] }, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za16_0_w0_x2,
+ svld1_hor_vnum_za16 (0, w0, p0, x1, x2),
+ svld1_hor_vnum_za16 (0, w0, p0, x1, x2))
+
+/*
+** ld1_vnum_za16_1_w0p1_0:
+** mov (w1[2-5]), w0
+** ld1h { za1h\.h\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za16_1_w0p1_0,
+ svld1_hor_vnum_za16 (1, w0 + 1, p0, x1, 0),
+ svld1_hor_vnum_za16 (1, w0 + 1, p0, x1, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za32.c
new file mode 100644
index 0000000..49ffaed
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za32.c
@@ -0,0 +1,123 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** ld1_vnum_za32_3_0_1:
+** incb x1
+** mov (w1[2-5]), (?:wzr|#?0)
+** ld1w { za3h\.s\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za32_3_0_1,
+ svld1_hor_vnum_za32 (3, 0, p0, x1, 1),
+ svld1_hor_vnum_za32 (3, 0, p0, x1, 1))
+
+/*
+** ld1_vnum_za32_2_1_1:
+** incb x1
+** mov (w1[2-5]), #?1
+** ld1w { za2h\.s\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za32_2_1_1,
+ svld1_hor_vnum_za32 (2, 1, p0, x1, 1),
+ svld1_hor_vnum_za32 (2, 1, p0, x1, 1))
+
+/*
+** ld1_vnum_za32_0_0_4:
+** incb x1, all, mul #4
+** mov (w1[2-5]), #?4
+** ld1w { za0h\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za32_0_0_4,
+ svld1_hor_vnum_za32 (0, 0, p0, x1, 4),
+ svld1_hor_vnum_za32 (0, 0, p0, x1, 4))
+
+/*
+** ld1_vnum_za32_2_1_4:
+** incb x1, all, mul #4
+** mov (w1[2-5]), #?5
+** ld1w { za2h\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za32_2_1_4,
+ svld1_hor_vnum_za32 (2, 1, p0, x1, 4),
+ svld1_hor_vnum_za32 (2, 1, p0, x1, 4))
+
+/*
+** ld1_vnum_za32_0_w0_0:
+** mov (w1[2-5]), w0
+** ld1w { za0h\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za32_0_w0_0,
+ svld1_hor_vnum_za32 (0, w0, p0, x1, 0),
+ svld1_hor_vnum_za32 (0, w0, p0, x1, 0))
+
+/*
+** ld1_vnum_za32_0_w0_1:
+** incb x1
+** mov (w1[2-5]), w0
+** ld1w { za0h\.s\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za32_0_w0_1,
+ svld1_hor_vnum_za32 (0, w0, p0, x1, 1),
+ svld1_hor_vnum_za32 (0, w0, p0, x1, 1))
+
+/*
+** ld1_vnum_za32_0_w0_3:
+** incb x1, all, mul #3
+** mov (w1[2-5]), w0
+** ld1w { za0h\.s\[\1, 3\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za32_0_w0_3,
+ svld1_hor_vnum_za32 (0, w0, p0, x1, 3),
+ svld1_hor_vnum_za32 (0, w0, p0, x1, 3))
+
+/*
+** ld1_vnum_za32_1_w0_4:
+** incb x1, all, mul #4
+** add (w1[2-5]), w0, #?4
+** ld1w { za1h\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za32_1_w0_4,
+ svld1_hor_vnum_za32 (1, w0, p0, x1, 4),
+ svld1_hor_vnum_za32 (1, w0, p0, x1, 4))
+
+/*
+** ld1_vnum_za32_3_w0_13:
+** incb x1, all, mul #13
+** add (w1[2-5]), w0, #?13
+** ld1w { za3h\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za32_3_w0_13,
+ svld1_hor_vnum_za32 (3, w0, p0, x1, 13),
+ svld1_hor_vnum_za32 (3, w0, p0, x1, 13))
+
+/*
+** ld1_vnum_za32_0_w0_x2:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:\1, x2|x2, \1), x1
+** add (w1[2-5]), (?:w0, w2|w2, w0)
+** ld1w { za0h\.s\[\3, 0\] }, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za32_0_w0_x2,
+ svld1_hor_vnum_za32 (0, w0, p0, x1, x2),
+ svld1_hor_vnum_za32 (0, w0, p0, x1, x2))
+
+/*
+** ld1_vnum_za32_1_w0p1_0:
+** mov (w1[2-5]), w0
+** ld1w { za1h\.s\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za32_1_w0p1_0,
+ svld1_hor_vnum_za32 (1, w0 + 1, p0, x1, 0),
+ svld1_hor_vnum_za32 (1, w0 + 1, p0, x1, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za64.c
new file mode 100644
index 0000000..df09b1c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za64.c
@@ -0,0 +1,112 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** ld1_vnum_za64_3_0_1:
+** incb x1
+** mov (w1[2-5]), (?:wzr|#?0)
+** ld1d { za3h\.d\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za64_3_0_1,
+ svld1_hor_vnum_za64 (3, 0, p0, x1, 1),
+ svld1_hor_vnum_za64 (3, 0, p0, x1, 1))
+
+/*
+** ld1_vnum_za64_7_1_1:
+** incb x1
+** mov (w1[2-5]), #?1
+** ld1d { za7h\.d\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za64_7_1_1,
+ svld1_hor_vnum_za64 (7, 1, p0, x1, 1),
+ svld1_hor_vnum_za64 (7, 1, p0, x1, 1))
+
+/*
+** ld1_vnum_za64_0_0_2:
+** incb x1, all, mul #2
+** mov (w1[2-5]), #?2
+** ld1d { za0h\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za64_0_0_2,
+ svld1_hor_vnum_za64 (0, 0, p0, x1, 2),
+ svld1_hor_vnum_za64 (0, 0, p0, x1, 2))
+
+/*
+** ld1_vnum_za64_5_1_2:
+** incb x1, all, mul #2
+** mov (w1[2-5]), #?3
+** ld1d { za5h\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za64_5_1_2,
+ svld1_hor_vnum_za64 (5, 1, p0, x1, 2),
+ svld1_hor_vnum_za64 (5, 1, p0, x1, 2))
+
+/*
+** ld1_vnum_za64_0_w0_0:
+** mov (w1[2-5]), w0
+** ld1d { za0h\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za64_0_w0_0,
+ svld1_hor_vnum_za64 (0, w0, p0, x1, 0),
+ svld1_hor_vnum_za64 (0, w0, p0, x1, 0))
+
+/*
+** ld1_vnum_za64_0_w0_1:
+** incb x1
+** mov (w1[2-5]), w0
+** ld1d { za0h\.d\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za64_0_w0_1,
+ svld1_hor_vnum_za64 (0, w0, p0, x1, 1),
+ svld1_hor_vnum_za64 (0, w0, p0, x1, 1))
+
+/*
+** ld1_vnum_za64_6_w0_2:
+** incb x1, all, mul #2
+** add (w1[2-5]), w0, #?2
+** ld1d { za6h\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za64_6_w0_2,
+ svld1_hor_vnum_za64 (6, w0, p0, x1, 2),
+ svld1_hor_vnum_za64 (6, w0, p0, x1, 2))
+
+/*
+** ld1_vnum_za64_2_w0_13:
+** incb x1, all, mul #13
+** add (w1[2-5]), w0, #?13
+** ld1d { za2h\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za64_2_w0_13,
+ svld1_hor_vnum_za64 (2, w0, p0, x1, 13),
+ svld1_hor_vnum_za64 (2, w0, p0, x1, 13))
+
+/*
+** ld1_vnum_za64_4_w0_x2:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:\1, x2|x2, \1), x1
+** add (w1[2-5]), (?:w0, w2|w2, w0)
+** ld1d { za4h\.d\[\3, 0\] }, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za64_4_w0_x2,
+ svld1_hor_vnum_za64 (4, w0, p0, x1, x2),
+ svld1_hor_vnum_za64 (4, w0, p0, x1, x2))
+
+/*
+** ld1_vnum_za64_1_w0p1_0:
+** mov (w1[2-5]), w0
+** ld1d { za1h\.d\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za64_1_w0p1_0,
+ svld1_hor_vnum_za64 (1, w0 + 1, p0, x1, 0),
+ svld1_hor_vnum_za64 (1, w0 + 1, p0, x1, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za8.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za8.c
new file mode 100644
index 0000000..c42931d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_vnum_za8.c
@@ -0,0 +1,112 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** ld1_vnum_za8_0_0_1:
+** incb x1
+** mov (w1[2-5]), (?:wzr|#?0)
+** ld1b { za0h\.b\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za8_0_0_1,
+ svld1_hor_vnum_za8 (0, 0, p0, x1, 1),
+ svld1_hor_vnum_za8 (0, 0, p0, x1, 1))
+
+/*
+** ld1_vnum_za8_0_1_1:
+** incb x1
+** mov (w1[2-5]), #?1
+** ld1b { za0h\.b\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za8_0_1_1,
+ svld1_hor_vnum_za8 (0, 1, p0, x1, 1),
+ svld1_hor_vnum_za8 (0, 1, p0, x1, 1))
+
+/*
+** ld1_vnum_za8_0_0_16:
+** incb x1, all, mul #16
+** mov (w1[2-5]), #?16
+** ld1b { za0h\.b\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za8_0_0_16,
+ svld1_hor_vnum_za8 (0, 0, p0, x1, 16),
+ svld1_hor_vnum_za8 (0, 0, p0, x1, 16))
+
+/*
+** ld1_vnum_za8_0_1_16:
+** incb x1, all, mul #16
+** mov (w1[2-5]), #?17
+** ld1b { za0h\.b\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za8_0_1_16,
+ svld1_hor_vnum_za8 (0, 1, p0, x1, 16),
+ svld1_hor_vnum_za8 (0, 1, p0, x1, 16))
+
+/*
+** ld1_vnum_za8_0_w0_0:
+** mov (w1[2-5]), w0
+** ld1b { za0h\.b\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za8_0_w0_0,
+ svld1_hor_vnum_za8 (0, w0, p0, x1, 0),
+ svld1_hor_vnum_za8 (0, w0, p0, x1, 0))
+
+/*
+** ld1_vnum_za8_0_w0_1:
+** incb x1
+** mov (w1[2-5]), w0
+** ld1b { za0h\.b\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za8_0_w0_1,
+ svld1_hor_vnum_za8 (0, w0, p0, x1, 1),
+ svld1_hor_vnum_za8 (0, w0, p0, x1, 1))
+
+/*
+** ld1_vnum_za8_0_w0_15:
+** incb x1, all, mul #15
+** mov (w1[2-5]), w0
+** ld1b { za0h\.b\[\1, 15\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za8_0_w0_15,
+ svld1_hor_vnum_za8 (0, w0, p0, x1, 15),
+ svld1_hor_vnum_za8 (0, w0, p0, x1, 15))
+
+/*
+** ld1_vnum_za8_0_w0_16:
+** incb x1, all, mul #16
+** add (w1[2-5]), w0, #?16
+** ld1b { za0h\.b\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za8_0_w0_16,
+ svld1_hor_vnum_za8 (0, w0, p0, x1, 16),
+ svld1_hor_vnum_za8 (0, w0, p0, x1, 16))
+
+/*
+** ld1_vnum_za8_0_w0_x2:
+** cntb (x[0-9]+)
+** mul (x[0-9]+), (?:\1, x2|x2, \1)
+** add (w1[2-5]), (?:w0, w2|w2, w0)
+** ld1b { za0h\.b\[\3, 0\] }, p0/z, \[x1, \2\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za8_0_w0_x2,
+ svld1_hor_vnum_za8 (0, w0, p0, x1, x2),
+ svld1_hor_vnum_za8 (0, w0, p0, x1, x2))
+
+/*
+** ld1_vnum_za8_0_w0p1_0:
+** mov (w1[2-5]), w0
+** ld1b { za0h\.b\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za8_0_w0p1_0,
+ svld1_hor_vnum_za8 (0, w0 + 1, p0, x1, 0),
+ svld1_hor_vnum_za8 (0, w0 + 1, p0, x1, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_za128.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_za128.c
new file mode 100644
index 0000000..2c62922
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_za128.c
@@ -0,0 +1,83 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** ld1_za128_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** ld1q { za0h\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za128_0_0,
+ svld1_hor_za128 (0, 0, p0, x1),
+ svld1_hor_za128 (0, 0, p0, x1))
+
+/*
+** ld1_za128_0_1:
+** mov (w1[2-5]), #?1
+** ld1q { za0h\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za128_0_1,
+ svld1_hor_za128 (0, 1, p0, x1),
+ svld1_hor_za128 (0, 1, p0, x1))
+
+/*
+** ld1_za128_0_w0:
+** mov (w1[2-5]), w0
+** ld1q { za0h\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za128_0_w0,
+ svld1_hor_za128 (0, w0, p0, x1),
+ svld1_hor_za128 (0, w0, p0, x1))
+
+/*
+** ld1_za128_0_w0_p1:
+** add (w1[2-5]), w0, #?1
+** ld1q { za0h\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za128_0_w0_p1,
+ svld1_hor_za128 (0, w0 + 1, p0, x1),
+ svld1_hor_za128 (0, w0 + 1, p0, x1))
+
+/*
+** ld1_za128_7_w0:
+** mov (w1[2-5]), w0
+** ld1q { za7h\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za128_7_w0,
+ svld1_hor_za128 (7, w0, p0, x1),
+ svld1_hor_za128 (7, w0, p0, x1))
+
+/*
+** ld1_za128_13_w0:
+** mov (w1[2-5]), w0
+** ld1q { za13h\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za128_13_w0,
+ svld1_hor_za128 (13, w0, p0, x1),
+ svld1_hor_za128 (13, w0, p0, x1))
+
+/*
+** ld1_za128_15_w0:
+** mov (w1[2-5]), w0
+** ld1q { za15h\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za128_15_w0,
+ svld1_hor_za128 (15, w0, p0, x1),
+ svld1_hor_za128 (15, w0, p0, x1))
+
+/*
+** ld1_za128_9_w0_index:
+** mov (w1[2-5]), w0
+** ld1q { za9h\.q\[\1, 0\] }, p0/z, \[x1, x2, lsl #?4\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za128_9_w0_index,
+ svld1_hor_za128 (9, w0, p0, x1 + x2 * 16),
+ svld1_hor_za128 (9, w0, p0, x1 + x2 * 16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_za16.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_za16.c
new file mode 100644
index 0000000..3570bea
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_za16.c
@@ -0,0 +1,126 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** ld1_za16_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** ld1h { za0h\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_0_0,
+ svld1_hor_za16 (0, 0, p0, x1),
+ svld1_hor_za16 (0, 0, p0, x1))
+
+/* It would also be OK (and perhaps better) to move 0 into a register
+ and use an offset of 7. */
+/*
+** ld1_za16_0_7:
+** mov (w1[2-5]), #?7
+** ld1h { za0h\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_0_7,
+ svld1_hor_za16 (0, 7, p0, x1),
+ svld1_hor_za16 (0, 7, p0, x1))
+
+/*
+** ld1_za16_0_8:
+** mov (w1[2-5]), #?8
+** ld1h { za0h\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_0_8,
+ svld1_hor_za16 (0, 8, p0, x1),
+ svld1_hor_za16 (0, 8, p0, x1))
+
+/*
+** ld1_za16_0_w0:
+** mov (w1[2-5]), w0
+** ld1h { za0h\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_0_w0,
+ svld1_hor_za16 (0, w0, p0, x1),
+ svld1_hor_za16 (0, w0, p0, x1))
+
+/*
+** ld1_za16_0_w0_p1:
+** mov (w1[2-5]), w0
+** ld1h { za0h\.h\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_0_w0_p1,
+ svld1_hor_za16 (0, w0 + 1, p0, x1),
+ svld1_hor_za16 (0, w0 + 1, p0, x1))
+
+/*
+** ld1_za16_0_w0_p7:
+** mov (w1[2-5]), w0
+** ld1h { za0h\.h\[\1, 7\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_0_w0_p7,
+ svld1_hor_za16 (0, w0 + 7, p0, x1),
+ svld1_hor_za16 (0, w0 + 7, p0, x1))
+
+/*
+** ld1_za16_1_w0:
+** mov (w1[2-5]), w0
+** ld1h { za1h\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_1_w0,
+ svld1_hor_za16 (1, w0, p0, x1),
+ svld1_hor_za16 (1, w0, p0, x1))
+
+
+/*
+** ld1_za16_1_w0_p1:
+** mov (w1[2-5]), w0
+** ld1h { za1h\.h\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_1_w0_p1,
+ svld1_hor_za16 (1, w0 + 1, p0, x1),
+ svld1_hor_za16 (1, w0 + 1, p0, x1))
+
+/*
+** ld1_za16_1_w0_p7:
+** mov (w1[2-5]), w0
+** ld1h { za1h\.h\[\1, 7\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_1_w0_p7,
+ svld1_hor_za16 (1, w0 + 7, p0, x1),
+ svld1_hor_za16 (1, w0 + 7, p0, x1))
+
+/*
+** ld1_za16_1_w0_p5_index:
+** mov (w1[2-5]), w0
+** ld1h { za1h\.h\[\1, 5\] }, p0/z, \[x1, x2, lsl #?1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_1_w0_p5_index,
+ svld1_hor_za16 (1, w0 + 5, p0, x1 + x2 * 2),
+ svld1_hor_za16 (1, w0 + 5, p0, x1 + x2 * 2))
+
+/*
+** ld1_za16_0_w0_p8:
+** add (w1[2-5]), w0, #?8
+** ld1h { za0h\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_0_w0_p8,
+ svld1_hor_za16 (0, w0 + 8, p0, x1),
+ svld1_hor_za16 (0, w0 + 8, p0, x1))
+
+/*
+** ld1_za16_0_w0_m1:
+** sub (w1[2-5]), w0, #?1
+** ld1h { za0h\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_0_w0_m1,
+ svld1_hor_za16 (0, w0 - 1, p0, x1),
+ svld1_hor_za16 (0, w0 - 1, p0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_za32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_za32.c
new file mode 100644
index 0000000..a8f6606b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_za32.c
@@ -0,0 +1,125 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** ld1_za32_0_0:
+** mov (w1[2-5]), (?:w0|#?0)
+** ld1w { za0h\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_0_0,
+ svld1_hor_za32 (0, 0, p0, x1),
+ svld1_hor_za32 (0, 0, p0, x1))
+
+/* It would also be OK (and perhaps better) to move 0 into a register
+ and use an offset of 3. */
+/*
+** ld1_za32_0_3:
+** mov (w1[2-5]), #?3
+** ld1w { za0h\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_0_3,
+ svld1_hor_za32 (0, 3, p0, x1),
+ svld1_hor_za32 (0, 3, p0, x1))
+
+/*
+** ld1_za32_0_4:
+** mov (w1[2-5]), #?4
+** ld1w { za0h\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_0_4,
+ svld1_hor_za32 (0, 4, p0, x1),
+ svld1_hor_za32 (0, 4, p0, x1))
+
+/*
+** ld1_za32_0_w0:
+** mov (w1[2-5]), w0
+** ld1w { za0h\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_0_w0,
+ svld1_hor_za32 (0, w0, p0, x1),
+ svld1_hor_za32 (0, w0, p0, x1))
+
+/*
+** ld1_za32_0_w0_p1:
+** mov (w1[2-5]), w0
+** ld1w { za0h\.s\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_0_w0_p1,
+ svld1_hor_za32 (0, w0 + 1, p0, x1),
+ svld1_hor_za32 (0, w0 + 1, p0, x1))
+
+/*
+** ld1_za32_0_w0_p3:
+** mov (w1[2-5]), w0
+** ld1w { za0h\.s\[\1, 3\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_0_w0_p3,
+ svld1_hor_za32 (0, w0 + 3, p0, x1),
+ svld1_hor_za32 (0, w0 + 3, p0, x1))
+
+/*
+** ld1_za32_3_w0:
+** mov (w1[2-5]), w0
+** ld1w { za3h\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_3_w0,
+ svld1_hor_za32 (3, w0, p0, x1),
+ svld1_hor_za32 (3, w0, p0, x1))
+
+/*
+** ld1_za32_3_w0_p1:
+** mov (w1[2-5]), w0
+** ld1w { za3h\.s\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_3_w0_p1,
+ svld1_hor_za32 (3, w0 + 1, p0, x1),
+ svld1_hor_za32 (3, w0 + 1, p0, x1))
+
+/*
+** ld1_za32_3_w0_p3:
+** mov (w1[2-5]), w0
+** ld1w { za3h\.s\[\1, 3\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_3_w0_p3,
+ svld1_hor_za32 (3, w0 + 3, p0, x1),
+ svld1_hor_za32 (3, w0 + 3, p0, x1))
+
+/*
+** ld1_za32_1_w0_p2_index:
+** mov (w1[2-5]), w0
+** ld1w { za1h\.s\[\1, 2\] }, p0/z, \[x1, x2, lsl #?2\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_1_w0_p2_index,
+ svld1_hor_za32 (1, w0 + 2, p0, x1 + x2 * 4),
+ svld1_hor_za32 (1, w0 + 2, p0, x1 + x2 * 4))
+
+/*
+** ld1_za32_0_w0_p4:
+** add (w1[2-5]), w0, #?4
+** ld1w { za0h\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_0_w0_p4,
+ svld1_hor_za32 (0, w0 + 4, p0, x1),
+ svld1_hor_za32 (0, w0 + 4, p0, x1))
+
+/*
+** ld1_za32_0_w0_m1:
+** sub (w1[2-5]), w0, #?1
+** ld1w { za0h\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_0_w0_m1,
+ svld1_hor_za32 (0, w0 - 1, p0, x1),
+ svld1_hor_za32 (0, w0 - 1, p0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_za64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_za64.c
new file mode 100644
index 0000000..f4573eb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_za64.c
@@ -0,0 +1,105 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** ld1_za64_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** ld1d { za0h\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za64_0_0,
+ svld1_hor_za64 (0, 0, p0, x1),
+ svld1_hor_za64 (0, 0, p0, x1))
+
+/* It would also be OK (and perhaps better) to move 0 into a register
+ and use an offset of 1. */
+/*
+** ld1_za64_0_1:
+** mov (w1[2-5]), #?1
+** ld1d { za0h\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za64_0_1,
+ svld1_hor_za64 (0, 1, p0, x1),
+ svld1_hor_za64 (0, 1, p0, x1))
+
+/*
+** ld1_za64_0_2:
+** mov (w1[2-5]), #?2
+** ld1d { za0h\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za64_0_2,
+ svld1_hor_za64 (0, 2, p0, x1),
+ svld1_hor_za64 (0, 2, p0, x1))
+
+/*
+** ld1_za64_0_w0:
+** mov (w1[2-5]), w0
+** ld1d { za0h\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za64_0_w0,
+ svld1_hor_za64 (0, w0, p0, x1),
+ svld1_hor_za64 (0, w0, p0, x1))
+
+/*
+** ld1_za64_0_w0_p1:
+** mov (w1[2-5]), w0
+** ld1d { za0h\.d\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za64_0_w0_p1,
+ svld1_hor_za64 (0, w0 + 1, p0, x1),
+ svld1_hor_za64 (0, w0 + 1, p0, x1))
+
+/*
+** ld1_za64_7_w0:
+** mov (w1[2-5]), w0
+** ld1d { za7h\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za64_7_w0,
+ svld1_hor_za64 (7, w0, p0, x1),
+ svld1_hor_za64 (7, w0, p0, x1))
+
+/*
+** ld1_za64_7_w0_p1:
+** mov (w1[2-5]), w0
+** ld1d { za7h\.d\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za64_7_w0_p1,
+ svld1_hor_za64 (7, w0 + 1, p0, x1),
+ svld1_hor_za64 (7, w0 + 1, p0, x1))
+
+/*
+** ld1_za64_5_w0_p1_index:
+** mov (w1[2-5]), w0
+** ld1d { za5h\.d\[\1, 1\] }, p0/z, \[x1, x2, lsl #?3\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za64_5_w0_p1_index,
+ svld1_hor_za64 (5, w0 + 1, p0, x1 + x2 * 8),
+ svld1_hor_za64 (5, w0 + 1, p0, x1 + x2 * 8))
+
+/*
+** ld1_za64_0_w0_p2:
+** add (w1[2-5]), w0, #?2
+** ld1d { za0h\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za64_0_w0_p2,
+ svld1_hor_za64 (0, w0 + 2, p0, x1),
+ svld1_hor_za64 (0, w0 + 2, p0, x1))
+
+/*
+** ld1_za64_0_w0_m1:
+** sub (w1[2-5]), w0, #?1
+** ld1d { za0h\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za64_0_w0_m1,
+ svld1_hor_za64 (0, w0 - 1, p0, x1),
+ svld1_hor_za64 (0, w0 - 1, p0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_za8.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_za8.c
new file mode 100644
index 0000000..eef0927
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_hor_za8.c
@@ -0,0 +1,95 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** ld1_za8_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** ld1b { za0h\.b\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za8_0_0,
+ svld1_hor_za8 (0, 0, p0, x1),
+ svld1_hor_za8 (0, 0, p0, x1))
+
+/* It would also be OK (and perhaps better) to move 0 into a register
+ and use an offset of 15. */
+/*
+** ld1_za8_0_15:
+** mov (w1[2-5]), #?15
+** ld1b { za0h\.b\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za8_0_15,
+ svld1_hor_za8 (0, 15, p0, x1),
+ svld1_hor_za8 (0, 15, p0, x1))
+
+/*
+** ld1_za8_0_16:
+** mov (w1[2-5]), #?16
+** ld1b { za0h\.b\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za8_0_16,
+ svld1_hor_za8 (0, 16, p0, x1),
+ svld1_hor_za8 (0, 16, p0, x1))
+
+/*
+** ld1_za8_0_w0:
+** mov (w1[2-5]), w0
+** ld1b { za0h\.b\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za8_0_w0,
+ svld1_hor_za8 (0, w0, p0, x1),
+ svld1_hor_za8 (0, w0, p0, x1))
+
+/*
+** ld1_za8_0_w0_p1:
+** mov (w1[2-5]), w0
+** ld1b { za0h\.b\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za8_0_w0_p1,
+ svld1_hor_za8 (0, w0 + 1, p0, x1),
+ svld1_hor_za8 (0, w0 + 1, p0, x1))
+
+/*
+** ld1_za8_0_w0_p15:
+** mov (w1[2-5]), w0
+** ld1b { za0h\.b\[\1, 15\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za8_0_w0_p15,
+ svld1_hor_za8 (0, w0 + 15, p0, x1),
+ svld1_hor_za8 (0, w0 + 15, p0, x1))
+
+/*
+** ld1_za8_0_w0_p13_index:
+** mov (w1[2-5]), w0
+** ld1b { za0h\.b\[\1, 15\] }, p0/z, \[x1, x2\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za8_0_w0_p13_index,
+ svld1_hor_za8 (0, w0 + 15, p0, x1 + x2),
+ svld1_hor_za8 (0, w0 + 15, p0, x1 + x2))
+
+/*
+** ld1_za8_0_w0_p16:
+** add (w1[2-5]), w0, #?16
+** ld1b { za0h\.b\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za8_0_w0_p16,
+ svld1_hor_za8 (0, w0 + 16, p0, x1),
+ svld1_hor_za8 (0, w0 + 16, p0, x1))
+
+/*
+** ld1_za8_0_w0_m1:
+** sub (w1[2-5]), w0, #?1
+** ld1b { za0h\.b\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za8_0_w0_m1,
+ svld1_hor_za8 (0, w0 - 1, p0, x1),
+ svld1_hor_za8 (0, w0 - 1, p0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za128.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za128.c
new file mode 100644
index 0000000..e90da4b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za128.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** ld1_vnum_za128_0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** ld1q { za0v\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za128_0_0_0,
+ svld1_ver_vnum_za128 (0, 0, p0, x1, 0),
+ svld1_ver_vnum_za128 (0, 0, p0, x1, 0))
+
+/*
+** ld1_vnum_za128_7_1_0:
+** mov (w1[2-5]), #?1
+** ld1q { za7v\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za128_7_1_0,
+ svld1_ver_vnum_za128 (7, 1, p0, x1, 0),
+ svld1_ver_vnum_za128 (7, 1, p0, x1, 0))
+
+/*
+** ld1_vnum_za128_11_1_5:
+** incb x1, all, mul #5
+** mov (w1[2-5]), #?6
+** ld1q { za11v\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za128_11_1_5,
+ svld1_ver_vnum_za128 (11, 1, p0, x1, 5),
+ svld1_ver_vnum_za128 (11, 1, p0, x1, 5))
+
+/*
+** ld1_vnum_za128_3_w0_0:
+** mov (w1[2-5]), w0
+** ld1q { za3v\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za128_3_w0_0,
+ svld1_ver_vnum_za128 (3, w0, p0, x1, 0),
+ svld1_ver_vnum_za128 (3, w0, p0, x1, 0))
+
+/*
+** ld1_vnum_za128_5_w0_0:
+** incb x1, all, mul #13
+** add (w1[2-5]), w0, #?13
+** ld1q { za5v\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za128_5_w0_0,
+ svld1_ver_vnum_za128 (5, w0, p0, x1, 13),
+ svld1_ver_vnum_za128 (5, w0, p0, x1, 13))
+
+/*
+** ld1_vnum_za128_11_w0_0:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:\1, x2|x2, \1), x1
+** add (w1[2-5]), (?:w0, w2|w2, w0)
+** ld1q { za11v\.q\[\3, 0\] }, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za128_11_w0_0,
+ svld1_ver_vnum_za128 (11, w0, p0, x1, x2),
+ svld1_ver_vnum_za128 (11, w0, p0, x1, x2))
+
+/*
+** ld1_vnum_za128_15_w0p1_0:
+** add (w1[2-5]), w0, #?1
+** ld1q { za15v\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za128_15_w0p1_0,
+ svld1_ver_vnum_za128 (15, w0 + 1, p0, x1, 0),
+ svld1_ver_vnum_za128 (15, w0 + 1, p0, x1, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za16.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za16.c
new file mode 100644
index 0000000..7868cf4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za16.c
@@ -0,0 +1,123 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** ld1_vnum_za16_1_0_1:
+** incb x1
+** mov (w1[2-5]), (?:wzr|#?0)
+** ld1h { za1v\.h\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za16_1_0_1,
+ svld1_ver_vnum_za16 (1, 0, p0, x1, 1),
+ svld1_ver_vnum_za16 (1, 0, p0, x1, 1))
+
+/*
+** ld1_vnum_za16_1_1_1:
+** incb x1
+** mov (w1[2-5]), #?1
+** ld1h { za1v\.h\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za16_1_1_1,
+ svld1_ver_vnum_za16 (1, 1, p0, x1, 1),
+ svld1_ver_vnum_za16 (1, 1, p0, x1, 1))
+
+/*
+** ld1_vnum_za16_0_0_8:
+** incb x1, all, mul #8
+** mov (w1[2-5]), #?8
+** ld1h { za0v\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za16_0_0_8,
+ svld1_ver_vnum_za16 (0, 0, p0, x1, 8),
+ svld1_ver_vnum_za16 (0, 0, p0, x1, 8))
+
+/*
+** ld1_vnum_za16_0_1_8:
+** incb x1, all, mul #8
+** mov (w1[2-5]), #?9
+** ld1h { za0v\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za16_0_1_8,
+ svld1_ver_vnum_za16 (0, 1, p0, x1, 8),
+ svld1_ver_vnum_za16 (0, 1, p0, x1, 8))
+
+/*
+** ld1_vnum_za16_0_w0_0:
+** mov (w1[2-5]), w0
+** ld1h { za0v\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za16_0_w0_0,
+ svld1_ver_vnum_za16 (0, w0, p0, x1, 0),
+ svld1_ver_vnum_za16 (0, w0, p0, x1, 0))
+
+/*
+** ld1_vnum_za16_0_w0_1:
+** incb x1
+** mov (w1[2-5]), w0
+** ld1h { za0v\.h\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za16_0_w0_1,
+ svld1_ver_vnum_za16 (0, w0, p0, x1, 1),
+ svld1_ver_vnum_za16 (0, w0, p0, x1, 1))
+
+/*
+** ld1_vnum_za16_0_w0_7:
+** incb x1, all, mul #7
+** mov (w1[2-5]), w0
+** ld1h { za0v\.h\[\1, 7\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za16_0_w0_7,
+ svld1_ver_vnum_za16 (0, w0, p0, x1, 7),
+ svld1_ver_vnum_za16 (0, w0, p0, x1, 7))
+
+/*
+** ld1_vnum_za16_1_w0_8:
+** incb x1, all, mul #8
+** add (w1[2-5]), w0, #?8
+** ld1h { za1v\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za16_1_w0_8,
+ svld1_ver_vnum_za16 (1, w0, p0, x1, 8),
+ svld1_ver_vnum_za16 (1, w0, p0, x1, 8))
+
+/*
+** ld1_vnum_za16_1_w0_13:
+** incb x1, all, mul #13
+** add (w1[2-5]), w0, #?13
+** ld1h { za1v\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za16_1_w0_13,
+ svld1_ver_vnum_za16 (1, w0, p0, x1, 13),
+ svld1_ver_vnum_za16 (1, w0, p0, x1, 13))
+
+/*
+** ld1_vnum_za16_0_w0_x2:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:\1, x2|x2, \1), x1
+** add (w1[2-5]), (?:w0, w2|w2, w0)
+** ld1h { za0v\.h\[\3, 0\] }, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za16_0_w0_x2,
+ svld1_ver_vnum_za16 (0, w0, p0, x1, x2),
+ svld1_ver_vnum_za16 (0, w0, p0, x1, x2))
+
+/*
+** ld1_vnum_za16_1_w0p1_0:
+** mov (w1[2-5]), w0
+** ld1h { za1v\.h\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za16_1_w0p1_0,
+ svld1_ver_vnum_za16 (1, w0 + 1, p0, x1, 0),
+ svld1_ver_vnum_za16 (1, w0 + 1, p0, x1, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za32.c
new file mode 100644
index 0000000..053b601
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za32.c
@@ -0,0 +1,123 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** ld1_vnum_za32_3_0_1:
+** incb x1
+** mov (w1[2-5]), (?:wzr|#?0)
+** ld1w { za3v\.s\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za32_3_0_1,
+ svld1_ver_vnum_za32 (3, 0, p0, x1, 1),
+ svld1_ver_vnum_za32 (3, 0, p0, x1, 1))
+
+/*
+** ld1_vnum_za32_2_1_1:
+** incb x1
+** mov (w1[2-5]), #?1
+** ld1w { za2v\.s\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za32_2_1_1,
+ svld1_ver_vnum_za32 (2, 1, p0, x1, 1),
+ svld1_ver_vnum_za32 (2, 1, p0, x1, 1))
+
+/*
+** ld1_vnum_za32_0_0_4:
+** incb x1, all, mul #4
+** mov (w1[2-5]), #?4
+** ld1w { za0v\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za32_0_0_4,
+ svld1_ver_vnum_za32 (0, 0, p0, x1, 4),
+ svld1_ver_vnum_za32 (0, 0, p0, x1, 4))
+
+/*
+** ld1_vnum_za32_2_1_4:
+** incb x1, all, mul #4
+** mov (w1[2-5]), #?5
+** ld1w { za2v\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za32_2_1_4,
+ svld1_ver_vnum_za32 (2, 1, p0, x1, 4),
+ svld1_ver_vnum_za32 (2, 1, p0, x1, 4))
+
+/*
+** ld1_vnum_za32_0_w0_0:
+** mov (w1[2-5]), w0
+** ld1w { za0v\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za32_0_w0_0,
+ svld1_ver_vnum_za32 (0, w0, p0, x1, 0),
+ svld1_ver_vnum_za32 (0, w0, p0, x1, 0))
+
+/*
+** ld1_vnum_za32_0_w0_1:
+** incb x1
+** mov (w1[2-5]), w0
+** ld1w { za0v\.s\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za32_0_w0_1,
+ svld1_ver_vnum_za32 (0, w0, p0, x1, 1),
+ svld1_ver_vnum_za32 (0, w0, p0, x1, 1))
+
+/*
+** ld1_vnum_za32_0_w0_3:
+** incb x1, all, mul #3
+** mov (w1[2-5]), w0
+** ld1w { za0v\.s\[\1, 3\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za32_0_w0_3,
+ svld1_ver_vnum_za32 (0, w0, p0, x1, 3),
+ svld1_ver_vnum_za32 (0, w0, p0, x1, 3))
+
+/*
+** ld1_vnum_za32_1_w0_4:
+** incb x1, all, mul #4
+** add (w1[2-5]), w0, #?4
+** ld1w { za1v\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za32_1_w0_4,
+ svld1_ver_vnum_za32 (1, w0, p0, x1, 4),
+ svld1_ver_vnum_za32 (1, w0, p0, x1, 4))
+
+/*
+** ld1_vnum_za32_3_w0_13:
+** incb x1, all, mul #13
+** add (w1[2-5]), w0, #?13
+** ld1w { za3v\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za32_3_w0_13,
+ svld1_ver_vnum_za32 (3, w0, p0, x1, 13),
+ svld1_ver_vnum_za32 (3, w0, p0, x1, 13))
+
+/*
+** ld1_vnum_za32_0_w0_x2:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:\1, x2|x2, \1), x1
+** add (w1[2-5]), (?:w0, w2|w2, w0)
+** ld1w { za0v\.s\[\3, 0\] }, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za32_0_w0_x2,
+ svld1_ver_vnum_za32 (0, w0, p0, x1, x2),
+ svld1_ver_vnum_za32 (0, w0, p0, x1, x2))
+
+/*
+** ld1_vnum_za32_1_w0p1_0:
+** mov (w1[2-5]), w0
+** ld1w { za1v\.s\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za32_1_w0p1_0,
+ svld1_ver_vnum_za32 (1, w0 + 1, p0, x1, 0),
+ svld1_ver_vnum_za32 (1, w0 + 1, p0, x1, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za64.c
new file mode 100644
index 0000000..d047649
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za64.c
@@ -0,0 +1,112 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** ld1_vnum_za64_3_0_1:
+** incb x1
+** mov (w1[2-5]), (?:wzr|#?0)
+** ld1d { za3v\.d\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za64_3_0_1,
+ svld1_ver_vnum_za64 (3, 0, p0, x1, 1),
+ svld1_ver_vnum_za64 (3, 0, p0, x1, 1))
+
+/*
+** ld1_vnum_za64_7_1_1:
+** incb x1
+** mov (w1[2-5]), #?1
+** ld1d { za7v\.d\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za64_7_1_1,
+ svld1_ver_vnum_za64 (7, 1, p0, x1, 1),
+ svld1_ver_vnum_za64 (7, 1, p0, x1, 1))
+
+/*
+** ld1_vnum_za64_0_0_2:
+** incb x1, all, mul #2
+** mov (w1[2-5]), #?2
+** ld1d { za0v\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za64_0_0_2,
+ svld1_ver_vnum_za64 (0, 0, p0, x1, 2),
+ svld1_ver_vnum_za64 (0, 0, p0, x1, 2))
+
+/*
+** ld1_vnum_za64_5_1_2:
+** incb x1, all, mul #2
+** mov (w1[2-5]), #?3
+** ld1d { za5v\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za64_5_1_2,
+ svld1_ver_vnum_za64 (5, 1, p0, x1, 2),
+ svld1_ver_vnum_za64 (5, 1, p0, x1, 2))
+
+/*
+** ld1_vnum_za64_0_w0_0:
+** mov (w1[2-5]), w0
+** ld1d { za0v\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za64_0_w0_0,
+ svld1_ver_vnum_za64 (0, w0, p0, x1, 0),
+ svld1_ver_vnum_za64 (0, w0, p0, x1, 0))
+
+/*
+** ld1_vnum_za64_0_w0_1:
+** incb x1
+** mov (w1[2-5]), w0
+** ld1d { za0v\.d\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za64_0_w0_1,
+ svld1_ver_vnum_za64 (0, w0, p0, x1, 1),
+ svld1_ver_vnum_za64 (0, w0, p0, x1, 1))
+
+/*
+** ld1_vnum_za64_6_w0_2:
+** incb x1, all, mul #2
+** add (w1[2-5]), w0, #?2
+** ld1d { za6v\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za64_6_w0_2,
+ svld1_ver_vnum_za64 (6, w0, p0, x1, 2),
+ svld1_ver_vnum_za64 (6, w0, p0, x1, 2))
+
+/*
+** ld1_vnum_za64_2_w0_13:
+** incb x1, all, mul #13
+** add (w1[2-5]), w0, #?13
+** ld1d { za2v\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za64_2_w0_13,
+ svld1_ver_vnum_za64 (2, w0, p0, x1, 13),
+ svld1_ver_vnum_za64 (2, w0, p0, x1, 13))
+
+/*
+** ld1_vnum_za64_4_w0_x2:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:\1, x2|x2, \1), x1
+** add (w1[2-5]), (?:w0, w2|w2, w0)
+** ld1d { za4v\.d\[\3, 0\] }, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za64_4_w0_x2,
+ svld1_ver_vnum_za64 (4, w0, p0, x1, x2),
+ svld1_ver_vnum_za64 (4, w0, p0, x1, x2))
+
+/*
+** ld1_vnum_za64_1_w0p1_0:
+** mov (w1[2-5]), w0
+** ld1d { za1v\.d\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za64_1_w0p1_0,
+ svld1_ver_vnum_za64 (1, w0 + 1, p0, x1, 0),
+ svld1_ver_vnum_za64 (1, w0 + 1, p0, x1, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za8.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za8.c
new file mode 100644
index 0000000..e99d95e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_vnum_za8.c
@@ -0,0 +1,112 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** ld1_vnum_za8_0_0_1:
+** incb x1
+** mov (w1[2-5]), (?:wzr|#?0)
+** ld1b { za0v\.b\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za8_0_0_1,
+ svld1_ver_vnum_za8 (0, 0, p0, x1, 1),
+ svld1_ver_vnum_za8 (0, 0, p0, x1, 1))
+
+/*
+** ld1_vnum_za8_0_1_1:
+** incb x1
+** mov (w1[2-5]), #?1
+** ld1b { za0v\.b\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za8_0_1_1,
+ svld1_ver_vnum_za8 (0, 1, p0, x1, 1),
+ svld1_ver_vnum_za8 (0, 1, p0, x1, 1))
+
+/*
+** ld1_vnum_za8_0_0_16:
+** incb x1, all, mul #16
+** mov (w1[2-5]), #?16
+** ld1b { za0v\.b\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za8_0_0_16,
+ svld1_ver_vnum_za8 (0, 0, p0, x1, 16),
+ svld1_ver_vnum_za8 (0, 0, p0, x1, 16))
+
+/*
+** ld1_vnum_za8_0_1_16:
+** incb x1, all, mul #16
+** mov (w1[2-5]), #?17
+** ld1b { za0v\.b\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za8_0_1_16,
+ svld1_ver_vnum_za8 (0, 1, p0, x1, 16),
+ svld1_ver_vnum_za8 (0, 1, p0, x1, 16))
+
+/*
+** ld1_vnum_za8_0_w0_0:
+** mov (w1[2-5]), w0
+** ld1b { za0v\.b\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za8_0_w0_0,
+ svld1_ver_vnum_za8 (0, w0, p0, x1, 0),
+ svld1_ver_vnum_za8 (0, w0, p0, x1, 0))
+
+/*
+** ld1_vnum_za8_0_w0_1:
+** incb x1
+** mov (w1[2-5]), w0
+** ld1b { za0v\.b\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za8_0_w0_1,
+ svld1_ver_vnum_za8 (0, w0, p0, x1, 1),
+ svld1_ver_vnum_za8 (0, w0, p0, x1, 1))
+
+/*
+** ld1_vnum_za8_0_w0_15:
+** incb x1, all, mul #15
+** mov (w1[2-5]), w0
+** ld1b { za0v\.b\[\1, 15\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za8_0_w0_15,
+ svld1_ver_vnum_za8 (0, w0, p0, x1, 15),
+ svld1_ver_vnum_za8 (0, w0, p0, x1, 15))
+
+/*
+** ld1_vnum_za8_0_w0_16:
+** incb x1, all, mul #16
+** add (w1[2-5]), w0, #?16
+** ld1b { za0v\.b\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za8_0_w0_16,
+ svld1_ver_vnum_za8 (0, w0, p0, x1, 16),
+ svld1_ver_vnum_za8 (0, w0, p0, x1, 16))
+
+/*
+** ld1_vnum_za8_0_w0_x2:
+** cntb (x[0-9]+)
+** mul (x[0-9]+), (?:\1, x2|x2, \1)
+** add (w1[2-5]), (?:w0, w2|w2, w0)
+** ld1b { za0v\.b\[\3, 0\] }, p0/z, \[x1, \2\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za8_0_w0_x2,
+ svld1_ver_vnum_za8 (0, w0, p0, x1, x2),
+ svld1_ver_vnum_za8 (0, w0, p0, x1, x2))
+
+/*
+** ld1_vnum_za8_0_w0p1_0:
+** mov (w1[2-5]), w0
+** ld1b { za0v\.b\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_vnum_za8_0_w0p1_0,
+ svld1_ver_vnum_za8 (0, w0 + 1, p0, x1, 0),
+ svld1_ver_vnum_za8 (0, w0 + 1, p0, x1, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_za128.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_za128.c
new file mode 100644
index 0000000..e81f402
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_za128.c
@@ -0,0 +1,83 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** ld1_za128_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** ld1q { za0v\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za128_0_0,
+ svld1_ver_za128 (0, 0, p0, x1),
+ svld1_ver_za128 (0, 0, p0, x1))
+
+/*
+** ld1_za128_0_1:
+** mov (w1[2-5]), #?1
+** ld1q { za0v\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za128_0_1,
+ svld1_ver_za128 (0, 1, p0, x1),
+ svld1_ver_za128 (0, 1, p0, x1))
+
+/*
+** ld1_za128_0_w0:
+** mov (w1[2-5]), w0
+** ld1q { za0v\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za128_0_w0,
+ svld1_ver_za128 (0, w0, p0, x1),
+ svld1_ver_za128 (0, w0, p0, x1))
+
+/*
+** ld1_za128_0_w0_p1:
+** add (w1[2-5]), w0, #?1
+** ld1q { za0v\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za128_0_w0_p1,
+ svld1_ver_za128 (0, w0 + 1, p0, x1),
+ svld1_ver_za128 (0, w0 + 1, p0, x1))
+
+/*
+** ld1_za128_7_w0:
+** mov (w1[2-5]), w0
+** ld1q { za7v\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za128_7_w0,
+ svld1_ver_za128 (7, w0, p0, x1),
+ svld1_ver_za128 (7, w0, p0, x1))
+
+/*
+** ld1_za128_13_w0:
+** mov (w1[2-5]), w0
+** ld1q { za13v\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za128_13_w0,
+ svld1_ver_za128 (13, w0, p0, x1),
+ svld1_ver_za128 (13, w0, p0, x1))
+
+/*
+** ld1_za128_15_w0:
+** mov (w1[2-5]), w0
+** ld1q { za15v\.q\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za128_15_w0,
+ svld1_ver_za128 (15, w0, p0, x1),
+ svld1_ver_za128 (15, w0, p0, x1))
+
+/*
+** ld1_za128_9_w0_index:
+** mov (w1[2-5]), w0
+** ld1q { za9v\.q\[\1, 0\] }, p0/z, \[x1, x2, lsl #?4\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za128_9_w0_index,
+ svld1_ver_za128 (9, w0, p0, x1 + x2 * 16),
+ svld1_ver_za128 (9, w0, p0, x1 + x2 * 16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_za16.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_za16.c
new file mode 100644
index 0000000..0938b1e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_za16.c
@@ -0,0 +1,126 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** ld1_za16_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** ld1h { za0v\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_0_0,
+ svld1_ver_za16 (0, 0, p0, x1),
+ svld1_ver_za16 (0, 0, p0, x1))
+
+/* It would also be OK (and perhaps better) to move 0 into a register
+ and use an offset of 7. */
+/*
+** ld1_za16_0_7:
+** mov (w1[2-5]), #?7
+** ld1h { za0v\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_0_7,
+ svld1_ver_za16 (0, 7, p0, x1),
+ svld1_ver_za16 (0, 7, p0, x1))
+
+/*
+** ld1_za16_0_8:
+** mov (w1[2-5]), #?8
+** ld1h { za0v\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_0_8,
+ svld1_ver_za16 (0, 8, p0, x1),
+ svld1_ver_za16 (0, 8, p0, x1))
+
+/*
+** ld1_za16_0_w0:
+** mov (w1[2-5]), w0
+** ld1h { za0v\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_0_w0,
+ svld1_ver_za16 (0, w0, p0, x1),
+ svld1_ver_za16 (0, w0, p0, x1))
+
+/*
+** ld1_za16_0_w0_p1:
+** mov (w1[2-5]), w0
+** ld1h { za0v\.h\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_0_w0_p1,
+ svld1_ver_za16 (0, w0 + 1, p0, x1),
+ svld1_ver_za16 (0, w0 + 1, p0, x1))
+
+/*
+** ld1_za16_0_w0_p7:
+** mov (w1[2-5]), w0
+** ld1h { za0v\.h\[\1, 7\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_0_w0_p7,
+ svld1_ver_za16 (0, w0 + 7, p0, x1),
+ svld1_ver_za16 (0, w0 + 7, p0, x1))
+
+/*
+** ld1_za16_1_w0:
+** mov (w1[2-5]), w0
+** ld1h { za1v\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_1_w0,
+ svld1_ver_za16 (1, w0, p0, x1),
+ svld1_ver_za16 (1, w0, p0, x1))
+
+
+/*
+** ld1_za16_1_w0_p1:
+** mov (w1[2-5]), w0
+** ld1h { za1v\.h\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_1_w0_p1,
+ svld1_ver_za16 (1, w0 + 1, p0, x1),
+ svld1_ver_za16 (1, w0 + 1, p0, x1))
+
+/*
+** ld1_za16_1_w0_p7:
+** mov (w1[2-5]), w0
+** ld1h { za1v\.h\[\1, 7\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_1_w0_p7,
+ svld1_ver_za16 (1, w0 + 7, p0, x1),
+ svld1_ver_za16 (1, w0 + 7, p0, x1))
+
+/*
+** ld1_za16_1_w0_p5_index:
+** mov (w1[2-5]), w0
+** ld1h { za1v\.h\[\1, 5\] }, p0/z, \[x1, x2, lsl #?1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_1_w0_p5_index,
+ svld1_ver_za16 (1, w0 + 5, p0, x1 + x2 * 2),
+ svld1_ver_za16 (1, w0 + 5, p0, x1 + x2 * 2))
+
+/*
+** ld1_za16_0_w0_p8:
+** add (w1[2-5]), w0, #?8
+** ld1h { za0v\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_0_w0_p8,
+ svld1_ver_za16 (0, w0 + 8, p0, x1),
+ svld1_ver_za16 (0, w0 + 8, p0, x1))
+
+/*
+** ld1_za16_0_w0_m1:
+** sub (w1[2-5]), w0, #?1
+** ld1h { za0v\.h\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za16_0_w0_m1,
+ svld1_ver_za16 (0, w0 - 1, p0, x1),
+ svld1_ver_za16 (0, w0 - 1, p0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_za32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_za32.c
new file mode 100644
index 0000000..bb9d931
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_za32.c
@@ -0,0 +1,125 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** ld1_za32_0_0:
+** mov (w1[2-5]), (?:w0|#?0)
+** ld1w { za0v\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_0_0,
+ svld1_ver_za32 (0, 0, p0, x1),
+ svld1_ver_za32 (0, 0, p0, x1))
+
+/* It would also be OK (and perhaps better) to move 0 into a register
+ and use an offset of 3. */
+/*
+** ld1_za32_0_3:
+** mov (w1[2-5]), #?3
+** ld1w { za0v\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_0_3,
+ svld1_ver_za32 (0, 3, p0, x1),
+ svld1_ver_za32 (0, 3, p0, x1))
+
+/*
+** ld1_za32_0_4:
+** mov (w1[2-5]), #?4
+** ld1w { za0v\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_0_4,
+ svld1_ver_za32 (0, 4, p0, x1),
+ svld1_ver_za32 (0, 4, p0, x1))
+
+/*
+** ld1_za32_0_w0:
+** mov (w1[2-5]), w0
+** ld1w { za0v\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_0_w0,
+ svld1_ver_za32 (0, w0, p0, x1),
+ svld1_ver_za32 (0, w0, p0, x1))
+
+/*
+** ld1_za32_0_w0_p1:
+** mov (w1[2-5]), w0
+** ld1w { za0v\.s\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_0_w0_p1,
+ svld1_ver_za32 (0, w0 + 1, p0, x1),
+ svld1_ver_za32 (0, w0 + 1, p0, x1))
+
+/*
+** ld1_za32_0_w0_p3:
+** mov (w1[2-5]), w0
+** ld1w { za0v\.s\[\1, 3\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_0_w0_p3,
+ svld1_ver_za32 (0, w0 + 3, p0, x1),
+ svld1_ver_za32 (0, w0 + 3, p0, x1))
+
+/*
+** ld1_za32_3_w0:
+** mov (w1[2-5]), w0
+** ld1w { za3v\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_3_w0,
+ svld1_ver_za32 (3, w0, p0, x1),
+ svld1_ver_za32 (3, w0, p0, x1))
+
+/*
+** ld1_za32_3_w0_p1:
+** mov (w1[2-5]), w0
+** ld1w { za3v\.s\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_3_w0_p1,
+ svld1_ver_za32 (3, w0 + 1, p0, x1),
+ svld1_ver_za32 (3, w0 + 1, p0, x1))
+
+/*
+** ld1_za32_3_w0_p3:
+** mov (w1[2-5]), w0
+** ld1w { za3v\.s\[\1, 3\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_3_w0_p3,
+ svld1_ver_za32 (3, w0 + 3, p0, x1),
+ svld1_ver_za32 (3, w0 + 3, p0, x1))
+
+/*
+** ld1_za32_1_w0_p2_index:
+** mov (w1[2-5]), w0
+** ld1w { za1v\.s\[\1, 2\] }, p0/z, \[x1, x2, lsl #?2\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_1_w0_p2_index,
+ svld1_ver_za32 (1, w0 + 2, p0, x1 + x2 * 4),
+ svld1_ver_za32 (1, w0 + 2, p0, x1 + x2 * 4))
+
+/*
+** ld1_za32_0_w0_p4:
+** add (w1[2-5]), w0, #?4
+** ld1w { za0v\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_0_w0_p4,
+ svld1_ver_za32 (0, w0 + 4, p0, x1),
+ svld1_ver_za32 (0, w0 + 4, p0, x1))
+
+/*
+** ld1_za32_0_w0_m1:
+** sub (w1[2-5]), w0, #?1
+** ld1w { za0v\.s\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za32_0_w0_m1,
+ svld1_ver_za32 (0, w0 - 1, p0, x1),
+ svld1_ver_za32 (0, w0 - 1, p0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_za64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_za64.c
new file mode 100644
index 0000000..58d73ad
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_za64.c
@@ -0,0 +1,105 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** ld1_za64_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** ld1d { za0v\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za64_0_0,
+ svld1_ver_za64 (0, 0, p0, x1),
+ svld1_ver_za64 (0, 0, p0, x1))
+
+/* It would also be OK (and perhaps better) to move 0 into a register
+ and use an offset of 1. */
+/*
+** ld1_za64_0_1:
+** mov (w1[2-5]), #?1
+** ld1d { za0v\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za64_0_1,
+ svld1_ver_za64 (0, 1, p0, x1),
+ svld1_ver_za64 (0, 1, p0, x1))
+
+/*
+** ld1_za64_0_2:
+** mov (w1[2-5]), #?2
+** ld1d { za0v\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za64_0_2,
+ svld1_ver_za64 (0, 2, p0, x1),
+ svld1_ver_za64 (0, 2, p0, x1))
+
+/*
+** ld1_za64_0_w0:
+** mov (w1[2-5]), w0
+** ld1d { za0v\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za64_0_w0,
+ svld1_ver_za64 (0, w0, p0, x1),
+ svld1_ver_za64 (0, w0, p0, x1))
+
+/*
+** ld1_za64_0_w0_p1:
+** mov (w1[2-5]), w0
+** ld1d { za0v\.d\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za64_0_w0_p1,
+ svld1_ver_za64 (0, w0 + 1, p0, x1),
+ svld1_ver_za64 (0, w0 + 1, p0, x1))
+
+/*
+** ld1_za64_7_w0:
+** mov (w1[2-5]), w0
+** ld1d { za7v\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za64_7_w0,
+ svld1_ver_za64 (7, w0, p0, x1),
+ svld1_ver_za64 (7, w0, p0, x1))
+
+/*
+** ld1_za64_7_w0_p1:
+** mov (w1[2-5]), w0
+** ld1d { za7v\.d\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za64_7_w0_p1,
+ svld1_ver_za64 (7, w0 + 1, p0, x1),
+ svld1_ver_za64 (7, w0 + 1, p0, x1))
+
+/*
+** ld1_za64_5_w0_p1_index:
+** mov (w1[2-5]), w0
+** ld1d { za5v\.d\[\1, 1\] }, p0/z, \[x1, x2, lsl #?3\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za64_5_w0_p1_index,
+ svld1_ver_za64 (5, w0 + 1, p0, x1 + x2 * 8),
+ svld1_ver_za64 (5, w0 + 1, p0, x1 + x2 * 8))
+
+/*
+** ld1_za64_0_w0_p2:
+** add (w1[2-5]), w0, #?2
+** ld1d { za0v\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za64_0_w0_p2,
+ svld1_ver_za64 (0, w0 + 2, p0, x1),
+ svld1_ver_za64 (0, w0 + 2, p0, x1))
+
+/*
+** ld1_za64_0_w0_m1:
+** sub (w1[2-5]), w0, #?1
+** ld1d { za0v\.d\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za64_0_w0_m1,
+ svld1_ver_za64 (0, w0 - 1, p0, x1),
+ svld1_ver_za64 (0, w0 - 1, p0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_za8.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_za8.c
new file mode 100644
index 0000000..38211b2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ld1_ver_za8.c
@@ -0,0 +1,95 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** ld1_za8_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** ld1b { za0v\.b\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za8_0_0,
+ svld1_ver_za8 (0, 0, p0, x1),
+ svld1_ver_za8 (0, 0, p0, x1))
+
+/* It would also be OK (and perhaps better) to move 0 into a register
+ and use an offset of 15. */
+/*
+** ld1_za8_0_15:
+** mov (w1[2-5]), #?15
+** ld1b { za0v\.b\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za8_0_15,
+ svld1_ver_za8 (0, 15, p0, x1),
+ svld1_ver_za8 (0, 15, p0, x1))
+
+/*
+** ld1_za8_0_16:
+** mov (w1[2-5]), #?16
+** ld1b { za0v\.b\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za8_0_16,
+ svld1_ver_za8 (0, 16, p0, x1),
+ svld1_ver_za8 (0, 16, p0, x1))
+
+/*
+** ld1_za8_0_w0:
+** mov (w1[2-5]), w0
+** ld1b { za0v\.b\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za8_0_w0,
+ svld1_ver_za8 (0, w0, p0, x1),
+ svld1_ver_za8 (0, w0, p0, x1))
+
+/*
+** ld1_za8_0_w0_p1:
+** mov (w1[2-5]), w0
+** ld1b { za0v\.b\[\1, 1\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za8_0_w0_p1,
+ svld1_ver_za8 (0, w0 + 1, p0, x1),
+ svld1_ver_za8 (0, w0 + 1, p0, x1))
+
+/*
+** ld1_za8_0_w0_p15:
+** mov (w1[2-5]), w0
+** ld1b { za0v\.b\[\1, 15\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za8_0_w0_p15,
+ svld1_ver_za8 (0, w0 + 15, p0, x1),
+ svld1_ver_za8 (0, w0 + 15, p0, x1))
+
+/*
+** ld1_za8_0_w0_p13_index:
+** mov (w1[2-5]), w0
+** ld1b { za0v\.b\[\1, 15\] }, p0/z, \[x1, x2\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za8_0_w0_p13_index,
+ svld1_ver_za8 (0, w0 + 15, p0, x1 + x2),
+ svld1_ver_za8 (0, w0 + 15, p0, x1 + x2))
+
+/*
+** ld1_za8_0_w0_p16:
+** add (w1[2-5]), w0, #?16
+** ld1b { za0v\.b\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za8_0_w0_p16,
+ svld1_ver_za8 (0, w0 + 16, p0, x1),
+ svld1_ver_za8 (0, w0 + 16, p0, x1))
+
+/*
+** ld1_za8_0_w0_m1:
+** sub (w1[2-5]), w0, #?1
+** ld1b { za0v\.b\[\1, 0\] }, p0/z, \[x1\]
+** ret
+*/
+TEST_LOAD_ZA (ld1_za8_0_w0_m1,
+ svld1_ver_za8 (0, w0 - 1, p0, x1),
+ svld1_ver_za8 (0, w0 - 1, p0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ldr_vnum_za_s.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ldr_vnum_za_s.c
new file mode 100644
index 0000000..90495d0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ldr_vnum_za_s.c
@@ -0,0 +1,147 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** ldr_vnum_za_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** ldr za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_0_0,
+ svldr_vnum_za (0, x1, 0),
+ svldr_vnum_za (0, x1, 0))
+
+/*
+** ldr_vnum_za_0_1:
+** mov (w1[2-5]), (?:wzr|#?0)
+** ldr za\[\1, 1\], \[x1(?:, #1, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_0_1,
+ svldr_vnum_za (0, x1, 1),
+ svldr_vnum_za (0, x1, 1))
+
+/*
+** ldr_vnum_za_1_0:
+** mov (w1[2-5]), #?1
+** ldr za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_1_0,
+ svldr_vnum_za (1, x1, 0),
+ svldr_vnum_za (1, x1, 0))
+
+/*
+** ldr_vnum_za_1_2:
+** mov (w1[2-5]), #?1
+** ldr za\[\1, 2\], \[x1(?:, #2, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_1_2,
+ svldr_vnum_za (1, x1, 2),
+ svldr_vnum_za (1, x1, 2))
+
+/*
+** ldr_vnum_za_w0_0:
+** mov (w1[2-5]), w0
+** ldr za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_w0_0,
+ svldr_vnum_za (w0, x1, 0),
+ svldr_vnum_za (w0, x1, 0))
+
+/*
+** ldr_vnum_za_w0_1:
+** mov (w1[2-5]), w0
+** ldr za\[\1, 1\], \[x1, #1, mul vl\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_w0_1,
+ svldr_vnum_za (w0, x1, 1),
+ svldr_vnum_za (w0, x1, 1))
+
+/*
+** ldr_vnum_za_w0_13:
+** mov (w1[2-5]), w0
+** ldr za\[\1, 13\], \[x1, #13, mul vl\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_w0_13,
+ svldr_vnum_za (w0, x1, 13),
+ svldr_vnum_za (w0, x1, 13))
+
+/*
+** ldr_vnum_za_w0_15:
+** mov (w1[2-5]), w0
+** ldr za\[\1, 15\], \[x1, #15, mul vl\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_w0_15,
+ svldr_vnum_za (w0, x1, 15),
+ svldr_vnum_za (w0, x1, 15))
+
+/*
+** ldr_vnum_za_w0_16:
+** (
+** add (w1[2-5]), w0, #?16
+** incb x1, all, mul #16
+** ldr za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** |
+** incb x1, all, mul #16
+** add (w1[2-5]), w0, #?16
+** ldr za\[\2, 0\], \[x1(?:, #0, mul vl)?\]
+** )
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_w0_16,
+ svldr_vnum_za (w0, x1, 16),
+ svldr_vnum_za (w0, x1, 16))
+
+/*
+** ldr_vnum_za_w0_m1:
+** (
+** sub (w1[2-5]), w0, #?1
+** decb x1
+** ldr za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** |
+** decb x1
+** sub (w1[2-5]), w0, #?1
+** ldr za\[\2, 0\], \[x1(?:, #0, mul vl)?\]
+** )
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_w0_m1,
+ svldr_vnum_za (w0, x1, -1),
+ svldr_vnum_za (w0, x1, -1))
+
+/*
+** ldr_vnum_za_w0p1_0:
+** add (w1[2-5]), w0, #?1
+** ldr za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_w0p1_0,
+ svldr_vnum_za (w0 + 1, x1, 0),
+ svldr_vnum_za (w0 + 1, x1, 0))
+
+/*
+** ldr_vnum_za_w0m1_1:
+** sub (w1[2-5]), w0, #?1
+** ldr za\[\1, 1\], \[x1(?:, #1, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_w0m1_1,
+ svldr_vnum_za (w0 - 1, x1, 1),
+ svldr_vnum_za (w0 - 1, x1, 1))
+
+/*
+** ldr_vnum_za_w0p2_3:
+** add (w1[2-5]), w0, #?2
+** ldr za\[\1, 3\], \[x1(?:, #3, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_w0p2_3,
+ svldr_vnum_za (w0 + 2, x1, 3),
+ svldr_vnum_za (w0 + 2, x1, 3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ldr_vnum_za_sc.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ldr_vnum_za_sc.c
new file mode 100644
index 0000000..dfc2d13
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ldr_vnum_za_sc.c
@@ -0,0 +1,148 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#define STREAMING_COMPATIBLE
+#include "test_sme_acle.h"
+
+/*
+** ldr_vnum_za_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** ldr za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_0_0,
+ svldr_vnum_za (0, x1, 0),
+ svldr_vnum_za (0, x1, 0))
+
+/*
+** ldr_vnum_za_0_1:
+** mov (w1[2-5]), (?:wzr|#?0)
+** ldr za\[\1, 1\], \[x1(?:, #1, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_0_1,
+ svldr_vnum_za (0, x1, 1),
+ svldr_vnum_za (0, x1, 1))
+
+/*
+** ldr_vnum_za_1_0:
+** mov (w1[2-5]), #?1
+** ldr za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_1_0,
+ svldr_vnum_za (1, x1, 0),
+ svldr_vnum_za (1, x1, 0))
+
+/*
+** ldr_vnum_za_1_2:
+** mov (w1[2-5]), #?1
+** ldr za\[\1, 2\], \[x1(?:, #2, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_1_2,
+ svldr_vnum_za (1, x1, 2),
+ svldr_vnum_za (1, x1, 2))
+
+/*
+** ldr_vnum_za_w0_0:
+** mov (w1[2-5]), w0
+** ldr za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_w0_0,
+ svldr_vnum_za (w0, x1, 0),
+ svldr_vnum_za (w0, x1, 0))
+
+/*
+** ldr_vnum_za_w0_1:
+** mov (w1[2-5]), w0
+** ldr za\[\1, 1\], \[x1, #1, mul vl\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_w0_1,
+ svldr_vnum_za (w0, x1, 1),
+ svldr_vnum_za (w0, x1, 1))
+
+/*
+** ldr_vnum_za_w0_13:
+** mov (w1[2-5]), w0
+** ldr za\[\1, 13\], \[x1, #13, mul vl\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_w0_13,
+ svldr_vnum_za (w0, x1, 13),
+ svldr_vnum_za (w0, x1, 13))
+
+/*
+** ldr_vnum_za_w0_15:
+** mov (w1[2-5]), w0
+** ldr za\[\1, 15\], \[x1, #15, mul vl\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_w0_15,
+ svldr_vnum_za (w0, x1, 15),
+ svldr_vnum_za (w0, x1, 15))
+
+/*
+** ldr_vnum_za_w0_16:
+** (
+** add (w1[2-5]), w0, #?16
+** addsvl (x[0-9]+), x1, #16
+** ldr za\[\1, 0\], \[\2(?:, #0, mul vl)?\]
+** |
+** addsvl (x[0-9]+), x1, #16
+** add (w1[2-5]), w0, #?16
+** ldr za\[\4, 0\], \[\3(?:, #0, mul vl)?\]
+** )
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_w0_16,
+ svldr_vnum_za (w0, x1, 16),
+ svldr_vnum_za (w0, x1, 16))
+
+/*
+** ldr_vnum_za_w0_m1:
+** (
+** sub (w1[2-5]), w0, #?1
+** addsvl (x[0-9]+), x1, #-1
+** ldr za\[\1, 0\], \[\2(?:, #0, mul vl)?\]
+** |
+** addsvl (x[0-9]+), x1, #-1
+** sub (w1[2-5]), w0, #?1
+** ldr za\[\4, 0\], \[\3(?:, #0, mul vl)?\]
+** )
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_w0_m1,
+ svldr_vnum_za (w0, x1, -1),
+ svldr_vnum_za (w0, x1, -1))
+
+/*
+** ldr_vnum_za_w0p1_0:
+** add (w1[2-5]), w0, #?1
+** ldr za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_w0p1_0,
+ svldr_vnum_za (w0 + 1, x1, 0),
+ svldr_vnum_za (w0 + 1, x1, 0))
+
+/*
+** ldr_vnum_za_w0m1_1:
+** sub (w1[2-5]), w0, #?1
+** ldr za\[\1, 1\], \[x1(?:, #1, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_w0m1_1,
+ svldr_vnum_za (w0 - 1, x1, 1),
+ svldr_vnum_za (w0 - 1, x1, 1))
+
+/*
+** ldr_vnum_za_w0p2_3:
+** add (w1[2-5]), w0, #?2
+** ldr za\[\1, 3\], \[x1(?:, #3, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_vnum_za_w0p2_3,
+ svldr_vnum_za (w0 + 2, x1, 3),
+ svldr_vnum_za (w0 + 2, x1, 3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ldr_za_s.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ldr_za_s.c
new file mode 100644
index 0000000..313b323
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ldr_za_s.c
@@ -0,0 +1,124 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** ldr_za_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** ldr za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_za_0,
+ svldr_za (0, x1),
+ svldr_za (0, x1))
+
+/*
+** ldr_za_1:
+** mov (w1[2-5]), #?1
+** ldr za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_za_1,
+ svldr_za (1, x1),
+ svldr_za (1, x1))
+
+/*
+** ldr_za_w0:
+** mov (w1[2-5]), w0
+** ldr za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_za_w0,
+ svldr_za (w0, x1),
+ svldr_za (w0, x1))
+
+/*
+** ldr_za_w0_1_vnum:
+** mov (w1[2-5]), w0
+** ldr za\[\1, 1\], \[x1, #1, mul vl\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_za_w0_1_vnum,
+ svldr_za (w0 + 1, x1 + svcntsb ()),
+ svldr_za (w0 + 1, x1 + svcntsb ()))
+
+/*
+** ldr_za_w0_13_vnum:
+** mov (w1[2-5]), w0
+** ldr za\[\1, 13\], \[x1, #13, mul vl\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_za_w0_13_vnum,
+ svldr_za (w0 + 13, x1 + svcntsb () * 13),
+ svldr_za (w0 + 13, x1 + svcntsb () * 13))
+
+/*
+** ldr_za_w0_15_vnum:
+** mov (w1[2-5]), w0
+** ldr za\[\1, 15\], \[x1, #15, mul vl\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_za_w0_15_vnum,
+ svldr_za (w0 + 15, x1 + svcntsb () * 15),
+ svldr_za (w0 + 15, x1 + svcntsb () * 15))
+
+/*
+** ldr_za_w0_16_vnum:
+** (
+** add (w1[2-5]), w0, #?16
+** incb x1, all, mul #16
+** ldr za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** |
+** incb x1, all, mul #16
+** add (w1[2-5]), w0, #?16
+** ldr za\[\2, 0\], \[x1(?:, #0, mul vl)?\]
+** )
+** ret
+*/
+TEST_LOAD_ZA (ldr_za_w0_16_vnum,
+ svldr_za (w0 + 16, x1 + svcntsb () * 16),
+ svldr_za (w0 + 16, x1 + svcntsb () * 16))
+
+/*
+** ldr_za_w0_m1_vnum:
+** (
+** sub (w1[2-5]), w0, #?1
+** decb x1
+** ldr za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** |
+** decb x1
+** sub (w1[2-5]), w0, #?1
+** ldr za\[\2, 0\], \[x1(?:, #0, mul vl)?\]
+** )
+** ret
+*/
+TEST_LOAD_ZA (ldr_za_w0_m1_vnum,
+ svldr_za (w0 - 1, x1 - svcntsb ()),
+ svldr_za (w0 - 1, x1 - svcntsb ()))
+
+/*
+** ldr_za_w0p2:
+** add (w1[2-5]), w0, #?2
+** ldr za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_za_w0p2,
+ svldr_za (w0 + 2, x1),
+ svldr_za (w0 + 2, x1))
+
+/*
+** ldr_za_offset:
+** (
+** mov (w1[2-5]), w0
+** add (x[0-9]+), x1, #?1
+** ldr za\[\1, 0\], \[\2(?:, #0, mul vl)?\]
+** |
+** add (x[0-9]+), x1, #?1
+** mov (w1[2-5]), w0
+** ldr za\[\4, 0\], \[\3(?:, #0, mul vl)?\]
+** )
+** ret
+*/
+TEST_LOAD_ZA (ldr_za_offset,
+ svldr_za (w0, x1 + 1),
+ svldr_za (w0, x1 + 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ldr_za_sc.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ldr_za_sc.c
new file mode 100644
index 0000000..a27be7671
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/ldr_za_sc.c
@@ -0,0 +1,71 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#define STREAMING_COMPATIBLE
+#include "test_sme_acle.h"
+
+/*
+** ldr_za_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** ldr za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_za_0,
+ svldr_za (0, x1),
+ svldr_za (0, x1))
+
+/*
+** ldr_za_1:
+** mov (w1[2-5]), #?1
+** ldr za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_za_1,
+ svldr_za (1, x1),
+ svldr_za (1, x1))
+
+/*
+** ldr_za_w0:
+** mov (w1[2-5]), w0
+** ldr za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_za_w0,
+ svldr_za (w0, x1),
+ svldr_za (w0, x1))
+
+/*
+** ldr_za_w0_1_vnum:
+** mov (w1[2-5]), w0
+** ldr za\[\1, 1\], \[x1, #1, mul vl\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_za_w0_1_vnum,
+ svldr_za (w0 + 1, x1 + svcntsb ()),
+ svldr_za (w0 + 1, x1 + svcntsb ()))
+
+/*
+** ldr_za_w0p2:
+** add (w1[2-5]), w0, #?2
+** ldr za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_LOAD_ZA (ldr_za_w0p2,
+ svldr_za (w0 + 2, x1),
+ svldr_za (w0 + 2, x1))
+
+/*
+** ldr_za_offset:
+** (
+** mov (w1[2-5]), w0
+** add (x[0-9]+), x1, #?1
+** ldr za\[\1, 0\], \[\2(?:, #0, mul vl)?\]
+** |
+** add (x[0-9]+), x1, #?1
+** mov (w1[2-5]), w0
+** ldr za\[\4, 0\], \[\3(?:, #0, mul vl)?\]
+** )
+** ret
+*/
+TEST_LOAD_ZA (ldr_za_offset,
+ svldr_za (w0, x1 + 1),
+ svldr_za (w0, x1 + 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/mopa_za32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/mopa_za32.c
new file mode 100644
index 0000000..480de2c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/mopa_za32.c
@@ -0,0 +1,102 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** mopa_za32_s8_0_p0_p1_z0_z1:
+** smopa za0\.s, p0/m, p1/m, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_ZA (mopa_za32_s8_0_p0_p1_z0_z1, svint8_t,
+ svmopa_za32_s8_m (0, p0, p1, z0, z1),
+ svmopa_za32_m (0, p0, p1, z0, z1))
+
+/*
+** mopa_za32_s8_0_p1_p0_z1_z0:
+** smopa za0\.s, p1/m, p0/m, z1\.b, z0\.b
+** ret
+*/
+TEST_UNIFORM_ZA (mopa_za32_s8_0_p1_p0_z1_z0, svint8_t,
+ svmopa_za32_s8_m (0, p1, p0, z1, z0),
+ svmopa_za32_m (0, p1, p0, z1, z0))
+
+/*
+** mopa_za32_s8_3_p0_p1_z0_z1:
+** smopa za3\.s, p0/m, p1/m, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_ZA (mopa_za32_s8_3_p0_p1_z0_z1, svint8_t,
+ svmopa_za32_s8_m (3, p0, p1, z0, z1),
+ svmopa_za32_m (3, p0, p1, z0, z1))
+
+/*
+** mopa_za32_u8_0_p0_p1_z0_z1:
+** umopa za0\.s, p0/m, p1/m, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_ZA (mopa_za32_u8_0_p0_p1_z0_z1, svuint8_t,
+ svmopa_za32_u8_m (0, p0, p1, z0, z1),
+ svmopa_za32_m (0, p0, p1, z0, z1))
+
+/*
+** mopa_za32_u8_3_p0_p1_z0_z1:
+** umopa za3\.s, p0/m, p1/m, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_ZA (mopa_za32_u8_3_p0_p1_z0_z1, svuint8_t,
+ svmopa_za32_u8_m (3, p0, p1, z0, z1),
+ svmopa_za32_m (3, p0, p1, z0, z1))
+
+/*
+** mopa_za32_bf16_0_p0_p1_z0_z1:
+** bfmopa za0\.s, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mopa_za32_bf16_0_p0_p1_z0_z1, svbfloat16_t,
+ svmopa_za32_bf16_m (0, p0, p1, z0, z1),
+ svmopa_za32_m (0, p0, p1, z0, z1))
+
+/*
+** mopa_za32_bf16_3_p0_p1_z0_z1:
+** bfmopa za3\.s, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mopa_za32_bf16_3_p0_p1_z0_z1, svbfloat16_t,
+ svmopa_za32_bf16_m (3, p0, p1, z0, z1),
+ svmopa_za32_m (3, p0, p1, z0, z1))
+
+/*
+** mopa_za32_f16_0_p0_p1_z0_z1:
+** fmopa za0\.s, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mopa_za32_f16_0_p0_p1_z0_z1, svfloat16_t,
+ svmopa_za32_f16_m (0, p0, p1, z0, z1),
+ svmopa_za32_m (0, p0, p1, z0, z1))
+
+/*
+** mopa_za32_f16_3_p0_p1_z0_z1:
+** fmopa za3\.s, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mopa_za32_f16_3_p0_p1_z0_z1, svfloat16_t,
+ svmopa_za32_f16_m (3, p0, p1, z0, z1),
+ svmopa_za32_m (3, p0, p1, z0, z1))
+
+/*
+** mopa_za32_f32_0_p0_p1_z0_z1:
+** fmopa za0\.s, p0/m, p1/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_ZA (mopa_za32_f32_0_p0_p1_z0_z1, svfloat32_t,
+ svmopa_za32_f32_m (0, p0, p1, z0, z1),
+ svmopa_za32_m (0, p0, p1, z0, z1))
+
+/*
+** mopa_za32_f32_3_p0_p1_z0_z1:
+** fmopa za3\.s, p0/m, p1/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_ZA (mopa_za32_f32_3_p0_p1_z0_z1, svfloat32_t,
+ svmopa_za32_f32_m (3, p0, p1, z0, z1),
+ svmopa_za32_m (3, p0, p1, z0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/mopa_za64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/mopa_za64.c
new file mode 100644
index 0000000..f523b96
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/mopa_za64.c
@@ -0,0 +1,70 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+#pragma GCC target "+sme-i16i64"
+
+/*
+** mopa_za64_s16_0_p0_p1_z0_z1:
+** smopa za0\.d, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mopa_za64_s16_0_p0_p1_z0_z1, svint16_t,
+ svmopa_za64_s16_m (0, p0, p1, z0, z1),
+ svmopa_za64_m (0, p0, p1, z0, z1))
+
+/*
+** mopa_za64_s16_0_p1_p0_z1_z0:
+** smopa za0\.d, p1/m, p0/m, z1\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mopa_za64_s16_0_p1_p0_z1_z0, svint16_t,
+ svmopa_za64_s16_m (0, p1, p0, z1, z0),
+ svmopa_za64_m (0, p1, p0, z1, z0))
+
+/*
+** mopa_za64_s16_7_p0_p1_z0_z1:
+** smopa za7\.d, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mopa_za64_s16_7_p0_p1_z0_z1, svint16_t,
+ svmopa_za64_s16_m (7, p0, p1, z0, z1),
+ svmopa_za64_m (7, p0, p1, z0, z1))
+
+/*
+** mopa_za64_u16_0_p0_p1_z0_z1:
+** umopa za0\.d, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mopa_za64_u16_0_p0_p1_z0_z1, svuint16_t,
+ svmopa_za64_u16_m (0, p0, p1, z0, z1),
+ svmopa_za64_m (0, p0, p1, z0, z1))
+
+/*
+** mopa_za64_u16_7_p0_p1_z0_z1:
+** umopa za7\.d, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mopa_za64_u16_7_p0_p1_z0_z1, svuint16_t,
+ svmopa_za64_u16_m (7, p0, p1, z0, z1),
+ svmopa_za64_m (7, p0, p1, z0, z1))
+
+#pragma GCC target "+nosme-i16i64+sme-f64f64"
+
+/*
+** mopa_za64_f64_0_p0_p1_z0_z1:
+** fmopa za0\.d, p0/m, p1/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_ZA (mopa_za64_f64_0_p0_p1_z0_z1, svfloat64_t,
+ svmopa_za64_f64_m (0, p0, p1, z0, z1),
+ svmopa_za64_m (0, p0, p1, z0, z1))
+
+/*
+** mopa_za64_f64_7_p0_p1_z0_z1:
+** fmopa za7\.d, p0/m, p1/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_ZA (mopa_za64_f64_7_p0_p1_z0_z1, svfloat64_t,
+ svmopa_za64_f64_m (7, p0, p1, z0, z1),
+ svmopa_za64_m (7, p0, p1, z0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/mops_za32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/mops_za32.c
new file mode 100644
index 0000000..63c2b80
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/mops_za32.c
@@ -0,0 +1,102 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** mops_za32_s8_0_p0_p1_z0_z1:
+** smops za0\.s, p0/m, p1/m, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_ZA (mops_za32_s8_0_p0_p1_z0_z1, svint8_t,
+ svmops_za32_s8_m (0, p0, p1, z0, z1),
+ svmops_za32_m (0, p0, p1, z0, z1))
+
+/*
+** mops_za32_s8_0_p1_p0_z1_z0:
+** smops za0\.s, p1/m, p0/m, z1\.b, z0\.b
+** ret
+*/
+TEST_UNIFORM_ZA (mops_za32_s8_0_p1_p0_z1_z0, svint8_t,
+ svmops_za32_s8_m (0, p1, p0, z1, z0),
+ svmops_za32_m (0, p1, p0, z1, z0))
+
+/*
+** mops_za32_s8_3_p0_p1_z0_z1:
+** smops za3\.s, p0/m, p1/m, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_ZA (mops_za32_s8_3_p0_p1_z0_z1, svint8_t,
+ svmops_za32_s8_m (3, p0, p1, z0, z1),
+ svmops_za32_m (3, p0, p1, z0, z1))
+
+/*
+** mops_za32_u8_0_p0_p1_z0_z1:
+** umops za0\.s, p0/m, p1/m, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_ZA (mops_za32_u8_0_p0_p1_z0_z1, svuint8_t,
+ svmops_za32_u8_m (0, p0, p1, z0, z1),
+ svmops_za32_m (0, p0, p1, z0, z1))
+
+/*
+** mops_za32_u8_3_p0_p1_z0_z1:
+** umops za3\.s, p0/m, p1/m, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_ZA (mops_za32_u8_3_p0_p1_z0_z1, svuint8_t,
+ svmops_za32_u8_m (3, p0, p1, z0, z1),
+ svmops_za32_m (3, p0, p1, z0, z1))
+
+/*
+** mops_za32_bf16_0_p0_p1_z0_z1:
+** bfmops za0\.s, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mops_za32_bf16_0_p0_p1_z0_z1, svbfloat16_t,
+ svmops_za32_bf16_m (0, p0, p1, z0, z1),
+ svmops_za32_m (0, p0, p1, z0, z1))
+
+/*
+** mops_za32_bf16_3_p0_p1_z0_z1:
+** bfmops za3\.s, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mops_za32_bf16_3_p0_p1_z0_z1, svbfloat16_t,
+ svmops_za32_bf16_m (3, p0, p1, z0, z1),
+ svmops_za32_m (3, p0, p1, z0, z1))
+
+/*
+** mops_za32_f16_0_p0_p1_z0_z1:
+** fmops za0\.s, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mops_za32_f16_0_p0_p1_z0_z1, svfloat16_t,
+ svmops_za32_f16_m (0, p0, p1, z0, z1),
+ svmops_za32_m (0, p0, p1, z0, z1))
+
+/*
+** mops_za32_f16_3_p0_p1_z0_z1:
+** fmops za3\.s, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mops_za32_f16_3_p0_p1_z0_z1, svfloat16_t,
+ svmops_za32_f16_m (3, p0, p1, z0, z1),
+ svmops_za32_m (3, p0, p1, z0, z1))
+
+/*
+** mops_za32_f32_0_p0_p1_z0_z1:
+** fmops za0\.s, p0/m, p1/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_ZA (mops_za32_f32_0_p0_p1_z0_z1, svfloat32_t,
+ svmops_za32_f32_m (0, p0, p1, z0, z1),
+ svmops_za32_m (0, p0, p1, z0, z1))
+
+/*
+** mops_za32_f32_3_p0_p1_z0_z1:
+** fmops za3\.s, p0/m, p1/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_ZA (mops_za32_f32_3_p0_p1_z0_z1, svfloat32_t,
+ svmops_za32_f32_m (3, p0, p1, z0, z1),
+ svmops_za32_m (3, p0, p1, z0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/mops_za64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/mops_za64.c
new file mode 100644
index 0000000..bc04c3c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/mops_za64.c
@@ -0,0 +1,70 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+#pragma GCC target "+sme-i16i64"
+
+/*
+** mops_za64_s16_0_p0_p1_z0_z1:
+** smops za0\.d, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mops_za64_s16_0_p0_p1_z0_z1, svint16_t,
+ svmops_za64_s16_m (0, p0, p1, z0, z1),
+ svmops_za64_m (0, p0, p1, z0, z1))
+
+/*
+** mops_za64_s16_0_p1_p0_z1_z0:
+** smops za0\.d, p1/m, p0/m, z1\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mops_za64_s16_0_p1_p0_z1_z0, svint16_t,
+ svmops_za64_s16_m (0, p1, p0, z1, z0),
+ svmops_za64_m (0, p1, p0, z1, z0))
+
+/*
+** mops_za64_s16_7_p0_p1_z0_z1:
+** smops za7\.d, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mops_za64_s16_7_p0_p1_z0_z1, svint16_t,
+ svmops_za64_s16_m (7, p0, p1, z0, z1),
+ svmops_za64_m (7, p0, p1, z0, z1))
+
+/*
+** mops_za64_u16_0_p0_p1_z0_z1:
+** umops za0\.d, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mops_za64_u16_0_p0_p1_z0_z1, svuint16_t,
+ svmops_za64_u16_m (0, p0, p1, z0, z1),
+ svmops_za64_m (0, p0, p1, z0, z1))
+
+/*
+** mops_za64_u16_7_p0_p1_z0_z1:
+** umops za7\.d, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mops_za64_u16_7_p0_p1_z0_z1, svuint16_t,
+ svmops_za64_u16_m (7, p0, p1, z0, z1),
+ svmops_za64_m (7, p0, p1, z0, z1))
+
+#pragma GCC target "+nosme-i16i64+sme-f64f64"
+
+/*
+** mops_za64_f64_0_p0_p1_z0_z1:
+** fmops za0\.d, p0/m, p1/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_ZA (mops_za64_f64_0_p0_p1_z0_z1, svfloat64_t,
+ svmops_za64_f64_m (0, p0, p1, z0, z1),
+ svmops_za64_m (0, p0, p1, z0, z1))
+
+/*
+** mops_za64_f64_7_p0_p1_z0_z1:
+** fmops za7\.d, p0/m, p1/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_ZA (mops_za64_f64_7_p0_p1_z0_z1, svfloat64_t,
+ svmops_za64_f64_m (7, p0, p1, z0, z1),
+ svmops_za64_m (7, p0, p1, z0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_hor_za128.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_hor_za128.c
new file mode 100644
index 0000000..c8eef3b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_hor_za128.c
@@ -0,0 +1,435 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** read_za128_s8_0_0_tied:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_s8_0_0_tied, svint8_t,
+ z0 = svread_hor_za128_s8_m (z0, p0, 0, 0),
+ z0 = svread_hor_za128_m (z0, p0, 0, 0))
+
+/*
+** read_za128_s8_0_1_tied:
+** mov (w1[2-5]), #?1
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_s8_0_1_tied, svint8_t,
+ z0 = svread_hor_za128_s8_m (z0, p0, 0, 1),
+ z0 = svread_hor_za128_m (z0, p0, 0, 1))
+
+/*
+** read_za128_s8_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_s8_0_w0_tied, svint8_t,
+ z0 = svread_hor_za128_s8_m (z0, p0, 0, w0),
+ z0 = svread_hor_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_s8_0_w0p1_tied:
+** add (w1[2-5]), w0, #?1
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_s8_0_w0p1_tied, svint8_t,
+ z0 = svread_hor_za128_s8_m (z0, p0, 0, w0 + 1),
+ z0 = svread_hor_za128_m (z0, p0, 0, w0 + 1))
+
+/*
+** read_za128_s8_0_w0m1_tied:
+** sub (w1[2-5]), w0, #?1
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_s8_0_w0m1_tied, svint8_t,
+ z0 = svread_hor_za128_s8_m (z0, p0, 0, w0 - 1),
+ z0 = svread_hor_za128_m (z0, p0, 0, w0 - 1))
+
+/*
+** read_za128_s8_1_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za1h\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_s8_1_w0_tied, svint8_t,
+ z0 = svread_hor_za128_s8_m (z0, p0, 1, w0),
+ z0 = svread_hor_za128_m (z0, p0, 1, w0))
+
+/*
+** read_za128_s8_15_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za15h\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_s8_15_w0_tied, svint8_t,
+ z0 = svread_hor_za128_s8_m (z0, p0, 15, w0),
+ z0 = svread_hor_za128_m (z0, p0, 15, w0))
+
+/*
+** read_za128_s8_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0h\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_s8_0_w0_untied, svint8_t,
+ z0 = svread_hor_za128_s8_m (z1, p0, 0, w0),
+ z0 = svread_hor_za128_m (z1, p0, 0, w0))
+
+/*
+** read_za128_u8_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_u8_0_w0_tied, svuint8_t,
+ z0 = svread_hor_za128_u8_m (z0, p0, 0, w0),
+ z0 = svread_hor_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_u8_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0h\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_u8_0_w0_untied, svuint8_t,
+ z0 = svread_hor_za128_u8_m (z1, p0, 0, w0),
+ z0 = svread_hor_za128_m (z1, p0, 0, w0))
+
+/*
+** read_za128_s16_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_s16_0_w0_tied, svint16_t,
+ z0 = svread_hor_za128_s16_m (z0, p0, 0, w0),
+ z0 = svread_hor_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_s16_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0h\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_s16_0_w0_untied, svint16_t,
+ z0 = svread_hor_za128_s16_m (z1, p0, 0, w0),
+ z0 = svread_hor_za128_m (z1, p0, 0, w0))
+
+/*
+** read_za128_u16_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_u16_0_w0_tied, svuint16_t,
+ z0 = svread_hor_za128_u16_m (z0, p0, 0, w0),
+ z0 = svread_hor_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_u16_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0h\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_u16_0_w0_untied, svuint16_t,
+ z0 = svread_hor_za128_u16_m (z1, p0, 0, w0),
+ z0 = svread_hor_za128_m (z1, p0, 0, w0))
+
+/*
+** read_za128_f16_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_f16_0_w0_tied, svfloat16_t,
+ z0 = svread_hor_za128_f16_m (z0, p0, 0, w0),
+ z0 = svread_hor_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_f16_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0h\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_f16_0_w0_untied, svfloat16_t,
+ z0 = svread_hor_za128_f16_m (z1, p0, 0, w0),
+ z0 = svread_hor_za128_m (z1, p0, 0, w0))
+
+/*
+** read_za128_bf16_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_bf16_0_w0_tied, svbfloat16_t,
+ z0 = svread_hor_za128_bf16_m (z0, p0, 0, w0),
+ z0 = svread_hor_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_bf16_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0h\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_bf16_0_w0_untied, svbfloat16_t,
+ z0 = svread_hor_za128_bf16_m (z1, p0, 0, w0),
+ z0 = svread_hor_za128_m (z1, p0, 0, w0))
+
+/*
+** read_za128_s32_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_s32_0_w0_tied, svint32_t,
+ z0 = svread_hor_za128_s32_m (z0, p0, 0, w0),
+ z0 = svread_hor_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_s32_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0h\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_s32_0_w0_untied, svint32_t,
+ z0 = svread_hor_za128_s32_m (z1, p0, 0, w0),
+ z0 = svread_hor_za128_m (z1, p0, 0, w0))
+
+/*
+** read_za128_u32_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_u32_0_w0_tied, svuint32_t,
+ z0 = svread_hor_za128_u32_m (z0, p0, 0, w0),
+ z0 = svread_hor_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_u32_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0h\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_u32_0_w0_untied, svuint32_t,
+ z0 = svread_hor_za128_u32_m (z1, p0, 0, w0),
+ z0 = svread_hor_za128_m (z1, p0, 0, w0))
+
+/*
+** read_za128_f32_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_f32_0_w0_tied, svfloat32_t,
+ z0 = svread_hor_za128_f32_m (z0, p0, 0, w0),
+ z0 = svread_hor_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_f32_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0h\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_f32_0_w0_untied, svfloat32_t,
+ z0 = svread_hor_za128_f32_m (z1, p0, 0, w0),
+ z0 = svread_hor_za128_m (z1, p0, 0, w0))
+
+/*
+** read_za128_s64_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_s64_0_w0_tied, svint64_t,
+ z0 = svread_hor_za128_s64_m (z0, p0, 0, w0),
+ z0 = svread_hor_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_s64_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0h\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_s64_0_w0_untied, svint64_t,
+ z0 = svread_hor_za128_s64_m (z1, p0, 0, w0),
+ z0 = svread_hor_za128_m (z1, p0, 0, w0))
+
+/*
+** read_za128_u64_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_u64_0_w0_tied, svuint64_t,
+ z0 = svread_hor_za128_u64_m (z0, p0, 0, w0),
+ z0 = svread_hor_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_u64_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0h\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_u64_0_w0_untied, svuint64_t,
+ z0 = svread_hor_za128_u64_m (z1, p0, 0, w0),
+ z0 = svread_hor_za128_m (z1, p0, 0, w0))
+
+/*
+** read_za128_f64_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_f64_0_w0_tied, svfloat64_t,
+ z0 = svread_hor_za128_f64_m (z0, p0, 0, w0),
+ z0 = svread_hor_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_f64_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0h\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0h\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0h\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_f64_0_w0_untied, svfloat64_t,
+ z0 = svread_hor_za128_f64_m (z1, p0, 0, w0),
+ z0 = svread_hor_za128_m (z1, p0, 0, w0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_hor_za16.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_hor_za16.c
new file mode 100644
index 0000000..2e0a965
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_hor_za16.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** read_za16_s16_0_0_tied:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova z0\.h, p0/m, za0h\.h\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za16_s16_0_0_tied, svint16_t,
+ z0 = svread_hor_za16_s16_m (z0, p0, 0, 0),
+ z0 = svread_hor_za16_m (z0, p0, 0, 0))
+
+/*
+** read_za16_s16_0_1_tied:
+** mov (w1[2-5]), #?1
+** mova z0\.h, p0/m, za0h\.h\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za16_s16_0_1_tied, svint16_t,
+ z0 = svread_hor_za16_s16_m (z0, p0, 0, 1),
+ z0 = svread_hor_za16_m (z0, p0, 0, 1))
+
+/*
+** read_za16_s16_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za0h\.h\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za16_s16_0_w0_tied, svint16_t,
+ z0 = svread_hor_za16_s16_m (z0, p0, 0, w0),
+ z0 = svread_hor_za16_m (z0, p0, 0, w0))
+
+/*
+** read_za16_s16_0_w0p1_tied:
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za0h\.h\[\1, 1\]
+** ret
+*/
+TEST_READ_ZA (read_za16_s16_0_w0p1_tied, svint16_t,
+ z0 = svread_hor_za16_s16_m (z0, p0, 0, w0 + 1),
+ z0 = svread_hor_za16_m (z0, p0, 0, w0 + 1))
+
+/*
+** read_za16_s16_0_w0p7_tied:
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za0h\.h\[\1, 7\]
+** ret
+*/
+TEST_READ_ZA (read_za16_s16_0_w0p7_tied, svint16_t,
+ z0 = svread_hor_za16_s16_m (z0, p0, 0, w0 + 7),
+ z0 = svread_hor_za16_m (z0, p0, 0, w0 + 7))
+
+/*
+** read_za16_s16_0_w0p8_tied:
+** add (w1[2-5]), w0, #?8
+** mova z0\.h, p0/m, za0h\.h\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za16_s16_0_w0p8_tied, svint16_t,
+ z0 = svread_hor_za16_s16_m (z0, p0, 0, w0 + 8),
+ z0 = svread_hor_za16_m (z0, p0, 0, w0 + 8))
+
+/*
+** read_za16_s16_0_w0m1_tied:
+** sub (w1[2-5]), w0, #?1
+** mova z0\.h, p0/m, za0h\.h\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za16_s16_0_w0m1_tied, svint16_t,
+ z0 = svread_hor_za16_s16_m (z0, p0, 0, w0 - 1),
+ z0 = svread_hor_za16_m (z0, p0, 0, w0 - 1))
+
+/*
+** read_za16_s16_1_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za1h\.h\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za16_s16_1_w0_tied, svint16_t,
+ z0 = svread_hor_za16_s16_m (z0, p0, 1, w0),
+ z0 = svread_hor_za16_m (z0, p0, 1, w0))
+
+/*
+** read_za16_s16_1_w0p7_tied:
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za1h\.h\[\1, 7\]
+** ret
+*/
+TEST_READ_ZA (read_za16_s16_1_w0p7_tied, svint16_t,
+ z0 = svread_hor_za16_s16_m (z0, p0, 1, w0 + 7),
+ z0 = svread_hor_za16_m (z0, p0, 1, w0 + 7))
+
+/*
+** read_za16_s16_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.h, p0/m, za0h\.h\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za0h\.h\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.h, p0/m, za0h\.h\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za16_s16_0_w0_untied, svint16_t,
+ z0 = svread_hor_za16_s16_m (z1, p0, 0, w0),
+ z0 = svread_hor_za16_m (z1, p0, 0, w0))
+
+/*
+** read_za16_u16_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za0h\.h\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za16_u16_0_w0_tied, svuint16_t,
+ z0 = svread_hor_za16_u16_m (z0, p0, 0, w0),
+ z0 = svread_hor_za16_m (z0, p0, 0, w0))
+
+/*
+** read_za16_u16_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.h, p0/m, za0h\.h\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za0h\.h\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.h, p0/m, za0h\.h\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za16_u16_0_w0_untied, svuint16_t,
+ z0 = svread_hor_za16_u16_m (z1, p0, 0, w0),
+ z0 = svread_hor_za16_m (z1, p0, 0, w0))
+
+/*
+** read_za16_f16_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za0h\.h\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za16_f16_0_w0_tied, svfloat16_t,
+ z0 = svread_hor_za16_f16_m (z0, p0, 0, w0),
+ z0 = svread_hor_za16_m (z0, p0, 0, w0))
+
+/*
+** read_za16_f16_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.h, p0/m, za0h\.h\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za0h\.h\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.h, p0/m, za0h\.h\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za16_f16_0_w0_untied, svfloat16_t,
+ z0 = svread_hor_za16_f16_m (z1, p0, 0, w0),
+ z0 = svread_hor_za16_m (z1, p0, 0, w0))
+
+/*
+** read_za16_bf16_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za0h\.h\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za16_bf16_0_w0_tied, svbfloat16_t,
+ z0 = svread_hor_za16_bf16_m (z0, p0, 0, w0),
+ z0 = svread_hor_za16_m (z0, p0, 0, w0))
+
+/*
+** read_za16_bf16_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.h, p0/m, za0h\.h\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za0h\.h\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.h, p0/m, za0h\.h\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za16_bf16_0_w0_untied, svbfloat16_t,
+ z0 = svread_hor_za16_bf16_m (z1, p0, 0, w0),
+ z0 = svread_hor_za16_m (z1, p0, 0, w0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_hor_za32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_hor_za32.c
new file mode 100644
index 0000000..d111b60
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_hor_za32.c
@@ -0,0 +1,196 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** read_za32_s32_0_0_tied:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova z0\.s, p0/m, za0h\.s\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_0_0_tied, svint32_t,
+ z0 = svread_hor_za32_s32_m (z0, p0, 0, 0),
+ z0 = svread_hor_za32_m (z0, p0, 0, 0))
+
+/*
+** read_za32_s32_0_1_tied:
+** mov (w1[2-5]), #?1
+** mova z0\.s, p0/m, za0h\.s\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_0_1_tied, svint32_t,
+ z0 = svread_hor_za32_s32_m (z0, p0, 0, 1),
+ z0 = svread_hor_za32_m (z0, p0, 0, 1))
+
+/*
+** read_za32_s32_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za0h\.s\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_0_w0_tied, svint32_t,
+ z0 = svread_hor_za32_s32_m (z0, p0, 0, w0),
+ z0 = svread_hor_za32_m (z0, p0, 0, w0))
+
+/*
+** read_za32_s32_0_w0p1_tied:
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za0h\.s\[\1, 1\]
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_0_w0p1_tied, svint32_t,
+ z0 = svread_hor_za32_s32_m (z0, p0, 0, w0 + 1),
+ z0 = svread_hor_za32_m (z0, p0, 0, w0 + 1))
+
+/*
+** read_za32_s32_0_w0p3_tied:
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za0h\.s\[\1, 3\]
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_0_w0p3_tied, svint32_t,
+ z0 = svread_hor_za32_s32_m (z0, p0, 0, w0 + 3),
+ z0 = svread_hor_za32_m (z0, p0, 0, w0 + 3))
+
+/*
+** read_za32_s32_0_w0p4_tied:
+** add (w1[2-5]), w0, #?4
+** mova z0\.s, p0/m, za0h\.s\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_0_w0p4_tied, svint32_t,
+ z0 = svread_hor_za32_s32_m (z0, p0, 0, w0 + 4),
+ z0 = svread_hor_za32_m (z0, p0, 0, w0 + 4))
+
+/*
+** read_za32_s32_0_w0m1_tied:
+** sub (w1[2-5]), w0, #?1
+** mova z0\.s, p0/m, za0h\.s\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_0_w0m1_tied, svint32_t,
+ z0 = svread_hor_za32_s32_m (z0, p0, 0, w0 - 1),
+ z0 = svread_hor_za32_m (z0, p0, 0, w0 - 1))
+
+/*
+** read_za32_s32_1_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za1h\.s\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_1_w0_tied, svint32_t,
+ z0 = svread_hor_za32_s32_m (z0, p0, 1, w0),
+ z0 = svread_hor_za32_m (z0, p0, 1, w0))
+
+/*
+** read_za32_s32_1_w0p3_tied:
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za1h\.s\[\1, 3\]
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_1_w0p3_tied, svint32_t,
+ z0 = svread_hor_za32_s32_m (z0, p0, 1, w0 + 3),
+ z0 = svread_hor_za32_m (z0, p0, 1, w0 + 3))
+
+/*
+** read_za32_s32_3_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za3h\.s\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_3_w0_tied, svint32_t,
+ z0 = svread_hor_za32_s32_m (z0, p0, 3, w0),
+ z0 = svread_hor_za32_m (z0, p0, 3, w0))
+
+/*
+** read_za32_s32_3_w0p3_tied:
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za3h\.s\[\1, 3\]
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_3_w0p3_tied, svint32_t,
+ z0 = svread_hor_za32_s32_m (z0, p0, 3, w0 + 3),
+ z0 = svread_hor_za32_m (z0, p0, 3, w0 + 3))
+
+/*
+** read_za32_s32_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.s, p0/m, za0h\.s\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za0h\.s\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.s, p0/m, za0h\.s\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_0_w0_untied, svint32_t,
+ z0 = svread_hor_za32_s32_m (z1, p0, 0, w0),
+ z0 = svread_hor_za32_m (z1, p0, 0, w0))
+
+/*
+** read_za32_u32_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za0h\.s\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za32_u32_0_w0_tied, svuint32_t,
+ z0 = svread_hor_za32_u32_m (z0, p0, 0, w0),
+ z0 = svread_hor_za32_m (z0, p0, 0, w0))
+
+/*
+** read_za32_u32_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.s, p0/m, za0h\.s\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za0h\.s\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.s, p0/m, za0h\.s\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za32_u32_0_w0_untied, svuint32_t,
+ z0 = svread_hor_za32_u32_m (z1, p0, 0, w0),
+ z0 = svread_hor_za32_m (z1, p0, 0, w0))
+
+/*
+** read_za32_f32_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za0h\.s\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za32_f32_0_w0_tied, svfloat32_t,
+ z0 = svread_hor_za32_f32_m (z0, p0, 0, w0),
+ z0 = svread_hor_za32_m (z0, p0, 0, w0))
+
+/*
+** read_za32_f32_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.s, p0/m, za0h\.s\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za0h\.s\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.s, p0/m, za0h\.s\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za32_f32_0_w0_untied, svfloat32_t,
+ z0 = svread_hor_za32_f32_m (z1, p0, 0, w0),
+ z0 = svread_hor_za32_m (z1, p0, 0, w0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_hor_za64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_hor_za64.c
new file mode 100644
index 0000000..b75c531
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_hor_za64.c
@@ -0,0 +1,186 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** read_za64_s64_0_0_tied:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova z0\.d, p0/m, za0h\.d\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za64_s64_0_0_tied, svint64_t,
+ z0 = svread_hor_za64_s64_m (z0, p0, 0, 0),
+ z0 = svread_hor_za64_m (z0, p0, 0, 0))
+
+/*
+** read_za64_s64_0_1_tied:
+** mov (w1[2-5]), #?1
+** mova z0\.d, p0/m, za0h\.d\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za64_s64_0_1_tied, svint64_t,
+ z0 = svread_hor_za64_s64_m (z0, p0, 0, 1),
+ z0 = svread_hor_za64_m (z0, p0, 0, 1))
+
+/*
+** read_za64_s64_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.d, p0/m, za0h\.d\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za64_s64_0_w0_tied, svint64_t,
+ z0 = svread_hor_za64_s64_m (z0, p0, 0, w0),
+ z0 = svread_hor_za64_m (z0, p0, 0, w0))
+
+/*
+** read_za64_s64_0_w0p1_tied:
+** mov (w1[2-5]), w0
+** mova z0\.d, p0/m, za0h\.d\[\1, 1\]
+** ret
+*/
+TEST_READ_ZA (read_za64_s64_0_w0p1_tied, svint64_t,
+ z0 = svread_hor_za64_s64_m (z0, p0, 0, w0 + 1),
+ z0 = svread_hor_za64_m (z0, p0, 0, w0 + 1))
+
+/*
+** read_za64_s64_0_w0p2_tied:
+** add (w1[2-5]), w0, #?2
+** mova z0\.d, p0/m, za0h\.d\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za64_s64_0_w0p2_tied, svint64_t,
+ z0 = svread_hor_za64_s64_m (z0, p0, 0, w0 + 2),
+ z0 = svread_hor_za64_m (z0, p0, 0, w0 + 2))
+
+/*
+** read_za64_s64_0_w0m1_tied:
+** sub (w1[2-5]), w0, #?1
+** mova z0\.d, p0/m, za0h\.d\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za64_s64_0_w0m1_tied, svint64_t,
+ z0 = svread_hor_za64_s64_m (z0, p0, 0, w0 - 1),
+ z0 = svread_hor_za64_m (z0, p0, 0, w0 - 1))
+
+/*
+** read_za64_s64_1_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.d, p0/m, za1h\.d\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za64_s64_1_w0_tied, svint64_t,
+ z0 = svread_hor_za64_s64_m (z0, p0, 1, w0),
+ z0 = svread_hor_za64_m (z0, p0, 1, w0))
+
+/*
+** read_za64_s64_1_w0p1_tied:
+** mov (w1[2-5]), w0
+** mova z0\.d, p0/m, za1h\.d\[\1, 1\]
+** ret
+*/
+TEST_READ_ZA (read_za64_s64_1_w0p1_tied, svint64_t,
+ z0 = svread_hor_za64_s64_m (z0, p0, 1, w0 + 1),
+ z0 = svread_hor_za64_m (z0, p0, 1, w0 + 1))
+
+/*
+** read_za64_s64_7_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.d, p0/m, za7h\.d\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za64_s64_7_w0_tied, svint64_t,
+ z0 = svread_hor_za64_s64_m (z0, p0, 7, w0),
+ z0 = svread_hor_za64_m (z0, p0, 7, w0))
+
+/*
+** read_za64_s64_7_w0p1_tied:
+** mov (w1[2-5]), w0
+** mova z0\.d, p0/m, za7h\.d\[\1, 1\]
+** ret
+*/
+TEST_READ_ZA (read_za64_s64_7_w0p1_tied, svint64_t,
+ z0 = svread_hor_za64_s64_m (z0, p0, 7, w0 + 1),
+ z0 = svread_hor_za64_m (z0, p0, 7, w0 + 1))
+
+/*
+** read_za64_s64_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.d, p0/m, za0h\.d\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.d, p0/m, za0h\.d\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.d, p0/m, za0h\.d\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za64_s64_0_w0_untied, svint64_t,
+ z0 = svread_hor_za64_s64_m (z1, p0, 0, w0),
+ z0 = svread_hor_za64_m (z1, p0, 0, w0))
+
+/*
+** read_za64_u64_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.d, p0/m, za0h\.d\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za64_u64_0_w0_tied, svuint64_t,
+ z0 = svread_hor_za64_u64_m (z0, p0, 0, w0),
+ z0 = svread_hor_za64_m (z0, p0, 0, w0))
+
+/*
+** read_za64_u64_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.d, p0/m, za0h\.d\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.d, p0/m, za0h\.d\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.d, p0/m, za0h\.d\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za64_u64_0_w0_untied, svuint64_t,
+ z0 = svread_hor_za64_u64_m (z1, p0, 0, w0),
+ z0 = svread_hor_za64_m (z1, p0, 0, w0))
+
+/*
+** read_za64_f64_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.d, p0/m, za0h\.d\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za64_f64_0_w0_tied, svfloat64_t,
+ z0 = svread_hor_za64_f64_m (z0, p0, 0, w0),
+ z0 = svread_hor_za64_m (z0, p0, 0, w0))
+
+/*
+** read_za64_f64_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.d, p0/m, za0h\.d\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.d, p0/m, za0h\.d\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.d, p0/m, za0h\.d\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za64_f64_0_w0_untied, svfloat64_t,
+ z0 = svread_hor_za64_f64_m (z1, p0, 0, w0),
+ z0 = svread_hor_za64_m (z1, p0, 0, w0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_hor_za8.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_hor_za8.c
new file mode 100644
index 0000000..0ad5a95
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_hor_za8.c
@@ -0,0 +1,125 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** read_za8_s8_0_0_tied:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova z0\.b, p0/m, za0h\.b\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za8_s8_0_0_tied, svint8_t,
+ z0 = svread_hor_za8_s8_m (z0, p0, 0, 0),
+ z0 = svread_hor_za8_m (z0, p0, 0, 0))
+
+/*
+** read_za8_s8_0_1_tied:
+** mov (w1[2-5]), #?1
+** mova z0\.b, p0/m, za0h\.b\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za8_s8_0_1_tied, svint8_t,
+ z0 = svread_hor_za8_s8_m (z0, p0, 0, 1),
+ z0 = svread_hor_za8_m (z0, p0, 0, 1))
+
+/*
+** read_za8_s8_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.b, p0/m, za0h\.b\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za8_s8_0_w0_tied, svint8_t,
+ z0 = svread_hor_za8_s8_m (z0, p0, 0, w0),
+ z0 = svread_hor_za8_m (z0, p0, 0, w0))
+
+/*
+** read_za8_s8_0_w0p1_tied:
+** mov (w1[2-5]), w0
+** mova z0\.b, p0/m, za0h\.b\[\1, 1\]
+** ret
+*/
+TEST_READ_ZA (read_za8_s8_0_w0p1_tied, svint8_t,
+ z0 = svread_hor_za8_s8_m (z0, p0, 0, w0 + 1),
+ z0 = svread_hor_za8_m (z0, p0, 0, w0 + 1))
+
+/*
+** read_za8_s8_0_w0p15_tied:
+** mov (w1[2-5]), w0
+** mova z0\.b, p0/m, za0h\.b\[\1, 15\]
+** ret
+*/
+TEST_READ_ZA (read_za8_s8_0_w0p15_tied, svint8_t,
+ z0 = svread_hor_za8_s8_m (z0, p0, 0, w0 + 15),
+ z0 = svread_hor_za8_m (z0, p0, 0, w0 + 15))
+
+/*
+** read_za8_s8_0_w0p16_tied:
+** add (w1[2-5]), w0, #?16
+** mova z0\.b, p0/m, za0h\.b\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za8_s8_0_w0p16_tied, svint8_t,
+ z0 = svread_hor_za8_s8_m (z0, p0, 0, w0 + 16),
+ z0 = svread_hor_za8_m (z0, p0, 0, w0 + 16))
+
+/*
+** read_za8_s8_0_w0m1_tied:
+** sub (w1[2-5]), w0, #?1
+** mova z0\.b, p0/m, za0h\.b\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za8_s8_0_w0m1_tied, svint8_t,
+ z0 = svread_hor_za8_s8_m (z0, p0, 0, w0 - 1),
+ z0 = svread_hor_za8_m (z0, p0, 0, w0 - 1))
+
+/*
+** read_za8_s8_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.b, p0/m, za0h\.b\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.b, p0/m, za0h\.b\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.b, p0/m, za0h\.b\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za8_s8_0_w0_untied, svint8_t,
+ z0 = svread_hor_za8_s8_m (z1, p0, 0, w0),
+ z0 = svread_hor_za8_m (z1, p0, 0, w0))
+
+/*
+** read_za8_u8_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.b, p0/m, za0h\.b\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za8_u8_0_w0_tied, svuint8_t,
+ z0 = svread_hor_za8_u8_m (z0, p0, 0, w0),
+ z0 = svread_hor_za8_m (z0, p0, 0, w0))
+
+/*
+** read_za8_u8_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.b, p0/m, za0h\.b\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.b, p0/m, za0h\.b\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.b, p0/m, za0h\.b\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za8_u8_0_w0_untied, svuint8_t,
+ z0 = svread_hor_za8_u8_m (z1, p0, 0, w0),
+ z0 = svread_hor_za8_m (z1, p0, 0, w0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_ver_za128.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_ver_za128.c
new file mode 100644
index 0000000..93d5d60
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_ver_za128.c
@@ -0,0 +1,435 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** read_za128_s8_0_0_tied:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_s8_0_0_tied, svint8_t,
+ z0 = svread_ver_za128_s8_m (z0, p0, 0, 0),
+ z0 = svread_ver_za128_m (z0, p0, 0, 0))
+
+/*
+** read_za128_s8_0_1_tied:
+** mov (w1[2-5]), #?1
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_s8_0_1_tied, svint8_t,
+ z0 = svread_ver_za128_s8_m (z0, p0, 0, 1),
+ z0 = svread_ver_za128_m (z0, p0, 0, 1))
+
+/*
+** read_za128_s8_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_s8_0_w0_tied, svint8_t,
+ z0 = svread_ver_za128_s8_m (z0, p0, 0, w0),
+ z0 = svread_ver_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_s8_0_w0p1_tied:
+** add (w1[2-5]), w0, #?1
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_s8_0_w0p1_tied, svint8_t,
+ z0 = svread_ver_za128_s8_m (z0, p0, 0, w0 + 1),
+ z0 = svread_ver_za128_m (z0, p0, 0, w0 + 1))
+
+/*
+** read_za128_s8_0_w0m1_tied:
+** sub (w1[2-5]), w0, #?1
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_s8_0_w0m1_tied, svint8_t,
+ z0 = svread_ver_za128_s8_m (z0, p0, 0, w0 - 1),
+ z0 = svread_ver_za128_m (z0, p0, 0, w0 - 1))
+
+/*
+** read_za128_s8_1_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za1v\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_s8_1_w0_tied, svint8_t,
+ z0 = svread_ver_za128_s8_m (z0, p0, 1, w0),
+ z0 = svread_ver_za128_m (z0, p0, 1, w0))
+
+/*
+** read_za128_s8_15_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za15v\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_s8_15_w0_tied, svint8_t,
+ z0 = svread_ver_za128_s8_m (z0, p0, 15, w0),
+ z0 = svread_ver_za128_m (z0, p0, 15, w0))
+
+/*
+** read_za128_s8_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0v\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_s8_0_w0_untied, svint8_t,
+ z0 = svread_ver_za128_s8_m (z1, p0, 0, w0),
+ z0 = svread_ver_za128_m (z1, p0, 0, w0))
+
+/*
+** read_za128_u8_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_u8_0_w0_tied, svuint8_t,
+ z0 = svread_ver_za128_u8_m (z0, p0, 0, w0),
+ z0 = svread_ver_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_u8_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0v\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_u8_0_w0_untied, svuint8_t,
+ z0 = svread_ver_za128_u8_m (z1, p0, 0, w0),
+ z0 = svread_ver_za128_m (z1, p0, 0, w0))
+
+/*
+** read_za128_s16_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_s16_0_w0_tied, svint16_t,
+ z0 = svread_ver_za128_s16_m (z0, p0, 0, w0),
+ z0 = svread_ver_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_s16_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0v\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_s16_0_w0_untied, svint16_t,
+ z0 = svread_ver_za128_s16_m (z1, p0, 0, w0),
+ z0 = svread_ver_za128_m (z1, p0, 0, w0))
+
+/*
+** read_za128_u16_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_u16_0_w0_tied, svuint16_t,
+ z0 = svread_ver_za128_u16_m (z0, p0, 0, w0),
+ z0 = svread_ver_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_u16_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0v\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_u16_0_w0_untied, svuint16_t,
+ z0 = svread_ver_za128_u16_m (z1, p0, 0, w0),
+ z0 = svread_ver_za128_m (z1, p0, 0, w0))
+
+/*
+** read_za128_f16_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_f16_0_w0_tied, svfloat16_t,
+ z0 = svread_ver_za128_f16_m (z0, p0, 0, w0),
+ z0 = svread_ver_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_f16_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0v\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_f16_0_w0_untied, svfloat16_t,
+ z0 = svread_ver_za128_f16_m (z1, p0, 0, w0),
+ z0 = svread_ver_za128_m (z1, p0, 0, w0))
+
+/*
+** read_za128_bf16_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_bf16_0_w0_tied, svbfloat16_t,
+ z0 = svread_ver_za128_bf16_m (z0, p0, 0, w0),
+ z0 = svread_ver_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_bf16_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0v\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_bf16_0_w0_untied, svbfloat16_t,
+ z0 = svread_ver_za128_bf16_m (z1, p0, 0, w0),
+ z0 = svread_ver_za128_m (z1, p0, 0, w0))
+
+/*
+** read_za128_s32_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_s32_0_w0_tied, svint32_t,
+ z0 = svread_ver_za128_s32_m (z0, p0, 0, w0),
+ z0 = svread_ver_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_s32_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0v\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_s32_0_w0_untied, svint32_t,
+ z0 = svread_ver_za128_s32_m (z1, p0, 0, w0),
+ z0 = svread_ver_za128_m (z1, p0, 0, w0))
+
+/*
+** read_za128_u32_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_u32_0_w0_tied, svuint32_t,
+ z0 = svread_ver_za128_u32_m (z0, p0, 0, w0),
+ z0 = svread_ver_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_u32_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0v\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_u32_0_w0_untied, svuint32_t,
+ z0 = svread_ver_za128_u32_m (z1, p0, 0, w0),
+ z0 = svread_ver_za128_m (z1, p0, 0, w0))
+
+/*
+** read_za128_f32_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_f32_0_w0_tied, svfloat32_t,
+ z0 = svread_ver_za128_f32_m (z0, p0, 0, w0),
+ z0 = svread_ver_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_f32_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0v\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_f32_0_w0_untied, svfloat32_t,
+ z0 = svread_ver_za128_f32_m (z1, p0, 0, w0),
+ z0 = svread_ver_za128_m (z1, p0, 0, w0))
+
+/*
+** read_za128_s64_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_s64_0_w0_tied, svint64_t,
+ z0 = svread_ver_za128_s64_m (z0, p0, 0, w0),
+ z0 = svread_ver_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_s64_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0v\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_s64_0_w0_untied, svint64_t,
+ z0 = svread_ver_za128_s64_m (z1, p0, 0, w0),
+ z0 = svread_ver_za128_m (z1, p0, 0, w0))
+
+/*
+** read_za128_u64_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_u64_0_w0_tied, svuint64_t,
+ z0 = svread_ver_za128_u64_m (z0, p0, 0, w0),
+ z0 = svread_ver_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_u64_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0v\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_u64_0_w0_untied, svuint64_t,
+ z0 = svread_ver_za128_u64_m (z1, p0, 0, w0),
+ z0 = svread_ver_za128_m (z1, p0, 0, w0))
+
+/*
+** read_za128_f64_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za128_f64_0_w0_tied, svfloat64_t,
+ z0 = svread_ver_za128_f64_m (z0, p0, 0, w0),
+ z0 = svread_ver_za128_m (z0, p0, 0, w0))
+
+/*
+** read_za128_f64_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.q, p0/m, za0v\.q\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.q, p0/m, za0v\.q\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.q, p0/m, za0v\.q\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za128_f64_0_w0_untied, svfloat64_t,
+ z0 = svread_ver_za128_f64_m (z1, p0, 0, w0),
+ z0 = svread_ver_za128_m (z1, p0, 0, w0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_ver_za16.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_ver_za16.c
new file mode 100644
index 0000000..d0353dc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_ver_za16.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** read_za16_s16_0_0_tied:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova z0\.h, p0/m, za0v\.h\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za16_s16_0_0_tied, svint16_t,
+ z0 = svread_ver_za16_s16_m (z0, p0, 0, 0),
+ z0 = svread_ver_za16_m (z0, p0, 0, 0))
+
+/*
+** read_za16_s16_0_1_tied:
+** mov (w1[2-5]), #?1
+** mova z0\.h, p0/m, za0v\.h\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za16_s16_0_1_tied, svint16_t,
+ z0 = svread_ver_za16_s16_m (z0, p0, 0, 1),
+ z0 = svread_ver_za16_m (z0, p0, 0, 1))
+
+/*
+** read_za16_s16_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za0v\.h\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za16_s16_0_w0_tied, svint16_t,
+ z0 = svread_ver_za16_s16_m (z0, p0, 0, w0),
+ z0 = svread_ver_za16_m (z0, p0, 0, w0))
+
+/*
+** read_za16_s16_0_w0p1_tied:
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za0v\.h\[\1, 1\]
+** ret
+*/
+TEST_READ_ZA (read_za16_s16_0_w0p1_tied, svint16_t,
+ z0 = svread_ver_za16_s16_m (z0, p0, 0, w0 + 1),
+ z0 = svread_ver_za16_m (z0, p0, 0, w0 + 1))
+
+/*
+** read_za16_s16_0_w0p7_tied:
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za0v\.h\[\1, 7\]
+** ret
+*/
+TEST_READ_ZA (read_za16_s16_0_w0p7_tied, svint16_t,
+ z0 = svread_ver_za16_s16_m (z0, p0, 0, w0 + 7),
+ z0 = svread_ver_za16_m (z0, p0, 0, w0 + 7))
+
+/*
+** read_za16_s16_0_w0p8_tied:
+** add (w1[2-5]), w0, #?8
+** mova z0\.h, p0/m, za0v\.h\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za16_s16_0_w0p8_tied, svint16_t,
+ z0 = svread_ver_za16_s16_m (z0, p0, 0, w0 + 8),
+ z0 = svread_ver_za16_m (z0, p0, 0, w0 + 8))
+
+/*
+** read_za16_s16_0_w0m1_tied:
+** sub (w1[2-5]), w0, #?1
+** mova z0\.h, p0/m, za0v\.h\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za16_s16_0_w0m1_tied, svint16_t,
+ z0 = svread_ver_za16_s16_m (z0, p0, 0, w0 - 1),
+ z0 = svread_ver_za16_m (z0, p0, 0, w0 - 1))
+
+/*
+** read_za16_s16_1_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za1v\.h\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za16_s16_1_w0_tied, svint16_t,
+ z0 = svread_ver_za16_s16_m (z0, p0, 1, w0),
+ z0 = svread_ver_za16_m (z0, p0, 1, w0))
+
+/*
+** read_za16_s16_1_w0p7_tied:
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za1v\.h\[\1, 7\]
+** ret
+*/
+TEST_READ_ZA (read_za16_s16_1_w0p7_tied, svint16_t,
+ z0 = svread_ver_za16_s16_m (z0, p0, 1, w0 + 7),
+ z0 = svread_ver_za16_m (z0, p0, 1, w0 + 7))
+
+/*
+** read_za16_s16_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.h, p0/m, za0v\.h\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za0v\.h\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.h, p0/m, za0v\.h\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za16_s16_0_w0_untied, svint16_t,
+ z0 = svread_ver_za16_s16_m (z1, p0, 0, w0),
+ z0 = svread_ver_za16_m (z1, p0, 0, w0))
+
+/*
+** read_za16_u16_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za0v\.h\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za16_u16_0_w0_tied, svuint16_t,
+ z0 = svread_ver_za16_u16_m (z0, p0, 0, w0),
+ z0 = svread_ver_za16_m (z0, p0, 0, w0))
+
+/*
+** read_za16_u16_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.h, p0/m, za0v\.h\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za0v\.h\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.h, p0/m, za0v\.h\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za16_u16_0_w0_untied, svuint16_t,
+ z0 = svread_ver_za16_u16_m (z1, p0, 0, w0),
+ z0 = svread_ver_za16_m (z1, p0, 0, w0))
+
+/*
+** read_za16_f16_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za0v\.h\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za16_f16_0_w0_tied, svfloat16_t,
+ z0 = svread_ver_za16_f16_m (z0, p0, 0, w0),
+ z0 = svread_ver_za16_m (z0, p0, 0, w0))
+
+/*
+** read_za16_f16_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.h, p0/m, za0v\.h\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za0v\.h\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.h, p0/m, za0v\.h\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za16_f16_0_w0_untied, svfloat16_t,
+ z0 = svread_ver_za16_f16_m (z1, p0, 0, w0),
+ z0 = svread_ver_za16_m (z1, p0, 0, w0))
+
+/*
+** read_za16_bf16_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za0v\.h\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za16_bf16_0_w0_tied, svbfloat16_t,
+ z0 = svread_ver_za16_bf16_m (z0, p0, 0, w0),
+ z0 = svread_ver_za16_m (z0, p0, 0, w0))
+
+/*
+** read_za16_bf16_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.h, p0/m, za0v\.h\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.h, p0/m, za0v\.h\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.h, p0/m, za0v\.h\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za16_bf16_0_w0_untied, svbfloat16_t,
+ z0 = svread_ver_za16_bf16_m (z1, p0, 0, w0),
+ z0 = svread_ver_za16_m (z1, p0, 0, w0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_ver_za32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_ver_za32.c
new file mode 100644
index 0000000..362e818
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_ver_za32.c
@@ -0,0 +1,196 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** read_za32_s32_0_0_tied:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova z0\.s, p0/m, za0v\.s\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_0_0_tied, svint32_t,
+ z0 = svread_ver_za32_s32_m (z0, p0, 0, 0),
+ z0 = svread_ver_za32_m (z0, p0, 0, 0))
+
+/*
+** read_za32_s32_0_1_tied:
+** mov (w1[2-5]), #?1
+** mova z0\.s, p0/m, za0v\.s\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_0_1_tied, svint32_t,
+ z0 = svread_ver_za32_s32_m (z0, p0, 0, 1),
+ z0 = svread_ver_za32_m (z0, p0, 0, 1))
+
+/*
+** read_za32_s32_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za0v\.s\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_0_w0_tied, svint32_t,
+ z0 = svread_ver_za32_s32_m (z0, p0, 0, w0),
+ z0 = svread_ver_za32_m (z0, p0, 0, w0))
+
+/*
+** read_za32_s32_0_w0p1_tied:
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za0v\.s\[\1, 1\]
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_0_w0p1_tied, svint32_t,
+ z0 = svread_ver_za32_s32_m (z0, p0, 0, w0 + 1),
+ z0 = svread_ver_za32_m (z0, p0, 0, w0 + 1))
+
+/*
+** read_za32_s32_0_w0p3_tied:
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za0v\.s\[\1, 3\]
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_0_w0p3_tied, svint32_t,
+ z0 = svread_ver_za32_s32_m (z0, p0, 0, w0 + 3),
+ z0 = svread_ver_za32_m (z0, p0, 0, w0 + 3))
+
+/*
+** read_za32_s32_0_w0p4_tied:
+** add (w1[2-5]), w0, #?4
+** mova z0\.s, p0/m, za0v\.s\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_0_w0p4_tied, svint32_t,
+ z0 = svread_ver_za32_s32_m (z0, p0, 0, w0 + 4),
+ z0 = svread_ver_za32_m (z0, p0, 0, w0 + 4))
+
+/*
+** read_za32_s32_0_w0m1_tied:
+** sub (w1[2-5]), w0, #?1
+** mova z0\.s, p0/m, za0v\.s\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_0_w0m1_tied, svint32_t,
+ z0 = svread_ver_za32_s32_m (z0, p0, 0, w0 - 1),
+ z0 = svread_ver_za32_m (z0, p0, 0, w0 - 1))
+
+/*
+** read_za32_s32_1_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za1v\.s\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_1_w0_tied, svint32_t,
+ z0 = svread_ver_za32_s32_m (z0, p0, 1, w0),
+ z0 = svread_ver_za32_m (z0, p0, 1, w0))
+
+/*
+** read_za32_s32_1_w0p3_tied:
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za1v\.s\[\1, 3\]
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_1_w0p3_tied, svint32_t,
+ z0 = svread_ver_za32_s32_m (z0, p0, 1, w0 + 3),
+ z0 = svread_ver_za32_m (z0, p0, 1, w0 + 3))
+
+/*
+** read_za32_s32_3_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za3v\.s\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_3_w0_tied, svint32_t,
+ z0 = svread_ver_za32_s32_m (z0, p0, 3, w0),
+ z0 = svread_ver_za32_m (z0, p0, 3, w0))
+
+/*
+** read_za32_s32_3_w0p3_tied:
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za3v\.s\[\1, 3\]
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_3_w0p3_tied, svint32_t,
+ z0 = svread_ver_za32_s32_m (z0, p0, 3, w0 + 3),
+ z0 = svread_ver_za32_m (z0, p0, 3, w0 + 3))
+
+/*
+** read_za32_s32_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.s, p0/m, za0v\.s\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za0v\.s\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.s, p0/m, za0v\.s\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za32_s32_0_w0_untied, svint32_t,
+ z0 = svread_ver_za32_s32_m (z1, p0, 0, w0),
+ z0 = svread_ver_za32_m (z1, p0, 0, w0))
+
+/*
+** read_za32_u32_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za0v\.s\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za32_u32_0_w0_tied, svuint32_t,
+ z0 = svread_ver_za32_u32_m (z0, p0, 0, w0),
+ z0 = svread_ver_za32_m (z0, p0, 0, w0))
+
+/*
+** read_za32_u32_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.s, p0/m, za0v\.s\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za0v\.s\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.s, p0/m, za0v\.s\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za32_u32_0_w0_untied, svuint32_t,
+ z0 = svread_ver_za32_u32_m (z1, p0, 0, w0),
+ z0 = svread_ver_za32_m (z1, p0, 0, w0))
+
+/*
+** read_za32_f32_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za0v\.s\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za32_f32_0_w0_tied, svfloat32_t,
+ z0 = svread_ver_za32_f32_m (z0, p0, 0, w0),
+ z0 = svread_ver_za32_m (z0, p0, 0, w0))
+
+/*
+** read_za32_f32_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.s, p0/m, za0v\.s\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.s, p0/m, za0v\.s\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.s, p0/m, za0v\.s\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za32_f32_0_w0_untied, svfloat32_t,
+ z0 = svread_ver_za32_f32_m (z1, p0, 0, w0),
+ z0 = svread_ver_za32_m (z1, p0, 0, w0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_ver_za64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_ver_za64.c
new file mode 100644
index 0000000..dba3c6f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_ver_za64.c
@@ -0,0 +1,186 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** read_za64_s64_0_0_tied:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova z0\.d, p0/m, za0v\.d\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za64_s64_0_0_tied, svint64_t,
+ z0 = svread_ver_za64_s64_m (z0, p0, 0, 0),
+ z0 = svread_ver_za64_m (z0, p0, 0, 0))
+
+/*
+** read_za64_s64_0_1_tied:
+** mov (w1[2-5]), #?1
+** mova z0\.d, p0/m, za0v\.d\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za64_s64_0_1_tied, svint64_t,
+ z0 = svread_ver_za64_s64_m (z0, p0, 0, 1),
+ z0 = svread_ver_za64_m (z0, p0, 0, 1))
+
+/*
+** read_za64_s64_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.d, p0/m, za0v\.d\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za64_s64_0_w0_tied, svint64_t,
+ z0 = svread_ver_za64_s64_m (z0, p0, 0, w0),
+ z0 = svread_ver_za64_m (z0, p0, 0, w0))
+
+/*
+** read_za64_s64_0_w0p1_tied:
+** mov (w1[2-5]), w0
+** mova z0\.d, p0/m, za0v\.d\[\1, 1\]
+** ret
+*/
+TEST_READ_ZA (read_za64_s64_0_w0p1_tied, svint64_t,
+ z0 = svread_ver_za64_s64_m (z0, p0, 0, w0 + 1),
+ z0 = svread_ver_za64_m (z0, p0, 0, w0 + 1))
+
+/*
+** read_za64_s64_0_w0p2_tied:
+** add (w1[2-5]), w0, #?2
+** mova z0\.d, p0/m, za0v\.d\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za64_s64_0_w0p2_tied, svint64_t,
+ z0 = svread_ver_za64_s64_m (z0, p0, 0, w0 + 2),
+ z0 = svread_ver_za64_m (z0, p0, 0, w0 + 2))
+
+/*
+** read_za64_s64_0_w0m1_tied:
+** sub (w1[2-5]), w0, #?1
+** mova z0\.d, p0/m, za0v\.d\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za64_s64_0_w0m1_tied, svint64_t,
+ z0 = svread_ver_za64_s64_m (z0, p0, 0, w0 - 1),
+ z0 = svread_ver_za64_m (z0, p0, 0, w0 - 1))
+
+/*
+** read_za64_s64_1_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.d, p0/m, za1v\.d\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za64_s64_1_w0_tied, svint64_t,
+ z0 = svread_ver_za64_s64_m (z0, p0, 1, w0),
+ z0 = svread_ver_za64_m (z0, p0, 1, w0))
+
+/*
+** read_za64_s64_1_w0p1_tied:
+** mov (w1[2-5]), w0
+** mova z0\.d, p0/m, za1v\.d\[\1, 1\]
+** ret
+*/
+TEST_READ_ZA (read_za64_s64_1_w0p1_tied, svint64_t,
+ z0 = svread_ver_za64_s64_m (z0, p0, 1, w0 + 1),
+ z0 = svread_ver_za64_m (z0, p0, 1, w0 + 1))
+
+/*
+** read_za64_s64_7_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.d, p0/m, za7v\.d\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za64_s64_7_w0_tied, svint64_t,
+ z0 = svread_ver_za64_s64_m (z0, p0, 7, w0),
+ z0 = svread_ver_za64_m (z0, p0, 7, w0))
+
+/*
+** read_za64_s64_7_w0p1_tied:
+** mov (w1[2-5]), w0
+** mova z0\.d, p0/m, za7v\.d\[\1, 1\]
+** ret
+*/
+TEST_READ_ZA (read_za64_s64_7_w0p1_tied, svint64_t,
+ z0 = svread_ver_za64_s64_m (z0, p0, 7, w0 + 1),
+ z0 = svread_ver_za64_m (z0, p0, 7, w0 + 1))
+
+/*
+** read_za64_s64_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.d, p0/m, za0v\.d\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.d, p0/m, za0v\.d\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.d, p0/m, za0v\.d\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za64_s64_0_w0_untied, svint64_t,
+ z0 = svread_ver_za64_s64_m (z1, p0, 0, w0),
+ z0 = svread_ver_za64_m (z1, p0, 0, w0))
+
+/*
+** read_za64_u64_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.d, p0/m, za0v\.d\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za64_u64_0_w0_tied, svuint64_t,
+ z0 = svread_ver_za64_u64_m (z0, p0, 0, w0),
+ z0 = svread_ver_za64_m (z0, p0, 0, w0))
+
+/*
+** read_za64_u64_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.d, p0/m, za0v\.d\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.d, p0/m, za0v\.d\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.d, p0/m, za0v\.d\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za64_u64_0_w0_untied, svuint64_t,
+ z0 = svread_ver_za64_u64_m (z1, p0, 0, w0),
+ z0 = svread_ver_za64_m (z1, p0, 0, w0))
+
+/*
+** read_za64_f64_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.d, p0/m, za0v\.d\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za64_f64_0_w0_tied, svfloat64_t,
+ z0 = svread_ver_za64_f64_m (z0, p0, 0, w0),
+ z0 = svread_ver_za64_m (z0, p0, 0, w0))
+
+/*
+** read_za64_f64_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.d, p0/m, za0v\.d\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.d, p0/m, za0v\.d\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.d, p0/m, za0v\.d\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za64_f64_0_w0_untied, svfloat64_t,
+ z0 = svread_ver_za64_f64_m (z1, p0, 0, w0),
+ z0 = svread_ver_za64_m (z1, p0, 0, w0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_ver_za8.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_ver_za8.c
new file mode 100644
index 0000000..87564d1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/read_ver_za8.c
@@ -0,0 +1,125 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** read_za8_s8_0_0_tied:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova z0\.b, p0/m, za0v\.b\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za8_s8_0_0_tied, svint8_t,
+ z0 = svread_ver_za8_s8_m (z0, p0, 0, 0),
+ z0 = svread_ver_za8_m (z0, p0, 0, 0))
+
+/*
+** read_za8_s8_0_1_tied:
+** mov (w1[2-5]), #?1
+** mova z0\.b, p0/m, za0v\.b\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za8_s8_0_1_tied, svint8_t,
+ z0 = svread_ver_za8_s8_m (z0, p0, 0, 1),
+ z0 = svread_ver_za8_m (z0, p0, 0, 1))
+
+/*
+** read_za8_s8_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.b, p0/m, za0v\.b\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za8_s8_0_w0_tied, svint8_t,
+ z0 = svread_ver_za8_s8_m (z0, p0, 0, w0),
+ z0 = svread_ver_za8_m (z0, p0, 0, w0))
+
+/*
+** read_za8_s8_0_w0p1_tied:
+** mov (w1[2-5]), w0
+** mova z0\.b, p0/m, za0v\.b\[\1, 1\]
+** ret
+*/
+TEST_READ_ZA (read_za8_s8_0_w0p1_tied, svint8_t,
+ z0 = svread_ver_za8_s8_m (z0, p0, 0, w0 + 1),
+ z0 = svread_ver_za8_m (z0, p0, 0, w0 + 1))
+
+/*
+** read_za8_s8_0_w0p15_tied:
+** mov (w1[2-5]), w0
+** mova z0\.b, p0/m, za0v\.b\[\1, 15\]
+** ret
+*/
+TEST_READ_ZA (read_za8_s8_0_w0p15_tied, svint8_t,
+ z0 = svread_ver_za8_s8_m (z0, p0, 0, w0 + 15),
+ z0 = svread_ver_za8_m (z0, p0, 0, w0 + 15))
+
+/*
+** read_za8_s8_0_w0p16_tied:
+** add (w1[2-5]), w0, #?16
+** mova z0\.b, p0/m, za0v\.b\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za8_s8_0_w0p16_tied, svint8_t,
+ z0 = svread_ver_za8_s8_m (z0, p0, 0, w0 + 16),
+ z0 = svread_ver_za8_m (z0, p0, 0, w0 + 16))
+
+/*
+** read_za8_s8_0_w0m1_tied:
+** sub (w1[2-5]), w0, #?1
+** mova z0\.b, p0/m, za0v\.b\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za8_s8_0_w0m1_tied, svint8_t,
+ z0 = svread_ver_za8_s8_m (z0, p0, 0, w0 - 1),
+ z0 = svread_ver_za8_m (z0, p0, 0, w0 - 1))
+
+/*
+** read_za8_s8_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.b, p0/m, za0v\.b\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.b, p0/m, za0v\.b\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.b, p0/m, za0v\.b\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za8_s8_0_w0_untied, svint8_t,
+ z0 = svread_ver_za8_s8_m (z1, p0, 0, w0),
+ z0 = svread_ver_za8_m (z1, p0, 0, w0))
+
+/*
+** read_za8_u8_0_w0_tied:
+** mov (w1[2-5]), w0
+** mova z0\.b, p0/m, za0v\.b\[\1, 0\]
+** ret
+*/
+TEST_READ_ZA (read_za8_u8_0_w0_tied, svuint8_t,
+ z0 = svread_ver_za8_u8_m (z0, p0, 0, w0),
+ z0 = svread_ver_za8_m (z0, p0, 0, w0))
+
+/*
+** read_za8_u8_0_w0_untied:
+** (
+** mov (w1[2-5]), w0
+** mov z0\.d, z1\.d
+** mova z0\.b, p0/m, za0v\.b\[\1, 0\]
+** |
+** mov z0\.d, z1\.d
+** mov (w1[2-5]), w0
+** mova z0\.b, p0/m, za0v\.b\[\2, 0\]
+** |
+** mov (w1[2-5]), w0
+** mova z1\.b, p0/m, za0v\.b\[\3, 0\]
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_READ_ZA (read_za8_u8_0_w0_untied, svuint8_t,
+ z0 = svread_ver_za8_u8_m (z1, p0, 0, w0),
+ z0 = svread_ver_za8_m (z1, p0, 0, w0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_bf16.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_bf16.c
new file mode 100644
index 0000000..6507c5a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_bf16.c
@@ -0,0 +1,76 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** revd_bf16_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_bf16_m_tied12, svbfloat16_t,
+ z0 = svrevd_bf16_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_bf16_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_bf16_m_tied1, svbfloat16_t,
+ z0 = svrevd_bf16_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_bf16_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_bf16_m_tied2, svbfloat16_t,
+ z0 = svrevd_bf16_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_bf16_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_bf16_m_untied, svbfloat16_t,
+ z0 = svrevd_bf16_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation. Don't require specific output. */
+TEST_UNIFORM_Z (revd_bf16_z_tied1, svbfloat16_t,
+ z0 = svrevd_bf16_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_bf16_z_untied:
+** mov z0\.[bhsd], #0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_bf16_z_untied, svbfloat16_t,
+ z0 = svrevd_bf16_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_bf16_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_bf16_x_tied1, svbfloat16_t,
+ z0 = svrevd_bf16_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_bf16_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_bf16_x_untied, svbfloat16_t,
+ z0 = svrevd_bf16_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_f16.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_f16.c
new file mode 100644
index 0000000..1a2f893
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_f16.c
@@ -0,0 +1,76 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** revd_f16_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f16_m_tied12, svfloat16_t,
+ z0 = svrevd_f16_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_f16_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f16_m_tied1, svfloat16_t,
+ z0 = svrevd_f16_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_f16_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f16_m_tied2, svfloat16_t,
+ z0 = svrevd_f16_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_f16_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f16_m_untied, svfloat16_t,
+ z0 = svrevd_f16_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation. Don't require specific output. */
+TEST_UNIFORM_Z (revd_f16_z_tied1, svfloat16_t,
+ z0 = svrevd_f16_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_f16_z_untied:
+** mov z0\.[bhsd], #0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f16_z_untied, svfloat16_t,
+ z0 = svrevd_f16_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_f16_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f16_x_tied1, svfloat16_t,
+ z0 = svrevd_f16_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_f16_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f16_x_untied, svfloat16_t,
+ z0 = svrevd_f16_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_f32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_f32.c
new file mode 100644
index 0000000..81c77d5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_f32.c
@@ -0,0 +1,76 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** revd_f32_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f32_m_tied12, svfloat32_t,
+ z0 = svrevd_f32_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_f32_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f32_m_tied1, svfloat32_t,
+ z0 = svrevd_f32_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_f32_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f32_m_tied2, svfloat32_t,
+ z0 = svrevd_f32_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_f32_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f32_m_untied, svfloat32_t,
+ z0 = svrevd_f32_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation. Don't require specific output. */
+TEST_UNIFORM_Z (revd_f32_z_tied1, svfloat32_t,
+ z0 = svrevd_f32_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_f32_z_untied:
+** mov z0\.[bhsd], #0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f32_z_untied, svfloat32_t,
+ z0 = svrevd_f32_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_f32_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f32_x_tied1, svfloat32_t,
+ z0 = svrevd_f32_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_f32_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f32_x_untied, svfloat32_t,
+ z0 = svrevd_f32_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_f64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_f64.c
new file mode 100644
index 0000000..fce6d65
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_f64.c
@@ -0,0 +1,76 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** revd_f64_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f64_m_tied12, svfloat64_t,
+ z0 = svrevd_f64_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_f64_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f64_m_tied1, svfloat64_t,
+ z0 = svrevd_f64_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_f64_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f64_m_tied2, svfloat64_t,
+ z0 = svrevd_f64_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_f64_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f64_m_untied, svfloat64_t,
+ z0 = svrevd_f64_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation. Don't require specific output. */
+TEST_UNIFORM_Z (revd_f64_z_tied1, svfloat64_t,
+ z0 = svrevd_f64_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_f64_z_untied:
+** mov z0\.[bhsd], #0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f64_z_untied, svfloat64_t,
+ z0 = svrevd_f64_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_f64_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f64_x_tied1, svfloat64_t,
+ z0 = svrevd_f64_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_f64_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f64_x_untied, svfloat64_t,
+ z0 = svrevd_f64_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_s16.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_s16.c
new file mode 100644
index 0000000..a2eba6a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_s16.c
@@ -0,0 +1,76 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** revd_s16_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s16_m_tied12, svint16_t,
+ z0 = svrevd_s16_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_s16_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s16_m_tied1, svint16_t,
+ z0 = svrevd_s16_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_s16_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s16_m_tied2, svint16_t,
+ z0 = svrevd_s16_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_s16_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s16_m_untied, svint16_t,
+ z0 = svrevd_s16_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation. Don't require specific output. */
+TEST_UNIFORM_Z (revd_s16_z_tied1, svint16_t,
+ z0 = svrevd_s16_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_s16_z_untied:
+** mov z0\.[bhsd], #0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s16_z_untied, svint16_t,
+ z0 = svrevd_s16_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_s16_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s16_x_tied1, svint16_t,
+ z0 = svrevd_s16_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_s16_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s16_x_untied, svint16_t,
+ z0 = svrevd_s16_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_s32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_s32.c
new file mode 100644
index 0000000..cbc0dc0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_s32.c
@@ -0,0 +1,76 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** revd_s32_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s32_m_tied12, svint32_t,
+ z0 = svrevd_s32_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_s32_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s32_m_tied1, svint32_t,
+ z0 = svrevd_s32_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_s32_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s32_m_tied2, svint32_t,
+ z0 = svrevd_s32_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_s32_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s32_m_untied, svint32_t,
+ z0 = svrevd_s32_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation. Don't require specific output. */
+TEST_UNIFORM_Z (revd_s32_z_tied1, svint32_t,
+ z0 = svrevd_s32_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_s32_z_untied:
+** mov z0\.[bhsd], #0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s32_z_untied, svint32_t,
+ z0 = svrevd_s32_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_s32_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s32_x_tied1, svint32_t,
+ z0 = svrevd_s32_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_s32_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s32_x_untied, svint32_t,
+ z0 = svrevd_s32_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_s64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_s64.c
new file mode 100644
index 0000000..aa963d3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_s64.c
@@ -0,0 +1,76 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** revd_s64_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s64_m_tied12, svint64_t,
+ z0 = svrevd_s64_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_s64_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s64_m_tied1, svint64_t,
+ z0 = svrevd_s64_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_s64_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s64_m_tied2, svint64_t,
+ z0 = svrevd_s64_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_s64_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s64_m_untied, svint64_t,
+ z0 = svrevd_s64_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation. Don't require specific output. */
+TEST_UNIFORM_Z (revd_s64_z_tied1, svint64_t,
+ z0 = svrevd_s64_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_s64_z_untied:
+** mov z0\.[bhsd], #0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s64_z_untied, svint64_t,
+ z0 = svrevd_s64_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_s64_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s64_x_tied1, svint64_t,
+ z0 = svrevd_s64_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_s64_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s64_x_untied, svint64_t,
+ z0 = svrevd_s64_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_s8.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_s8.c
new file mode 100644
index 0000000..4291b71
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_s8.c
@@ -0,0 +1,76 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** revd_s8_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s8_m_tied12, svint8_t,
+ z0 = svrevd_s8_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_s8_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s8_m_tied1, svint8_t,
+ z0 = svrevd_s8_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_s8_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s8_m_tied2, svint8_t,
+ z0 = svrevd_s8_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_s8_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s8_m_untied, svint8_t,
+ z0 = svrevd_s8_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation. Don't require specific output. */
+TEST_UNIFORM_Z (revd_s8_z_tied1, svint8_t,
+ z0 = svrevd_s8_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_s8_z_untied:
+** mov z0\.[bhsd], #0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s8_z_untied, svint8_t,
+ z0 = svrevd_s8_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_s8_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s8_x_tied1, svint8_t,
+ z0 = svrevd_s8_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_s8_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s8_x_untied, svint8_t,
+ z0 = svrevd_s8_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_u16.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_u16.c
new file mode 100644
index 0000000..eaed0d1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_u16.c
@@ -0,0 +1,76 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** revd_u16_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u16_m_tied12, svuint16_t,
+ z0 = svrevd_u16_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_u16_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u16_m_tied1, svuint16_t,
+ z0 = svrevd_u16_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_u16_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u16_m_tied2, svuint16_t,
+ z0 = svrevd_u16_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_u16_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u16_m_untied, svuint16_t,
+ z0 = svrevd_u16_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation. Don't require specific output. */
+TEST_UNIFORM_Z (revd_u16_z_tied1, svuint16_t,
+ z0 = svrevd_u16_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_u16_z_untied:
+** mov z0\.[bhsd], #0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u16_z_untied, svuint16_t,
+ z0 = svrevd_u16_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_u16_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u16_x_tied1, svuint16_t,
+ z0 = svrevd_u16_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_u16_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u16_x_untied, svuint16_t,
+ z0 = svrevd_u16_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_u32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_u32.c
new file mode 100644
index 0000000..3b76c70
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_u32.c
@@ -0,0 +1,76 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** revd_u32_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u32_m_tied12, svuint32_t,
+ z0 = svrevd_u32_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_u32_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u32_m_tied1, svuint32_t,
+ z0 = svrevd_u32_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_u32_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u32_m_tied2, svuint32_t,
+ z0 = svrevd_u32_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_u32_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u32_m_untied, svuint32_t,
+ z0 = svrevd_u32_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation. Don't require specific output. */
+TEST_UNIFORM_Z (revd_u32_z_tied1, svuint32_t,
+ z0 = svrevd_u32_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_u32_z_untied:
+** mov z0\.[bhsd], #0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u32_z_untied, svuint32_t,
+ z0 = svrevd_u32_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_u32_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u32_x_tied1, svuint32_t,
+ z0 = svrevd_u32_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_u32_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u32_x_untied, svuint32_t,
+ z0 = svrevd_u32_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_u64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_u64.c
new file mode 100644
index 0000000..4589c46
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_u64.c
@@ -0,0 +1,76 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** revd_u64_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u64_m_tied12, svuint64_t,
+ z0 = svrevd_u64_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_u64_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u64_m_tied1, svuint64_t,
+ z0 = svrevd_u64_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_u64_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u64_m_tied2, svuint64_t,
+ z0 = svrevd_u64_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_u64_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u64_m_untied, svuint64_t,
+ z0 = svrevd_u64_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation. Don't require specific output. */
+TEST_UNIFORM_Z (revd_u64_z_tied1, svuint64_t,
+ z0 = svrevd_u64_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_u64_z_untied:
+** mov z0\.[bhsd], #0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u64_z_untied, svuint64_t,
+ z0 = svrevd_u64_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_u64_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u64_x_tied1, svuint64_t,
+ z0 = svrevd_u64_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_u64_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u64_x_untied, svuint64_t,
+ z0 = svrevd_u64_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_u8.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_u8.c
new file mode 100644
index 0000000..ac5d749
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/revd_u8.c
@@ -0,0 +1,76 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** revd_u8_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u8_m_tied12, svuint8_t,
+ z0 = svrevd_u8_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_u8_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u8_m_tied1, svuint8_t,
+ z0 = svrevd_u8_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_u8_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u8_m_tied2, svuint8_t,
+ z0 = svrevd_u8_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_u8_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u8_m_untied, svuint8_t,
+ z0 = svrevd_u8_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation. Don't require specific output. */
+TEST_UNIFORM_Z (revd_u8_z_tied1, svuint8_t,
+ z0 = svrevd_u8_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_u8_z_untied:
+** mov z0\.[bhsd], #0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u8_z_untied, svuint8_t,
+ z0 = svrevd_u8_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_u8_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u8_x_tied1, svuint8_t,
+ z0 = svrevd_u8_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_u8_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u8_x_untied, svuint8_t,
+ z0 = svrevd_u8_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za128.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za128.c
new file mode 100644
index 0000000..057b6f2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za128.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** st1_vnum_za128_0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** st1q { za0h\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za128_0_0_0,
+ svst1_hor_vnum_za128 (0, 0, p0, x1, 0),
+ svst1_hor_vnum_za128 (0, 0, p0, x1, 0))
+
+/*
+** st1_vnum_za128_7_1_0:
+** mov (w1[2-5]), #?1
+** st1q { za7h\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za128_7_1_0,
+ svst1_hor_vnum_za128 (7, 1, p0, x1, 0),
+ svst1_hor_vnum_za128 (7, 1, p0, x1, 0))
+
+/*
+** st1_vnum_za128_11_1_5:
+** incb x1, all, mul #5
+** mov (w1[2-5]), #?6
+** st1q { za11h\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za128_11_1_5,
+ svst1_hor_vnum_za128 (11, 1, p0, x1, 5),
+ svst1_hor_vnum_za128 (11, 1, p0, x1, 5))
+
+/*
+** st1_vnum_za128_3_w0_0:
+** mov (w1[2-5]), w0
+** st1q { za3h\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za128_3_w0_0,
+ svst1_hor_vnum_za128 (3, w0, p0, x1, 0),
+ svst1_hor_vnum_za128 (3, w0, p0, x1, 0))
+
+/*
+** st1_vnum_za128_5_w0_0:
+** incb x1, all, mul #13
+** add (w1[2-5]), w0, #?13
+** st1q { za5h\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za128_5_w0_0,
+ svst1_hor_vnum_za128 (5, w0, p0, x1, 13),
+ svst1_hor_vnum_za128 (5, w0, p0, x1, 13))
+
+/*
+** st1_vnum_za128_11_w0_0:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:\1, x2|x2, \1), x1
+** add (w1[2-5]), (?:w0, w2|w2, w0)
+** st1q { za11h\.q\[\3, 0\] }, p0, \[\2\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za128_11_w0_0,
+ svst1_hor_vnum_za128 (11, w0, p0, x1, x2),
+ svst1_hor_vnum_za128 (11, w0, p0, x1, x2))
+
+/*
+** st1_vnum_za128_15_w0p1_0:
+** add (w1[2-5]), w0, #?1
+** st1q { za15h\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za128_15_w0p1_0,
+ svst1_hor_vnum_za128 (15, w0 + 1, p0, x1, 0),
+ svst1_hor_vnum_za128 (15, w0 + 1, p0, x1, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za16.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za16.c
new file mode 100644
index 0000000..0b57dda
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za16.c
@@ -0,0 +1,123 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** st1_vnum_za16_1_0_1:
+** incb x1
+** mov (w1[2-5]), (?:wzr|#?0)
+** st1h { za1h\.h\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za16_1_0_1,
+ svst1_hor_vnum_za16 (1, 0, p0, x1, 1),
+ svst1_hor_vnum_za16 (1, 0, p0, x1, 1))
+
+/*
+** st1_vnum_za16_1_1_1:
+** incb x1
+** mov (w1[2-5]), #?1
+** st1h { za1h\.h\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za16_1_1_1,
+ svst1_hor_vnum_za16 (1, 1, p0, x1, 1),
+ svst1_hor_vnum_za16 (1, 1, p0, x1, 1))
+
+/*
+** st1_vnum_za16_0_0_8:
+** incb x1, all, mul #8
+** mov (w1[2-5]), #?8
+** st1h { za0h\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za16_0_0_8,
+ svst1_hor_vnum_za16 (0, 0, p0, x1, 8),
+ svst1_hor_vnum_za16 (0, 0, p0, x1, 8))
+
+/*
+** st1_vnum_za16_0_1_8:
+** incb x1, all, mul #8
+** mov (w1[2-5]), #?9
+** st1h { za0h\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za16_0_1_8,
+ svst1_hor_vnum_za16 (0, 1, p0, x1, 8),
+ svst1_hor_vnum_za16 (0, 1, p0, x1, 8))
+
+/*
+** st1_vnum_za16_0_w0_0:
+** mov (w1[2-5]), w0
+** st1h { za0h\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za16_0_w0_0,
+ svst1_hor_vnum_za16 (0, w0, p0, x1, 0),
+ svst1_hor_vnum_za16 (0, w0, p0, x1, 0))
+
+/*
+** st1_vnum_za16_0_w0_1:
+** incb x1
+** mov (w1[2-5]), w0
+** st1h { za0h\.h\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za16_0_w0_1,
+ svst1_hor_vnum_za16 (0, w0, p0, x1, 1),
+ svst1_hor_vnum_za16 (0, w0, p0, x1, 1))
+
+/*
+** st1_vnum_za16_0_w0_7:
+** incb x1, all, mul #7
+** mov (w1[2-5]), w0
+** st1h { za0h\.h\[\1, 7\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za16_0_w0_7,
+ svst1_hor_vnum_za16 (0, w0, p0, x1, 7),
+ svst1_hor_vnum_za16 (0, w0, p0, x1, 7))
+
+/*
+** st1_vnum_za16_1_w0_8:
+** incb x1, all, mul #8
+** add (w1[2-5]), w0, #?8
+** st1h { za1h\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za16_1_w0_8,
+ svst1_hor_vnum_za16 (1, w0, p0, x1, 8),
+ svst1_hor_vnum_za16 (1, w0, p0, x1, 8))
+
+/*
+** st1_vnum_za16_1_w0_13:
+** incb x1, all, mul #13
+** add (w1[2-5]), w0, #?13
+** st1h { za1h\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za16_1_w0_13,
+ svst1_hor_vnum_za16 (1, w0, p0, x1, 13),
+ svst1_hor_vnum_za16 (1, w0, p0, x1, 13))
+
+/*
+** st1_vnum_za16_0_w0_x2:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:\1, x2|x2, \1), x1
+** add (w1[2-5]), (?:w0, w2|w2, w0)
+** st1h { za0h\.h\[\3, 0\] }, p0, \[\2\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za16_0_w0_x2,
+ svst1_hor_vnum_za16 (0, w0, p0, x1, x2),
+ svst1_hor_vnum_za16 (0, w0, p0, x1, x2))
+
+/*
+** st1_vnum_za16_1_w0p1_0:
+** mov (w1[2-5]), w0
+** st1h { za1h\.h\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za16_1_w0p1_0,
+ svst1_hor_vnum_za16 (1, w0 + 1, p0, x1, 0),
+ svst1_hor_vnum_za16 (1, w0 + 1, p0, x1, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za32.c
new file mode 100644
index 0000000..d438118
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za32.c
@@ -0,0 +1,123 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** st1_vnum_za32_3_0_1:
+** incb x1
+** mov (w1[2-5]), (?:wzr|#?0)
+** st1w { za3h\.s\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za32_3_0_1,
+ svst1_hor_vnum_za32 (3, 0, p0, x1, 1),
+ svst1_hor_vnum_za32 (3, 0, p0, x1, 1))
+
+/*
+** st1_vnum_za32_2_1_1:
+** incb x1
+** mov (w1[2-5]), #?1
+** st1w { za2h\.s\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za32_2_1_1,
+ svst1_hor_vnum_za32 (2, 1, p0, x1, 1),
+ svst1_hor_vnum_za32 (2, 1, p0, x1, 1))
+
+/*
+** st1_vnum_za32_0_0_4:
+** incb x1, all, mul #4
+** mov (w1[2-5]), #?4
+** st1w { za0h\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za32_0_0_4,
+ svst1_hor_vnum_za32 (0, 0, p0, x1, 4),
+ svst1_hor_vnum_za32 (0, 0, p0, x1, 4))
+
+/*
+** st1_vnum_za32_2_1_4:
+** incb x1, all, mul #4
+** mov (w1[2-5]), #?5
+** st1w { za2h\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za32_2_1_4,
+ svst1_hor_vnum_za32 (2, 1, p0, x1, 4),
+ svst1_hor_vnum_za32 (2, 1, p0, x1, 4))
+
+/*
+** st1_vnum_za32_0_w0_0:
+** mov (w1[2-5]), w0
+** st1w { za0h\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za32_0_w0_0,
+ svst1_hor_vnum_za32 (0, w0, p0, x1, 0),
+ svst1_hor_vnum_za32 (0, w0, p0, x1, 0))
+
+/*
+** st1_vnum_za32_0_w0_1:
+** incb x1
+** mov (w1[2-5]), w0
+** st1w { za0h\.s\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za32_0_w0_1,
+ svst1_hor_vnum_za32 (0, w0, p0, x1, 1),
+ svst1_hor_vnum_za32 (0, w0, p0, x1, 1))
+
+/*
+** st1_vnum_za32_0_w0_3:
+** incb x1, all, mul #3
+** mov (w1[2-5]), w0
+** st1w { za0h\.s\[\1, 3\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za32_0_w0_3,
+ svst1_hor_vnum_za32 (0, w0, p0, x1, 3),
+ svst1_hor_vnum_za32 (0, w0, p0, x1, 3))
+
+/*
+** st1_vnum_za32_1_w0_4:
+** incb x1, all, mul #4
+** add (w1[2-5]), w0, #?4
+** st1w { za1h\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za32_1_w0_4,
+ svst1_hor_vnum_za32 (1, w0, p0, x1, 4),
+ svst1_hor_vnum_za32 (1, w0, p0, x1, 4))
+
+/*
+** st1_vnum_za32_3_w0_13:
+** incb x1, all, mul #13
+** add (w1[2-5]), w0, #?13
+** st1w { za3h\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za32_3_w0_13,
+ svst1_hor_vnum_za32 (3, w0, p0, x1, 13),
+ svst1_hor_vnum_za32 (3, w0, p0, x1, 13))
+
+/*
+** st1_vnum_za32_0_w0_x2:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:\1, x2|x2, \1), x1
+** add (w1[2-5]), (?:w0, w2|w2, w0)
+** st1w { za0h\.s\[\3, 0\] }, p0, \[\2\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za32_0_w0_x2,
+ svst1_hor_vnum_za32 (0, w0, p0, x1, x2),
+ svst1_hor_vnum_za32 (0, w0, p0, x1, x2))
+
+/*
+** st1_vnum_za32_1_w0p1_0:
+** mov (w1[2-5]), w0
+** st1w { za1h\.s\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za32_1_w0p1_0,
+ svst1_hor_vnum_za32 (1, w0 + 1, p0, x1, 0),
+ svst1_hor_vnum_za32 (1, w0 + 1, p0, x1, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za64.c
new file mode 100644
index 0000000..be60637
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za64.c
@@ -0,0 +1,112 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** st1_vnum_za64_3_0_1:
+** incb x1
+** mov (w1[2-5]), (?:wzr|#?0)
+** st1d { za3h\.d\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za64_3_0_1,
+ svst1_hor_vnum_za64 (3, 0, p0, x1, 1),
+ svst1_hor_vnum_za64 (3, 0, p0, x1, 1))
+
+/*
+** st1_vnum_za64_7_1_1:
+** incb x1
+** mov (w1[2-5]), #?1
+** st1d { za7h\.d\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za64_7_1_1,
+ svst1_hor_vnum_za64 (7, 1, p0, x1, 1),
+ svst1_hor_vnum_za64 (7, 1, p0, x1, 1))
+
+/*
+** st1_vnum_za64_0_0_2:
+** incb x1, all, mul #2
+** mov (w1[2-5]), #?2
+** st1d { za0h\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za64_0_0_2,
+ svst1_hor_vnum_za64 (0, 0, p0, x1, 2),
+ svst1_hor_vnum_za64 (0, 0, p0, x1, 2))
+
+/*
+** st1_vnum_za64_5_1_2:
+** incb x1, all, mul #2
+** mov (w1[2-5]), #?3
+** st1d { za5h\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za64_5_1_2,
+ svst1_hor_vnum_za64 (5, 1, p0, x1, 2),
+ svst1_hor_vnum_za64 (5, 1, p0, x1, 2))
+
+/*
+** st1_vnum_za64_0_w0_0:
+** mov (w1[2-5]), w0
+** st1d { za0h\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za64_0_w0_0,
+ svst1_hor_vnum_za64 (0, w0, p0, x1, 0),
+ svst1_hor_vnum_za64 (0, w0, p0, x1, 0))
+
+/*
+** st1_vnum_za64_0_w0_1:
+** incb x1
+** mov (w1[2-5]), w0
+** st1d { za0h\.d\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za64_0_w0_1,
+ svst1_hor_vnum_za64 (0, w0, p0, x1, 1),
+ svst1_hor_vnum_za64 (0, w0, p0, x1, 1))
+
+/*
+** st1_vnum_za64_6_w0_2:
+** incb x1, all, mul #2
+** add (w1[2-5]), w0, #?2
+** st1d { za6h\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za64_6_w0_2,
+ svst1_hor_vnum_za64 (6, w0, p0, x1, 2),
+ svst1_hor_vnum_za64 (6, w0, p0, x1, 2))
+
+/*
+** st1_vnum_za64_2_w0_13:
+** incb x1, all, mul #13
+** add (w1[2-5]), w0, #?13
+** st1d { za2h\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za64_2_w0_13,
+ svst1_hor_vnum_za64 (2, w0, p0, x1, 13),
+ svst1_hor_vnum_za64 (2, w0, p0, x1, 13))
+
+/*
+** st1_vnum_za64_4_w0_x2:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:\1, x2|x2, \1), x1
+** add (w1[2-5]), (?:w0, w2|w2, w0)
+** st1d { za4h\.d\[\3, 0\] }, p0, \[\2\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za64_4_w0_x2,
+ svst1_hor_vnum_za64 (4, w0, p0, x1, x2),
+ svst1_hor_vnum_za64 (4, w0, p0, x1, x2))
+
+/*
+** st1_vnum_za64_1_w0p1_0:
+** mov (w1[2-5]), w0
+** st1d { za1h\.d\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za64_1_w0p1_0,
+ svst1_hor_vnum_za64 (1, w0 + 1, p0, x1, 0),
+ svst1_hor_vnum_za64 (1, w0 + 1, p0, x1, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za8.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za8.c
new file mode 100644
index 0000000..eed41d2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_vnum_za8.c
@@ -0,0 +1,112 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** st1_vnum_za8_0_0_1:
+** incb x1
+** mov (w1[2-5]), (?:wzr|#?0)
+** st1b { za0h\.b\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za8_0_0_1,
+ svst1_hor_vnum_za8 (0, 0, p0, x1, 1),
+ svst1_hor_vnum_za8 (0, 0, p0, x1, 1))
+
+/*
+** st1_vnum_za8_0_1_1:
+** incb x1
+** mov (w1[2-5]), #?1
+** st1b { za0h\.b\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za8_0_1_1,
+ svst1_hor_vnum_za8 (0, 1, p0, x1, 1),
+ svst1_hor_vnum_za8 (0, 1, p0, x1, 1))
+
+/*
+** st1_vnum_za8_0_0_16:
+** incb x1, all, mul #16
+** mov (w1[2-5]), #?16
+** st1b { za0h\.b\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za8_0_0_16,
+ svst1_hor_vnum_za8 (0, 0, p0, x1, 16),
+ svst1_hor_vnum_za8 (0, 0, p0, x1, 16))
+
+/*
+** st1_vnum_za8_0_1_16:
+** incb x1, all, mul #16
+** mov (w1[2-5]), #?17
+** st1b { za0h\.b\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za8_0_1_16,
+ svst1_hor_vnum_za8 (0, 1, p0, x1, 16),
+ svst1_hor_vnum_za8 (0, 1, p0, x1, 16))
+
+/*
+** st1_vnum_za8_0_w0_0:
+** mov (w1[2-5]), w0
+** st1b { za0h\.b\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za8_0_w0_0,
+ svst1_hor_vnum_za8 (0, w0, p0, x1, 0),
+ svst1_hor_vnum_za8 (0, w0, p0, x1, 0))
+
+/*
+** st1_vnum_za8_0_w0_1:
+** incb x1
+** mov (w1[2-5]), w0
+** st1b { za0h\.b\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za8_0_w0_1,
+ svst1_hor_vnum_za8 (0, w0, p0, x1, 1),
+ svst1_hor_vnum_za8 (0, w0, p0, x1, 1))
+
+/*
+** st1_vnum_za8_0_w0_15:
+** incb x1, all, mul #15
+** mov (w1[2-5]), w0
+** st1b { za0h\.b\[\1, 15\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za8_0_w0_15,
+ svst1_hor_vnum_za8 (0, w0, p0, x1, 15),
+ svst1_hor_vnum_za8 (0, w0, p0, x1, 15))
+
+/*
+** st1_vnum_za8_0_w0_16:
+** incb x1, all, mul #16
+** add (w1[2-5]), w0, #?16
+** st1b { za0h\.b\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za8_0_w0_16,
+ svst1_hor_vnum_za8 (0, w0, p0, x1, 16),
+ svst1_hor_vnum_za8 (0, w0, p0, x1, 16))
+
+/*
+** st1_vnum_za8_0_w0_x2:
+** cntb (x[0-9]+)
+** mul (x[0-9]+), (?:\1, x2|x2, \1)
+** add (w1[2-5]), (?:w0, w2|w2, w0)
+** st1b { za0h\.b\[\3, 0\] }, p0, \[x1, \2\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za8_0_w0_x2,
+ svst1_hor_vnum_za8 (0, w0, p0, x1, x2),
+ svst1_hor_vnum_za8 (0, w0, p0, x1, x2))
+
+/*
+** st1_vnum_za8_0_w0p1_0:
+** mov (w1[2-5]), w0
+** st1b { za0h\.b\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za8_0_w0p1_0,
+ svst1_hor_vnum_za8 (0, w0 + 1, p0, x1, 0),
+ svst1_hor_vnum_za8 (0, w0 + 1, p0, x1, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_za128.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_za128.c
new file mode 100644
index 0000000..5f3d613
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_za128.c
@@ -0,0 +1,83 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** st1_za128_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** st1q { za0h\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za128_0_0,
+ svst1_hor_za128 (0, 0, p0, x1),
+ svst1_hor_za128 (0, 0, p0, x1))
+
+/*
+** st1_za128_0_1:
+** mov (w1[2-5]), #?1
+** st1q { za0h\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za128_0_1,
+ svst1_hor_za128 (0, 1, p0, x1),
+ svst1_hor_za128 (0, 1, p0, x1))
+
+/*
+** st1_za128_0_w0:
+** mov (w1[2-5]), w0
+** st1q { za0h\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za128_0_w0,
+ svst1_hor_za128 (0, w0, p0, x1),
+ svst1_hor_za128 (0, w0, p0, x1))
+
+/*
+** st1_za128_0_w0_p1:
+** add (w1[2-5]), w0, #?1
+** st1q { za0h\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za128_0_w0_p1,
+ svst1_hor_za128 (0, w0 + 1, p0, x1),
+ svst1_hor_za128 (0, w0 + 1, p0, x1))
+
+/*
+** st1_za128_7_w0:
+** mov (w1[2-5]), w0
+** st1q { za7h\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za128_7_w0,
+ svst1_hor_za128 (7, w0, p0, x1),
+ svst1_hor_za128 (7, w0, p0, x1))
+
+/*
+** st1_za128_13_w0:
+** mov (w1[2-5]), w0
+** st1q { za13h\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za128_13_w0,
+ svst1_hor_za128 (13, w0, p0, x1),
+ svst1_hor_za128 (13, w0, p0, x1))
+
+/*
+** st1_za128_15_w0:
+** mov (w1[2-5]), w0
+** st1q { za15h\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za128_15_w0,
+ svst1_hor_za128 (15, w0, p0, x1),
+ svst1_hor_za128 (15, w0, p0, x1))
+
+/*
+** st1_za128_9_w0_index:
+** mov (w1[2-5]), w0
+** st1q { za9h\.q\[\1, 0\] }, p0, \[x1, x2, lsl #?4\]
+** ret
+*/
+TEST_STORE_ZA (st1_za128_9_w0_index,
+ svst1_hor_za128 (9, w0, p0, x1 + x2 * 16),
+ svst1_hor_za128 (9, w0, p0, x1 + x2 * 16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_za16.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_za16.c
new file mode 100644
index 0000000..206306b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_za16.c
@@ -0,0 +1,126 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** st1_za16_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** st1h { za0h\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_0_0,
+ svst1_hor_za16 (0, 0, p0, x1),
+ svst1_hor_za16 (0, 0, p0, x1))
+
+/* It would also be OK (and perhaps better) to move 0 into a register
+ and use an offset of 7. */
+/*
+** st1_za16_0_7:
+** mov (w1[2-5]), #?7
+** st1h { za0h\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_0_7,
+ svst1_hor_za16 (0, 7, p0, x1),
+ svst1_hor_za16 (0, 7, p0, x1))
+
+/*
+** st1_za16_0_8:
+** mov (w1[2-5]), #?8
+** st1h { za0h\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_0_8,
+ svst1_hor_za16 (0, 8, p0, x1),
+ svst1_hor_za16 (0, 8, p0, x1))
+
+/*
+** st1_za16_0_w0:
+** mov (w1[2-5]), w0
+** st1h { za0h\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_0_w0,
+ svst1_hor_za16 (0, w0, p0, x1),
+ svst1_hor_za16 (0, w0, p0, x1))
+
+/*
+** st1_za16_0_w0_p1:
+** mov (w1[2-5]), w0
+** st1h { za0h\.h\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_0_w0_p1,
+ svst1_hor_za16 (0, w0 + 1, p0, x1),
+ svst1_hor_za16 (0, w0 + 1, p0, x1))
+
+/*
+** st1_za16_0_w0_p7:
+** mov (w1[2-5]), w0
+** st1h { za0h\.h\[\1, 7\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_0_w0_p7,
+ svst1_hor_za16 (0, w0 + 7, p0, x1),
+ svst1_hor_za16 (0, w0 + 7, p0, x1))
+
+/*
+** st1_za16_1_w0:
+** mov (w1[2-5]), w0
+** st1h { za1h\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_1_w0,
+ svst1_hor_za16 (1, w0, p0, x1),
+ svst1_hor_za16 (1, w0, p0, x1))
+
+
+/*
+** st1_za16_1_w0_p1:
+** mov (w1[2-5]), w0
+** st1h { za1h\.h\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_1_w0_p1,
+ svst1_hor_za16 (1, w0 + 1, p0, x1),
+ svst1_hor_za16 (1, w0 + 1, p0, x1))
+
+/*
+** st1_za16_1_w0_p7:
+** mov (w1[2-5]), w0
+** st1h { za1h\.h\[\1, 7\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_1_w0_p7,
+ svst1_hor_za16 (1, w0 + 7, p0, x1),
+ svst1_hor_za16 (1, w0 + 7, p0, x1))
+
+/*
+** st1_za16_1_w0_p5_index:
+** mov (w1[2-5]), w0
+** st1h { za1h\.h\[\1, 5\] }, p0, \[x1, x2, lsl #?1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_1_w0_p5_index,
+ svst1_hor_za16 (1, w0 + 5, p0, x1 + x2 * 2),
+ svst1_hor_za16 (1, w0 + 5, p0, x1 + x2 * 2))
+
+/*
+** st1_za16_0_w0_p8:
+** add (w1[2-5]), w0, #?8
+** st1h { za0h\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_0_w0_p8,
+ svst1_hor_za16 (0, w0 + 8, p0, x1),
+ svst1_hor_za16 (0, w0 + 8, p0, x1))
+
+/*
+** st1_za16_0_w0_m1:
+** sub (w1[2-5]), w0, #?1
+** st1h { za0h\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_0_w0_m1,
+ svst1_hor_za16 (0, w0 - 1, p0, x1),
+ svst1_hor_za16 (0, w0 - 1, p0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_za32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_za32.c
new file mode 100644
index 0000000..ed9b2b2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_za32.c
@@ -0,0 +1,125 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** st1_za32_0_0:
+** mov (w1[2-5]), (?:w0|#?0)
+** st1w { za0h\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_0_0,
+ svst1_hor_za32 (0, 0, p0, x1),
+ svst1_hor_za32 (0, 0, p0, x1))
+
+/* It would also be OK (and perhaps better) to move 0 into a register
+ and use an offset of 3. */
+/*
+** st1_za32_0_3:
+** mov (w1[2-5]), #?3
+** st1w { za0h\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_0_3,
+ svst1_hor_za32 (0, 3, p0, x1),
+ svst1_hor_za32 (0, 3, p0, x1))
+
+/*
+** st1_za32_0_4:
+** mov (w1[2-5]), #?4
+** st1w { za0h\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_0_4,
+ svst1_hor_za32 (0, 4, p0, x1),
+ svst1_hor_za32 (0, 4, p0, x1))
+
+/*
+** st1_za32_0_w0:
+** mov (w1[2-5]), w0
+** st1w { za0h\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_0_w0,
+ svst1_hor_za32 (0, w0, p0, x1),
+ svst1_hor_za32 (0, w0, p0, x1))
+
+/*
+** st1_za32_0_w0_p1:
+** mov (w1[2-5]), w0
+** st1w { za0h\.s\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_0_w0_p1,
+ svst1_hor_za32 (0, w0 + 1, p0, x1),
+ svst1_hor_za32 (0, w0 + 1, p0, x1))
+
+/*
+** st1_za32_0_w0_p3:
+** mov (w1[2-5]), w0
+** st1w { za0h\.s\[\1, 3\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_0_w0_p3,
+ svst1_hor_za32 (0, w0 + 3, p0, x1),
+ svst1_hor_za32 (0, w0 + 3, p0, x1))
+
+/*
+** st1_za32_3_w0:
+** mov (w1[2-5]), w0
+** st1w { za3h\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_3_w0,
+ svst1_hor_za32 (3, w0, p0, x1),
+ svst1_hor_za32 (3, w0, p0, x1))
+
+/*
+** st1_za32_3_w0_p1:
+** mov (w1[2-5]), w0
+** st1w { za3h\.s\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_3_w0_p1,
+ svst1_hor_za32 (3, w0 + 1, p0, x1),
+ svst1_hor_za32 (3, w0 + 1, p0, x1))
+
+/*
+** st1_za32_3_w0_p3:
+** mov (w1[2-5]), w0
+** st1w { za3h\.s\[\1, 3\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_3_w0_p3,
+ svst1_hor_za32 (3, w0 + 3, p0, x1),
+ svst1_hor_za32 (3, w0 + 3, p0, x1))
+
+/*
+** st1_za32_1_w0_p2_index:
+** mov (w1[2-5]), w0
+** st1w { za1h\.s\[\1, 2\] }, p0, \[x1, x2, lsl #?2\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_1_w0_p2_index,
+ svst1_hor_za32 (1, w0 + 2, p0, x1 + x2 * 4),
+ svst1_hor_za32 (1, w0 + 2, p0, x1 + x2 * 4))
+
+/*
+** st1_za32_0_w0_p4:
+** add (w1[2-5]), w0, #?4
+** st1w { za0h\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_0_w0_p4,
+ svst1_hor_za32 (0, w0 + 4, p0, x1),
+ svst1_hor_za32 (0, w0 + 4, p0, x1))
+
+/*
+** st1_za32_0_w0_m1:
+** sub (w1[2-5]), w0, #?1
+** st1w { za0h\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_0_w0_m1,
+ svst1_hor_za32 (0, w0 - 1, p0, x1),
+ svst1_hor_za32 (0, w0 - 1, p0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_za64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_za64.c
new file mode 100644
index 0000000..3600f5b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_za64.c
@@ -0,0 +1,105 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** st1_za64_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** st1d { za0h\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za64_0_0,
+ svst1_hor_za64 (0, 0, p0, x1),
+ svst1_hor_za64 (0, 0, p0, x1))
+
+/* It would also be OK (and perhaps better) to move 0 into a register
+ and use an offset of 1. */
+/*
+** st1_za64_0_1:
+** mov (w1[2-5]), #?1
+** st1d { za0h\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za64_0_1,
+ svst1_hor_za64 (0, 1, p0, x1),
+ svst1_hor_za64 (0, 1, p0, x1))
+
+/*
+** st1_za64_0_2:
+** mov (w1[2-5]), #?2
+** st1d { za0h\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za64_0_2,
+ svst1_hor_za64 (0, 2, p0, x1),
+ svst1_hor_za64 (0, 2, p0, x1))
+
+/*
+** st1_za64_0_w0:
+** mov (w1[2-5]), w0
+** st1d { za0h\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za64_0_w0,
+ svst1_hor_za64 (0, w0, p0, x1),
+ svst1_hor_za64 (0, w0, p0, x1))
+
+/*
+** st1_za64_0_w0_p1:
+** mov (w1[2-5]), w0
+** st1d { za0h\.d\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za64_0_w0_p1,
+ svst1_hor_za64 (0, w0 + 1, p0, x1),
+ svst1_hor_za64 (0, w0 + 1, p0, x1))
+
+/*
+** st1_za64_7_w0:
+** mov (w1[2-5]), w0
+** st1d { za7h\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za64_7_w0,
+ svst1_hor_za64 (7, w0, p0, x1),
+ svst1_hor_za64 (7, w0, p0, x1))
+
+/*
+** st1_za64_7_w0_p1:
+** mov (w1[2-5]), w0
+** st1d { za7h\.d\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za64_7_w0_p1,
+ svst1_hor_za64 (7, w0 + 1, p0, x1),
+ svst1_hor_za64 (7, w0 + 1, p0, x1))
+
+/*
+** st1_za64_5_w0_p1_index:
+** mov (w1[2-5]), w0
+** st1d { za5h\.d\[\1, 1\] }, p0, \[x1, x2, lsl #?3\]
+** ret
+*/
+TEST_STORE_ZA (st1_za64_5_w0_p1_index,
+ svst1_hor_za64 (5, w0 + 1, p0, x1 + x2 * 8),
+ svst1_hor_za64 (5, w0 + 1, p0, x1 + x2 * 8))
+
+/*
+** st1_za64_0_w0_p2:
+** add (w1[2-5]), w0, #?2
+** st1d { za0h\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za64_0_w0_p2,
+ svst1_hor_za64 (0, w0 + 2, p0, x1),
+ svst1_hor_za64 (0, w0 + 2, p0, x1))
+
+/*
+** st1_za64_0_w0_m1:
+** sub (w1[2-5]), w0, #?1
+** st1d { za0h\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za64_0_w0_m1,
+ svst1_hor_za64 (0, w0 - 1, p0, x1),
+ svst1_hor_za64 (0, w0 - 1, p0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_za8.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_za8.c
new file mode 100644
index 0000000..9026fae
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_hor_za8.c
@@ -0,0 +1,95 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** st1_za8_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** st1b { za0h\.b\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za8_0_0,
+ svst1_hor_za8 (0, 0, p0, x1),
+ svst1_hor_za8 (0, 0, p0, x1))
+
+/* It would also be OK (and perhaps better) to move 0 into a register
+ and use an offset of 15. */
+/*
+** st1_za8_0_15:
+** mov (w1[2-5]), #?15
+** st1b { za0h\.b\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za8_0_15,
+ svst1_hor_za8 (0, 15, p0, x1),
+ svst1_hor_za8 (0, 15, p0, x1))
+
+/*
+** st1_za8_0_16:
+** mov (w1[2-5]), #?16
+** st1b { za0h\.b\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za8_0_16,
+ svst1_hor_za8 (0, 16, p0, x1),
+ svst1_hor_za8 (0, 16, p0, x1))
+
+/*
+** st1_za8_0_w0:
+** mov (w1[2-5]), w0
+** st1b { za0h\.b\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za8_0_w0,
+ svst1_hor_za8 (0, w0, p0, x1),
+ svst1_hor_za8 (0, w0, p0, x1))
+
+/*
+** st1_za8_0_w0_p1:
+** mov (w1[2-5]), w0
+** st1b { za0h\.b\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za8_0_w0_p1,
+ svst1_hor_za8 (0, w0 + 1, p0, x1),
+ svst1_hor_za8 (0, w0 + 1, p0, x1))
+
+/*
+** st1_za8_0_w0_p15:
+** mov (w1[2-5]), w0
+** st1b { za0h\.b\[\1, 15\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za8_0_w0_p15,
+ svst1_hor_za8 (0, w0 + 15, p0, x1),
+ svst1_hor_za8 (0, w0 + 15, p0, x1))
+
+/*
+** st1_za8_0_w0_p13_index:
+** mov (w1[2-5]), w0
+** st1b { za0h\.b\[\1, 15\] }, p0, \[x1, x2\]
+** ret
+*/
+TEST_STORE_ZA (st1_za8_0_w0_p13_index,
+ svst1_hor_za8 (0, w0 + 15, p0, x1 + x2),
+ svst1_hor_za8 (0, w0 + 15, p0, x1 + x2))
+
+/*
+** st1_za8_0_w0_p16:
+** add (w1[2-5]), w0, #?16
+** st1b { za0h\.b\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za8_0_w0_p16,
+ svst1_hor_za8 (0, w0 + 16, p0, x1),
+ svst1_hor_za8 (0, w0 + 16, p0, x1))
+
+/*
+** st1_za8_0_w0_m1:
+** sub (w1[2-5]), w0, #?1
+** st1b { za0h\.b\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za8_0_w0_m1,
+ svst1_hor_za8 (0, w0 - 1, p0, x1),
+ svst1_hor_za8 (0, w0 - 1, p0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za128.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za128.c
new file mode 100644
index 0000000..210687a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za128.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** st1_vnum_za128_0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** st1q { za0v\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za128_0_0_0,
+ svst1_ver_vnum_za128 (0, 0, p0, x1, 0),
+ svst1_ver_vnum_za128 (0, 0, p0, x1, 0))
+
+/*
+** st1_vnum_za128_7_1_0:
+** mov (w1[2-5]), #?1
+** st1q { za7v\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za128_7_1_0,
+ svst1_ver_vnum_za128 (7, 1, p0, x1, 0),
+ svst1_ver_vnum_za128 (7, 1, p0, x1, 0))
+
+/*
+** st1_vnum_za128_11_1_5:
+** incb x1, all, mul #5
+** mov (w1[2-5]), #?6
+** st1q { za11v\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za128_11_1_5,
+ svst1_ver_vnum_za128 (11, 1, p0, x1, 5),
+ svst1_ver_vnum_za128 (11, 1, p0, x1, 5))
+
+/*
+** st1_vnum_za128_3_w0_0:
+** mov (w1[2-5]), w0
+** st1q { za3v\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za128_3_w0_0,
+ svst1_ver_vnum_za128 (3, w0, p0, x1, 0),
+ svst1_ver_vnum_za128 (3, w0, p0, x1, 0))
+
+/*
+** st1_vnum_za128_5_w0_0:
+** incb x1, all, mul #13
+** add (w1[2-5]), w0, #?13
+** st1q { za5v\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za128_5_w0_0,
+ svst1_ver_vnum_za128 (5, w0, p0, x1, 13),
+ svst1_ver_vnum_za128 (5, w0, p0, x1, 13))
+
+/*
+** st1_vnum_za128_11_w0_0:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:\1, x2|x2, \1), x1
+** add (w1[2-5]), (?:w0, w2|w2, w0)
+** st1q { za11v\.q\[\3, 0\] }, p0, \[\2\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za128_11_w0_0,
+ svst1_ver_vnum_za128 (11, w0, p0, x1, x2),
+ svst1_ver_vnum_za128 (11, w0, p0, x1, x2))
+
+/*
+** st1_vnum_za128_15_w0p1_0:
+** add (w1[2-5]), w0, #?1
+** st1q { za15v\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za128_15_w0p1_0,
+ svst1_ver_vnum_za128 (15, w0 + 1, p0, x1, 0),
+ svst1_ver_vnum_za128 (15, w0 + 1, p0, x1, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za16.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za16.c
new file mode 100644
index 0000000..f75a224
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za16.c
@@ -0,0 +1,123 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** st1_vnum_za16_1_0_1:
+** incb x1
+** mov (w1[2-5]), (?:wzr|#?0)
+** st1h { za1v\.h\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za16_1_0_1,
+ svst1_ver_vnum_za16 (1, 0, p0, x1, 1),
+ svst1_ver_vnum_za16 (1, 0, p0, x1, 1))
+
+/*
+** st1_vnum_za16_1_1_1:
+** incb x1
+** mov (w1[2-5]), #?1
+** st1h { za1v\.h\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za16_1_1_1,
+ svst1_ver_vnum_za16 (1, 1, p0, x1, 1),
+ svst1_ver_vnum_za16 (1, 1, p0, x1, 1))
+
+/*
+** st1_vnum_za16_0_0_8:
+** incb x1, all, mul #8
+** mov (w1[2-5]), #?8
+** st1h { za0v\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za16_0_0_8,
+ svst1_ver_vnum_za16 (0, 0, p0, x1, 8),
+ svst1_ver_vnum_za16 (0, 0, p0, x1, 8))
+
+/*
+** st1_vnum_za16_0_1_8:
+** incb x1, all, mul #8
+** mov (w1[2-5]), #?9
+** st1h { za0v\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za16_0_1_8,
+ svst1_ver_vnum_za16 (0, 1, p0, x1, 8),
+ svst1_ver_vnum_za16 (0, 1, p0, x1, 8))
+
+/*
+** st1_vnum_za16_0_w0_0:
+** mov (w1[2-5]), w0
+** st1h { za0v\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za16_0_w0_0,
+ svst1_ver_vnum_za16 (0, w0, p0, x1, 0),
+ svst1_ver_vnum_za16 (0, w0, p0, x1, 0))
+
+/*
+** st1_vnum_za16_0_w0_1:
+** incb x1
+** mov (w1[2-5]), w0
+** st1h { za0v\.h\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za16_0_w0_1,
+ svst1_ver_vnum_za16 (0, w0, p0, x1, 1),
+ svst1_ver_vnum_za16 (0, w0, p0, x1, 1))
+
+/*
+** st1_vnum_za16_0_w0_7:
+** incb x1, all, mul #7
+** mov (w1[2-5]), w0
+** st1h { za0v\.h\[\1, 7\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za16_0_w0_7,
+ svst1_ver_vnum_za16 (0, w0, p0, x1, 7),
+ svst1_ver_vnum_za16 (0, w0, p0, x1, 7))
+
+/*
+** st1_vnum_za16_1_w0_8:
+** incb x1, all, mul #8
+** add (w1[2-5]), w0, #?8
+** st1h { za1v\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za16_1_w0_8,
+ svst1_ver_vnum_za16 (1, w0, p0, x1, 8),
+ svst1_ver_vnum_za16 (1, w0, p0, x1, 8))
+
+/*
+** st1_vnum_za16_1_w0_13:
+** incb x1, all, mul #13
+** add (w1[2-5]), w0, #?13
+** st1h { za1v\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za16_1_w0_13,
+ svst1_ver_vnum_za16 (1, w0, p0, x1, 13),
+ svst1_ver_vnum_za16 (1, w0, p0, x1, 13))
+
+/*
+** st1_vnum_za16_0_w0_x2:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:\1, x2|x2, \1), x1
+** add (w1[2-5]), (?:w0, w2|w2, w0)
+** st1h { za0v\.h\[\3, 0\] }, p0, \[\2\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za16_0_w0_x2,
+ svst1_ver_vnum_za16 (0, w0, p0, x1, x2),
+ svst1_ver_vnum_za16 (0, w0, p0, x1, x2))
+
+/*
+** st1_vnum_za16_1_w0p1_0:
+** mov (w1[2-5]), w0
+** st1h { za1v\.h\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za16_1_w0p1_0,
+ svst1_ver_vnum_za16 (1, w0 + 1, p0, x1, 0),
+ svst1_ver_vnum_za16 (1, w0 + 1, p0, x1, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za32.c
new file mode 100644
index 0000000..45db67a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za32.c
@@ -0,0 +1,123 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** st1_vnum_za32_3_0_1:
+** incb x1
+** mov (w1[2-5]), (?:wzr|#?0)
+** st1w { za3v\.s\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za32_3_0_1,
+ svst1_ver_vnum_za32 (3, 0, p0, x1, 1),
+ svst1_ver_vnum_za32 (3, 0, p0, x1, 1))
+
+/*
+** st1_vnum_za32_2_1_1:
+** incb x1
+** mov (w1[2-5]), #?1
+** st1w { za2v\.s\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za32_2_1_1,
+ svst1_ver_vnum_za32 (2, 1, p0, x1, 1),
+ svst1_ver_vnum_za32 (2, 1, p0, x1, 1))
+
+/*
+** st1_vnum_za32_0_0_4:
+** incb x1, all, mul #4
+** mov (w1[2-5]), #?4
+** st1w { za0v\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za32_0_0_4,
+ svst1_ver_vnum_za32 (0, 0, p0, x1, 4),
+ svst1_ver_vnum_za32 (0, 0, p0, x1, 4))
+
+/*
+** st1_vnum_za32_2_1_4:
+** incb x1, all, mul #4
+** mov (w1[2-5]), #?5
+** st1w { za2v\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za32_2_1_4,
+ svst1_ver_vnum_za32 (2, 1, p0, x1, 4),
+ svst1_ver_vnum_za32 (2, 1, p0, x1, 4))
+
+/*
+** st1_vnum_za32_0_w0_0:
+** mov (w1[2-5]), w0
+** st1w { za0v\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za32_0_w0_0,
+ svst1_ver_vnum_za32 (0, w0, p0, x1, 0),
+ svst1_ver_vnum_za32 (0, w0, p0, x1, 0))
+
+/*
+** st1_vnum_za32_0_w0_1:
+** incb x1
+** mov (w1[2-5]), w0
+** st1w { za0v\.s\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za32_0_w0_1,
+ svst1_ver_vnum_za32 (0, w0, p0, x1, 1),
+ svst1_ver_vnum_za32 (0, w0, p0, x1, 1))
+
+/*
+** st1_vnum_za32_0_w0_3:
+** incb x1, all, mul #3
+** mov (w1[2-5]), w0
+** st1w { za0v\.s\[\1, 3\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za32_0_w0_3,
+ svst1_ver_vnum_za32 (0, w0, p0, x1, 3),
+ svst1_ver_vnum_za32 (0, w0, p0, x1, 3))
+
+/*
+** st1_vnum_za32_1_w0_4:
+** incb x1, all, mul #4
+** add (w1[2-5]), w0, #?4
+** st1w { za1v\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za32_1_w0_4,
+ svst1_ver_vnum_za32 (1, w0, p0, x1, 4),
+ svst1_ver_vnum_za32 (1, w0, p0, x1, 4))
+
+/*
+** st1_vnum_za32_3_w0_13:
+** incb x1, all, mul #13
+** add (w1[2-5]), w0, #?13
+** st1w { za3v\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za32_3_w0_13,
+ svst1_ver_vnum_za32 (3, w0, p0, x1, 13),
+ svst1_ver_vnum_za32 (3, w0, p0, x1, 13))
+
+/*
+** st1_vnum_za32_0_w0_x2:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:\1, x2|x2, \1), x1
+** add (w1[2-5]), (?:w0, w2|w2, w0)
+** st1w { za0v\.s\[\3, 0\] }, p0, \[\2\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za32_0_w0_x2,
+ svst1_ver_vnum_za32 (0, w0, p0, x1, x2),
+ svst1_ver_vnum_za32 (0, w0, p0, x1, x2))
+
+/*
+** st1_vnum_za32_1_w0p1_0:
+** mov (w1[2-5]), w0
+** st1w { za1v\.s\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za32_1_w0p1_0,
+ svst1_ver_vnum_za32 (1, w0 + 1, p0, x1, 0),
+ svst1_ver_vnum_za32 (1, w0 + 1, p0, x1, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za64.c
new file mode 100644
index 0000000..bd061fc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za64.c
@@ -0,0 +1,112 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** st1_vnum_za64_3_0_1:
+** incb x1
+** mov (w1[2-5]), (?:wzr|#?0)
+** st1d { za3v\.d\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za64_3_0_1,
+ svst1_ver_vnum_za64 (3, 0, p0, x1, 1),
+ svst1_ver_vnum_za64 (3, 0, p0, x1, 1))
+
+/*
+** st1_vnum_za64_7_1_1:
+** incb x1
+** mov (w1[2-5]), #?1
+** st1d { za7v\.d\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za64_7_1_1,
+ svst1_ver_vnum_za64 (7, 1, p0, x1, 1),
+ svst1_ver_vnum_za64 (7, 1, p0, x1, 1))
+
+/*
+** st1_vnum_za64_0_0_2:
+** incb x1, all, mul #2
+** mov (w1[2-5]), #?2
+** st1d { za0v\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za64_0_0_2,
+ svst1_ver_vnum_za64 (0, 0, p0, x1, 2),
+ svst1_ver_vnum_za64 (0, 0, p0, x1, 2))
+
+/*
+** st1_vnum_za64_5_1_2:
+** incb x1, all, mul #2
+** mov (w1[2-5]), #?3
+** st1d { za5v\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za64_5_1_2,
+ svst1_ver_vnum_za64 (5, 1, p0, x1, 2),
+ svst1_ver_vnum_za64 (5, 1, p0, x1, 2))
+
+/*
+** st1_vnum_za64_0_w0_0:
+** mov (w1[2-5]), w0
+** st1d { za0v\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za64_0_w0_0,
+ svst1_ver_vnum_za64 (0, w0, p0, x1, 0),
+ svst1_ver_vnum_za64 (0, w0, p0, x1, 0))
+
+/*
+** st1_vnum_za64_0_w0_1:
+** incb x1
+** mov (w1[2-5]), w0
+** st1d { za0v\.d\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za64_0_w0_1,
+ svst1_ver_vnum_za64 (0, w0, p0, x1, 1),
+ svst1_ver_vnum_za64 (0, w0, p0, x1, 1))
+
+/*
+** st1_vnum_za64_6_w0_2:
+** incb x1, all, mul #2
+** add (w1[2-5]), w0, #?2
+** st1d { za6v\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za64_6_w0_2,
+ svst1_ver_vnum_za64 (6, w0, p0, x1, 2),
+ svst1_ver_vnum_za64 (6, w0, p0, x1, 2))
+
+/*
+** st1_vnum_za64_2_w0_13:
+** incb x1, all, mul #13
+** add (w1[2-5]), w0, #?13
+** st1d { za2v\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za64_2_w0_13,
+ svst1_ver_vnum_za64 (2, w0, p0, x1, 13),
+ svst1_ver_vnum_za64 (2, w0, p0, x1, 13))
+
+/*
+** st1_vnum_za64_4_w0_x2:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:\1, x2|x2, \1), x1
+** add (w1[2-5]), (?:w0, w2|w2, w0)
+** st1d { za4v\.d\[\3, 0\] }, p0, \[\2\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za64_4_w0_x2,
+ svst1_ver_vnum_za64 (4, w0, p0, x1, x2),
+ svst1_ver_vnum_za64 (4, w0, p0, x1, x2))
+
+/*
+** st1_vnum_za64_1_w0p1_0:
+** mov (w1[2-5]), w0
+** st1d { za1v\.d\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za64_1_w0p1_0,
+ svst1_ver_vnum_za64 (1, w0 + 1, p0, x1, 0),
+ svst1_ver_vnum_za64 (1, w0 + 1, p0, x1, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za8.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za8.c
new file mode 100644
index 0000000..b15a7eb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_vnum_za8.c
@@ -0,0 +1,112 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** st1_vnum_za8_0_0_1:
+** incb x1
+** mov (w1[2-5]), (?:wzr|#?0)
+** st1b { za0v\.b\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za8_0_0_1,
+ svst1_ver_vnum_za8 (0, 0, p0, x1, 1),
+ svst1_ver_vnum_za8 (0, 0, p0, x1, 1))
+
+/*
+** st1_vnum_za8_0_1_1:
+** incb x1
+** mov (w1[2-5]), #?1
+** st1b { za0v\.b\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za8_0_1_1,
+ svst1_ver_vnum_za8 (0, 1, p0, x1, 1),
+ svst1_ver_vnum_za8 (0, 1, p0, x1, 1))
+
+/*
+** st1_vnum_za8_0_0_16:
+** incb x1, all, mul #16
+** mov (w1[2-5]), #?16
+** st1b { za0v\.b\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za8_0_0_16,
+ svst1_ver_vnum_za8 (0, 0, p0, x1, 16),
+ svst1_ver_vnum_za8 (0, 0, p0, x1, 16))
+
+/*
+** st1_vnum_za8_0_1_16:
+** incb x1, all, mul #16
+** mov (w1[2-5]), #?17
+** st1b { za0v\.b\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za8_0_1_16,
+ svst1_ver_vnum_za8 (0, 1, p0, x1, 16),
+ svst1_ver_vnum_za8 (0, 1, p0, x1, 16))
+
+/*
+** st1_vnum_za8_0_w0_0:
+** mov (w1[2-5]), w0
+** st1b { za0v\.b\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za8_0_w0_0,
+ svst1_ver_vnum_za8 (0, w0, p0, x1, 0),
+ svst1_ver_vnum_za8 (0, w0, p0, x1, 0))
+
+/*
+** st1_vnum_za8_0_w0_1:
+** incb x1
+** mov (w1[2-5]), w0
+** st1b { za0v\.b\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za8_0_w0_1,
+ svst1_ver_vnum_za8 (0, w0, p0, x1, 1),
+ svst1_ver_vnum_za8 (0, w0, p0, x1, 1))
+
+/*
+** st1_vnum_za8_0_w0_15:
+** incb x1, all, mul #15
+** mov (w1[2-5]), w0
+** st1b { za0v\.b\[\1, 15\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za8_0_w0_15,
+ svst1_ver_vnum_za8 (0, w0, p0, x1, 15),
+ svst1_ver_vnum_za8 (0, w0, p0, x1, 15))
+
+/*
+** st1_vnum_za8_0_w0_16:
+** incb x1, all, mul #16
+** add (w1[2-5]), w0, #?16
+** st1b { za0v\.b\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za8_0_w0_16,
+ svst1_ver_vnum_za8 (0, w0, p0, x1, 16),
+ svst1_ver_vnum_za8 (0, w0, p0, x1, 16))
+
+/*
+** st1_vnum_za8_0_w0_x2:
+** cntb (x[0-9]+)
+** mul (x[0-9]+), (?:\1, x2|x2, \1)
+** add (w1[2-5]), (?:w0, w2|w2, w0)
+** st1b { za0v\.b\[\3, 0\] }, p0, \[x1, \2\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za8_0_w0_x2,
+ svst1_ver_vnum_za8 (0, w0, p0, x1, x2),
+ svst1_ver_vnum_za8 (0, w0, p0, x1, x2))
+
+/*
+** st1_vnum_za8_0_w0p1_0:
+** mov (w1[2-5]), w0
+** st1b { za0v\.b\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_vnum_za8_0_w0p1_0,
+ svst1_ver_vnum_za8 (0, w0 + 1, p0, x1, 0),
+ svst1_ver_vnum_za8 (0, w0 + 1, p0, x1, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_za128.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_za128.c
new file mode 100644
index 0000000..7be6d5a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_za128.c
@@ -0,0 +1,83 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** st1_za128_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** st1q { za0v\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za128_0_0,
+ svst1_ver_za128 (0, 0, p0, x1),
+ svst1_ver_za128 (0, 0, p0, x1))
+
+/*
+** st1_za128_0_1:
+** mov (w1[2-5]), #?1
+** st1q { za0v\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za128_0_1,
+ svst1_ver_za128 (0, 1, p0, x1),
+ svst1_ver_za128 (0, 1, p0, x1))
+
+/*
+** st1_za128_0_w0:
+** mov (w1[2-5]), w0
+** st1q { za0v\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za128_0_w0,
+ svst1_ver_za128 (0, w0, p0, x1),
+ svst1_ver_za128 (0, w0, p0, x1))
+
+/*
+** st1_za128_0_w0_p1:
+** add (w1[2-5]), w0, #?1
+** st1q { za0v\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za128_0_w0_p1,
+ svst1_ver_za128 (0, w0 + 1, p0, x1),
+ svst1_ver_za128 (0, w0 + 1, p0, x1))
+
+/*
+** st1_za128_7_w0:
+** mov (w1[2-5]), w0
+** st1q { za7v\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za128_7_w0,
+ svst1_ver_za128 (7, w0, p0, x1),
+ svst1_ver_za128 (7, w0, p0, x1))
+
+/*
+** st1_za128_13_w0:
+** mov (w1[2-5]), w0
+** st1q { za13v\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za128_13_w0,
+ svst1_ver_za128 (13, w0, p0, x1),
+ svst1_ver_za128 (13, w0, p0, x1))
+
+/*
+** st1_za128_15_w0:
+** mov (w1[2-5]), w0
+** st1q { za15v\.q\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za128_15_w0,
+ svst1_ver_za128 (15, w0, p0, x1),
+ svst1_ver_za128 (15, w0, p0, x1))
+
+/*
+** st1_za128_9_w0_index:
+** mov (w1[2-5]), w0
+** st1q { za9v\.q\[\1, 0\] }, p0, \[x1, x2, lsl #?4\]
+** ret
+*/
+TEST_STORE_ZA (st1_za128_9_w0_index,
+ svst1_ver_za128 (9, w0, p0, x1 + x2 * 16),
+ svst1_ver_za128 (9, w0, p0, x1 + x2 * 16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_za16.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_za16.c
new file mode 100644
index 0000000..1bbf12a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_za16.c
@@ -0,0 +1,126 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** st1_za16_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** st1h { za0v\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_0_0,
+ svst1_ver_za16 (0, 0, p0, x1),
+ svst1_ver_za16 (0, 0, p0, x1))
+
+/* It would also be OK (and perhaps better) to move 0 into a register
+ and use an offset of 7. */
+/*
+** st1_za16_0_7:
+** mov (w1[2-5]), #?7
+** st1h { za0v\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_0_7,
+ svst1_ver_za16 (0, 7, p0, x1),
+ svst1_ver_za16 (0, 7, p0, x1))
+
+/*
+** st1_za16_0_8:
+** mov (w1[2-5]), #?8
+** st1h { za0v\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_0_8,
+ svst1_ver_za16 (0, 8, p0, x1),
+ svst1_ver_za16 (0, 8, p0, x1))
+
+/*
+** st1_za16_0_w0:
+** mov (w1[2-5]), w0
+** st1h { za0v\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_0_w0,
+ svst1_ver_za16 (0, w0, p0, x1),
+ svst1_ver_za16 (0, w0, p0, x1))
+
+/*
+** st1_za16_0_w0_p1:
+** mov (w1[2-5]), w0
+** st1h { za0v\.h\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_0_w0_p1,
+ svst1_ver_za16 (0, w0 + 1, p0, x1),
+ svst1_ver_za16 (0, w0 + 1, p0, x1))
+
+/*
+** st1_za16_0_w0_p7:
+** mov (w1[2-5]), w0
+** st1h { za0v\.h\[\1, 7\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_0_w0_p7,
+ svst1_ver_za16 (0, w0 + 7, p0, x1),
+ svst1_ver_za16 (0, w0 + 7, p0, x1))
+
+/*
+** st1_za16_1_w0:
+** mov (w1[2-5]), w0
+** st1h { za1v\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_1_w0,
+ svst1_ver_za16 (1, w0, p0, x1),
+ svst1_ver_za16 (1, w0, p0, x1))
+
+
+/*
+** st1_za16_1_w0_p1:
+** mov (w1[2-5]), w0
+** st1h { za1v\.h\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_1_w0_p1,
+ svst1_ver_za16 (1, w0 + 1, p0, x1),
+ svst1_ver_za16 (1, w0 + 1, p0, x1))
+
+/*
+** st1_za16_1_w0_p7:
+** mov (w1[2-5]), w0
+** st1h { za1v\.h\[\1, 7\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_1_w0_p7,
+ svst1_ver_za16 (1, w0 + 7, p0, x1),
+ svst1_ver_za16 (1, w0 + 7, p0, x1))
+
+/*
+** st1_za16_1_w0_p5_index:
+** mov (w1[2-5]), w0
+** st1h { za1v\.h\[\1, 5\] }, p0, \[x1, x2, lsl #?1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_1_w0_p5_index,
+ svst1_ver_za16 (1, w0 + 5, p0, x1 + x2 * 2),
+ svst1_ver_za16 (1, w0 + 5, p0, x1 + x2 * 2))
+
+/*
+** st1_za16_0_w0_p8:
+** add (w1[2-5]), w0, #?8
+** st1h { za0v\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_0_w0_p8,
+ svst1_ver_za16 (0, w0 + 8, p0, x1),
+ svst1_ver_za16 (0, w0 + 8, p0, x1))
+
+/*
+** st1_za16_0_w0_m1:
+** sub (w1[2-5]), w0, #?1
+** st1h { za0v\.h\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za16_0_w0_m1,
+ svst1_ver_za16 (0, w0 - 1, p0, x1),
+ svst1_ver_za16 (0, w0 - 1, p0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_za32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_za32.c
new file mode 100644
index 0000000..9809e97
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_za32.c
@@ -0,0 +1,125 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** st1_za32_0_0:
+** mov (w1[2-5]), (?:w0|#?0)
+** st1w { za0v\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_0_0,
+ svst1_ver_za32 (0, 0, p0, x1),
+ svst1_ver_za32 (0, 0, p0, x1))
+
+/* It would also be OK (and perhaps better) to move 0 into a register
+ and use an offset of 3. */
+/*
+** st1_za32_0_3:
+** mov (w1[2-5]), #?3
+** st1w { za0v\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_0_3,
+ svst1_ver_za32 (0, 3, p0, x1),
+ svst1_ver_za32 (0, 3, p0, x1))
+
+/*
+** st1_za32_0_4:
+** mov (w1[2-5]), #?4
+** st1w { za0v\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_0_4,
+ svst1_ver_za32 (0, 4, p0, x1),
+ svst1_ver_za32 (0, 4, p0, x1))
+
+/*
+** st1_za32_0_w0:
+** mov (w1[2-5]), w0
+** st1w { za0v\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_0_w0,
+ svst1_ver_za32 (0, w0, p0, x1),
+ svst1_ver_za32 (0, w0, p0, x1))
+
+/*
+** st1_za32_0_w0_p1:
+** mov (w1[2-5]), w0
+** st1w { za0v\.s\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_0_w0_p1,
+ svst1_ver_za32 (0, w0 + 1, p0, x1),
+ svst1_ver_za32 (0, w0 + 1, p0, x1))
+
+/*
+** st1_za32_0_w0_p3:
+** mov (w1[2-5]), w0
+** st1w { za0v\.s\[\1, 3\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_0_w0_p3,
+ svst1_ver_za32 (0, w0 + 3, p0, x1),
+ svst1_ver_za32 (0, w0 + 3, p0, x1))
+
+/*
+** st1_za32_3_w0:
+** mov (w1[2-5]), w0
+** st1w { za3v\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_3_w0,
+ svst1_ver_za32 (3, w0, p0, x1),
+ svst1_ver_za32 (3, w0, p0, x1))
+
+/*
+** st1_za32_3_w0_p1:
+** mov (w1[2-5]), w0
+** st1w { za3v\.s\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_3_w0_p1,
+ svst1_ver_za32 (3, w0 + 1, p0, x1),
+ svst1_ver_za32 (3, w0 + 1, p0, x1))
+
+/*
+** st1_za32_3_w0_p3:
+** mov (w1[2-5]), w0
+** st1w { za3v\.s\[\1, 3\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_3_w0_p3,
+ svst1_ver_za32 (3, w0 + 3, p0, x1),
+ svst1_ver_za32 (3, w0 + 3, p0, x1))
+
+/*
+** st1_za32_1_w0_p2_index:
+** mov (w1[2-5]), w0
+** st1w { za1v\.s\[\1, 2\] }, p0, \[x1, x2, lsl #?2\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_1_w0_p2_index,
+ svst1_ver_za32 (1, w0 + 2, p0, x1 + x2 * 4),
+ svst1_ver_za32 (1, w0 + 2, p0, x1 + x2 * 4))
+
+/*
+** st1_za32_0_w0_p4:
+** add (w1[2-5]), w0, #?4
+** st1w { za0v\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_0_w0_p4,
+ svst1_ver_za32 (0, w0 + 4, p0, x1),
+ svst1_ver_za32 (0, w0 + 4, p0, x1))
+
+/*
+** st1_za32_0_w0_m1:
+** sub (w1[2-5]), w0, #?1
+** st1w { za0v\.s\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za32_0_w0_m1,
+ svst1_ver_za32 (0, w0 - 1, p0, x1),
+ svst1_ver_za32 (0, w0 - 1, p0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_za64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_za64.c
new file mode 100644
index 0000000..0e93f4d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_za64.c
@@ -0,0 +1,105 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** st1_za64_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** st1d { za0v\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za64_0_0,
+ svst1_ver_za64 (0, 0, p0, x1),
+ svst1_ver_za64 (0, 0, p0, x1))
+
+/* It would also be OK (and perhaps better) to move 0 into a register
+ and use an offset of 1. */
+/*
+** st1_za64_0_1:
+** mov (w1[2-5]), #?1
+** st1d { za0v\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za64_0_1,
+ svst1_ver_za64 (0, 1, p0, x1),
+ svst1_ver_za64 (0, 1, p0, x1))
+
+/*
+** st1_za64_0_2:
+** mov (w1[2-5]), #?2
+** st1d { za0v\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za64_0_2,
+ svst1_ver_za64 (0, 2, p0, x1),
+ svst1_ver_za64 (0, 2, p0, x1))
+
+/*
+** st1_za64_0_w0:
+** mov (w1[2-5]), w0
+** st1d { za0v\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za64_0_w0,
+ svst1_ver_za64 (0, w0, p0, x1),
+ svst1_ver_za64 (0, w0, p0, x1))
+
+/*
+** st1_za64_0_w0_p1:
+** mov (w1[2-5]), w0
+** st1d { za0v\.d\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za64_0_w0_p1,
+ svst1_ver_za64 (0, w0 + 1, p0, x1),
+ svst1_ver_za64 (0, w0 + 1, p0, x1))
+
+/*
+** st1_za64_7_w0:
+** mov (w1[2-5]), w0
+** st1d { za7v\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za64_7_w0,
+ svst1_ver_za64 (7, w0, p0, x1),
+ svst1_ver_za64 (7, w0, p0, x1))
+
+/*
+** st1_za64_7_w0_p1:
+** mov (w1[2-5]), w0
+** st1d { za7v\.d\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za64_7_w0_p1,
+ svst1_ver_za64 (7, w0 + 1, p0, x1),
+ svst1_ver_za64 (7, w0 + 1, p0, x1))
+
+/*
+** st1_za64_5_w0_p1_index:
+** mov (w1[2-5]), w0
+** st1d { za5v\.d\[\1, 1\] }, p0, \[x1, x2, lsl #?3\]
+** ret
+*/
+TEST_STORE_ZA (st1_za64_5_w0_p1_index,
+ svst1_ver_za64 (5, w0 + 1, p0, x1 + x2 * 8),
+ svst1_ver_za64 (5, w0 + 1, p0, x1 + x2 * 8))
+
+/*
+** st1_za64_0_w0_p2:
+** add (w1[2-5]), w0, #?2
+** st1d { za0v\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za64_0_w0_p2,
+ svst1_ver_za64 (0, w0 + 2, p0, x1),
+ svst1_ver_za64 (0, w0 + 2, p0, x1))
+
+/*
+** st1_za64_0_w0_m1:
+** sub (w1[2-5]), w0, #?1
+** st1d { za0v\.d\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za64_0_w0_m1,
+ svst1_ver_za64 (0, w0 - 1, p0, x1),
+ svst1_ver_za64 (0, w0 - 1, p0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_za8.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_za8.c
new file mode 100644
index 0000000..c76b5c28
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/st1_ver_za8.c
@@ -0,0 +1,95 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** st1_za8_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** st1b { za0v\.b\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za8_0_0,
+ svst1_ver_za8 (0, 0, p0, x1),
+ svst1_ver_za8 (0, 0, p0, x1))
+
+/* It would also be OK (and perhaps better) to move 0 into a register
+ and use an offset of 15. */
+/*
+** st1_za8_0_15:
+** mov (w1[2-5]), #?15
+** st1b { za0v\.b\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za8_0_15,
+ svst1_ver_za8 (0, 15, p0, x1),
+ svst1_ver_za8 (0, 15, p0, x1))
+
+/*
+** st1_za8_0_16:
+** mov (w1[2-5]), #?16
+** st1b { za0v\.b\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za8_0_16,
+ svst1_ver_za8 (0, 16, p0, x1),
+ svst1_ver_za8 (0, 16, p0, x1))
+
+/*
+** st1_za8_0_w0:
+** mov (w1[2-5]), w0
+** st1b { za0v\.b\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za8_0_w0,
+ svst1_ver_za8 (0, w0, p0, x1),
+ svst1_ver_za8 (0, w0, p0, x1))
+
+/*
+** st1_za8_0_w0_p1:
+** mov (w1[2-5]), w0
+** st1b { za0v\.b\[\1, 1\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za8_0_w0_p1,
+ svst1_ver_za8 (0, w0 + 1, p0, x1),
+ svst1_ver_za8 (0, w0 + 1, p0, x1))
+
+/*
+** st1_za8_0_w0_p15:
+** mov (w1[2-5]), w0
+** st1b { za0v\.b\[\1, 15\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za8_0_w0_p15,
+ svst1_ver_za8 (0, w0 + 15, p0, x1),
+ svst1_ver_za8 (0, w0 + 15, p0, x1))
+
+/*
+** st1_za8_0_w0_p13_index:
+** mov (w1[2-5]), w0
+** st1b { za0v\.b\[\1, 15\] }, p0, \[x1, x2\]
+** ret
+*/
+TEST_STORE_ZA (st1_za8_0_w0_p13_index,
+ svst1_ver_za8 (0, w0 + 15, p0, x1 + x2),
+ svst1_ver_za8 (0, w0 + 15, p0, x1 + x2))
+
+/*
+** st1_za8_0_w0_p16:
+** add (w1[2-5]), w0, #?16
+** st1b { za0v\.b\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za8_0_w0_p16,
+ svst1_ver_za8 (0, w0 + 16, p0, x1),
+ svst1_ver_za8 (0, w0 + 16, p0, x1))
+
+/*
+** st1_za8_0_w0_m1:
+** sub (w1[2-5]), w0, #?1
+** st1b { za0v\.b\[\1, 0\] }, p0, \[x1\]
+** ret
+*/
+TEST_STORE_ZA (st1_za8_0_w0_m1,
+ svst1_ver_za8 (0, w0 - 1, p0, x1),
+ svst1_ver_za8 (0, w0 - 1, p0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/str_vnum_za_s.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/str_vnum_za_s.c
new file mode 100644
index 0000000..3ef7e0c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/str_vnum_za_s.c
@@ -0,0 +1,147 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** str_vnum_za_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** str za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_0_0,
+ svstr_vnum_za (0, x1, 0),
+ svstr_vnum_za (0, x1, 0))
+
+/*
+** str_vnum_za_0_1:
+** mov (w1[2-5]), (?:wzr|#?0)
+** str za\[\1, 1\], \[x1(?:, #1, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_0_1,
+ svstr_vnum_za (0, x1, 1),
+ svstr_vnum_za (0, x1, 1))
+
+/*
+** str_vnum_za_1_0:
+** mov (w1[2-5]), #?1
+** str za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_1_0,
+ svstr_vnum_za (1, x1, 0),
+ svstr_vnum_za (1, x1, 0))
+
+/*
+** str_vnum_za_1_2:
+** mov (w1[2-5]), #?1
+** str za\[\1, 2\], \[x1(?:, #2, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_1_2,
+ svstr_vnum_za (1, x1, 2),
+ svstr_vnum_za (1, x1, 2))
+
+/*
+** str_vnum_za_w0_0:
+** mov (w1[2-5]), w0
+** str za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_w0_0,
+ svstr_vnum_za (w0, x1, 0),
+ svstr_vnum_za (w0, x1, 0))
+
+/*
+** str_vnum_za_w0_1:
+** mov (w1[2-5]), w0
+** str za\[\1, 1\], \[x1, #1, mul vl\]
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_w0_1,
+ svstr_vnum_za (w0, x1, 1),
+ svstr_vnum_za (w0, x1, 1))
+
+/*
+** str_vnum_za_w0_13:
+** mov (w1[2-5]), w0
+** str za\[\1, 13\], \[x1, #13, mul vl\]
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_w0_13,
+ svstr_vnum_za (w0, x1, 13),
+ svstr_vnum_za (w0, x1, 13))
+
+/*
+** str_vnum_za_w0_15:
+** mov (w1[2-5]), w0
+** str za\[\1, 15\], \[x1, #15, mul vl\]
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_w0_15,
+ svstr_vnum_za (w0, x1, 15),
+ svstr_vnum_za (w0, x1, 15))
+
+/*
+** str_vnum_za_w0_16:
+** (
+** add (w1[2-5]), w0, #?16
+** incb x1, all, mul #16
+** str za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** |
+** incb x1, all, mul #16
+** add (w1[2-5]), w0, #?16
+** str za\[\2, 0\], \[x1(?:, #0, mul vl)?\]
+** )
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_w0_16,
+ svstr_vnum_za (w0, x1, 16),
+ svstr_vnum_za (w0, x1, 16))
+
+/*
+** str_vnum_za_w0_m1:
+** (
+** sub (w1[2-5]), w0, #?1
+** decb x1
+** str za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** |
+** decb x1
+** sub (w1[2-5]), w0, #?1
+** str za\[\2, 0\], \[x1(?:, #0, mul vl)?\]
+** )
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_w0_m1,
+ svstr_vnum_za (w0, x1, -1),
+ svstr_vnum_za (w0, x1, -1))
+
+/*
+** str_vnum_za_w0p1_0:
+** add (w1[2-5]), w0, #?1
+** str za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_w0p1_0,
+ svstr_vnum_za (w0 + 1, x1, 0),
+ svstr_vnum_za (w0 + 1, x1, 0))
+
+/*
+** str_vnum_za_w0m1_1:
+** sub (w1[2-5]), w0, #?1
+** str za\[\1, 1\], \[x1(?:, #1, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_w0m1_1,
+ svstr_vnum_za (w0 - 1, x1, 1),
+ svstr_vnum_za (w0 - 1, x1, 1))
+
+/*
+** str_vnum_za_w0p2_3:
+** add (w1[2-5]), w0, #?2
+** str za\[\1, 3\], \[x1(?:, #3, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_w0p2_3,
+ svstr_vnum_za (w0 + 2, x1, 3),
+ svstr_vnum_za (w0 + 2, x1, 3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/str_vnum_za_sc.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/str_vnum_za_sc.c
new file mode 100644
index 0000000..7cd09e6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/str_vnum_za_sc.c
@@ -0,0 +1,148 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#define STREAMING_COMPATIBLE
+#include "test_sme_acle.h"
+
+/*
+** str_vnum_za_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** str za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_0_0,
+ svstr_vnum_za (0, x1, 0),
+ svstr_vnum_za (0, x1, 0))
+
+/*
+** str_vnum_za_0_1:
+** mov (w1[2-5]), (?:wzr|#?0)
+** str za\[\1, 1\], \[x1(?:, #1, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_0_1,
+ svstr_vnum_za (0, x1, 1),
+ svstr_vnum_za (0, x1, 1))
+
+/*
+** str_vnum_za_1_0:
+** mov (w1[2-5]), #?1
+** str za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_1_0,
+ svstr_vnum_za (1, x1, 0),
+ svstr_vnum_za (1, x1, 0))
+
+/*
+** str_vnum_za_1_2:
+** mov (w1[2-5]), #?1
+** str za\[\1, 2\], \[x1(?:, #2, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_1_2,
+ svstr_vnum_za (1, x1, 2),
+ svstr_vnum_za (1, x1, 2))
+
+/*
+** str_vnum_za_w0_0:
+** mov (w1[2-5]), w0
+** str za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_w0_0,
+ svstr_vnum_za (w0, x1, 0),
+ svstr_vnum_za (w0, x1, 0))
+
+/*
+** str_vnum_za_w0_1:
+** mov (w1[2-5]), w0
+** str za\[\1, 1\], \[x1, #1, mul vl\]
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_w0_1,
+ svstr_vnum_za (w0, x1, 1),
+ svstr_vnum_za (w0, x1, 1))
+
+/*
+** str_vnum_za_w0_13:
+** mov (w1[2-5]), w0
+** str za\[\1, 13\], \[x1, #13, mul vl\]
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_w0_13,
+ svstr_vnum_za (w0, x1, 13),
+ svstr_vnum_za (w0, x1, 13))
+
+/*
+** str_vnum_za_w0_15:
+** mov (w1[2-5]), w0
+** str za\[\1, 15\], \[x1, #15, mul vl\]
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_w0_15,
+ svstr_vnum_za (w0, x1, 15),
+ svstr_vnum_za (w0, x1, 15))
+
+/*
+** str_vnum_za_w0_16:
+** (
+** add (w1[2-5]), w0, #?16
+** addsvl (x[0-9]+), x1, #16
+** str za\[\1, 0\], \[\2(?:, #0, mul vl)?\]
+** |
+** addsvl (x[0-9]+), x1, #16
+** add (w1[2-5]), w0, #?16
+** str za\[\4, 0\], \[\3(?:, #0, mul vl)?\]
+** )
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_w0_16,
+ svstr_vnum_za (w0, x1, 16),
+ svstr_vnum_za (w0, x1, 16))
+
+/*
+** str_vnum_za_w0_m1:
+** (
+** sub (w1[2-5]), w0, #?1
+** addsvl (x[0-9]+), x1, #-1
+** str za\[\1, 0\], \[\2(?:, #0, mul vl)?\]
+** |
+** addsvl (x[0-9]+), x1, #-1
+** sub (w1[2-5]), w0, #?1
+** str za\[\4, 0\], \[\3(?:, #0, mul vl)?\]
+** )
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_w0_m1,
+ svstr_vnum_za (w0, x1, -1),
+ svstr_vnum_za (w0, x1, -1))
+
+/*
+** str_vnum_za_w0p1_0:
+** add (w1[2-5]), w0, #?1
+** str za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_w0p1_0,
+ svstr_vnum_za (w0 + 1, x1, 0),
+ svstr_vnum_za (w0 + 1, x1, 0))
+
+/*
+** str_vnum_za_w0m1_1:
+** sub (w1[2-5]), w0, #?1
+** str za\[\1, 1\], \[x1(?:, #1, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_w0m1_1,
+ svstr_vnum_za (w0 - 1, x1, 1),
+ svstr_vnum_za (w0 - 1, x1, 1))
+
+/*
+** str_vnum_za_w0p2_3:
+** add (w1[2-5]), w0, #?2
+** str za\[\1, 3\], \[x1(?:, #3, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_vnum_za_w0p2_3,
+ svstr_vnum_za (w0 + 2, x1, 3),
+ svstr_vnum_za (w0 + 2, x1, 3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/str_za_s.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/str_za_s.c
new file mode 100644
index 0000000..4d953c5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/str_za_s.c
@@ -0,0 +1,124 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** str_za_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** str za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_za_0,
+ svstr_za (0, x1),
+ svstr_za (0, x1))
+
+/*
+** str_za_1:
+** mov (w1[2-5]), #?1
+** str za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_za_1,
+ svstr_za (1, x1),
+ svstr_za (1, x1))
+
+/*
+** str_za_w0:
+** mov (w1[2-5]), w0
+** str za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_za_w0,
+ svstr_za (w0, x1),
+ svstr_za (w0, x1))
+
+/*
+** str_za_w0_1_vnum:
+** mov (w1[2-5]), w0
+** str za\[\1, 1\], \[x1, #1, mul vl\]
+** ret
+*/
+TEST_STORE_ZA (str_za_w0_1_vnum,
+ svstr_za (w0 + 1, x1 + svcntsb ()),
+ svstr_za (w0 + 1, x1 + svcntsb ()))
+
+/*
+** str_za_w0_13_vnum:
+** mov (w1[2-5]), w0
+** str za\[\1, 13\], \[x1, #13, mul vl\]
+** ret
+*/
+TEST_STORE_ZA (str_za_w0_13_vnum,
+ svstr_za (w0 + 13, x1 + svcntsb () * 13),
+ svstr_za (w0 + 13, x1 + svcntsb () * 13))
+
+/*
+** str_za_w0_15_vnum:
+** mov (w1[2-5]), w0
+** str za\[\1, 15\], \[x1, #15, mul vl\]
+** ret
+*/
+TEST_STORE_ZA (str_za_w0_15_vnum,
+ svstr_za (w0 + 15, x1 + svcntsb () * 15),
+ svstr_za (w0 + 15, x1 + svcntsb () * 15))
+
+/*
+** str_za_w0_16_vnum:
+** (
+** add (w1[2-5]), w0, #?16
+** incb x1, all, mul #16
+** str za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** |
+** incb x1, all, mul #16
+** add (w1[2-5]), w0, #?16
+** str za\[\2, 0\], \[x1(?:, #0, mul vl)?\]
+** )
+** ret
+*/
+TEST_STORE_ZA (str_za_w0_16_vnum,
+ svstr_za (w0 + 16, x1 + svcntsb () * 16),
+ svstr_za (w0 + 16, x1 + svcntsb () * 16))
+
+/*
+** str_za_w0_m1_vnum:
+** (
+** sub (w1[2-5]), w0, #?1
+** decb x1
+** str za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** |
+** decb x1
+** sub (w1[2-5]), w0, #?1
+** str za\[\2, 0\], \[x1(?:, #0, mul vl)?\]
+** )
+** ret
+*/
+TEST_STORE_ZA (str_za_w0_m1_vnum,
+ svstr_za (w0 - 1, x1 - svcntsb ()),
+ svstr_za (w0 - 1, x1 - svcntsb ()))
+
+/*
+** str_za_w0p2:
+** add (w1[2-5]), w0, #?2
+** str za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_za_w0p2,
+ svstr_za (w0 + 2, x1),
+ svstr_za (w0 + 2, x1))
+
+/*
+** str_za_offset:
+** (
+** mov (w1[2-5]), w0
+** add (x[0-9]+), x1, #?1
+** str za\[\1, 0\], \[\2(?:, #0, mul vl)?\]
+** |
+** add (x[0-9]+), x1, #?1
+** mov (w1[2-5]), w0
+** str za\[\4, 0\], \[\3(?:, #0, mul vl)?\]
+** )
+** ret
+*/
+TEST_STORE_ZA (str_za_offset,
+ svstr_za (w0, x1 + 1),
+ svstr_za (w0, x1 + 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/str_za_sc.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/str_za_sc.c
new file mode 100644
index 0000000..3406055
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/str_za_sc.c
@@ -0,0 +1,71 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#define STREAMING_COMPATIBLE
+#include "test_sme_acle.h"
+
+/*
+** str_za_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** str za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_za_0,
+ svstr_za (0, x1),
+ svstr_za (0, x1))
+
+/*
+** str_za_1:
+** mov (w1[2-5]), #?1
+** str za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_za_1,
+ svstr_za (1, x1),
+ svstr_za (1, x1))
+
+/*
+** str_za_w0:
+** mov (w1[2-5]), w0
+** str za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_za_w0,
+ svstr_za (w0, x1),
+ svstr_za (w0, x1))
+
+/*
+** str_za_w0_1_vnum:
+** mov (w1[2-5]), w0
+** str za\[\1, 1\], \[x1, #1, mul vl\]
+** ret
+*/
+TEST_STORE_ZA (str_za_w0_1_vnum,
+ svstr_za (w0 + 1, x1 + svcntsb ()),
+ svstr_za (w0 + 1, x1 + svcntsb ()))
+
+/*
+** str_za_w0p2:
+** add (w1[2-5]), w0, #?2
+** str za\[\1, 0\], \[x1(?:, #0, mul vl)?\]
+** ret
+*/
+TEST_STORE_ZA (str_za_w0p2,
+ svstr_za (w0 + 2, x1),
+ svstr_za (w0 + 2, x1))
+
+/*
+** str_za_offset:
+** (
+** mov (w1[2-5]), w0
+** add (x[0-9]+), x1, #?1
+** str za\[\1, 0\], \[\2(?:, #0, mul vl)?\]
+** |
+** add (x[0-9]+), x1, #?1
+** mov (w1[2-5]), w0
+** str za\[\4, 0\], \[\3(?:, #0, mul vl)?\]
+** )
+** ret
+*/
+TEST_STORE_ZA (str_za_offset,
+ svstr_za (w0, x1 + 1),
+ svstr_za (w0, x1 + 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/sumopa_za32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/sumopa_za32.c
new file mode 100644
index 0000000..9dd66f7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/sumopa_za32.c
@@ -0,0 +1,30 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** sumopa_za32_s8_0_p0_p1_z0_z4:
+** sumopa za0\.s, p0/m, p1/m, z0\.b, z4\.b
+** ret
+*/
+TEST_DUAL_ZA (sumopa_za32_s8_0_p0_p1_z0_z4, svint8_t, svuint8_t,
+ svsumopa_za32_s8_m (0, p0, p1, z0, z4),
+ svsumopa_za32_m (0, p0, p1, z0, z4))
+
+/*
+** sumopa_za32_s8_0_p1_p0_z4_z0:
+** sumopa za0\.s, p1/m, p0/m, z4\.b, z0\.b
+** ret
+*/
+TEST_DUAL_ZA (sumopa_za32_s8_0_p1_p0_z4_z0, svuint8_t, svint8_t,
+ svsumopa_za32_s8_m (0, p1, p0, z4, z0),
+ svsumopa_za32_m (0, p1, p0, z4, z0))
+
+/*
+** sumopa_za32_s8_3_p0_p1_z0_z4:
+** sumopa za3\.s, p0/m, p1/m, z0\.b, z4\.b
+** ret
+*/
+TEST_DUAL_ZA (sumopa_za32_s8_3_p0_p1_z0_z4, svint8_t, svuint8_t,
+ svsumopa_za32_s8_m (3, p0, p1, z0, z4),
+ svsumopa_za32_m (3, p0, p1, z0, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/sumopa_za64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/sumopa_za64.c
new file mode 100644
index 0000000..2a78ab8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/sumopa_za64.c
@@ -0,0 +1,32 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+#pragma GCC target "+sme-i16i64"
+
+/*
+** sumopa_za64_s16_0_p0_p1_z0_z4:
+** sumopa za0\.d, p0/m, p1/m, z0\.h, z4\.h
+** ret
+*/
+TEST_DUAL_ZA (sumopa_za64_s16_0_p0_p1_z0_z4, svint16_t, svuint16_t,
+ svsumopa_za64_s16_m (0, p0, p1, z0, z4),
+ svsumopa_za64_m (0, p0, p1, z0, z4))
+
+/*
+** sumopa_za64_s16_0_p1_p0_z4_z0:
+** sumopa za0\.d, p1/m, p0/m, z4\.h, z0\.h
+** ret
+*/
+TEST_DUAL_ZA (sumopa_za64_s16_0_p1_p0_z4_z0, svuint16_t, svint16_t,
+ svsumopa_za64_s16_m (0, p1, p0, z4, z0),
+ svsumopa_za64_m (0, p1, p0, z4, z0))
+
+/*
+** sumopa_za64_s16_7_p0_p1_z0_z4:
+** sumopa za7\.d, p0/m, p1/m, z0\.h, z4\.h
+** ret
+*/
+TEST_DUAL_ZA (sumopa_za64_s16_7_p0_p1_z0_z4, svint16_t, svuint16_t,
+ svsumopa_za64_s16_m (7, p0, p1, z0, z4),
+ svsumopa_za64_m (7, p0, p1, z0, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/sumops_za32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/sumops_za32.c
new file mode 100644
index 0000000..55cb92d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/sumops_za32.c
@@ -0,0 +1,30 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** sumops_za32_s8_0_p0_p1_z0_z4:
+** sumops za0\.s, p0/m, p1/m, z0\.b, z4\.b
+** ret
+*/
+TEST_DUAL_ZA (sumops_za32_s8_0_p0_p1_z0_z4, svint8_t, svuint8_t,
+ svsumops_za32_s8_m (0, p0, p1, z0, z4),
+ svsumops_za32_m (0, p0, p1, z0, z4))
+
+/*
+** sumops_za32_s8_0_p1_p0_z4_z0:
+** sumops za0\.s, p1/m, p0/m, z4\.b, z0\.b
+** ret
+*/
+TEST_DUAL_ZA (sumops_za32_s8_0_p1_p0_z4_z0, svuint8_t, svint8_t,
+ svsumops_za32_s8_m (0, p1, p0, z4, z0),
+ svsumops_za32_m (0, p1, p0, z4, z0))
+
+/*
+** sumops_za32_s8_3_p0_p1_z0_z4:
+** sumops za3\.s, p0/m, p1/m, z0\.b, z4\.b
+** ret
+*/
+TEST_DUAL_ZA (sumops_za32_s8_3_p0_p1_z0_z4, svint8_t, svuint8_t,
+ svsumops_za32_s8_m (3, p0, p1, z0, z4),
+ svsumops_za32_m (3, p0, p1, z0, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/sumops_za64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/sumops_za64.c
new file mode 100644
index 0000000..910a45b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/sumops_za64.c
@@ -0,0 +1,32 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+#pragma GCC target "+sme-i16i64"
+
+/*
+** sumops_za64_s16_0_p0_p1_z0_z4:
+** sumops za0\.d, p0/m, p1/m, z0\.h, z4\.h
+** ret
+*/
+TEST_DUAL_ZA (sumops_za64_s16_0_p0_p1_z0_z4, svint16_t, svuint16_t,
+ svsumops_za64_s16_m (0, p0, p1, z0, z4),
+ svsumops_za64_m (0, p0, p1, z0, z4))
+
+/*
+** sumops_za64_s16_0_p1_p0_z4_z0:
+** sumops za0\.d, p1/m, p0/m, z4\.h, z0\.h
+** ret
+*/
+TEST_DUAL_ZA (sumops_za64_s16_0_p1_p0_z4_z0, svuint16_t, svint16_t,
+ svsumops_za64_s16_m (0, p1, p0, z4, z0),
+ svsumops_za64_m (0, p1, p0, z4, z0))
+
+/*
+** sumops_za64_s16_7_p0_p1_z0_z4:
+** sumops za7\.d, p0/m, p1/m, z0\.h, z4\.h
+** ret
+*/
+TEST_DUAL_ZA (sumops_za64_s16_7_p0_p1_z0_z4, svint16_t, svuint16_t,
+ svsumops_za64_s16_m (7, p0, p1, z0, z4),
+ svsumops_za64_m (7, p0, p1, z0, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/test_sme_acle.h b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/test_sme_acle.h
new file mode 100644
index 0000000..aaadab2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/test_sme_acle.h
@@ -0,0 +1,62 @@
+#ifndef TEST_SME_ACLE_H
+#define TEST_SME_ACLE_H 1
+
+#if (!defined(STREAMING_COMPATIBLE) \
+ && !defined(NON_STREAMING) \
+ && !defined(STREAMING))
+#define STREAMING
+#endif
+
+#if !defined(NO_SHARED_ZA)
+#define SHARED_ZA
+#endif
+
+#include "../../sve/acle/asm/test_sve_acle.h"
+
+#include <arm_sme.h>
+
+#define TEST_LOAD_ZA(NAME, CODE1, CODE2) \
+ PROTO (NAME, void, (svbool_t p0, int32_t w0, const char *x1, \
+ uint64_t x2)) \
+ { \
+ INVOKE (CODE1, CODE2); \
+ }
+
+#define TEST_STORE_ZA(NAME, CODE1, CODE2) \
+ PROTO (NAME, void, (svbool_t p0, int32_t w0, char *x1, \
+ uint64_t x2)) \
+ { \
+ INVOKE (CODE1, CODE2); \
+ }
+
+#define TEST_READ_ZA(NAME, TYPE, CODE1, CODE2) \
+ PROTO (NAME, TYPE, (TYPE z0, TYPE z1, svbool_t p0, \
+ int32_t w0)) \
+ { \
+ INVOKE (CODE1, CODE2); \
+ return z0; \
+ }
+
+#define TEST_WRITE_ZA(NAME, TYPE, CODE1, CODE2) \
+ PROTO (NAME, void, (TYPE z0, TYPE z1, svbool_t p0, \
+ int32_t w0)) \
+ { \
+ INVOKE (CODE1, CODE2); \
+ }
+
+#define TEST_UNIFORM_ZA(NAME, TYPE, CODE1, CODE2) \
+ PROTO (NAME, void, (TYPE z0, TYPE z1, svbool_t p0, \
+ svbool_t p1)) \
+ { \
+ INVOKE (CODE1, CODE2); \
+ }
+
+#define TEST_DUAL_ZA(NAME, TYPE1, TYPE2, CODE1, CODE2) \
+ PROTO (NAME, void, (TYPE1 z0, TYPE1 z1, TYPE1 z2, TYPE1 z3, \
+ TYPE2 z4, TYPE2 z5, TYPE2 z6, TYPE2 z7, \
+ svbool_t p0, svbool_t p1)) \
+ { \
+ INVOKE (CODE1, CODE2); \
+ }
+
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/undef_za.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/undef_za.c
new file mode 100644
index 0000000..5474328
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/undef_za.c
@@ -0,0 +1,33 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#define STREAMING_COMPATIBLE
+#include "test_sme_acle.h"
+
+/*
+** undef_za_1:
+** ret
+*/
+PROTO (undef_za_1, void, ()) { svundef_za (); }
+
+/*
+** undef_za_2:
+** ret
+*/
+PROTO (undef_za_2, void, ())
+{
+ svzero_za ();
+ svundef_za ();
+}
+
+/*
+** undef_za_3:
+** mov (w1[2-5]), (?:wzr|#?0)
+** str za\[\1, 0\], \[x0(?:, #0, mul vl)\]
+** ret
+*/
+PROTO (undef_za_3, void, (void *ptr))
+{
+ svzero_za ();
+ svundef_za ();
+ svstr_za (0, ptr);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/usmopa_za32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/usmopa_za32.c
new file mode 100644
index 0000000..bbc0b6c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/usmopa_za32.c
@@ -0,0 +1,30 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** usmopa_za32_u8_0_p0_p1_z0_z4:
+** usmopa za0\.s, p0/m, p1/m, z0\.b, z4\.b
+** ret
+*/
+TEST_DUAL_ZA (usmopa_za32_u8_0_p0_p1_z0_z4, svuint8_t, svint8_t,
+ svusmopa_za32_u8_m (0, p0, p1, z0, z4),
+ svusmopa_za32_m (0, p0, p1, z0, z4))
+
+/*
+** usmopa_za32_u8_0_p1_p0_z4_z0:
+** usmopa za0\.s, p1/m, p0/m, z4\.b, z0\.b
+** ret
+*/
+TEST_DUAL_ZA (usmopa_za32_u8_0_p1_p0_z4_z0, svint8_t, svuint8_t,
+ svusmopa_za32_u8_m (0, p1, p0, z4, z0),
+ svusmopa_za32_m (0, p1, p0, z4, z0))
+
+/*
+** usmopa_za32_u8_3_p0_p1_z0_z4:
+** usmopa za3\.s, p0/m, p1/m, z0\.b, z4\.b
+** ret
+*/
+TEST_DUAL_ZA (usmopa_za32_u8_3_p0_p1_z0_z4, svuint8_t, svint8_t,
+ svusmopa_za32_u8_m (3, p0, p1, z0, z4),
+ svusmopa_za32_m (3, p0, p1, z0, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/usmopa_za64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/usmopa_za64.c
new file mode 100644
index 0000000..64ee25b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/usmopa_za64.c
@@ -0,0 +1,32 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+#pragma GCC target "+sme-i16i64"
+
+/*
+** usmopa_za64_u16_0_p0_p1_z0_z4:
+** usmopa za0\.d, p0/m, p1/m, z0\.h, z4\.h
+** ret
+*/
+TEST_DUAL_ZA (usmopa_za64_u16_0_p0_p1_z0_z4, svuint16_t, svint16_t,
+ svusmopa_za64_u16_m (0, p0, p1, z0, z4),
+ svusmopa_za64_m (0, p0, p1, z0, z4))
+
+/*
+** usmopa_za64_u16_0_p1_p0_z4_z0:
+** usmopa za0\.d, p1/m, p0/m, z4\.h, z0\.h
+** ret
+*/
+TEST_DUAL_ZA (usmopa_za64_u16_0_p1_p0_z4_z0, svint16_t, svuint16_t,
+ svusmopa_za64_u16_m (0, p1, p0, z4, z0),
+ svusmopa_za64_m (0, p1, p0, z4, z0))
+
+/*
+** usmopa_za64_u16_7_p0_p1_z0_z4:
+** usmopa za7\.d, p0/m, p1/m, z0\.h, z4\.h
+** ret
+*/
+TEST_DUAL_ZA (usmopa_za64_u16_7_p0_p1_z0_z4, svuint16_t, svint16_t,
+ svusmopa_za64_u16_m (7, p0, p1, z0, z4),
+ svusmopa_za64_m (7, p0, p1, z0, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/usmops_za32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/usmops_za32.c
new file mode 100644
index 0000000..98fd331
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/usmops_za32.c
@@ -0,0 +1,30 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** usmops_za32_u8_0_p0_p1_z0_z4:
+** usmops za0\.s, p0/m, p1/m, z0\.b, z4\.b
+** ret
+*/
+TEST_DUAL_ZA (usmops_za32_u8_0_p0_p1_z0_z4, svuint8_t, svint8_t,
+ svusmops_za32_u8_m (0, p0, p1, z0, z4),
+ svusmops_za32_m (0, p0, p1, z0, z4))
+
+/*
+** usmops_za32_u8_0_p1_p0_z4_z0:
+** usmops za0\.s, p1/m, p0/m, z4\.b, z0\.b
+** ret
+*/
+TEST_DUAL_ZA (usmops_za32_u8_0_p1_p0_z4_z0, svint8_t, svuint8_t,
+ svusmops_za32_u8_m (0, p1, p0, z4, z0),
+ svusmops_za32_m (0, p1, p0, z4, z0))
+
+/*
+** usmops_za32_u8_3_p0_p1_z0_z4:
+** usmops za3\.s, p0/m, p1/m, z0\.b, z4\.b
+** ret
+*/
+TEST_DUAL_ZA (usmops_za32_u8_3_p0_p1_z0_z4, svuint8_t, svint8_t,
+ svusmops_za32_u8_m (3, p0, p1, z0, z4),
+ svusmops_za32_m (3, p0, p1, z0, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/usmops_za64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/usmops_za64.c
new file mode 100644
index 0000000..e20cdab
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/usmops_za64.c
@@ -0,0 +1,32 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+#pragma GCC target "+sme-i16i64"
+
+/*
+** usmops_za64_u16_0_p0_p1_z0_z4:
+** usmops za0\.d, p0/m, p1/m, z0\.h, z4\.h
+** ret
+*/
+TEST_DUAL_ZA (usmops_za64_u16_0_p0_p1_z0_z4, svuint16_t, svint16_t,
+ svusmops_za64_u16_m (0, p0, p1, z0, z4),
+ svusmops_za64_m (0, p0, p1, z0, z4))
+
+/*
+** usmops_za64_u16_0_p1_p0_z4_z0:
+** usmops za0\.d, p1/m, p0/m, z4\.h, z0\.h
+** ret
+*/
+TEST_DUAL_ZA (usmops_za64_u16_0_p1_p0_z4_z0, svint16_t, svuint16_t,
+ svusmops_za64_u16_m (0, p1, p0, z4, z0),
+ svusmops_za64_m (0, p1, p0, z4, z0))
+
+/*
+** usmops_za64_u16_7_p0_p1_z0_z4:
+** usmops za7\.d, p0/m, p1/m, z0\.h, z4\.h
+** ret
+*/
+TEST_DUAL_ZA (usmops_za64_u16_7_p0_p1_z0_z4, svuint16_t, svint16_t,
+ svusmops_za64_u16_m (7, p0, p1, z0, z4),
+ svusmops_za64_m (7, p0, p1, z0, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_hor_za128.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_hor_za128.c
new file mode 100644
index 0000000..119a253
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_hor_za128.c
@@ -0,0 +1,193 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** write_za128_s8_0_0_z0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0h\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_s8_0_0_z0, svint8_t,
+ svwrite_hor_za128_s8_m (0, 0, p0, z0),
+ svwrite_hor_za128_m (0, 0, p0, z0))
+
+/*
+** write_za128_s8_0_1_z0:
+** mov (w1[2-5]), #?1
+** mova za0h\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_s8_0_1_z0, svint8_t,
+ svwrite_hor_za128_s8_m (0, 1, p0, z0),
+ svwrite_hor_za128_m (0, 1, p0, z0))
+
+/*
+** write_za128_s8_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_s8_0_w0_z0, svint8_t,
+ svwrite_hor_za128_s8_m (0, w0, p0, z0),
+ svwrite_hor_za128_m (0, w0, p0, z0))
+
+/*
+** write_za128_s8_0_w0p1_z0:
+** add (w1[2-5]), w0, #?1
+** mova za0h\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_s8_0_w0p1_z0, svint8_t,
+ svwrite_hor_za128_s8_m (0, w0 + 1, p0, z0),
+ svwrite_hor_za128_m (0, w0 + 1, p0, z0))
+
+/*
+** write_za128_s8_0_w0m1_z0:
+** sub (w1[2-5]), w0, #?1
+** mova za0h\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_s8_0_w0m1_z0, svint8_t,
+ svwrite_hor_za128_s8_m (0, w0 - 1, p0, z0),
+ svwrite_hor_za128_m (0, w0 - 1, p0, z0))
+
+/*
+** write_za128_s8_1_w0_z0:
+** mov (w1[2-5]), w0
+** mova za1h\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_s8_1_w0_z0, svint8_t,
+ svwrite_hor_za128_s8_m (1, w0, p0, z0),
+ svwrite_hor_za128_m (1, w0, p0, z0))
+
+/*
+** write_za128_s8_15_w0_z0:
+** mov (w1[2-5]), w0
+** mova za15h\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_s8_15_w0_z0, svint8_t,
+ svwrite_hor_za128_s8_m (15, w0, p0, z0),
+ svwrite_hor_za128_m (15, w0, p0, z0))
+
+/*
+** write_za128_s8_0_w0_z1:
+** mov (w1[2-5]), w0
+** mova za0h\.q\[\1, 0\], p0/m, z1\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_s8_0_w0_z1, svint8_t,
+ svwrite_hor_za128_s8_m (0, w0, p0, z1),
+ svwrite_hor_za128_m (0, w0, p0, z1))
+
+/*
+** write_za128_u8_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_u8_0_w0_z0, svuint8_t,
+ svwrite_hor_za128_u8_m (0, w0, p0, z0),
+ svwrite_hor_za128_m (0, w0, p0, z0))
+
+/*
+** write_za128_s16_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_s16_0_w0_z0, svint16_t,
+ svwrite_hor_za128_s16_m (0, w0, p0, z0),
+ svwrite_hor_za128_m (0, w0, p0, z0))
+
+/*
+** write_za128_u16_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_u16_0_w0_z0, svuint16_t,
+ svwrite_hor_za128_u16_m (0, w0, p0, z0),
+ svwrite_hor_za128_m (0, w0, p0, z0))
+
+/*
+** write_za128_f16_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_f16_0_w0_z0, svfloat16_t,
+ svwrite_hor_za128_f16_m (0, w0, p0, z0),
+ svwrite_hor_za128_m (0, w0, p0, z0))
+
+/*
+** write_za128_bf16_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_bf16_0_w0_z0, svbfloat16_t,
+ svwrite_hor_za128_bf16_m (0, w0, p0, z0),
+ svwrite_hor_za128_m (0, w0, p0, z0))
+
+/*
+** write_za128_s32_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_s32_0_w0_z0, svint32_t,
+ svwrite_hor_za128_s32_m (0, w0, p0, z0),
+ svwrite_hor_za128_m (0, w0, p0, z0))
+
+/*
+** write_za128_u32_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_u32_0_w0_z0, svuint32_t,
+ svwrite_hor_za128_u32_m (0, w0, p0, z0),
+ svwrite_hor_za128_m (0, w0, p0, z0))
+
+/*
+** write_za128_f32_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_f32_0_w0_z0, svfloat32_t,
+ svwrite_hor_za128_f32_m (0, w0, p0, z0),
+ svwrite_hor_za128_m (0, w0, p0, z0))
+
+/*
+** write_za128_s64_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_s64_0_w0_z0, svint64_t,
+ svwrite_hor_za128_s64_m (0, w0, p0, z0),
+ svwrite_hor_za128_m (0, w0, p0, z0))
+
+/*
+** write_za128_u64_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_u64_0_w0_z0, svuint64_t,
+ svwrite_hor_za128_u64_m (0, w0, p0, z0),
+ svwrite_hor_za128_m (0, w0, p0, z0))
+
+/*
+** write_za128_f64_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_f64_0_w0_z0, svfloat64_t,
+ svwrite_hor_za128_f64_m (0, w0, p0, z0),
+ svwrite_hor_za128_m (0, w0, p0, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_hor_za16.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_hor_za16.c
new file mode 100644
index 0000000..c8f13f7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_hor_za16.c
@@ -0,0 +1,133 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** write_za16_s16_0_0_z0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0h\.h\[\1, 0\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_s16_0_0_z0, svint16_t,
+ svwrite_hor_za16_s16_m (0, 0, p0, z0),
+ svwrite_hor_za16_m (0, 0, p0, z0))
+
+/*
+** write_za16_s16_0_1_z0:
+** mov (w1[2-5]), #?1
+** mova za0h\.h\[\1, 0\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_s16_0_1_z0, svint16_t,
+ svwrite_hor_za16_s16_m (0, 1, p0, z0),
+ svwrite_hor_za16_m (0, 1, p0, z0))
+
+/*
+** write_za16_s16_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.h\[\1, 0\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_s16_0_w0_z0, svint16_t,
+ svwrite_hor_za16_s16_m (0, w0, p0, z0),
+ svwrite_hor_za16_m (0, w0, p0, z0))
+
+/*
+** write_za16_s16_0_w0p1_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.h\[\1, 1\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_s16_0_w0p1_z0, svint16_t,
+ svwrite_hor_za16_s16_m (0, w0 + 1, p0, z0),
+ svwrite_hor_za16_m (0, w0 + 1, p0, z0))
+
+/*
+** write_za16_s16_0_w0p7_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.h\[\1, 7\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_s16_0_w0p7_z0, svint16_t,
+ svwrite_hor_za16_s16_m (0, w0 + 7, p0, z0),
+ svwrite_hor_za16_m (0, w0 + 7, p0, z0))
+
+/*
+** write_za16_s16_0_w0p8_z0:
+** add (w1[2-5]), w0, #?8
+** mova za0h\.h\[\1, 0\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_s16_0_w0p8_z0, svint16_t,
+ svwrite_hor_za16_s16_m (0, w0 + 8, p0, z0),
+ svwrite_hor_za16_m (0, w0 + 8, p0, z0))
+
+/*
+** write_za16_s16_0_w0m1_z0:
+** sub (w1[2-5]), w0, #?1
+** mova za0h\.h\[\1, 0\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_s16_0_w0m1_z0, svint16_t,
+ svwrite_hor_za16_s16_m (0, w0 - 1, p0, z0),
+ svwrite_hor_za16_m (0, w0 - 1, p0, z0))
+
+/*
+** write_za16_s16_1_w0_z0:
+** mov (w1[2-5]), w0
+** mova za1h\.h\[\1, 0\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_s16_1_w0_z0, svint16_t,
+ svwrite_hor_za16_s16_m (1, w0, p0, z0),
+ svwrite_hor_za16_m (1, w0, p0, z0))
+
+/*
+** write_za16_s16_1_w0p7_z0:
+** mov (w1[2-5]), w0
+** mova za1h\.h\[\1, 7\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_s16_1_w0p7_z0, svint16_t,
+ svwrite_hor_za16_s16_m (1, w0 + 7, p0, z0),
+ svwrite_hor_za16_m (1, w0 + 7, p0, z0))
+
+/*
+** write_za16_s16_0_w0_z1:
+** mov (w1[2-5]), w0
+** mova za0h\.h\[\1, 0\], p0/m, z1\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_s16_0_w0_z1, svint16_t,
+ svwrite_hor_za16_s16_m (0, w0, p0, z1),
+ svwrite_hor_za16_m (0, w0, p0, z1))
+
+/*
+** write_za16_u16_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.h\[\1, 0\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_u16_0_w0_z0, svuint16_t,
+ svwrite_hor_za16_u16_m (0, w0, p0, z0),
+ svwrite_hor_za16_m (0, w0, p0, z0))
+
+/*
+** write_za16_f16_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.h\[\1, 0\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_f16_0_w0_z0, svfloat16_t,
+ svwrite_hor_za16_f16_m (0, w0, p0, z0),
+ svwrite_hor_za16_m (0, w0, p0, z0))
+
+/*
+** write_za16_bf16_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.h\[\1, 0\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_bf16_0_w0_z0, svbfloat16_t,
+ svwrite_hor_za16_bf16_m (0, w0, p0, z0),
+ svwrite_hor_za16_m (0, w0, p0, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_hor_za32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_hor_za32.c
new file mode 100644
index 0000000..ea2f5ae
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_hor_za32.c
@@ -0,0 +1,143 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** write_za32_s32_0_0_z0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0h\.s\[\1, 0\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_0_0_z0, svint32_t,
+ svwrite_hor_za32_s32_m (0, 0, p0, z0),
+ svwrite_hor_za32_m (0, 0, p0, z0))
+
+/*
+** write_za32_s32_0_1_z0:
+** mov (w1[2-5]), #?1
+** mova za0h\.s\[\1, 0\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_0_1_z0, svint32_t,
+ svwrite_hor_za32_s32_m (0, 1, p0, z0),
+ svwrite_hor_za32_m (0, 1, p0, z0))
+
+/*
+** write_za32_s32_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.s\[\1, 0\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_0_w0_z0, svint32_t,
+ svwrite_hor_za32_s32_m (0, w0, p0, z0),
+ svwrite_hor_za32_m (0, w0, p0, z0))
+
+/*
+** write_za32_s32_0_w0p1_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.s\[\1, 1\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_0_w0p1_z0, svint32_t,
+ svwrite_hor_za32_s32_m (0, w0 + 1, p0, z0),
+ svwrite_hor_za32_m (0, w0 + 1, p0, z0))
+
+/*
+** write_za32_s32_0_w0p3_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.s\[\1, 3\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_0_w0p3_z0, svint32_t,
+ svwrite_hor_za32_s32_m (0, w0 + 3, p0, z0),
+ svwrite_hor_za32_m (0, w0 + 3, p0, z0))
+
+/*
+** write_za32_s32_0_w0p4_z0:
+** add (w1[2-5]), w0, #?4
+** mova za0h\.s\[\1, 0\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_0_w0p4_z0, svint32_t,
+ svwrite_hor_za32_s32_m (0, w0 + 4, p0, z0),
+ svwrite_hor_za32_m (0, w0 + 4, p0, z0))
+
+/*
+** write_za32_s32_0_w0m1_z0:
+** sub (w1[2-5]), w0, #?1
+** mova za0h\.s\[\1, 0\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_0_w0m1_z0, svint32_t,
+ svwrite_hor_za32_s32_m (0, w0 - 1, p0, z0),
+ svwrite_hor_za32_m (0, w0 - 1, p0, z0))
+
+/*
+** write_za32_s32_1_w0_z0:
+** mov (w1[2-5]), w0
+** mova za1h\.s\[\1, 0\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_1_w0_z0, svint32_t,
+ svwrite_hor_za32_s32_m (1, w0, p0, z0),
+ svwrite_hor_za32_m (1, w0, p0, z0))
+
+/*
+** write_za32_s32_1_w0p3_z0:
+** mov (w1[2-5]), w0
+** mova za1h\.s\[\1, 3\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_1_w0p3_z0, svint32_t,
+ svwrite_hor_za32_s32_m (1, w0 + 3, p0, z0),
+ svwrite_hor_za32_m (1, w0 + 3, p0, z0))
+
+/*
+** write_za32_s32_3_w0_z0:
+** mov (w1[2-5]), w0
+** mova za3h\.s\[\1, 0\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_3_w0_z0, svint32_t,
+ svwrite_hor_za32_s32_m (3, w0, p0, z0),
+ svwrite_hor_za32_m (3, w0, p0, z0))
+
+/*
+** write_za32_s32_3_w0p3_z0:
+** mov (w1[2-5]), w0
+** mova za3h\.s\[\1, 3\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_3_w0p3_z0, svint32_t,
+ svwrite_hor_za32_s32_m (3, w0 + 3, p0, z0),
+ svwrite_hor_za32_m (3, w0 + 3, p0, z0))
+
+/*
+** write_za32_s32_0_w0_z1:
+** mov (w1[2-5]), w0
+** mova za0h\.s\[\1, 0\], p0/m, z1\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_0_w0_z1, svint32_t,
+ svwrite_hor_za32_s32_m (0, w0, p0, z1),
+ svwrite_hor_za32_m (0, w0, p0, z1))
+
+/*
+** write_za32_u32_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.s\[\1, 0\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_u32_0_w0_z0, svuint32_t,
+ svwrite_hor_za32_u32_m (0, w0, p0, z0),
+ svwrite_hor_za32_m (0, w0, p0, z0))
+
+/*
+** write_za32_f32_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.s\[\1, 0\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_f32_0_w0_z0, svfloat32_t,
+ svwrite_hor_za32_f32_m (0, w0, p0, z0),
+ svwrite_hor_za32_m (0, w0, p0, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_hor_za64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_hor_za64.c
new file mode 100644
index 0000000..2b0a157
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_hor_za64.c
@@ -0,0 +1,133 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** write_za64_s64_0_0_z0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0h\.d\[\1, 0\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_s64_0_0_z0, svint64_t,
+ svwrite_hor_za64_s64_m (0, 0, p0, z0),
+ svwrite_hor_za64_m (0, 0, p0, z0))
+
+/*
+** write_za64_s64_0_1_z0:
+** mov (w1[2-5]), #?1
+** mova za0h\.d\[\1, 0\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_s64_0_1_z0, svint64_t,
+ svwrite_hor_za64_s64_m (0, 1, p0, z0),
+ svwrite_hor_za64_m (0, 1, p0, z0))
+
+/*
+** write_za64_s64_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.d\[\1, 0\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_s64_0_w0_z0, svint64_t,
+ svwrite_hor_za64_s64_m (0, w0, p0, z0),
+ svwrite_hor_za64_m (0, w0, p0, z0))
+
+/*
+** write_za64_s64_0_w0p1_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.d\[\1, 1\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_s64_0_w0p1_z0, svint64_t,
+ svwrite_hor_za64_s64_m (0, w0 + 1, p0, z0),
+ svwrite_hor_za64_m (0, w0 + 1, p0, z0))
+
+/*
+** write_za64_s64_0_w0p2_z0:
+** add (w1[2-5]), w0, #?2
+** mova za0h\.d\[\1, 0\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_s64_0_w0p2_z0, svint64_t,
+ svwrite_hor_za64_s64_m (0, w0 + 2, p0, z0),
+ svwrite_hor_za64_m (0, w0 + 2, p0, z0))
+
+/*
+** write_za64_s64_0_w0m1_z0:
+** sub (w1[2-5]), w0, #?1
+** mova za0h\.d\[\1, 0\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_s64_0_w0m1_z0, svint64_t,
+ svwrite_hor_za64_s64_m (0, w0 - 1, p0, z0),
+ svwrite_hor_za64_m (0, w0 - 1, p0, z0))
+
+/*
+** write_za64_s64_1_w0_z0:
+** mov (w1[2-5]), w0
+** mova za1h\.d\[\1, 0\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_s64_1_w0_z0, svint64_t,
+ svwrite_hor_za64_s64_m (1, w0, p0, z0),
+ svwrite_hor_za64_m (1, w0, p0, z0))
+
+/*
+** write_za64_s64_1_w0p1_z0:
+** mov (w1[2-5]), w0
+** mova za1h\.d\[\1, 1\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_s64_1_w0p1_z0, svint64_t,
+ svwrite_hor_za64_s64_m (1, w0 + 1, p0, z0),
+ svwrite_hor_za64_m (1, w0 + 1, p0, z0))
+
+/*
+** write_za64_s64_7_w0_z0:
+** mov (w1[2-5]), w0
+** mova za7h\.d\[\1, 0\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_s64_7_w0_z0, svint64_t,
+ svwrite_hor_za64_s64_m (7, w0, p0, z0),
+ svwrite_hor_za64_m (7, w0, p0, z0))
+
+/*
+** write_za64_s64_7_w0p1_z0:
+** mov (w1[2-5]), w0
+** mova za7h\.d\[\1, 1\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_s64_7_w0p1_z0, svint64_t,
+ svwrite_hor_za64_s64_m (7, w0 + 1, p0, z0),
+ svwrite_hor_za64_m (7, w0 + 1, p0, z0))
+
+/*
+** write_za64_s64_0_w0_z1:
+** mov (w1[2-5]), w0
+** mova za0h\.d\[\1, 0\], p0/m, z1\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_s64_0_w0_z1, svint64_t,
+ svwrite_hor_za64_s64_m (0, w0, p0, z1),
+ svwrite_hor_za64_m (0, w0, p0, z1))
+
+/*
+** write_za64_u64_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.d\[\1, 0\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_u64_0_w0_z0, svuint64_t,
+ svwrite_hor_za64_u64_m (0, w0, p0, z0),
+ svwrite_hor_za64_m (0, w0, p0, z0))
+
+/*
+** write_za64_f64_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.d\[\1, 0\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_f64_0_w0_z0, svfloat64_t,
+ svwrite_hor_za64_f64_m (0, w0, p0, z0),
+ svwrite_hor_za64_m (0, w0, p0, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_hor_za8.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_hor_za8.c
new file mode 100644
index 0000000..683e1a6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_hor_za8.c
@@ -0,0 +1,93 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** write_za8_s8_0_0_z0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0h\.b\[\1, 0\], p0/m, z0\.b
+** ret
+*/
+TEST_WRITE_ZA (write_za8_s8_0_0_z0, svint8_t,
+ svwrite_hor_za8_s8_m (0, 0, p0, z0),
+ svwrite_hor_za8_m (0, 0, p0, z0))
+
+/*
+** write_za8_s8_0_1_z0:
+** mov (w1[2-5]), #?1
+** mova za0h\.b\[\1, 0\], p0/m, z0\.b
+** ret
+*/
+TEST_WRITE_ZA (write_za8_s8_0_1_z0, svint8_t,
+ svwrite_hor_za8_s8_m (0, 1, p0, z0),
+ svwrite_hor_za8_m (0, 1, p0, z0))
+
+/*
+** write_za8_s8_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.b\[\1, 0\], p0/m, z0\.b
+** ret
+*/
+TEST_WRITE_ZA (write_za8_s8_0_w0_z0, svint8_t,
+ svwrite_hor_za8_s8_m (0, w0, p0, z0),
+ svwrite_hor_za8_m (0, w0, p0, z0))
+
+/*
+** write_za8_s8_0_w0p1_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.b\[\1, 1\], p0/m, z0\.b
+** ret
+*/
+TEST_WRITE_ZA (write_za8_s8_0_w0p1_z0, svint8_t,
+ svwrite_hor_za8_s8_m (0, w0 + 1, p0, z0),
+ svwrite_hor_za8_m (0, w0 + 1, p0, z0))
+
+/*
+** write_za8_s8_0_w0p15_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.b\[\1, 15\], p0/m, z0\.b
+** ret
+*/
+TEST_WRITE_ZA (write_za8_s8_0_w0p15_z0, svint8_t,
+ svwrite_hor_za8_s8_m (0, w0 + 15, p0, z0),
+ svwrite_hor_za8_m (0, w0 + 15, p0, z0))
+
+/*
+** write_za8_s8_0_w0p16_z0:
+** add (w1[2-5]), w0, #?16
+** mova za0h\.b\[\1, 0\], p0/m, z0\.b
+** ret
+*/
+TEST_WRITE_ZA (write_za8_s8_0_w0p16_z0, svint8_t,
+ svwrite_hor_za8_s8_m (0, w0 + 16, p0, z0),
+ svwrite_hor_za8_m (0, w0 + 16, p0, z0))
+
+/*
+** write_za8_s8_0_w0m1_z0:
+** sub (w1[2-5]), w0, #?1
+** mova za0h\.b\[\1, 0\], p0/m, z0\.b
+** ret
+*/
+TEST_WRITE_ZA (write_za8_s8_0_w0m1_z0, svint8_t,
+ svwrite_hor_za8_s8_m (0, w0 - 1, p0, z0),
+ svwrite_hor_za8_m (0, w0 - 1, p0, z0))
+
+/*
+** write_za8_s8_0_w0_z1:
+** mov (w1[2-5]), w0
+** mova za0h\.b\[\1, 0\], p0/m, z1\.b
+** ret
+*/
+TEST_WRITE_ZA (write_za8_s8_0_w0_z1, svint8_t,
+ svwrite_hor_za8_s8_m (0, w0, p0, z1),
+ svwrite_hor_za8_m (0, w0, p0, z1))
+
+/*
+** write_za8_u8_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0h\.b\[\1, 0\], p0/m, z0\.b
+** ret
+*/
+TEST_WRITE_ZA (write_za8_u8_0_w0_z0, svuint8_t,
+ svwrite_hor_za8_u8_m (0, w0, p0, z0),
+ svwrite_hor_za8_m (0, w0, p0, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_ver_za128.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_ver_za128.c
new file mode 100644
index 0000000..9622e99
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_ver_za128.c
@@ -0,0 +1,193 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** write_za128_s8_0_0_z0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0v\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_s8_0_0_z0, svint8_t,
+ svwrite_ver_za128_s8_m (0, 0, p0, z0),
+ svwrite_ver_za128_m (0, 0, p0, z0))
+
+/*
+** write_za128_s8_0_1_z0:
+** mov (w1[2-5]), #?1
+** mova za0v\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_s8_0_1_z0, svint8_t,
+ svwrite_ver_za128_s8_m (0, 1, p0, z0),
+ svwrite_ver_za128_m (0, 1, p0, z0))
+
+/*
+** write_za128_s8_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_s8_0_w0_z0, svint8_t,
+ svwrite_ver_za128_s8_m (0, w0, p0, z0),
+ svwrite_ver_za128_m (0, w0, p0, z0))
+
+/*
+** write_za128_s8_0_w0p1_z0:
+** add (w1[2-5]), w0, #?1
+** mova za0v\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_s8_0_w0p1_z0, svint8_t,
+ svwrite_ver_za128_s8_m (0, w0 + 1, p0, z0),
+ svwrite_ver_za128_m (0, w0 + 1, p0, z0))
+
+/*
+** write_za128_s8_0_w0m1_z0:
+** sub (w1[2-5]), w0, #?1
+** mova za0v\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_s8_0_w0m1_z0, svint8_t,
+ svwrite_ver_za128_s8_m (0, w0 - 1, p0, z0),
+ svwrite_ver_za128_m (0, w0 - 1, p0, z0))
+
+/*
+** write_za128_s8_1_w0_z0:
+** mov (w1[2-5]), w0
+** mova za1v\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_s8_1_w0_z0, svint8_t,
+ svwrite_ver_za128_s8_m (1, w0, p0, z0),
+ svwrite_ver_za128_m (1, w0, p0, z0))
+
+/*
+** write_za128_s8_15_w0_z0:
+** mov (w1[2-5]), w0
+** mova za15v\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_s8_15_w0_z0, svint8_t,
+ svwrite_ver_za128_s8_m (15, w0, p0, z0),
+ svwrite_ver_za128_m (15, w0, p0, z0))
+
+/*
+** write_za128_s8_0_w0_z1:
+** mov (w1[2-5]), w0
+** mova za0v\.q\[\1, 0\], p0/m, z1\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_s8_0_w0_z1, svint8_t,
+ svwrite_ver_za128_s8_m (0, w0, p0, z1),
+ svwrite_ver_za128_m (0, w0, p0, z1))
+
+/*
+** write_za128_u8_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_u8_0_w0_z0, svuint8_t,
+ svwrite_ver_za128_u8_m (0, w0, p0, z0),
+ svwrite_ver_za128_m (0, w0, p0, z0))
+
+/*
+** write_za128_s16_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_s16_0_w0_z0, svint16_t,
+ svwrite_ver_za128_s16_m (0, w0, p0, z0),
+ svwrite_ver_za128_m (0, w0, p0, z0))
+
+/*
+** write_za128_u16_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_u16_0_w0_z0, svuint16_t,
+ svwrite_ver_za128_u16_m (0, w0, p0, z0),
+ svwrite_ver_za128_m (0, w0, p0, z0))
+
+/*
+** write_za128_f16_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_f16_0_w0_z0, svfloat16_t,
+ svwrite_ver_za128_f16_m (0, w0, p0, z0),
+ svwrite_ver_za128_m (0, w0, p0, z0))
+
+/*
+** write_za128_bf16_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_bf16_0_w0_z0, svbfloat16_t,
+ svwrite_ver_za128_bf16_m (0, w0, p0, z0),
+ svwrite_ver_za128_m (0, w0, p0, z0))
+
+/*
+** write_za128_s32_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_s32_0_w0_z0, svint32_t,
+ svwrite_ver_za128_s32_m (0, w0, p0, z0),
+ svwrite_ver_za128_m (0, w0, p0, z0))
+
+/*
+** write_za128_u32_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_u32_0_w0_z0, svuint32_t,
+ svwrite_ver_za128_u32_m (0, w0, p0, z0),
+ svwrite_ver_za128_m (0, w0, p0, z0))
+
+/*
+** write_za128_f32_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_f32_0_w0_z0, svfloat32_t,
+ svwrite_ver_za128_f32_m (0, w0, p0, z0),
+ svwrite_ver_za128_m (0, w0, p0, z0))
+
+/*
+** write_za128_s64_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_s64_0_w0_z0, svint64_t,
+ svwrite_ver_za128_s64_m (0, w0, p0, z0),
+ svwrite_ver_za128_m (0, w0, p0, z0))
+
+/*
+** write_za128_u64_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_u64_0_w0_z0, svuint64_t,
+ svwrite_ver_za128_u64_m (0, w0, p0, z0),
+ svwrite_ver_za128_m (0, w0, p0, z0))
+
+/*
+** write_za128_f64_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.q\[\1, 0\], p0/m, z0\.q
+** ret
+*/
+TEST_WRITE_ZA (write_za128_f64_0_w0_z0, svfloat64_t,
+ svwrite_ver_za128_f64_m (0, w0, p0, z0),
+ svwrite_ver_za128_m (0, w0, p0, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_ver_za16.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_ver_za16.c
new file mode 100644
index 0000000..5430f23
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_ver_za16.c
@@ -0,0 +1,133 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** write_za16_s16_0_0_z0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0v\.h\[\1, 0\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_s16_0_0_z0, svint16_t,
+ svwrite_ver_za16_s16_m (0, 0, p0, z0),
+ svwrite_ver_za16_m (0, 0, p0, z0))
+
+/*
+** write_za16_s16_0_1_z0:
+** mov (w1[2-5]), #?1
+** mova za0v\.h\[\1, 0\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_s16_0_1_z0, svint16_t,
+ svwrite_ver_za16_s16_m (0, 1, p0, z0),
+ svwrite_ver_za16_m (0, 1, p0, z0))
+
+/*
+** write_za16_s16_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.h\[\1, 0\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_s16_0_w0_z0, svint16_t,
+ svwrite_ver_za16_s16_m (0, w0, p0, z0),
+ svwrite_ver_za16_m (0, w0, p0, z0))
+
+/*
+** write_za16_s16_0_w0p1_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.h\[\1, 1\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_s16_0_w0p1_z0, svint16_t,
+ svwrite_ver_za16_s16_m (0, w0 + 1, p0, z0),
+ svwrite_ver_za16_m (0, w0 + 1, p0, z0))
+
+/*
+** write_za16_s16_0_w0p7_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.h\[\1, 7\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_s16_0_w0p7_z0, svint16_t,
+ svwrite_ver_za16_s16_m (0, w0 + 7, p0, z0),
+ svwrite_ver_za16_m (0, w0 + 7, p0, z0))
+
+/*
+** write_za16_s16_0_w0p8_z0:
+** add (w1[2-5]), w0, #?8
+** mova za0v\.h\[\1, 0\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_s16_0_w0p8_z0, svint16_t,
+ svwrite_ver_za16_s16_m (0, w0 + 8, p0, z0),
+ svwrite_ver_za16_m (0, w0 + 8, p0, z0))
+
+/*
+** write_za16_s16_0_w0m1_z0:
+** sub (w1[2-5]), w0, #?1
+** mova za0v\.h\[\1, 0\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_s16_0_w0m1_z0, svint16_t,
+ svwrite_ver_za16_s16_m (0, w0 - 1, p0, z0),
+ svwrite_ver_za16_m (0, w0 - 1, p0, z0))
+
+/*
+** write_za16_s16_1_w0_z0:
+** mov (w1[2-5]), w0
+** mova za1v\.h\[\1, 0\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_s16_1_w0_z0, svint16_t,
+ svwrite_ver_za16_s16_m (1, w0, p0, z0),
+ svwrite_ver_za16_m (1, w0, p0, z0))
+
+/*
+** write_za16_s16_1_w0p7_z0:
+** mov (w1[2-5]), w0
+** mova za1v\.h\[\1, 7\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_s16_1_w0p7_z0, svint16_t,
+ svwrite_ver_za16_s16_m (1, w0 + 7, p0, z0),
+ svwrite_ver_za16_m (1, w0 + 7, p0, z0))
+
+/*
+** write_za16_s16_0_w0_z1:
+** mov (w1[2-5]), w0
+** mova za0v\.h\[\1, 0\], p0/m, z1\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_s16_0_w0_z1, svint16_t,
+ svwrite_ver_za16_s16_m (0, w0, p0, z1),
+ svwrite_ver_za16_m (0, w0, p0, z1))
+
+/*
+** write_za16_u16_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.h\[\1, 0\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_u16_0_w0_z0, svuint16_t,
+ svwrite_ver_za16_u16_m (0, w0, p0, z0),
+ svwrite_ver_za16_m (0, w0, p0, z0))
+
+/*
+** write_za16_f16_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.h\[\1, 0\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_f16_0_w0_z0, svfloat16_t,
+ svwrite_ver_za16_f16_m (0, w0, p0, z0),
+ svwrite_ver_za16_m (0, w0, p0, z0))
+
+/*
+** write_za16_bf16_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.h\[\1, 0\], p0/m, z0\.h
+** ret
+*/
+TEST_WRITE_ZA (write_za16_bf16_0_w0_z0, svbfloat16_t,
+ svwrite_ver_za16_bf16_m (0, w0, p0, z0),
+ svwrite_ver_za16_m (0, w0, p0, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_ver_za32.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_ver_za32.c
new file mode 100644
index 0000000..960ce16
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_ver_za32.c
@@ -0,0 +1,143 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** write_za32_s32_0_0_z0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0v\.s\[\1, 0\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_0_0_z0, svint32_t,
+ svwrite_ver_za32_s32_m (0, 0, p0, z0),
+ svwrite_ver_za32_m (0, 0, p0, z0))
+
+/*
+** write_za32_s32_0_1_z0:
+** mov (w1[2-5]), #?1
+** mova za0v\.s\[\1, 0\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_0_1_z0, svint32_t,
+ svwrite_ver_za32_s32_m (0, 1, p0, z0),
+ svwrite_ver_za32_m (0, 1, p0, z0))
+
+/*
+** write_za32_s32_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.s\[\1, 0\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_0_w0_z0, svint32_t,
+ svwrite_ver_za32_s32_m (0, w0, p0, z0),
+ svwrite_ver_za32_m (0, w0, p0, z0))
+
+/*
+** write_za32_s32_0_w0p1_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.s\[\1, 1\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_0_w0p1_z0, svint32_t,
+ svwrite_ver_za32_s32_m (0, w0 + 1, p0, z0),
+ svwrite_ver_za32_m (0, w0 + 1, p0, z0))
+
+/*
+** write_za32_s32_0_w0p3_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.s\[\1, 3\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_0_w0p3_z0, svint32_t,
+ svwrite_ver_za32_s32_m (0, w0 + 3, p0, z0),
+ svwrite_ver_za32_m (0, w0 + 3, p0, z0))
+
+/*
+** write_za32_s32_0_w0p4_z0:
+** add (w1[2-5]), w0, #?4
+** mova za0v\.s\[\1, 0\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_0_w0p4_z0, svint32_t,
+ svwrite_ver_za32_s32_m (0, w0 + 4, p0, z0),
+ svwrite_ver_za32_m (0, w0 + 4, p0, z0))
+
+/*
+** write_za32_s32_0_w0m1_z0:
+** sub (w1[2-5]), w0, #?1
+** mova za0v\.s\[\1, 0\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_0_w0m1_z0, svint32_t,
+ svwrite_ver_za32_s32_m (0, w0 - 1, p0, z0),
+ svwrite_ver_za32_m (0, w0 - 1, p0, z0))
+
+/*
+** write_za32_s32_1_w0_z0:
+** mov (w1[2-5]), w0
+** mova za1v\.s\[\1, 0\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_1_w0_z0, svint32_t,
+ svwrite_ver_za32_s32_m (1, w0, p0, z0),
+ svwrite_ver_za32_m (1, w0, p0, z0))
+
+/*
+** write_za32_s32_1_w0p3_z0:
+** mov (w1[2-5]), w0
+** mova za1v\.s\[\1, 3\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_1_w0p3_z0, svint32_t,
+ svwrite_ver_za32_s32_m (1, w0 + 3, p0, z0),
+ svwrite_ver_za32_m (1, w0 + 3, p0, z0))
+
+/*
+** write_za32_s32_3_w0_z0:
+** mov (w1[2-5]), w0
+** mova za3v\.s\[\1, 0\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_3_w0_z0, svint32_t,
+ svwrite_ver_za32_s32_m (3, w0, p0, z0),
+ svwrite_ver_za32_m (3, w0, p0, z0))
+
+/*
+** write_za32_s32_3_w0p3_z0:
+** mov (w1[2-5]), w0
+** mova za3v\.s\[\1, 3\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_3_w0p3_z0, svint32_t,
+ svwrite_ver_za32_s32_m (3, w0 + 3, p0, z0),
+ svwrite_ver_za32_m (3, w0 + 3, p0, z0))
+
+/*
+** write_za32_s32_0_w0_z1:
+** mov (w1[2-5]), w0
+** mova za0v\.s\[\1, 0\], p0/m, z1\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_s32_0_w0_z1, svint32_t,
+ svwrite_ver_za32_s32_m (0, w0, p0, z1),
+ svwrite_ver_za32_m (0, w0, p0, z1))
+
+/*
+** write_za32_u32_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.s\[\1, 0\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_u32_0_w0_z0, svuint32_t,
+ svwrite_ver_za32_u32_m (0, w0, p0, z0),
+ svwrite_ver_za32_m (0, w0, p0, z0))
+
+/*
+** write_za32_f32_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.s\[\1, 0\], p0/m, z0\.s
+** ret
+*/
+TEST_WRITE_ZA (write_za32_f32_0_w0_z0, svfloat32_t,
+ svwrite_ver_za32_f32_m (0, w0, p0, z0),
+ svwrite_ver_za32_m (0, w0, p0, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_ver_za64.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_ver_za64.c
new file mode 100644
index 0000000..962c400
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_ver_za64.c
@@ -0,0 +1,133 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** write_za64_s64_0_0_z0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0v\.d\[\1, 0\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_s64_0_0_z0, svint64_t,
+ svwrite_ver_za64_s64_m (0, 0, p0, z0),
+ svwrite_ver_za64_m (0, 0, p0, z0))
+
+/*
+** write_za64_s64_0_1_z0:
+** mov (w1[2-5]), #?1
+** mova za0v\.d\[\1, 0\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_s64_0_1_z0, svint64_t,
+ svwrite_ver_za64_s64_m (0, 1, p0, z0),
+ svwrite_ver_za64_m (0, 1, p0, z0))
+
+/*
+** write_za64_s64_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.d\[\1, 0\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_s64_0_w0_z0, svint64_t,
+ svwrite_ver_za64_s64_m (0, w0, p0, z0),
+ svwrite_ver_za64_m (0, w0, p0, z0))
+
+/*
+** write_za64_s64_0_w0p1_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.d\[\1, 1\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_s64_0_w0p1_z0, svint64_t,
+ svwrite_ver_za64_s64_m (0, w0 + 1, p0, z0),
+ svwrite_ver_za64_m (0, w0 + 1, p0, z0))
+
+/*
+** write_za64_s64_0_w0p2_z0:
+** add (w1[2-5]), w0, #?2
+** mova za0v\.d\[\1, 0\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_s64_0_w0p2_z0, svint64_t,
+ svwrite_ver_za64_s64_m (0, w0 + 2, p0, z0),
+ svwrite_ver_za64_m (0, w0 + 2, p0, z0))
+
+/*
+** write_za64_s64_0_w0m1_z0:
+** sub (w1[2-5]), w0, #?1
+** mova za0v\.d\[\1, 0\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_s64_0_w0m1_z0, svint64_t,
+ svwrite_ver_za64_s64_m (0, w0 - 1, p0, z0),
+ svwrite_ver_za64_m (0, w0 - 1, p0, z0))
+
+/*
+** write_za64_s64_1_w0_z0:
+** mov (w1[2-5]), w0
+** mova za1v\.d\[\1, 0\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_s64_1_w0_z0, svint64_t,
+ svwrite_ver_za64_s64_m (1, w0, p0, z0),
+ svwrite_ver_za64_m (1, w0, p0, z0))
+
+/*
+** write_za64_s64_1_w0p1_z0:
+** mov (w1[2-5]), w0
+** mova za1v\.d\[\1, 1\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_s64_1_w0p1_z0, svint64_t,
+ svwrite_ver_za64_s64_m (1, w0 + 1, p0, z0),
+ svwrite_ver_za64_m (1, w0 + 1, p0, z0))
+
+/*
+** write_za64_s64_7_w0_z0:
+** mov (w1[2-5]), w0
+** mova za7v\.d\[\1, 0\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_s64_7_w0_z0, svint64_t,
+ svwrite_ver_za64_s64_m (7, w0, p0, z0),
+ svwrite_ver_za64_m (7, w0, p0, z0))
+
+/*
+** write_za64_s64_7_w0p1_z0:
+** mov (w1[2-5]), w0
+** mova za7v\.d\[\1, 1\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_s64_7_w0p1_z0, svint64_t,
+ svwrite_ver_za64_s64_m (7, w0 + 1, p0, z0),
+ svwrite_ver_za64_m (7, w0 + 1, p0, z0))
+
+/*
+** write_za64_s64_0_w0_z1:
+** mov (w1[2-5]), w0
+** mova za0v\.d\[\1, 0\], p0/m, z1\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_s64_0_w0_z1, svint64_t,
+ svwrite_ver_za64_s64_m (0, w0, p0, z1),
+ svwrite_ver_za64_m (0, w0, p0, z1))
+
+/*
+** write_za64_u64_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.d\[\1, 0\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_u64_0_w0_z0, svuint64_t,
+ svwrite_ver_za64_u64_m (0, w0, p0, z0),
+ svwrite_ver_za64_m (0, w0, p0, z0))
+
+/*
+** write_za64_f64_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.d\[\1, 0\], p0/m, z0\.d
+** ret
+*/
+TEST_WRITE_ZA (write_za64_f64_0_w0_z0, svfloat64_t,
+ svwrite_ver_za64_f64_m (0, w0, p0, z0),
+ svwrite_ver_za64_m (0, w0, p0, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_ver_za8.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_ver_za8.c
new file mode 100644
index 0000000..dd61828
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/write_ver_za8.c
@@ -0,0 +1,93 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme_acle.h"
+
+/*
+** write_za8_s8_0_0_z0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0v\.b\[\1, 0\], p0/m, z0\.b
+** ret
+*/
+TEST_WRITE_ZA (write_za8_s8_0_0_z0, svint8_t,
+ svwrite_ver_za8_s8_m (0, 0, p0, z0),
+ svwrite_ver_za8_m (0, 0, p0, z0))
+
+/*
+** write_za8_s8_0_1_z0:
+** mov (w1[2-5]), #?1
+** mova za0v\.b\[\1, 0\], p0/m, z0\.b
+** ret
+*/
+TEST_WRITE_ZA (write_za8_s8_0_1_z0, svint8_t,
+ svwrite_ver_za8_s8_m (0, 1, p0, z0),
+ svwrite_ver_za8_m (0, 1, p0, z0))
+
+/*
+** write_za8_s8_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.b\[\1, 0\], p0/m, z0\.b
+** ret
+*/
+TEST_WRITE_ZA (write_za8_s8_0_w0_z0, svint8_t,
+ svwrite_ver_za8_s8_m (0, w0, p0, z0),
+ svwrite_ver_za8_m (0, w0, p0, z0))
+
+/*
+** write_za8_s8_0_w0p1_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.b\[\1, 1\], p0/m, z0\.b
+** ret
+*/
+TEST_WRITE_ZA (write_za8_s8_0_w0p1_z0, svint8_t,
+ svwrite_ver_za8_s8_m (0, w0 + 1, p0, z0),
+ svwrite_ver_za8_m (0, w0 + 1, p0, z0))
+
+/*
+** write_za8_s8_0_w0p15_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.b\[\1, 15\], p0/m, z0\.b
+** ret
+*/
+TEST_WRITE_ZA (write_za8_s8_0_w0p15_z0, svint8_t,
+ svwrite_ver_za8_s8_m (0, w0 + 15, p0, z0),
+ svwrite_ver_za8_m (0, w0 + 15, p0, z0))
+
+/*
+** write_za8_s8_0_w0p16_z0:
+** add (w1[2-5]), w0, #?16
+** mova za0v\.b\[\1, 0\], p0/m, z0\.b
+** ret
+*/
+TEST_WRITE_ZA (write_za8_s8_0_w0p16_z0, svint8_t,
+ svwrite_ver_za8_s8_m (0, w0 + 16, p0, z0),
+ svwrite_ver_za8_m (0, w0 + 16, p0, z0))
+
+/*
+** write_za8_s8_0_w0m1_z0:
+** sub (w1[2-5]), w0, #?1
+** mova za0v\.b\[\1, 0\], p0/m, z0\.b
+** ret
+*/
+TEST_WRITE_ZA (write_za8_s8_0_w0m1_z0, svint8_t,
+ svwrite_ver_za8_s8_m (0, w0 - 1, p0, z0),
+ svwrite_ver_za8_m (0, w0 - 1, p0, z0))
+
+/*
+** write_za8_s8_0_w0_z1:
+** mov (w1[2-5]), w0
+** mova za0v\.b\[\1, 0\], p0/m, z1\.b
+** ret
+*/
+TEST_WRITE_ZA (write_za8_s8_0_w0_z1, svint8_t,
+ svwrite_ver_za8_s8_m (0, w0, p0, z1),
+ svwrite_ver_za8_m (0, w0, p0, z1))
+
+/*
+** write_za8_u8_0_w0_z0:
+** mov (w1[2-5]), w0
+** mova za0v\.b\[\1, 0\], p0/m, z0\.b
+** ret
+*/
+TEST_WRITE_ZA (write_za8_u8_0_w0_z0, svuint8_t,
+ svwrite_ver_za8_u8_m (0, w0, p0, z0),
+ svwrite_ver_za8_m (0, w0, p0, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/zero_mask_za.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/zero_mask_za.c
new file mode 100644
index 0000000..9ce7331
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/zero_mask_za.c
@@ -0,0 +1,130 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#define STREAMING_COMPATIBLE
+#include "test_sme_acle.h"
+
+/*
+** zero_mask_za_0:
+** zero { *}
+** ret
+*/
+PROTO (zero_mask_za_0, void, ()) { svzero_mask_za (0); }
+
+/*
+** zero_mask_za_01:
+** zero { za0\.d }
+** ret
+*/
+PROTO (zero_mask_za_01, void, ()) { svzero_mask_za (0x01); }
+
+/*
+** zero_mask_za_80:
+** zero { za7\.d }
+** ret
+*/
+PROTO (zero_mask_za_80, void, ()) { svzero_mask_za (0x80); }
+
+/*
+** zero_mask_za_03:
+** zero { za0\.d, za1\.d }
+** ret
+*/
+PROTO (zero_mask_za_03, void, ()) { svzero_mask_za (0x03); }
+
+/*
+** zero_mask_za_09:
+** zero { za0\.d, za3\.d }
+** ret
+*/
+PROTO (zero_mask_za_09, void, ()) { svzero_mask_za (0x09); }
+
+/*
+** zero_mask_za_0d:
+** zero { za0\.d, za2\.d, za3\.d }
+** ret
+*/
+PROTO (zero_mask_za_0d, void, ()) { svzero_mask_za (0x0d); }
+
+/*
+** zero_mask_za_3c:
+** zero { za2\.d, za3\.d, za4\.d, za5\.d }
+** ret
+*/
+PROTO (zero_mask_za_3c, void, ()) { svzero_mask_za (0x3c); }
+
+/*
+** zero_mask_za_5a:
+** zero { za1\.d, za3\.d, za4\.d, za6\.d }
+** ret
+*/
+PROTO (zero_mask_za_5a, void, ()) { svzero_mask_za (0x5a); }
+
+/*
+** zero_mask_za_11:
+** zero { za0\.s }
+** ret
+*/
+PROTO (zero_mask_za_11, void, ()) { svzero_mask_za (0x11); }
+
+/*
+** zero_mask_za_88:
+** zero { za3\.s }
+** ret
+*/
+PROTO (zero_mask_za_88, void, ()) { svzero_mask_za (0x88); }
+
+/*
+** zero_mask_za_33:
+** zero { za0\.s, za1\.s }
+** ret
+*/
+PROTO (zero_mask_za_33, void, ()) { svzero_mask_za (0x33); }
+
+/*
+** zero_mask_za_cc:
+** zero { za2\.s, za3\.s }
+** ret
+*/
+PROTO (zero_mask_za_cc, void, ()) { svzero_mask_za (0xcc); }
+
+/*
+** zero_mask_za_55:
+** zero { za0\.h }
+** ret
+*/
+PROTO (zero_mask_za_55, void, ()) { svzero_mask_za (0x55); }
+
+/*
+** zero_mask_za_aa:
+** zero { za1\.h }
+** ret
+*/
+PROTO (zero_mask_za_aa, void, ()) { svzero_mask_za (0xaa); }
+
+/*
+** zero_mask_za_ab:
+** zero { za1\.h, za0\.d }
+** ret
+*/
+PROTO (zero_mask_za_ab, void, ()) { svzero_mask_za (0xab); }
+
+/*
+** zero_mask_za_d7:
+** zero { za0\.h, za1\.d, za7\.d }
+** ret
+*/
+PROTO (zero_mask_za_d7, void, ()) { svzero_mask_za (0xd7); }
+
+/*
+** zero_mask_za_bf:
+** zero { za1\.h, za0\.s, za2\.d }
+** ret
+*/
+PROTO (zero_mask_za_bf, void, ()) { svzero_mask_za (0xbf); }
+
+/*
+** zero_mask_za_ff:
+** zero { za }
+** ret
+*/
+PROTO (zero_mask_za_ff, void, ()) { svzero_mask_za (0xff); }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/zero_za.c b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/zero_za.c
new file mode 100644
index 0000000..4688d09
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/acle-asm/zero_za.c
@@ -0,0 +1,11 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#define STREAMING_COMPATIBLE
+#include "test_sme_acle.h"
+
+/*
+** zero_za:
+** zero { za }
+** ret
+*/
+PROTO (zero_za, void, ()) { svzero_za (); }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/arm_neon_1.c b/gcc/testsuite/gcc.target/aarch64/sme/arm_neon_1.c
new file mode 100644
index 0000000..5b5346c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/arm_neon_1.c
@@ -0,0 +1,13 @@
+// { dg-options "" }
+
+#include <arm_neon.h>
+
+#pragma GCC target "+nosme"
+
+// { dg-error {inlining failed.*'vhaddq_s32'} "" { target *-*-* } 0 }
+
+int32x4_t
+foo (int32x4_t x, int32x4_t y) [[arm::streaming_compatible]]
+{
+ return vhaddq_s32 (x, y);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/arm_neon_2.c b/gcc/testsuite/gcc.target/aarch64/sme/arm_neon_2.c
new file mode 100644
index 0000000..2092c44
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/arm_neon_2.c
@@ -0,0 +1,11 @@
+// { dg-options "" }
+
+#include <arm_neon.h>
+
+// { dg-error {inlining failed.*'vhaddq_s32'} "" { target *-*-* } 0 }
+
+int32x4_t
+foo (int32x4_t x, int32x4_t y) [[arm::streaming_compatible]]
+{
+ return vhaddq_s32 (x, y);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/arm_neon_3.c b/gcc/testsuite/gcc.target/aarch64/sme/arm_neon_3.c
new file mode 100644
index 0000000..36794e5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/arm_neon_3.c
@@ -0,0 +1,11 @@
+// { dg-options "" }
+
+#include <arm_neon.h>
+
+// { dg-error {inlining failed.*'vhaddq_s32'} "" { target *-*-* } 0 }
+
+int32x4_t
+foo (int32x4_t x, int32x4_t y) [[arm::streaming]]
+{
+ return vhaddq_s32 (x, y);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_1.c b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_1.c
new file mode 100644
index 0000000..a2de557
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_1.c
@@ -0,0 +1,233 @@
+// { dg-options "-O -fomit-frame-pointer -fno-optimize-sibling-calls" }
+// { dg-final { check-function-bodies "**" "" } }
+
+void ns_callee ();
+ void s_callee () [[arm::streaming]];
+ void sc_callee () [[arm::streaming_compatible]];
+
+void ns_callee_stack (int, int, int, int, int, int, int, int, int);
+
+struct callbacks {
+ void (*ns_ptr) ();
+ void (*s_ptr) () [[arm::streaming]];
+ void (*sc_ptr) () [[arm::streaming_compatible]];
+};
+
+/*
+** n_caller: { target lp64 }
+** stp x30, (x19|x2[0-8]), \[sp, #?-96\]!
+** cntd x16
+** str x16, \[sp, #?16\]
+** stp d8, d9, \[sp, #?32\]
+** stp d10, d11, \[sp, #?48\]
+** stp d12, d13, \[sp, #?64\]
+** stp d14, d15, \[sp, #?80\]
+** mov \1, x0
+** bl ns_callee
+** smstart sm
+** bl s_callee
+** smstop sm
+** bl sc_callee
+** ldr (x[0-9]+), \[\1\]
+** blr \2
+** ldr (x[0-9]+), \[\1, #?8\]
+** smstart sm
+** blr \3
+** smstop sm
+** ldr (x[0-9]+), \[\1, #?16\]
+** blr \4
+** ldp d8, d9, \[sp, #?32\]
+** ldp d10, d11, \[sp, #?48\]
+** ldp d12, d13, \[sp, #?64\]
+** ldp d14, d15, \[sp, #?80\]
+** ldp x30, \1, \[sp\], #?96
+** ret
+*/
+void
+n_caller (struct callbacks *c)
+{
+ ns_callee ();
+ s_callee ();
+ sc_callee ();
+
+ c->ns_ptr ();
+ c->s_ptr ();
+ c->sc_ptr ();
+}
+
+/*
+** s_caller: { target lp64 }
+** stp x30, (x19|x2[0-8]), \[sp, #?-96\]!
+** cntd x16
+** str x16, \[sp, #?16\]
+** stp d8, d9, \[sp, #?32\]
+** stp d10, d11, \[sp, #?48\]
+** stp d12, d13, \[sp, #?64\]
+** stp d14, d15, \[sp, #?80\]
+** mov \1, x0
+** smstop sm
+** bl ns_callee
+** smstart sm
+** bl s_callee
+** bl sc_callee
+** ldr (x[0-9]+), \[\1\]
+** smstop sm
+** blr \2
+** smstart sm
+** ldr (x[0-9]+), \[\1, #?8\]
+** blr \3
+** ldr (x[0-9]+), \[\1, #?16\]
+** blr \4
+** ldp d8, d9, \[sp, #?32\]
+** ldp d10, d11, \[sp, #?48\]
+** ldp d12, d13, \[sp, #?64\]
+** ldp d14, d15, \[sp, #?80\]
+** ldp x30, \1, \[sp\], #?96
+** ret
+*/
+void
+s_caller (struct callbacks *c) [[arm::streaming]]
+{
+ ns_callee ();
+ s_callee ();
+ sc_callee ();
+
+ c->ns_ptr ();
+ c->s_ptr ();
+ c->sc_ptr ();
+}
+
+/*
+** sc_caller_sme:
+** stp x29, x30, \[sp, #?-96\]!
+** mov x29, sp
+** cntd x16
+** str x16, \[sp, #?24\]
+** stp d8, d9, \[sp, #?32\]
+** stp d10, d11, \[sp, #?48\]
+** stp d12, d13, \[sp, #?64\]
+** stp d14, d15, \[sp, #?80\]
+** mrs x16, svcr
+** str x16, \[x29, #?16\]
+** ldr x16, \[x29, #?16\]
+** tbz x16, 0, .*
+** smstop sm
+** bl ns_callee
+** ldr x16, \[x29, #?16\]
+** tbz x16, 0, .*
+** smstart sm
+** ldr x16, \[x29, #?16\]
+** tbnz x16, 0, .*
+** smstart sm
+** bl s_callee
+** ldr x16, \[x29, #?16\]
+** tbnz x16, 0, .*
+** smstop sm
+** bl sc_callee
+** ldp d8, d9, \[sp, #?32\]
+** ldp d10, d11, \[sp, #?48\]
+** ldp d12, d13, \[sp, #?64\]
+** ldp d14, d15, \[sp, #?80\]
+** ldp x29, x30, \[sp\], #?96
+** ret
+*/
+void
+sc_caller_sme () [[arm::streaming_compatible]]
+{
+ ns_callee ();
+ s_callee ();
+ sc_callee ();
+}
+
+#pragma GCC target "+nosme"
+
+/*
+** sc_caller:
+** stp x29, x30, \[sp, #?-96\]!
+** mov x29, sp
+** cntd x16
+** str x16, \[sp, #?24\]
+** stp d8, d9, \[sp, #?32\]
+** stp d10, d11, \[sp, #?48\]
+** stp d12, d13, \[sp, #?64\]
+** stp d14, d15, \[sp, #?80\]
+** bl __arm_sme_state
+** str x0, \[x29, #?16\]
+** ...
+** bl sc_callee
+** ldp d8, d9, \[sp, #?32\]
+** ldp d10, d11, \[sp, #?48\]
+** ldp d12, d13, \[sp, #?64\]
+** ldp d14, d15, \[sp, #?80\]
+** ldp x29, x30, \[sp\], #?96
+** ret
+*/
+void
+sc_caller () [[arm::streaming_compatible]]
+{
+ ns_callee ();
+ sc_callee ();
+}
+
+/*
+** sc_caller_x0:
+** ...
+** mov x10, x0
+** bl __arm_sme_state
+** ...
+** str wzr, \[x10\]
+** ...
+*/
+void
+sc_caller_x0 (int *ptr) [[arm::streaming_compatible]]
+{
+ *ptr = 0;
+ ns_callee ();
+ sc_callee ();
+}
+
+/*
+** sc_caller_x1:
+** ...
+** mov x10, x0
+** mov x11, x1
+** bl __arm_sme_state
+** ...
+** str w11, \[x10\]
+** ...
+*/
+void
+sc_caller_x1 (int *ptr, int a) [[arm::streaming_compatible]]
+{
+ *ptr = a;
+ ns_callee ();
+ sc_callee ();
+}
+
+/*
+** sc_caller_stack:
+** sub sp, sp, #112
+** stp x29, x30, \[sp, #?16\]
+** add x29, sp, #?16
+** ...
+** stp d8, d9, \[sp, #?48\]
+** ...
+** bl __arm_sme_state
+** str x0, \[x29, #?16\]
+** ...
+** bl ns_callee_stack
+** ldr x16, \[x29, #?16\]
+** tbz x16, 0, .*
+** smstart sm
+** ...
+*/
+void
+sc_caller_stack () [[arm::streaming_compatible]]
+{
+ ns_callee_stack (0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+/* { dg-final { scan-assembler {n_caller:(?:(?!ret).)*\.cfi_offset 46, -80\n} } } */
+/* { dg-final { scan-assembler {s_caller:(?:(?!ret).)*\.cfi_offset 46, -80\n} } } */
+/* { dg-final { scan-assembler {sc_caller_sme:(?:(?!ret).)*\.cfi_offset 46, -72\n} } } */
+/* { dg-final { scan-assembler {sc_caller:(?:(?!ret).)*\.cfi_offset 46, -72\n} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_10.c b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_10.c
new file mode 100644
index 0000000..49c5e4a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_10.c
@@ -0,0 +1,37 @@
+// { dg-options "" }
+
+#pragma GCC target "+nosme"
+
+void ns_callee ();
+ void s_callee () [[arm::streaming]];
+ void sc_callee () [[arm::streaming_compatible]];
+
+struct callbacks {
+ void (*ns_ptr) ();
+ void (*s_ptr) () [[arm::streaming]];
+ void (*sc_ptr) () [[arm::streaming_compatible]];
+};
+
+void
+n_caller (struct callbacks *c)
+{
+ ns_callee ();
+ s_callee (); // { dg-error "calling a streaming function requires the ISA extension 'sme'" }
+ sc_callee ();
+
+ c->ns_ptr ();
+ c->s_ptr (); // { dg-error "calling a streaming function requires the ISA extension 'sme'" }
+ c->sc_ptr ();
+}
+
+void
+sc_caller_sme (struct callbacks *c) [[arm::streaming_compatible]]
+{
+ ns_callee ();
+ s_callee (); // { dg-error "calling a streaming function requires the ISA extension 'sme'" }
+ sc_callee ();
+
+ c->ns_ptr ();
+ c->s_ptr (); // { dg-error "calling a streaming function requires the ISA extension 'sme'" }
+ c->sc_ptr ();
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_2.c b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_2.c
new file mode 100644
index 0000000..890fcbc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_2.c
@@ -0,0 +1,43 @@
+// { dg-options "-O -fomit-frame-pointer -fno-optimize-sibling-calls" }
+
+void ns_callee ();
+ void s_callee () [[arm::streaming]];
+ void sc_callee () [[arm::streaming_compatible]];
+
+struct callbacks {
+ void (*ns_ptr) ();
+ void (*s_ptr) () [[arm::streaming]];
+ void (*sc_ptr) () [[arm::streaming_compatible]];
+};
+
+void
+n_caller (struct callbacks *c)
+{
+ ns_callee ();
+ sc_callee ();
+
+ c->ns_ptr ();
+ c->sc_ptr ();
+}
+
+void
+s_caller (struct callbacks *c) [[arm::streaming]]
+{
+ s_callee ();
+ sc_callee ();
+
+ c->s_ptr ();
+ c->sc_ptr ();
+}
+
+void
+sc_caller (struct callbacks *c) [[arm::streaming_compatible]]
+{
+ sc_callee ();
+
+ c->sc_ptr ();
+}
+
+// { dg-final { scan-assembler-not {[dpqz][0-9]+,} } }
+// { dg-final { scan-assembler-not {smstart\tsm} } }
+// { dg-final { scan-assembler-not {smstop\tsm} } }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_3.c b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_3.c
new file mode 100644
index 0000000..ed999d0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_3.c
@@ -0,0 +1,166 @@
+// { dg-options "-O -fomit-frame-pointer -fno-optimize-sibling-calls" }
+// { dg-final { check-function-bodies "**" "" } }
+
+__attribute__((aarch64_vector_pcs)) void ns_callee ();
+__attribute__((aarch64_vector_pcs)) void s_callee () [[arm::streaming]];
+__attribute__((aarch64_vector_pcs)) void sc_callee () [[arm::streaming_compatible]];
+
+struct callbacks {
+ __attribute__((aarch64_vector_pcs)) void (*ns_ptr) ();
+ __attribute__((aarch64_vector_pcs)) void (*s_ptr) () [[arm::streaming]];
+ __attribute__((aarch64_vector_pcs)) void (*sc_ptr) () [[arm::streaming_compatible]];
+};
+
+/*
+** n_caller: { target lp64 }
+** stp x30, (x19|x2[0-8]), \[sp, #?-288\]!
+** cntd x16
+** str x16, \[sp, #?16\]
+** stp q8, q9, \[sp, #?32\]
+** stp q10, q11, \[sp, #?64\]
+** stp q12, q13, \[sp, #?96\]
+** stp q14, q15, \[sp, #?128\]
+** stp q16, q17, \[sp, #?160\]
+** stp q18, q19, \[sp, #?192\]
+** stp q20, q21, \[sp, #?224\]
+** stp q22, q23, \[sp, #?256\]
+** mov \1, x0
+** bl ns_callee
+** smstart sm
+** bl s_callee
+** smstop sm
+** bl sc_callee
+** ldr (x[0-9]+), \[\1\]
+** blr \2
+** ldr (x[0-9]+), \[\1, #?8\]
+** smstart sm
+** blr \3
+** smstop sm
+** ldr (x[0-9]+), \[\1, #?16\]
+** blr \4
+** ldp q8, q9, \[sp, #?32\]
+** ldp q10, q11, \[sp, #?64\]
+** ldp q12, q13, \[sp, #?96\]
+** ldp q14, q15, \[sp, #?128\]
+** ldp q16, q17, \[sp, #?160\]
+** ldp q18, q19, \[sp, #?192\]
+** ldp q20, q21, \[sp, #?224\]
+** ldp q22, q23, \[sp, #?256\]
+** ldp x30, \1, \[sp\], #?288
+** ret
+*/
+void __attribute__((aarch64_vector_pcs))
+n_caller (struct callbacks *c)
+{
+ ns_callee ();
+ s_callee ();
+ sc_callee ();
+
+ c->ns_ptr ();
+ c->s_ptr ();
+ c->sc_ptr ();
+}
+
+/*
+** s_caller: { target lp64 }
+** stp x30, (x19|x2[0-8]), \[sp, #?-288\]!
+** cntd x16
+** str x16, \[sp, #?16\]
+** stp q8, q9, \[sp, #?32\]
+** stp q10, q11, \[sp, #?64\]
+** stp q12, q13, \[sp, #?96\]
+** stp q14, q15, \[sp, #?128\]
+** stp q16, q17, \[sp, #?160\]
+** stp q18, q19, \[sp, #?192\]
+** stp q20, q21, \[sp, #?224\]
+** stp q22, q23, \[sp, #?256\]
+** mov \1, x0
+** smstop sm
+** bl ns_callee
+** smstart sm
+** bl s_callee
+** bl sc_callee
+** ldr (x[0-9]+), \[\1\]
+** smstop sm
+** blr \2
+** smstart sm
+** ldr (x[0-9]+), \[\1, #?8\]
+** blr \3
+** ldr (x[0-9]+), \[\1, #?16\]
+** blr \4
+** ldp q8, q9, \[sp, #?32\]
+** ldp q10, q11, \[sp, #?64\]
+** ldp q12, q13, \[sp, #?96\]
+** ldp q14, q15, \[sp, #?128\]
+** ldp q16, q17, \[sp, #?160\]
+** ldp q18, q19, \[sp, #?192\]
+** ldp q20, q21, \[sp, #?224\]
+** ldp q22, q23, \[sp, #?256\]
+** ldp x30, \1, \[sp\], #?288
+** ret
+*/
+void __attribute__((aarch64_vector_pcs))
+s_caller (struct callbacks *c) [[arm::streaming]]
+{
+ ns_callee ();
+ s_callee ();
+ sc_callee ();
+
+ c->ns_ptr ();
+ c->s_ptr ();
+ c->sc_ptr ();
+}
+
+/*
+** sc_caller:
+** stp x29, x30, \[sp, #?-288\]!
+** mov x29, sp
+** cntd x16
+** str x16, \[sp, #?24\]
+** stp q8, q9, \[sp, #?32\]
+** stp q10, q11, \[sp, #?64\]
+** stp q12, q13, \[sp, #?96\]
+** stp q14, q15, \[sp, #?128\]
+** stp q16, q17, \[sp, #?160\]
+** stp q18, q19, \[sp, #?192\]
+** stp q20, q21, \[sp, #?224\]
+** stp q22, q23, \[sp, #?256\]
+** mrs x16, svcr
+** str x16, \[x29, #?16\]
+** ldr x16, \[x29, #?16\]
+** tbz x16, 0, .*
+** smstop sm
+** bl ns_callee
+** ldr x16, \[x29, #?16\]
+** tbz x16, 0, .*
+** smstart sm
+** ldr x16, \[x29, #?16\]
+** tbnz x16, 0, .*
+** smstart sm
+** bl s_callee
+** ldr x16, \[x29, #?16\]
+** tbnz x16, 0, .*
+** smstop sm
+** bl sc_callee
+** ldp q8, q9, \[sp, #?32\]
+** ldp q10, q11, \[sp, #?64\]
+** ldp q12, q13, \[sp, #?96\]
+** ldp q14, q15, \[sp, #?128\]
+** ldp q16, q17, \[sp, #?160\]
+** ldp q18, q19, \[sp, #?192\]
+** ldp q20, q21, \[sp, #?224\]
+** ldp q22, q23, \[sp, #?256\]
+** ldp x29, x30, \[sp\], #?288
+** ret
+*/
+void __attribute__((aarch64_vector_pcs))
+sc_caller () [[arm::streaming_compatible]]
+{
+ ns_callee ();
+ s_callee ();
+ sc_callee ();
+}
+
+/* { dg-final { scan-assembler {n_caller:(?:(?!ret).)*\.cfi_offset 46, -272\n} } } */
+/* { dg-final { scan-assembler {s_caller:(?:(?!ret).)*\.cfi_offset 46, -272\n} } } */
+/* { dg-final { scan-assembler {sc_caller:(?:(?!ret).)*\.cfi_offset 46, -264\n} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_4.c b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_4.c
new file mode 100644
index 0000000..f93a67f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_4.c
@@ -0,0 +1,43 @@
+// { dg-options "-O -fomit-frame-pointer -fno-optimize-sibling-calls" }
+
+__attribute__((aarch64_vector_pcs)) void ns_callee ();
+__attribute__((aarch64_vector_pcs)) void s_callee () [[arm::streaming]];
+__attribute__((aarch64_vector_pcs)) void sc_callee () [[arm::streaming_compatible]];
+
+struct callbacks {
+ __attribute__((aarch64_vector_pcs)) void (*ns_ptr) ();
+ __attribute__((aarch64_vector_pcs)) void (*s_ptr) () [[arm::streaming]];
+ __attribute__((aarch64_vector_pcs)) void (*sc_ptr) () [[arm::streaming_compatible]];
+};
+
+void __attribute__((aarch64_vector_pcs))
+n_caller (struct callbacks *c)
+{
+ ns_callee ();
+ sc_callee ();
+
+ c->ns_ptr ();
+ c->sc_ptr ();
+}
+
+void __attribute__((aarch64_vector_pcs))
+s_caller (struct callbacks *c) [[arm::streaming]]
+{
+ s_callee ();
+ sc_callee ();
+
+ c->s_ptr ();
+ c->sc_ptr ();
+}
+
+void __attribute__((aarch64_vector_pcs))
+sc_caller (struct callbacks *c) [[arm::streaming_compatible]]
+{
+ sc_callee ();
+
+ c->sc_ptr ();
+}
+
+// { dg-final { scan-assembler-not {[dpqz][0-9]+,} } }
+// { dg-final { scan-assembler-not {smstart\tsm} } }
+// { dg-final { scan-assembler-not {smstop\tsm} } }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_5.c b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_5.c
new file mode 100644
index 0000000..be9b5cc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_5.c
@@ -0,0 +1,318 @@
+// { dg-options "-O -fomit-frame-pointer -fno-optimize-sibling-calls" }
+// { dg-final { check-function-bodies "**" "" } }
+
+#include <arm_sve.h>
+
+svbool_t ns_callee ();
+ svbool_t s_callee () [[arm::streaming]];
+ svbool_t sc_callee () [[arm::streaming_compatible]];
+
+struct callbacks {
+ svbool_t (*ns_ptr) ();
+ svbool_t (*s_ptr) () [[arm::streaming]];
+ svbool_t (*sc_ptr) () [[arm::streaming_compatible]];
+};
+
+/*
+** n_caller: { target lp64 }
+** stp x30, (x19|x2[0-8]), \[sp, #?-32\]!
+** cntd x16
+** str x16, \[sp, #?16\]
+** addvl sp, sp, #-18
+** str p4, \[sp\]
+** str p5, \[sp, #1, mul vl\]
+** str p6, \[sp, #2, mul vl\]
+** str p7, \[sp, #3, mul vl\]
+** str p8, \[sp, #4, mul vl\]
+** str p9, \[sp, #5, mul vl\]
+** str p10, \[sp, #6, mul vl\]
+** str p11, \[sp, #7, mul vl\]
+** str p12, \[sp, #8, mul vl\]
+** str p13, \[sp, #9, mul vl\]
+** str p14, \[sp, #10, mul vl\]
+** str p15, \[sp, #11, mul vl\]
+** str z8, \[sp, #2, mul vl\]
+** str z9, \[sp, #3, mul vl\]
+** str z10, \[sp, #4, mul vl\]
+** str z11, \[sp, #5, mul vl\]
+** str z12, \[sp, #6, mul vl\]
+** str z13, \[sp, #7, mul vl\]
+** str z14, \[sp, #8, mul vl\]
+** str z15, \[sp, #9, mul vl\]
+** str z16, \[sp, #10, mul vl\]
+** str z17, \[sp, #11, mul vl\]
+** str z18, \[sp, #12, mul vl\]
+** str z19, \[sp, #13, mul vl\]
+** str z20, \[sp, #14, mul vl\]
+** str z21, \[sp, #15, mul vl\]
+** str z22, \[sp, #16, mul vl\]
+** str z23, \[sp, #17, mul vl\]
+** mov \1, x0
+** bl ns_callee
+** smstart sm
+** bl s_callee
+** addvl sp, sp, #-1
+** str p0, \[sp\]
+** smstop sm
+** ldr p0, \[sp\]
+** addvl sp, sp, #1
+** bl sc_callee
+** ldr (x[0-9]+), \[\1\]
+** blr \2
+** ldr (x[0-9]+), \[\1, #?8\]
+** smstart sm
+** blr \3
+** addvl sp, sp, #-1
+** str p0, \[sp\]
+** smstop sm
+** ldr p0, \[sp\]
+** addvl sp, sp, #1
+** ldr (x[0-9]+), \[\1, #?16\]
+** blr \4
+** ldr z8, \[sp, #2, mul vl\]
+** ldr z9, \[sp, #3, mul vl\]
+** ldr z10, \[sp, #4, mul vl\]
+** ldr z11, \[sp, #5, mul vl\]
+** ldr z12, \[sp, #6, mul vl\]
+** ldr z13, \[sp, #7, mul vl\]
+** ldr z14, \[sp, #8, mul vl\]
+** ldr z15, \[sp, #9, mul vl\]
+** ldr z16, \[sp, #10, mul vl\]
+** ldr z17, \[sp, #11, mul vl\]
+** ldr z18, \[sp, #12, mul vl\]
+** ldr z19, \[sp, #13, mul vl\]
+** ldr z20, \[sp, #14, mul vl\]
+** ldr z21, \[sp, #15, mul vl\]
+** ldr z22, \[sp, #16, mul vl\]
+** ldr z23, \[sp, #17, mul vl\]
+** ldr p4, \[sp\]
+** ldr p5, \[sp, #1, mul vl\]
+** ldr p6, \[sp, #2, mul vl\]
+** ldr p7, \[sp, #3, mul vl\]
+** ldr p8, \[sp, #4, mul vl\]
+** ldr p9, \[sp, #5, mul vl\]
+** ldr p10, \[sp, #6, mul vl\]
+** ldr p11, \[sp, #7, mul vl\]
+** ldr p12, \[sp, #8, mul vl\]
+** ldr p13, \[sp, #9, mul vl\]
+** ldr p14, \[sp, #10, mul vl\]
+** ldr p15, \[sp, #11, mul vl\]
+** addvl sp, sp, #18
+** ldp x30, \1, \[sp\], #?32
+** ret
+*/
+svbool_t
+n_caller (struct callbacks *c)
+{
+ ns_callee ();
+ s_callee ();
+ sc_callee ();
+
+ c->ns_ptr ();
+ c->s_ptr ();
+ return c->sc_ptr ();
+}
+
+/*
+** s_caller: { target lp64 }
+** stp x30, (x19|x2[0-8]), \[sp, #?-32\]!
+** cntd x16
+** str x16, \[sp, #?16\]
+** addvl sp, sp, #-18
+** str p4, \[sp\]
+** str p5, \[sp, #1, mul vl\]
+** str p6, \[sp, #2, mul vl\]
+** str p7, \[sp, #3, mul vl\]
+** str p8, \[sp, #4, mul vl\]
+** str p9, \[sp, #5, mul vl\]
+** str p10, \[sp, #6, mul vl\]
+** str p11, \[sp, #7, mul vl\]
+** str p12, \[sp, #8, mul vl\]
+** str p13, \[sp, #9, mul vl\]
+** str p14, \[sp, #10, mul vl\]
+** str p15, \[sp, #11, mul vl\]
+** str z8, \[sp, #2, mul vl\]
+** str z9, \[sp, #3, mul vl\]
+** str z10, \[sp, #4, mul vl\]
+** str z11, \[sp, #5, mul vl\]
+** str z12, \[sp, #6, mul vl\]
+** str z13, \[sp, #7, mul vl\]
+** str z14, \[sp, #8, mul vl\]
+** str z15, \[sp, #9, mul vl\]
+** str z16, \[sp, #10, mul vl\]
+** str z17, \[sp, #11, mul vl\]
+** str z18, \[sp, #12, mul vl\]
+** str z19, \[sp, #13, mul vl\]
+** str z20, \[sp, #14, mul vl\]
+** str z21, \[sp, #15, mul vl\]
+** str z22, \[sp, #16, mul vl\]
+** str z23, \[sp, #17, mul vl\]
+** mov \1, x0
+** smstop sm
+** bl ns_callee
+** addvl sp, sp, #-1
+** str p0, \[sp\]
+** smstart sm
+** ldr p0, \[sp\]
+** addvl sp, sp, #1
+** bl s_callee
+** bl sc_callee
+** ldr (x[0-9]+), \[\1\]
+** smstop sm
+** blr \2
+** addvl sp, sp, #-1
+** str p0, \[sp\]
+** smstart sm
+** ldr p0, \[sp\]
+** addvl sp, sp, #1
+** ldr (x[0-9]+), \[\1, #?8\]
+** blr \3
+** ldr (x[0-9]+), \[\1, #?16\]
+** blr \4
+** ldr z8, \[sp, #2, mul vl\]
+** ldr z9, \[sp, #3, mul vl\]
+** ldr z10, \[sp, #4, mul vl\]
+** ldr z11, \[sp, #5, mul vl\]
+** ldr z12, \[sp, #6, mul vl\]
+** ldr z13, \[sp, #7, mul vl\]
+** ldr z14, \[sp, #8, mul vl\]
+** ldr z15, \[sp, #9, mul vl\]
+** ldr z16, \[sp, #10, mul vl\]
+** ldr z17, \[sp, #11, mul vl\]
+** ldr z18, \[sp, #12, mul vl\]
+** ldr z19, \[sp, #13, mul vl\]
+** ldr z20, \[sp, #14, mul vl\]
+** ldr z21, \[sp, #15, mul vl\]
+** ldr z22, \[sp, #16, mul vl\]
+** ldr z23, \[sp, #17, mul vl\]
+** ldr p4, \[sp\]
+** ldr p5, \[sp, #1, mul vl\]
+** ldr p6, \[sp, #2, mul vl\]
+** ldr p7, \[sp, #3, mul vl\]
+** ldr p8, \[sp, #4, mul vl\]
+** ldr p9, \[sp, #5, mul vl\]
+** ldr p10, \[sp, #6, mul vl\]
+** ldr p11, \[sp, #7, mul vl\]
+** ldr p12, \[sp, #8, mul vl\]
+** ldr p13, \[sp, #9, mul vl\]
+** ldr p14, \[sp, #10, mul vl\]
+** ldr p15, \[sp, #11, mul vl\]
+** addvl sp, sp, #18
+** ldp x30, \1, \[sp\], #?32
+** ret
+*/
+svbool_t
+s_caller (struct callbacks *c) [[arm::streaming]]
+{
+ ns_callee ();
+ s_callee ();
+ sc_callee ();
+
+ c->ns_ptr ();
+ c->s_ptr ();
+ return c->sc_ptr ();
+}
+
+/*
+** sc_caller:
+** stp x29, x30, \[sp, #?-32\]!
+** mov x29, sp
+** cntd x16
+** str x16, \[sp, #?24\]
+** addvl sp, sp, #-18
+** str p4, \[sp\]
+** str p5, \[sp, #1, mul vl\]
+** str p6, \[sp, #2, mul vl\]
+** str p7, \[sp, #3, mul vl\]
+** str p8, \[sp, #4, mul vl\]
+** str p9, \[sp, #5, mul vl\]
+** str p10, \[sp, #6, mul vl\]
+** str p11, \[sp, #7, mul vl\]
+** str p12, \[sp, #8, mul vl\]
+** str p13, \[sp, #9, mul vl\]
+** str p14, \[sp, #10, mul vl\]
+** str p15, \[sp, #11, mul vl\]
+** str z8, \[sp, #2, mul vl\]
+** str z9, \[sp, #3, mul vl\]
+** str z10, \[sp, #4, mul vl\]
+** str z11, \[sp, #5, mul vl\]
+** str z12, \[sp, #6, mul vl\]
+** str z13, \[sp, #7, mul vl\]
+** str z14, \[sp, #8, mul vl\]
+** str z15, \[sp, #9, mul vl\]
+** str z16, \[sp, #10, mul vl\]
+** str z17, \[sp, #11, mul vl\]
+** str z18, \[sp, #12, mul vl\]
+** str z19, \[sp, #13, mul vl\]
+** str z20, \[sp, #14, mul vl\]
+** str z21, \[sp, #15, mul vl\]
+** str z22, \[sp, #16, mul vl\]
+** str z23, \[sp, #17, mul vl\]
+** mrs x16, svcr
+** str x16, \[x29, #?16\]
+** ldr x16, \[x29, #?16\]
+** tbz x16, 0, .*
+** smstop sm
+** bl ns_callee
+** ldr x16, \[x29, #?16\]
+** tbz x16, 0, .*
+** addvl sp, sp, #-1
+** str p0, \[sp\]
+** smstart sm
+** ldr p0, \[sp\]
+** addvl sp, sp, #1
+** ldr x16, \[x29, #?16\]
+** tbnz x16, 0, .*
+** smstart sm
+** bl s_callee
+** ldr x16, \[x29, #?16\]
+** tbnz x16, 0, .*
+** addvl sp, sp, #-1
+** str p0, \[sp\]
+** smstop sm
+** ldr p0, \[sp\]
+** addvl sp, sp, #1
+** bl sc_callee
+** ldr z8, \[sp, #2, mul vl\]
+** ldr z9, \[sp, #3, mul vl\]
+** ldr z10, \[sp, #4, mul vl\]
+** ldr z11, \[sp, #5, mul vl\]
+** ldr z12, \[sp, #6, mul vl\]
+** ldr z13, \[sp, #7, mul vl\]
+** ldr z14, \[sp, #8, mul vl\]
+** ldr z15, \[sp, #9, mul vl\]
+** ldr z16, \[sp, #10, mul vl\]
+** ldr z17, \[sp, #11, mul vl\]
+** ldr z18, \[sp, #12, mul vl\]
+** ldr z19, \[sp, #13, mul vl\]
+** ldr z20, \[sp, #14, mul vl\]
+** ldr z21, \[sp, #15, mul vl\]
+** ldr z22, \[sp, #16, mul vl\]
+** ldr z23, \[sp, #17, mul vl\]
+** ldr p4, \[sp\]
+** ldr p5, \[sp, #1, mul vl\]
+** ldr p6, \[sp, #2, mul vl\]
+** ldr p7, \[sp, #3, mul vl\]
+** ldr p8, \[sp, #4, mul vl\]
+** ldr p9, \[sp, #5, mul vl\]
+** ldr p10, \[sp, #6, mul vl\]
+** ldr p11, \[sp, #7, mul vl\]
+** ldr p12, \[sp, #8, mul vl\]
+** ldr p13, \[sp, #9, mul vl\]
+** ldr p14, \[sp, #10, mul vl\]
+** ldr p15, \[sp, #11, mul vl\]
+** addvl sp, sp, #18
+** ldp x29, x30, \[sp\], #?32
+** ret
+*/
+svbool_t
+sc_caller () [[arm::streaming_compatible]]
+{
+ ns_callee ();
+ s_callee ();
+ return sc_callee ();
+}
+
+/* { dg-final { scan-assembler {n_caller:(?:(?!ret).)*\.cfi_offset 46, -16\n} } } */
+/* { dg-final { scan-assembler {s_caller:(?:(?!ret).)*\.cfi_offset 46, -16\n} } } */
+/* { dg-final { scan-assembler {sc_caller:(?:(?!ret).)*\.cfi_offset 46, -8\n} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_6.c b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_6.c
new file mode 100644
index 0000000..0f6bc4f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_6.c
@@ -0,0 +1,45 @@
+// { dg-options "-O -fomit-frame-pointer -fno-optimize-sibling-calls" }
+
+#include <arm_sve.h>
+
+svbool_t ns_callee ();
+ svbool_t s_callee () [[arm::streaming]];
+ svbool_t sc_callee () [[arm::streaming_compatible]];
+
+struct callbacks {
+ svbool_t (*ns_ptr) ();
+ svbool_t (*s_ptr) () [[arm::streaming]];
+ svbool_t (*sc_ptr) () [[arm::streaming_compatible]];
+};
+
+svbool_t
+n_caller (struct callbacks *c)
+{
+ ns_callee ();
+ sc_callee ();
+
+ c->ns_ptr ();
+ return c->sc_ptr ();
+}
+
+svbool_t
+s_caller (struct callbacks *c) [[arm::streaming]]
+{
+ s_callee ();
+ sc_callee ();
+
+ c->s_ptr ();
+ return c->sc_ptr ();
+}
+
+svbool_t
+sc_caller (struct callbacks *c) [[arm::streaming_compatible]]
+{
+ sc_callee ();
+
+ return c->sc_ptr ();
+}
+
+// { dg-final { scan-assembler-not {[dpqz][0-9]+,} } }
+// { dg-final { scan-assembler-not {smstart\tsm} } }
+// { dg-final { scan-assembler-not {smstop\tsm} } }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_7.c b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_7.c
new file mode 100644
index 0000000..6482a48
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_7.c
@@ -0,0 +1,516 @@
+// { dg-options "-O -fomit-frame-pointer -fno-optimize-sibling-calls" }
+// { dg-final { check-function-bodies "**" "" } }
+
+#include <arm_neon.h>
+#include <arm_sve.h>
+
+double produce_d0 ();
+void consume_d0 (double);
+
+/*
+** test_d0:
+** ...
+** smstop sm
+** bl produce_d0
+** fmov x10, d0
+** smstart sm
+** fmov d0, x10
+** fmov x10, d0
+** smstop sm
+** fmov d0, x10
+** bl consume_d0
+** ...
+*/
+void
+test_d0 () [[arm::streaming]]
+{
+ double res = produce_d0 ();
+ asm volatile ("");
+ consume_d0 (res);
+}
+
+int8x8_t produce_d0_vec ();
+void consume_d0_vec (int8x8_t);
+
+/*
+** test_d0_vec:
+** ...
+** smstop sm
+** bl produce_d0_vec
+** (
+** fmov x10, d0
+** |
+** umov x10, v0.d\[0\]
+** )
+** smstart sm
+** fmov d0, x10
+** (
+** fmov x10, d0
+** |
+** umov x10, v0.d\[0\]
+** )
+** smstop sm
+** fmov d0, x10
+** bl consume_d0_vec
+** ...
+*/
+void
+test_d0_vec () [[arm::streaming]]
+{
+ int8x8_t res = produce_d0_vec ();
+ asm volatile ("");
+ consume_d0_vec (res);
+}
+
+int8x16_t produce_q0 ();
+void consume_q0 (int8x16_t);
+
+/*
+** test_q0:
+** ...
+** smstop sm
+** bl produce_q0
+** str q0, \[sp, #?-16\]!
+** smstart sm
+** ldr q0, \[sp\], #?16
+** str q0, \[sp, #?-16\]!
+** smstop sm
+** ldr q0, \[sp\], #?16
+** bl consume_q0
+** ...
+*/
+void
+test_q0 () [[arm::streaming]]
+{
+ int8x16_t res = produce_q0 ();
+ asm volatile ("");
+ consume_q0 (res);
+}
+
+int8x16x2_t produce_q1 ();
+void consume_q1 (int8x16x2_t);
+
+/*
+** test_q1:
+** ...
+** smstop sm
+** bl produce_q1
+** stp q0, q1, \[sp, #?-32\]!
+** smstart sm
+** ldp q0, q1, \[sp\], #?32
+** stp q0, q1, \[sp, #?-32\]!
+** smstop sm
+** ldp q0, q1, \[sp\], #?32
+** bl consume_q1
+** ...
+*/
+void
+test_q1 () [[arm::streaming]]
+{
+ int8x16x2_t res = produce_q1 ();
+ asm volatile ("");
+ consume_q1 (res);
+}
+
+int8x16x3_t produce_q2 ();
+void consume_q2 (int8x16x3_t);
+
+/*
+** test_q2:
+** ...
+** smstop sm
+** bl produce_q2
+** stp q0, q1, \[sp, #?-48\]!
+** str q2, \[sp, #?32\]
+** smstart sm
+** ldr q2, \[sp, #?32\]
+** ldp q0, q1, \[sp\], #?48
+** stp q0, q1, \[sp, #?-48\]!
+** str q2, \[sp, #?32\]
+** smstop sm
+** ldr q2, \[sp, #?32\]
+** ldp q0, q1, \[sp\], #?48
+** bl consume_q2
+** ...
+*/
+void
+test_q2 () [[arm::streaming]]
+{
+ int8x16x3_t res = produce_q2 ();
+ asm volatile ("");
+ consume_q2 (res);
+}
+
+int8x16x4_t produce_q3 ();
+void consume_q3 (int8x16x4_t);
+
+/*
+** test_q3:
+** ...
+** smstop sm
+** bl produce_q3
+** stp q0, q1, \[sp, #?-64\]!
+** stp q2, q3, \[sp, #?32\]
+** smstart sm
+** ldp q2, q3, \[sp, #?32\]
+** ldp q0, q1, \[sp\], #?64
+** stp q0, q1, \[sp, #?-64\]!
+** stp q2, q3, \[sp, #?32\]
+** smstop sm
+** ldp q2, q3, \[sp, #?32\]
+** ldp q0, q1, \[sp\], #?64
+** bl consume_q3
+** ...
+*/
+void
+test_q3 () [[arm::streaming]]
+{
+ int8x16x4_t res = produce_q3 ();
+ asm volatile ("");
+ consume_q3 (res);
+}
+
+svint8_t produce_z0 ();
+void consume_z0 (svint8_t);
+
+/*
+** test_z0:
+** ...
+** smstop sm
+** bl produce_z0
+** addvl sp, sp, #-1
+** str z0, \[sp\]
+** smstart sm
+** ldr z0, \[sp\]
+** addvl sp, sp, #1
+** addvl sp, sp, #-1
+** str z0, \[sp\]
+** smstop sm
+** ldr z0, \[sp\]
+** addvl sp, sp, #1
+** bl consume_z0
+** ...
+*/
+void
+test_z0 () [[arm::streaming]]
+{
+ svint8_t res = produce_z0 ();
+ asm volatile ("");
+ consume_z0 (res);
+}
+
+svint8x4_t produce_z3 ();
+void consume_z3 (svint8x4_t);
+
+/*
+** test_z3:
+** ...
+** smstop sm
+** bl produce_z3
+** addvl sp, sp, #-4
+** str z0, \[sp\]
+** str z1, \[sp, #1, mul vl\]
+** str z2, \[sp, #2, mul vl\]
+** str z3, \[sp, #3, mul vl\]
+** smstart sm
+** ldr z0, \[sp\]
+** ldr z1, \[sp, #1, mul vl\]
+** ldr z2, \[sp, #2, mul vl\]
+** ldr z3, \[sp, #3, mul vl\]
+** addvl sp, sp, #4
+** addvl sp, sp, #-4
+** str z0, \[sp\]
+** str z1, \[sp, #1, mul vl\]
+** str z2, \[sp, #2, mul vl\]
+** str z3, \[sp, #3, mul vl\]
+** smstop sm
+** ldr z0, \[sp\]
+** ldr z1, \[sp, #1, mul vl\]
+** ldr z2, \[sp, #2, mul vl\]
+** ldr z3, \[sp, #3, mul vl\]
+** addvl sp, sp, #4
+** bl consume_z3
+** ...
+*/
+void
+test_z3 () [[arm::streaming]]
+{
+ svint8x4_t res = produce_z3 ();
+ asm volatile ("");
+ consume_z3 (res);
+}
+
+svbool_t produce_p0 ();
+void consume_p0 (svbool_t);
+
+/*
+** test_p0:
+** ...
+** smstop sm
+** bl produce_p0
+** addvl sp, sp, #-1
+** str p0, \[sp\]
+** smstart sm
+** ldr p0, \[sp\]
+** addvl sp, sp, #1
+** addvl sp, sp, #-1
+** str p0, \[sp\]
+** smstop sm
+** ldr p0, \[sp\]
+** addvl sp, sp, #1
+** bl consume_p0
+** ...
+*/
+void
+test_p0 () [[arm::streaming]]
+{
+ svbool_t res = produce_p0 ();
+ asm volatile ("");
+ consume_p0 (res);
+}
+
+void consume_d7 (double, double, double, double, double, double, double,
+ double);
+
+/*
+** test_d7:
+** ...
+** fmov x10, d0
+** fmov x11, d1
+** fmov x12, d2
+** fmov x13, d3
+** fmov x14, d4
+** fmov x15, d5
+** fmov x16, d6
+** fmov x17, d7
+** smstop sm
+** fmov d0, x10
+** fmov d1, x11
+** fmov d2, x12
+** fmov d3, x13
+** fmov d4, x14
+** fmov d5, x15
+** fmov d6, x16
+** fmov d7, x17
+** bl consume_d7
+** ...
+*/
+void
+test_d7 () [[arm::streaming]]
+{
+ consume_d7 (1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0);
+}
+
+void consume_d7_vec (int8x8_t, int8x8_t, int8x8_t, int8x8_t, int8x8_t,
+ int8x8_t, int8x8_t, int8x8_t);
+
+/*
+** test_d7_vec:
+** ...
+** (
+** fmov x10, d0
+** fmov x11, d1
+** fmov x12, d2
+** fmov x13, d3
+** fmov x14, d4
+** fmov x15, d5
+** fmov x16, d6
+** fmov x17, d7
+** |
+** umov x10, v0.d\[0\]
+** umov x11, v1.d\[0\]
+** umov x12, v2.d\[0\]
+** umov x13, v3.d\[0\]
+** umov x14, v4.d\[0\]
+** umov x15, v5.d\[0\]
+** umov x16, v6.d\[0\]
+** umov x17, v7.d\[0\]
+** )
+** smstop sm
+** fmov d0, x10
+** fmov d1, x11
+** fmov d2, x12
+** fmov d3, x13
+** fmov d4, x14
+** fmov d5, x15
+** fmov d6, x16
+** fmov d7, x17
+** bl consume_d7_vec
+** ...
+*/
+void
+test_d7_vec (int8x8_t *ptr) [[arm::streaming]]
+{
+ consume_d7_vec (*ptr, *ptr, *ptr, *ptr, *ptr, *ptr, *ptr, *ptr);
+}
+
+void consume_q7 (int8x16_t, int8x16_t, int8x16_t, int8x16_t, int8x16_t,
+ int8x16_t, int8x16_t, int8x16_t);
+
+/*
+** test_q7:
+** ...
+** stp q0, q1, \[sp, #?-128\]!
+** stp q2, q3, \[sp, #?32\]
+** stp q4, q5, \[sp, #?64\]
+** stp q6, q7, \[sp, #?96\]
+** smstop sm
+** ldp q2, q3, \[sp, #?32\]
+** ldp q4, q5, \[sp, #?64\]
+** ldp q6, q7, \[sp, #?96\]
+** ldp q0, q1, \[sp\], #?128
+** bl consume_q7
+** ...
+*/
+void
+test_q7 (int8x16_t *ptr) [[arm::streaming]]
+{
+ consume_q7 (*ptr, *ptr, *ptr, *ptr, *ptr, *ptr, *ptr, *ptr);
+}
+
+void consume_z7 (svint8_t, svint8_t, svint8_t, svint8_t, svint8_t,
+ svint8_t, svint8_t, svint8_t);
+
+/*
+** test_z7:
+** ...
+** addvl sp, sp, #-8
+** str z0, \[sp\]
+** str z1, \[sp, #1, mul vl\]
+** str z2, \[sp, #2, mul vl\]
+** str z3, \[sp, #3, mul vl\]
+** str z4, \[sp, #4, mul vl\]
+** str z5, \[sp, #5, mul vl\]
+** str z6, \[sp, #6, mul vl\]
+** str z7, \[sp, #7, mul vl\]
+** smstop sm
+** ldr z0, \[sp\]
+** ldr z1, \[sp, #1, mul vl\]
+** ldr z2, \[sp, #2, mul vl\]
+** ldr z3, \[sp, #3, mul vl\]
+** ldr z4, \[sp, #4, mul vl\]
+** ldr z5, \[sp, #5, mul vl\]
+** ldr z6, \[sp, #6, mul vl\]
+** ldr z7, \[sp, #7, mul vl\]
+** addvl sp, sp, #8
+** bl consume_z7
+** ...
+*/
+void
+test_z7 (svint8_t *ptr) [[arm::streaming]]
+{
+ consume_z7 (*ptr, *ptr, *ptr, *ptr, *ptr, *ptr, *ptr, *ptr);
+}
+
+void consume_p3 (svbool_t, svbool_t, svbool_t, svbool_t);
+
+/*
+** test_p3:
+** ...
+** addvl sp, sp, #-1
+** str p0, \[sp\]
+** str p1, \[sp, #1, mul vl\]
+** str p2, \[sp, #2, mul vl\]
+** str p3, \[sp, #3, mul vl\]
+** smstop sm
+** ldr p0, \[sp\]
+** ldr p1, \[sp, #1, mul vl\]
+** ldr p2, \[sp, #2, mul vl\]
+** ldr p3, \[sp, #3, mul vl\]
+** addvl sp, sp, #1
+** bl consume_p3
+** ...
+*/
+void
+test_p3 (svbool_t *ptr) [[arm::streaming]]
+{
+ consume_p3 (*ptr, *ptr, *ptr, *ptr);
+}
+
+void consume_mixed (float, double, float32x4_t, svfloat32_t,
+ float, double, float64x2_t, svfloat64_t,
+ svbool_t, svbool_t, svbool_t, svbool_t);
+
+/*
+** test_mixed:
+** ...
+** addvl sp, sp, #-3
+** str p0, \[sp\]
+** str p1, \[sp, #1, mul vl\]
+** str p2, \[sp, #2, mul vl\]
+** str p3, \[sp, #3, mul vl\]
+** str z3, \[sp, #1, mul vl\]
+** str z7, \[sp, #2, mul vl\]
+** stp q2, q6, \[sp, #?-32\]!
+** fmov w10, s0
+** fmov x11, d1
+** fmov w12, s4
+** fmov x13, d5
+** smstop sm
+** fmov s0, w10
+** fmov d1, x11
+** fmov s4, w12
+** fmov d5, x13
+** ldp q2, q6, \[sp\], #?32
+** ldr p0, \[sp\]
+** ldr p1, \[sp, #1, mul vl\]
+** ldr p2, \[sp, #2, mul vl\]
+** ldr p3, \[sp, #3, mul vl\]
+** ldr z3, \[sp, #1, mul vl\]
+** ldr z7, \[sp, #2, mul vl\]
+** addvl sp, sp, #3
+** bl consume_mixed
+** ...
+*/
+void
+test_mixed (float32x4_t *float32x4_ptr,
+ svfloat32_t *svfloat32_ptr,
+ float64x2_t *float64x2_ptr,
+ svfloat64_t *svfloat64_ptr,
+ svbool_t *svbool_ptr) [[arm::streaming]]
+{
+ consume_mixed (1.0f, 2.0, *float32x4_ptr, *svfloat32_ptr,
+ 3.0f, 4.0, *float64x2_ptr, *svfloat64_ptr,
+ *svbool_ptr, *svbool_ptr, *svbool_ptr, *svbool_ptr);
+}
+
+void consume_varargs (float, ...);
+
+/*
+** test_varargs:
+** ...
+** stp q3, q7, \[sp, #?-32\]!
+** fmov w10, s0
+** fmov x11, d1
+** (
+** fmov x12, d2
+** |
+** umov x12, v2.d\[0\]
+** )
+** fmov x13, d4
+** fmov x14, d5
+** (
+** fmov x15, d6
+** |
+** umov x15, v6.d\[0\]
+** )
+** smstop sm
+** fmov s0, w10
+** fmov d1, x11
+** fmov d2, x12
+** fmov d4, x13
+** fmov d5, x14
+** fmov d6, x15
+** ldp q3, q7, \[sp\], #?32
+** bl consume_varargs
+** ...
+*/
+void
+test_varargs (float32x2_t *float32x2_ptr,
+ float32x4_t *float32x4_ptr,
+ float64x1_t *float64x1_ptr,
+ float64x2_t *float64x2_ptr) [[arm::streaming]]
+{
+ consume_varargs (1.0f, 2.0, *float32x2_ptr, *float32x4_ptr,
+ 3.0f, 4.0, *float64x1_ptr, *float64x2_ptr);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_8.c b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_8.c
new file mode 100644
index 0000000..f44724d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_8.c
@@ -0,0 +1,87 @@
+// { dg-options "-O -fomit-frame-pointer -fno-optimize-sibling-calls -msve-vector-bits=128" }
+// { dg-final { check-function-bodies "**" "" } }
+
+#include <arm_sve.h>
+
+svint8_t produce_z0 ();
+void consume_z0 (svint8_t);
+
+/*
+** test_z0:
+** ...
+** smstop sm
+** bl produce_z0
+** str q0, \[sp, #?-16\]!
+** smstart sm
+** ldr q0, \[sp\], #?16
+** str q0, \[sp, #?-16\]!
+** smstop sm
+** ldr q0, \[sp\], #?16
+** bl consume_z0
+** ...
+*/
+void
+test_z0 () [[arm::streaming]]
+{
+ svint8_t res = produce_z0 ();
+ asm volatile ("");
+ consume_z0 (res);
+}
+
+svint8x4_t produce_z3 ();
+void consume_z3 (svint8x4_t);
+
+/*
+** test_z3:
+** ...
+** smstop sm
+** bl produce_z3
+** stp q0, q1, \[sp, #?-64\]!
+** stp q2, q3, \[sp, #?32\]
+** smstart sm
+** ldp q2, q3, \[sp, #?32\]
+** ldp q0, q1, \[sp\], #?64
+** stp q0, q1, \[sp, #?-64\]!
+** stp q2, q3, \[sp, #?32\]
+** smstop sm
+** ldp q2, q3, \[sp, #?32\]
+** ldp q0, q1, \[sp\], #?64
+** bl consume_z3
+** ...
+*/
+void
+test_z3 () [[arm::streaming]]
+{
+ svint8x4_t res = produce_z3 ();
+ asm volatile ("");
+ consume_z3 (res);
+}
+
+svbool_t produce_p0 ();
+void consume_p0 (svbool_t);
+
+/*
+** test_p0:
+** ...
+** smstop sm
+** bl produce_p0
+** sub sp, sp, #?16
+** str p0, \[sp\]
+** smstart sm
+** ldr p0, \[sp\]
+** add sp, sp, #?16
+** sub sp, sp, #?16
+** str p0, \[sp\]
+** smstop sm
+** ldr p0, \[sp\]
+** add sp, sp, #?16
+** bl consume_p0
+** ...
+*/
+void
+test_p0 () [[arm::streaming]]
+{
+ svbool_t res = produce_p0 ();
+ asm volatile ("");
+ consume_p0 (res);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_9.c b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_9.c
new file mode 100644
index 0000000..83b4073
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/call_sm_switch_9.c
@@ -0,0 +1,103 @@
+// { dg-options "-O -fomit-frame-pointer -fno-optimize-sibling-calls -msve-vector-bits=256" }
+// { dg-final { check-function-bodies "**" "" } }
+
+#include <arm_sve.h>
+
+svint8_t produce_z0 ();
+void consume_z0 (svint8_t);
+
+/*
+** test_z0:
+** ...
+** smstop sm
+** bl produce_z0
+** sub sp, sp, #?32
+** str z0, \[sp\]
+** smstart sm
+** ldr z0, \[sp\]
+** add sp, sp, #?32
+** sub sp, sp, #?32
+** str z0, \[sp\]
+** smstop sm
+** ldr z0, \[sp\]
+** add sp, sp, #?32
+** bl consume_z0
+** ...
+*/
+void
+test_z0 () [[arm::streaming]]
+{
+ svint8_t res = produce_z0 ();
+ asm volatile ("");
+ consume_z0 (res);
+}
+
+svint8x4_t produce_z3 ();
+void consume_z3 (svint8x4_t);
+
+/*
+** test_z3:
+** ...
+** smstop sm
+** bl produce_z3
+** sub sp, sp, #?128
+** str z0, \[sp\]
+** str z1, \[sp, #1, mul vl\]
+** str z2, \[sp, #2, mul vl\]
+** str z3, \[sp, #3, mul vl\]
+** smstart sm
+** ldr z0, \[sp\]
+** ldr z1, \[sp, #1, mul vl\]
+** ldr z2, \[sp, #2, mul vl\]
+** ldr z3, \[sp, #3, mul vl\]
+** add sp, sp, #?128
+** sub sp, sp, #?128
+** str z0, \[sp\]
+** str z1, \[sp, #1, mul vl\]
+** str z2, \[sp, #2, mul vl\]
+** str z3, \[sp, #3, mul vl\]
+** smstop sm
+** ldr z0, \[sp\]
+** ldr z1, \[sp, #1, mul vl\]
+** ldr z2, \[sp, #2, mul vl\]
+** ldr z3, \[sp, #3, mul vl\]
+** add sp, sp, #?128
+** bl consume_z3
+** ...
+*/
+void
+test_z3 () [[arm::streaming]]
+{
+ svint8x4_t res = produce_z3 ();
+ asm volatile ("");
+ consume_z3 (res);
+}
+
+svbool_t produce_p0 ();
+void consume_p0 (svbool_t);
+
+/*
+** test_p0:
+** ...
+** smstop sm
+** bl produce_p0
+** sub sp, sp, #?32
+** str p0, \[sp\]
+** smstart sm
+** ldr p0, \[sp\]
+** add sp, sp, #?32
+** sub sp, sp, #?32
+** str p0, \[sp\]
+** smstop sm
+** ldr p0, \[sp\]
+** add sp, sp, #?32
+** bl consume_p0
+** ...
+*/
+void
+test_p0 () [[arm::streaming]]
+{
+ svbool_t res = produce_p0 ();
+ asm volatile ("");
+ consume_p0 (res);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/clamp_1.c b/gcc/testsuite/gcc.target/aarch64/sme/clamp_1.c
new file mode 100644
index 0000000..fc9d70b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/clamp_1.c
@@ -0,0 +1,38 @@
+// { dg-options "-O" }
+
+#include <arm_sme.h>
+
+#define TEST(TYPE) \
+ TYPE \
+ tied1_##TYPE(TYPE a, TYPE b, TYPE c) __arm_streaming \
+ { \
+ return svmin_x(svptrue_b8(), svmax_x(svptrue_b8(), a, b), c); \
+ } \
+ \
+ TYPE \
+ tied2_##TYPE(TYPE a, TYPE b, TYPE c) __arm_streaming \
+ { \
+ return svmin_x(svptrue_b8(), svmax_x(svptrue_b8(), b, a), c); \
+ }
+
+TEST(svint8_t)
+TEST(svint16_t)
+TEST(svint32_t)
+TEST(svint64_t)
+
+TEST(svuint8_t)
+TEST(svuint16_t)
+TEST(svuint32_t)
+TEST(svuint64_t)
+
+/* { dg-final { scan-assembler-times {\tsclamp\tz0\.b, z1\.b, z2\.b\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tsclamp\tz0\.h, z1\.h, z2\.h\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tsclamp\tz0\.s, z1\.s, z2\.s\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tsclamp\tz0\.d, z1\.d, z2\.d\n} 2 } } */
+
+/* { dg-final { scan-assembler-times {\tuclamp\tz0\.b, z1\.b, z2\.b\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tuclamp\tz0\.h, z1\.h, z2\.h\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tuclamp\tz0\.s, z1\.s, z2\.s\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tuclamp\tz0\.d, z1\.d, z2\.d\n} 2 } } */
+
+/* { dg-final { scan-assembler-not {\tmovprfx\t} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/clamp_2.c b/gcc/testsuite/gcc.target/aarch64/sme/clamp_2.c
new file mode 100644
index 0000000..67d3816
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/clamp_2.c
@@ -0,0 +1,32 @@
+// { dg-options "-O" }
+
+#include <arm_sme.h>
+
+#define TEST(TYPE) \
+ TYPE \
+ untied_##TYPE(TYPE a, TYPE b, TYPE c, TYPE d) __arm_streaming \
+ { \
+ return svmin_x(svptrue_b8(), svmax_x(svptrue_b8(), b, c), d); \
+ }
+
+TEST(svint8_t)
+TEST(svint16_t)
+TEST(svint32_t)
+TEST(svint64_t)
+
+TEST(svuint8_t)
+TEST(svuint16_t)
+TEST(svuint32_t)
+TEST(svuint64_t)
+
+/* { dg-final { scan-assembler-times {\tsclamp\tz0\.b, z2\.b, z3\.b\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tsclamp\tz0\.h, z2\.h, z3\.h\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tsclamp\tz0\.s, z2\.s, z3\.s\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tsclamp\tz0\.d, z2\.d, z3\.d\n} 1 } } */
+
+/* { dg-final { scan-assembler-times {\tuclamp\tz0\.b, z2\.b, z3\.b\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tuclamp\tz0\.h, z2\.h, z3\.h\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tuclamp\tz0\.s, z2\.s, z3\.s\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tuclamp\tz0\.d, z2\.d, z3\.d\n} 1 } } */
+
+/* { dg-final { scan-assembler-times {\tmovprfx\tz0, z1\n} 8 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/clamp_3.c b/gcc/testsuite/gcc.target/aarch64/sme/clamp_3.c
new file mode 100644
index 0000000..44959f7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/clamp_3.c
@@ -0,0 +1,26 @@
+// { dg-options "-O" }
+
+#include <arm_sme.h>
+
+#define TEST(TYPE) \
+ TYPE \
+ tied1_##TYPE(TYPE a, TYPE b, TYPE c) __arm_streaming \
+ { \
+ return svminnm_x(svptrue_b8(), svmaxnm_x(svptrue_b8(), a, b), c); \
+ } \
+ \
+ TYPE \
+ tied2_##TYPE(TYPE a, TYPE b, TYPE c) __arm_streaming \
+ { \
+ return svminnm_x(svptrue_b8(), svmaxnm_x(svptrue_b8(), b, a), c); \
+ }
+
+TEST(svfloat16_t)
+TEST(svfloat32_t)
+TEST(svfloat64_t)
+
+/* { dg-final { scan-assembler-times {\tfclamp\tz0\.h, z1\.h, z2\.h\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tfclamp\tz0\.s, z1\.s, z2\.s\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tfclamp\tz0\.d, z1\.d, z2\.d\n} 2 } } */
+
+/* { dg-final { scan-assembler-not {\tmovprfx\t} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/clamp_4.c b/gcc/testsuite/gcc.target/aarch64/sme/clamp_4.c
new file mode 100644
index 0000000..643b263
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/clamp_4.c
@@ -0,0 +1,20 @@
+// { dg-options "-O" }
+
+#include <arm_sme.h>
+
+#define TEST(TYPE) \
+ TYPE \
+ untied_##TYPE(TYPE a, TYPE b, TYPE c, TYPE d) __arm_streaming \
+ { \
+ return svminnm_x(svptrue_b8(), svmaxnm_x(svptrue_b8(), b, c), d); \
+ }
+
+TEST(svfloat16_t)
+TEST(svfloat32_t)
+TEST(svfloat64_t)
+
+/* { dg-final { scan-assembler-times {\tfclamp\tz0\.h, z2\.h, z3\.h\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tfclamp\tz0\.s, z2\.s, z3\.s\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tfclamp\tz0\.d, z2\.d, z3\.d\n} 1 } } */
+
+/* { dg-final { scan-assembler-times {\tmovprfx\tz0, z1\n} 3 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/inlining_1.c b/gcc/testsuite/gcc.target/aarch64/sme/inlining_1.c
new file mode 100644
index 0000000..24dc2b3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/inlining_1.c
@@ -0,0 +1,47 @@
+/* { dg-options "" } */
+
+inline void __attribute__((always_inline))
+sc_callee () [[arm::streaming_compatible]] {}
+
+inline void __attribute__((always_inline))
+s_callee () [[arm::streaming]] {}
+
+inline void __attribute__((always_inline))
+n_callee () {}
+
+[[arm::locally_streaming]] inline void __attribute__((always_inline))
+sc_ls_callee () [[arm::streaming_compatible]] {}
+
+[[arm::locally_streaming]] inline void __attribute__((always_inline))
+n_ls_callee () {}
+
+inline void __attribute__((always_inline))
+sc_asm_callee () [[arm::streaming_compatible]] { asm (""); }
+
+inline void __attribute__((always_inline))
+s_asm_callee () [[arm::streaming]] { asm (""); } // { dg-error "inlining failed" }
+
+inline void __attribute__((always_inline))
+n_asm_callee () { asm (""); } // { dg-error "inlining failed" }
+
+[[arm::locally_streaming]] inline void __attribute__((always_inline))
+sc_ls_asm_callee () [[arm::streaming_compatible]] { asm (""); } // { dg-error "inlining failed" }
+
+[[arm::locally_streaming]] inline void __attribute__((always_inline))
+n_ls_asm_callee () { asm (""); } // { dg-error "inlining failed" }
+
+void
+sc_caller () [[arm::streaming_compatible]]
+{
+ sc_callee ();
+ s_callee ();
+ n_callee ();
+ sc_ls_callee ();
+ n_ls_callee ();
+
+ sc_asm_callee ();
+ s_asm_callee ();
+ n_asm_callee ();
+ sc_ls_asm_callee ();
+ n_ls_asm_callee ();
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/inlining_10.c b/gcc/testsuite/gcc.target/aarch64/sme/inlining_10.c
new file mode 100644
index 0000000..adfd45a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/inlining_10.c
@@ -0,0 +1,57 @@
+/* { dg-options "" } */
+
+#include <arm_neon.h>
+#include <arm_sme.h>
+
+uint8x16_t *neon;
+svint64_t *sve;
+int64_t *ptr;
+
+// Gets expanded to addition early, so no error. An error would be
+// more correct though.
+inline void __attribute__((always_inline))
+call_vadd ()
+{
+ neon[4] = vaddq_u8 (neon[5], neon[6]);
+}
+
+inline void __attribute__((always_inline))
+call_vbsl () // { dg-error "inlining failed" }
+{
+ neon[0] = vbslq_u8 (neon[1], neon[2], neon[3]);
+}
+
+inline void __attribute__((always_inline))
+call_svadd ()
+{
+ *sve = svadd_x (svptrue_b8 (), *sve, 1);
+}
+
+inline void __attribute__((always_inline))
+call_svld1_gather () // { dg-error "inlining failed" }
+{
+ *sve = svld1_gather_offset (svptrue_b8 (), ptr, *sve);
+}
+
+inline void __attribute__((always_inline))
+call_svzero () [[arm::inout("za")]]
+{
+ svzero_za ();
+}
+
+inline void __attribute__((always_inline))
+call_svst1_za () [[arm::streaming, arm::inout("za")]] // { dg-error "inlining failed" }
+{
+ svst1_ver_za64 (0, 0, svptrue_b8 (), ptr);
+}
+
+void
+sc_caller () [[arm::inout("za"), arm::streaming_compatible]]
+{
+ call_vadd ();
+ call_vbsl ();
+ call_svadd ();
+ call_svld1_gather ();
+ call_svzero ();
+ call_svst1_za ();
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/inlining_11.c b/gcc/testsuite/gcc.target/aarch64/sme/inlining_11.c
new file mode 100644
index 0000000..d05a92c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/inlining_11.c
@@ -0,0 +1,57 @@
+/* { dg-options "" } */
+
+#include <arm_neon.h>
+#include <arm_sme.h>
+
+uint8x16_t *neon;
+svint64_t *sve;
+int64_t *ptr;
+
+// Gets expanded to addition early, so no error. An error would be
+// more correct though.
+inline void __attribute__((always_inline))
+call_vadd ()
+{
+ neon[4] = vaddq_u8 (neon[5], neon[6]);
+}
+
+inline void __attribute__((always_inline))
+call_vbsl () // { dg-error "inlining failed" }
+{
+ neon[0] = vbslq_u8 (neon[1], neon[2], neon[3]);
+}
+
+inline void __attribute__((always_inline))
+call_svadd ()
+{
+ *sve = svadd_x (svptrue_b8 (), *sve, 1);
+}
+
+inline void __attribute__((always_inline))
+call_svld1_gather () // { dg-error "inlining failed" }
+{
+ *sve = svld1_gather_offset (svptrue_b8 (), ptr, *sve);
+}
+
+inline void __attribute__((always_inline))
+call_svzero () [[arm::inout("za")]]
+{
+ svzero_za ();
+}
+
+inline void __attribute__((always_inline))
+call_svst1_za () [[arm::streaming, arm::inout("za")]]
+{
+ svst1_ver_za64 (0, 0, svptrue_b8 (), ptr);
+}
+
+void
+sc_caller () [[arm::inout("za"), arm::streaming]]
+{
+ call_vadd ();
+ call_vbsl ();
+ call_svadd ();
+ call_svld1_gather ();
+ call_svzero ();
+ call_svst1_za ();
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/inlining_12.c b/gcc/testsuite/gcc.target/aarch64/sme/inlining_12.c
new file mode 100644
index 0000000..366f8b2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/inlining_12.c
@@ -0,0 +1,15 @@
+/* { dg-options "" } */
+
+#include <arm_sme.h>
+
+inline void __attribute__((always_inline))
+call_svzero () [[arm::inout("za"), arm::streaming_compatible]] // { dg-error "inlining failed" }
+{
+ svzero_za ();
+}
+
+void
+n_caller ()
+{
+ call_svzero ();
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/inlining_13.c b/gcc/testsuite/gcc.target/aarch64/sme/inlining_13.c
new file mode 100644
index 0000000..bdbd740
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/inlining_13.c
@@ -0,0 +1,15 @@
+/* { dg-options "" } */
+
+#include <arm_sme.h>
+
+inline void __attribute__((always_inline))
+call_svzero () [[arm::inout("za"), arm::streaming_compatible]] // { dg-error "inlining failed" }
+{
+ svzero_za ();
+}
+
+void
+s_caller ()
+{
+ call_svzero ();
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/inlining_14.c b/gcc/testsuite/gcc.target/aarch64/sme/inlining_14.c
new file mode 100644
index 0000000..0ce4384
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/inlining_14.c
@@ -0,0 +1,15 @@
+/* { dg-options "" } */
+
+#include <arm_sme.h>
+
+inline void __attribute__((always_inline))
+call_svzero () [[arm::inout("za"), arm::streaming_compatible]] // { dg-error "inlining failed" }
+{
+ svzero_za ();
+}
+
+void
+sc_caller ()
+{
+ call_svzero ();
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/inlining_15.c b/gcc/testsuite/gcc.target/aarch64/sme/inlining_15.c
new file mode 100644
index 0000000..06fc5d7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/inlining_15.c
@@ -0,0 +1,27 @@
+/* { dg-options "" } */
+
+#include <arm_sme.h>
+
+inline void
+call_svzero () [[arm::inout("za"), arm::streaming_compatible]]
+{
+ svzero_za ();
+}
+
+void
+n_caller ()
+{
+ call_svzero (); // { dg-error "call to a function that shares 'za' state from a function that has no 'za' state" }
+}
+
+void
+s_caller ()
+{
+ call_svzero (); // { dg-error "call to a function that shares 'za' state from a function that has no 'za' state" }
+}
+
+void
+sc_caller ()
+{
+ call_svzero (); // { dg-error "call to a function that shares 'za' state from a function that has no 'za' state" }
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/inlining_2.c b/gcc/testsuite/gcc.target/aarch64/sme/inlining_2.c
new file mode 100644
index 0000000..ea2a570
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/inlining_2.c
@@ -0,0 +1,47 @@
+/* { dg-options "" } */
+
+inline void __attribute__((always_inline))
+sc_callee () [[arm::streaming_compatible]] {}
+
+inline void __attribute__((always_inline))
+s_callee () [[arm::streaming]] {}
+
+inline void __attribute__((always_inline))
+n_callee () {}
+
+[[arm::locally_streaming]] inline void __attribute__((always_inline))
+sc_ls_callee () [[arm::streaming_compatible]] {}
+
+[[arm::locally_streaming]] inline void __attribute__((always_inline))
+n_ls_callee () {}
+
+inline void __attribute__((always_inline))
+sc_asm_callee () [[arm::streaming_compatible]] { asm (""); }
+
+inline void __attribute__((always_inline))
+s_asm_callee () [[arm::streaming]] { asm (""); }
+
+inline void __attribute__((always_inline))
+n_asm_callee () { asm (""); } // { dg-error "inlining failed" }
+
+[[arm::locally_streaming]] inline void __attribute__((always_inline))
+sc_ls_asm_callee () [[arm::streaming_compatible]] { asm (""); }
+
+[[arm::locally_streaming]] inline void __attribute__((always_inline))
+n_ls_asm_callee () { asm (""); }
+
+void
+s_caller () [[arm::streaming]]
+{
+ sc_callee ();
+ s_callee ();
+ n_callee ();
+ sc_ls_callee ();
+ n_ls_callee ();
+
+ sc_asm_callee ();
+ s_asm_callee ();
+ n_asm_callee ();
+ sc_ls_asm_callee ();
+ n_ls_asm_callee ();
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/inlining_3.c b/gcc/testsuite/gcc.target/aarch64/sme/inlining_3.c
new file mode 100644
index 0000000..d7ffb38
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/inlining_3.c
@@ -0,0 +1,47 @@
+/* { dg-options "" } */
+
+inline void __attribute__((always_inline))
+sc_callee () [[arm::streaming_compatible]] {}
+
+inline void __attribute__((always_inline))
+s_callee () [[arm::streaming]] {}
+
+inline void __attribute__((always_inline))
+n_callee () {}
+
+[[arm::locally_streaming]] inline void __attribute__((always_inline))
+sc_ls_callee () [[arm::streaming_compatible]] {}
+
+[[arm::locally_streaming]] inline void __attribute__((always_inline))
+n_ls_callee () {}
+
+inline void __attribute__((always_inline))
+sc_asm_callee () [[arm::streaming_compatible]] { asm (""); }
+
+inline void __attribute__((always_inline))
+s_asm_callee () [[arm::streaming]] { asm (""); } // { dg-error "inlining failed" }
+
+inline void __attribute__((always_inline))
+n_asm_callee () { asm (""); }
+
+[[arm::locally_streaming]] inline void __attribute__((always_inline))
+sc_ls_asm_callee () [[arm::streaming_compatible]] { asm (""); } // { dg-error "inlining failed" }
+
+[[arm::locally_streaming]] inline void __attribute__((always_inline))
+n_ls_asm_callee () { asm (""); } // { dg-error "inlining failed" }
+
+void
+n_caller ()
+{
+ sc_callee ();
+ s_callee ();
+ n_callee ();
+ sc_ls_callee ();
+ n_ls_callee ();
+
+ sc_asm_callee ();
+ s_asm_callee ();
+ n_asm_callee ();
+ sc_ls_asm_callee ();
+ n_ls_asm_callee ();
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/inlining_4.c b/gcc/testsuite/gcc.target/aarch64/sme/inlining_4.c
new file mode 100644
index 0000000..7892037
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/inlining_4.c
@@ -0,0 +1,47 @@
+/* { dg-options "" } */
+
+inline void __attribute__((always_inline))
+sc_callee () [[arm::streaming_compatible]] {}
+
+inline void __attribute__((always_inline))
+s_callee () [[arm::streaming]] {}
+
+inline void __attribute__((always_inline))
+n_callee () {}
+
+[[arm::locally_streaming]] inline void __attribute__((always_inline))
+sc_ls_callee () [[arm::streaming_compatible]] {}
+
+[[arm::locally_streaming]] inline void __attribute__((always_inline))
+n_ls_callee () {}
+
+inline void __attribute__((always_inline))
+sc_asm_callee () [[arm::streaming_compatible]] { asm (""); }
+
+inline void __attribute__((always_inline))
+s_asm_callee () [[arm::streaming]] { asm (""); }
+
+inline void __attribute__((always_inline))
+n_asm_callee () { asm (""); } // { dg-error "inlining failed" }
+
+[[arm::locally_streaming]] inline void __attribute__((always_inline))
+sc_ls_asm_callee () [[arm::streaming_compatible]] { asm (""); }
+
+[[arm::locally_streaming]] inline void __attribute__((always_inline))
+n_ls_asm_callee () { asm (""); }
+
+[[arm::locally_streaming]] void
+sc_ls_caller () [[arm::streaming_compatible]]
+{
+ sc_callee ();
+ s_callee ();
+ n_callee ();
+ sc_ls_callee ();
+ n_ls_callee ();
+
+ sc_asm_callee ();
+ s_asm_callee ();
+ n_asm_callee ();
+ sc_ls_asm_callee ();
+ n_ls_asm_callee ();
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/inlining_5.c b/gcc/testsuite/gcc.target/aarch64/sme/inlining_5.c
new file mode 100644
index 0000000..d19cdc4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/inlining_5.c
@@ -0,0 +1,47 @@
+/* { dg-options "" } */
+
+inline void __attribute__((always_inline))
+sc_callee () [[arm::streaming_compatible]] {}
+
+inline void __attribute__((always_inline))
+s_callee () [[arm::streaming]] {}
+
+inline void __attribute__((always_inline))
+n_callee () {}
+
+[[arm::locally_streaming]] inline void __attribute__((always_inline))
+sc_ls_callee () [[arm::streaming_compatible]] {}
+
+[[arm::locally_streaming]] inline void __attribute__((always_inline))
+n_ls_callee () {}
+
+inline void __attribute__((always_inline))
+sc_asm_callee () [[arm::streaming_compatible]] { asm (""); }
+
+inline void __attribute__((always_inline))
+s_asm_callee () [[arm::streaming]] { asm (""); }
+
+inline void __attribute__((always_inline))
+n_asm_callee () { asm (""); } // { dg-error "inlining failed" }
+
+[[arm::locally_streaming]] inline void __attribute__((always_inline))
+sc_ls_asm_callee () [[arm::streaming_compatible]] { asm (""); }
+
+[[arm::locally_streaming]] inline void __attribute__((always_inline))
+n_ls_asm_callee () { asm (""); }
+
+[[arm::locally_streaming]] void
+n_ls_caller ()
+{
+ sc_callee ();
+ s_callee ();
+ n_callee ();
+ sc_ls_callee ();
+ n_ls_callee ();
+
+ sc_asm_callee ();
+ s_asm_callee ();
+ n_asm_callee ();
+ sc_ls_asm_callee ();
+ n_ls_asm_callee ();
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/inlining_6.c b/gcc/testsuite/gcc.target/aarch64/sme/inlining_6.c
new file mode 100644
index 0000000..a5eb399
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/inlining_6.c
@@ -0,0 +1,31 @@
+/* { dg-options "" } */
+
+inline void __attribute__((always_inline))
+shared_callee () [[arm::inout("za")]] {}
+
+[[arm::new("za")]] inline void __attribute__((always_inline))
+new_callee () {} // { dg-error "inlining failed" }
+
+inline void __attribute__((always_inline))
+normal_callee () {}
+
+inline void __attribute__((always_inline))
+shared_asm_callee () [[arm::inout("za")]] { asm volatile ("" ::: "za"); }
+
+[[arm::new("za")]] inline void __attribute__((always_inline))
+new_asm_callee () { asm volatile ("" ::: "za"); } // { dg-error "inlining failed" }
+
+inline void __attribute__((always_inline))
+normal_asm_callee () { asm volatile ("" ::: "za"); } // { dg-error "inlining failed" }
+
+void
+shared_caller () [[arm::inout("za")]]
+{
+ shared_callee ();
+ new_callee ();
+ normal_callee ();
+
+ shared_asm_callee ();
+ new_asm_callee ();
+ normal_asm_callee ();
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/inlining_7.c b/gcc/testsuite/gcc.target/aarch64/sme/inlining_7.c
new file mode 100644
index 0000000..0f04628
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/inlining_7.c
@@ -0,0 +1,31 @@
+/* { dg-options "" } */
+
+inline void __attribute__((always_inline))
+shared_callee () [[arm::inout("za")]] {}
+
+[[arm::new("za")]] inline void __attribute__((always_inline))
+new_callee () {} // { dg-error "inlining failed" }
+
+inline void __attribute__((always_inline))
+normal_callee () {}
+
+inline void __attribute__((always_inline))
+shared_asm_callee () [[arm::inout("za")]] { asm volatile ("" ::: "za"); }
+
+[[arm::new("za")]] inline void __attribute__((always_inline))
+new_asm_callee () { asm volatile ("" ::: "za"); } // { dg-error "inlining failed" }
+
+inline void __attribute__((always_inline))
+normal_asm_callee () { asm volatile ("" ::: "za"); } // { dg-error "inlining failed" }
+
+[[arm::new("za")]] void
+new_caller ()
+{
+ shared_callee ();
+ new_callee ();
+ normal_callee ();
+
+ shared_asm_callee ();
+ new_asm_callee ();
+ normal_asm_callee ();
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/inlining_8.c b/gcc/testsuite/gcc.target/aarch64/sme/inlining_8.c
new file mode 100644
index 0000000..fd8a3a6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/inlining_8.c
@@ -0,0 +1,31 @@
+/* { dg-options "" } */
+
+inline void __attribute__((always_inline))
+shared_callee () [[arm::inout("za")]] {} // { dg-error "inlining failed" }
+
+[[arm::new("za")]] inline void __attribute__((always_inline))
+new_callee () {} // { dg-error "inlining failed" }
+
+inline void __attribute__((always_inline))
+normal_callee () {}
+
+inline void __attribute__((always_inline))
+shared_asm_callee () [[arm::inout("za")]] { asm volatile ("" ::: "za"); } // { dg-error "inlining failed" }
+
+[[arm::new("za")]] inline void __attribute__((always_inline))
+new_asm_callee () { asm volatile ("" ::: "za"); } // { dg-error "inlining failed" }
+
+inline void __attribute__((always_inline))
+normal_asm_callee () { asm volatile ("" ::: "za"); }
+
+void
+normal_caller ()
+{
+ shared_callee ();
+ new_callee ();
+ normal_callee ();
+
+ shared_asm_callee ();
+ new_asm_callee ();
+ normal_asm_callee ();
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/inlining_9.c b/gcc/testsuite/gcc.target/aarch64/sme/inlining_9.c
new file mode 100644
index 0000000..91520e3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/inlining_9.c
@@ -0,0 +1,55 @@
+/* { dg-options "" } */
+
+#include <arm_neon.h>
+#include <arm_sme.h>
+
+uint8x16_t *neon;
+svint64_t *sve;
+int64_t *ptr;
+
+inline void __attribute__((always_inline))
+call_vadd ()
+{
+ neon[4] = vaddq_u8 (neon[5], neon[6]);
+}
+
+inline void __attribute__((always_inline))
+call_vbsl ()
+{
+ neon[0] = vbslq_u8 (neon[1], neon[2], neon[3]);
+}
+
+inline void __attribute__((always_inline))
+call_svadd ()
+{
+ *sve = svadd_x (svptrue_b8 (), *sve, 1);
+}
+
+inline void __attribute__((always_inline))
+call_svld1_gather ()
+{
+ *sve = svld1_gather_offset (svptrue_b8 (), ptr, *sve);
+}
+
+inline void __attribute__((always_inline))
+call_svzero () [[arm::inout("za")]]
+{
+ svzero_za ();
+}
+
+inline void __attribute__((always_inline))
+call_svst1_za () [[arm::streaming, arm::inout("za")]] // { dg-error "inlining failed" }
+{
+ svst1_ver_za64 (0, 0, svptrue_b8 (), ptr);
+}
+
+void
+n_caller () [[arm::inout("za")]]
+{
+ call_vadd ();
+ call_vbsl ();
+ call_svadd ();
+ call_svld1_gather ();
+ call_svzero ();
+ call_svst1_za ();
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/keyword_macros_1.c b/gcc/testsuite/gcc.target/aarch64/sme/keyword_macros_1.c
new file mode 100644
index 0000000..22f5fac
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/keyword_macros_1.c
@@ -0,0 +1,10 @@
+/* { dg-options "-std=c90 -pedantic-errors" } */
+
+void f1 () __arm_streaming;
+void f2 () __arm_streaming_compatible;
+void f3 () __arm_in("za");
+void f4 () __arm_out("za");
+void f5 () __arm_inout("za");
+void f6 () __arm_preserves("za");
+__arm_new("za") void f7 () {}
+__arm_locally_streaming void f8 () {}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/locally_streaming_1.c b/gcc/testsuite/gcc.target/aarch64/sme/locally_streaming_1.c
new file mode 100644
index 0000000..20ff4b8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/locally_streaming_1.c
@@ -0,0 +1,466 @@
+// { dg-options "-O -fomit-frame-pointer" }
+// { dg-final { check-function-bodies "**" "" } }
+
+void consume_za () [[arm::streaming, arm::inout("za")]];
+
+/*
+** n_ls:
+** sub sp, sp, #?80
+** cntd x16
+** str x16, \[sp\]
+** stp d8, d9, \[sp, #?16\]
+** stp d10, d11, \[sp, #?32\]
+** stp d12, d13, \[sp, #?48\]
+** stp d14, d15, \[sp, #?64\]
+** smstart sm
+** smstop sm
+** ldp d8, d9, \[sp, #?16\]
+** ldp d10, d11, \[sp, #?32\]
+** ldp d12, d13, \[sp, #?48\]
+** ldp d14, d15, \[sp, #?64\]
+** add sp, sp, #?80
+** ret
+*/
+[[arm::locally_streaming]] void
+n_ls ()
+{
+ asm ("");
+}
+
+/*
+** s_ls:
+** ret
+*/
+[[arm::locally_streaming]] void
+s_ls () [[arm::streaming]]
+{
+ asm ("");
+}
+
+/*
+** sc_ls:
+** stp x29, x30, \[sp, #?-96\]!
+** mov x29, sp
+** cntd x16
+** str x16, \[sp, #?24\]
+** stp d8, d9, \[sp, #?32\]
+** stp d10, d11, \[sp, #?48\]
+** stp d12, d13, \[sp, #?64\]
+** stp d14, d15, \[sp, #?80\]
+** mrs x16, svcr
+** str x16, \[x29, #?16\]
+** tbnz x16, 0, [^\n]+
+** smstart sm
+** ldr x16, \[x29, #?16\]
+** tbnz x16, 0, [^\n]+
+** smstop sm
+** ldp d8, d9, \[sp, #?32\]
+** ldp d10, d11, \[sp, #?48\]
+** ldp d12, d13, \[sp, #?64\]
+** ldp d14, d15, \[sp, #?80\]
+** ldp x29, x30, \[sp\], #?96
+** ret
+*/
+[[arm::locally_streaming]] void
+sc_ls () [[arm::streaming_compatible]]
+{
+ asm ("");
+}
+
+/*
+** n_ls_new_za:
+** str x30, \[sp, #?-80\]!
+** cntd x16
+** str x16, \[sp, #?8\]
+** stp d8, d9, \[sp, #?16\]
+** stp d10, d11, \[sp, #?32\]
+** stp d12, d13, \[sp, #?48\]
+** stp d14, d15, \[sp, #?64\]
+** smstart sm
+** mrs (x[0-9]+), tpidr2_el0
+** cbz \1, [^\n]+
+** bl __arm_tpidr2_save
+** msr tpidr2_el0, xzr
+** zero { za }
+** smstart za
+** bl consume_za
+** smstop za
+** smstop sm
+** ldp d8, d9, \[sp, #?16\]
+** ldp d10, d11, \[sp, #?32\]
+** ldp d12, d13, \[sp, #?48\]
+** ldp d14, d15, \[sp, #?64\]
+** ldr x30, \[sp\], #?80
+** ret
+*/
+[[arm::locally_streaming, arm::new("za")]] void
+n_ls_new_za ()
+{
+ consume_za ();
+ asm ("");
+}
+
+/*
+** s_ls_new_za:
+** str x30, \[sp, #?-16\]!
+** mrs (x[0-9]+), tpidr2_el0
+** cbz \1, [^\n]+
+** bl __arm_tpidr2_save
+** msr tpidr2_el0, xzr
+** zero { za }
+** smstart za
+** bl consume_za
+** smstop za
+** ldr x30, \[sp\], #?16
+** ret
+*/
+[[arm::locally_streaming, arm::new("za")]] void
+s_ls_new_za () [[arm::streaming]]
+{
+ consume_za ();
+ asm ("");
+}
+
+/*
+** sc_ls_new_za:
+** stp x29, x30, \[sp, #?-96\]!
+** mov x29, sp
+** cntd x16
+** str x16, \[sp, #?24\]
+** stp d8, d9, \[sp, #?32\]
+** stp d10, d11, \[sp, #?48\]
+** stp d12, d13, \[sp, #?64\]
+** stp d14, d15, \[sp, #?80\]
+** mrs x16, svcr
+** str x16, \[x29, #?16\]
+** tbnz x16, 0, [^\n]+
+** smstart sm
+** mrs (x[0-9]+), tpidr2_el0
+** cbz \1, [^\n]+
+** bl __arm_tpidr2_save
+** msr tpidr2_el0, xzr
+** zero { za }
+** smstart za
+** bl consume_za
+** smstop za
+** ldr x16, \[x29, #?16\]
+** tbnz x16, 0, [^\n]+
+** smstop sm
+** ldp d8, d9, \[sp, #?32\]
+** ldp d10, d11, \[sp, #?48\]
+** ldp d12, d13, \[sp, #?64\]
+** ldp d14, d15, \[sp, #?80\]
+** ldp x29, x30, \[sp\], #?96
+** ret
+*/
+[[arm::locally_streaming, arm::new("za")]] void
+sc_ls_new_za () [[arm::streaming_compatible]]
+{
+ consume_za ();
+ asm ("");
+}
+
+/*
+** n_ls_shared_za:
+** str x30, \[sp, #?-80\]!
+** cntd x16
+** str x16, \[sp, #?8\]
+** stp d8, d9, \[sp, #?16\]
+** stp d10, d11, \[sp, #?32\]
+** stp d12, d13, \[sp, #?48\]
+** stp d14, d15, \[sp, #?64\]
+** smstart sm
+** bl consume_za
+** smstop sm
+** ldp d8, d9, \[sp, #?16\]
+** ldp d10, d11, \[sp, #?32\]
+** ldp d12, d13, \[sp, #?48\]
+** ldp d14, d15, \[sp, #?64\]
+** ldr x30, \[sp\], #?80
+** ret
+*/
+[[arm::locally_streaming]] void
+n_ls_shared_za () [[arm::inout("za")]]
+{
+ consume_za ();
+ asm ("");
+}
+
+/*
+** s_ls_shared_za:
+** str x30, \[sp, #?-16\]!
+** bl consume_za
+** ldr x30, \[sp\], #?16
+** ret
+*/
+[[arm::locally_streaming]] void
+s_ls_shared_za () [[arm::streaming, arm::inout("za")]]
+{
+ consume_za ();
+ asm ("");
+}
+
+/*
+** sc_ls_shared_za:
+** stp x29, x30, \[sp, #?-96\]!
+** mov x29, sp
+** cntd x16
+** str x16, \[sp, #?24\]
+** stp d8, d9, \[sp, #?32\]
+** stp d10, d11, \[sp, #?48\]
+** stp d12, d13, \[sp, #?64\]
+** stp d14, d15, \[sp, #?80\]
+** mrs x16, svcr
+** str x16, \[x29, #?16\]
+** tbnz x16, 0, [^\n]+
+** smstart sm
+** bl consume_za
+** ldr x16, \[x29, #?16\]
+** tbnz x16, 0, [^\n]+
+** smstop sm
+** ldp d8, d9, \[sp, #?32\]
+** ldp d10, d11, \[sp, #?48\]
+** ldp d12, d13, \[sp, #?64\]
+** ldp d14, d15, \[sp, #?80\]
+** ldp x29, x30, \[sp\], #?96
+** ret
+*/
+[[arm::locally_streaming]] void
+sc_ls_shared_za () [[arm::streaming_compatible, arm::inout("za")]]
+{
+ consume_za ();
+ asm ("");
+}
+
+/*
+** n_ls_vector_pcs:
+** sub sp, sp, #?272
+** cntd x16
+** str x16, \[sp\]
+** stp q8, q9, \[sp, #?16\]
+** stp q10, q11, \[sp, #?48\]
+** stp q12, q13, \[sp, #?80\]
+** stp q14, q15, \[sp, #?112\]
+** stp q16, q17, \[sp, #?144\]
+** stp q18, q19, \[sp, #?176\]
+** stp q20, q21, \[sp, #?208\]
+** stp q22, q23, \[sp, #?240\]
+** smstart sm
+** smstop sm
+** ldp q8, q9, \[sp, #?16\]
+** ldp q10, q11, \[sp, #?48\]
+** ldp q12, q13, \[sp, #?80\]
+** ldp q14, q15, \[sp, #?112\]
+** ldp q16, q17, \[sp, #?144\]
+** ldp q18, q19, \[sp, #?176\]
+** ldp q20, q21, \[sp, #?208\]
+** ldp q22, q23, \[sp, #?240\]
+** add sp, sp, #?272
+** ret
+*/
+[[arm::locally_streaming]] void __attribute__((aarch64_vector_pcs))
+n_ls_vector_pcs ()
+{
+ asm ("");
+}
+
+/*
+** n_ls_sve_pcs:
+** sub sp, sp, #?16
+** cntd x16
+** str x16, \[sp\]
+** addsvl sp, sp, #-18
+** str p4, \[sp\]
+** str p5, \[sp, #1, mul vl\]
+** str p6, \[sp, #2, mul vl\]
+** str p7, \[sp, #3, mul vl\]
+** str p8, \[sp, #4, mul vl\]
+** str p9, \[sp, #5, mul vl\]
+** str p10, \[sp, #6, mul vl\]
+** str p11, \[sp, #7, mul vl\]
+** str p12, \[sp, #8, mul vl\]
+** str p13, \[sp, #9, mul vl\]
+** str p14, \[sp, #10, mul vl\]
+** str p15, \[sp, #11, mul vl\]
+** str z8, \[sp, #2, mul vl\]
+** str z9, \[sp, #3, mul vl\]
+** str z10, \[sp, #4, mul vl\]
+** str z11, \[sp, #5, mul vl\]
+** str z12, \[sp, #6, mul vl\]
+** str z13, \[sp, #7, mul vl\]
+** str z14, \[sp, #8, mul vl\]
+** str z15, \[sp, #9, mul vl\]
+** str z16, \[sp, #10, mul vl\]
+** str z17, \[sp, #11, mul vl\]
+** str z18, \[sp, #12, mul vl\]
+** str z19, \[sp, #13, mul vl\]
+** str z20, \[sp, #14, mul vl\]
+** str z21, \[sp, #15, mul vl\]
+** str z22, \[sp, #16, mul vl\]
+** str z23, \[sp, #17, mul vl\]
+** addvl sp, sp, #-1
+** str p0, \[sp\]
+** smstart sm
+** ldr p0, \[sp\]
+** addvl sp, sp, #1
+** smstop sm
+** ldr z8, \[sp, #2, mul vl\]
+** ldr z9, \[sp, #3, mul vl\]
+** ldr z10, \[sp, #4, mul vl\]
+** ldr z11, \[sp, #5, mul vl\]
+** ldr z12, \[sp, #6, mul vl\]
+** ldr z13, \[sp, #7, mul vl\]
+** ldr z14, \[sp, #8, mul vl\]
+** ldr z15, \[sp, #9, mul vl\]
+** ldr z16, \[sp, #10, mul vl\]
+** ldr z17, \[sp, #11, mul vl\]
+** ldr z18, \[sp, #12, mul vl\]
+** ldr z19, \[sp, #13, mul vl\]
+** ldr z20, \[sp, #14, mul vl\]
+** ldr z21, \[sp, #15, mul vl\]
+** ldr z22, \[sp, #16, mul vl\]
+** ldr z23, \[sp, #17, mul vl\]
+** ldr p4, \[sp\]
+** ldr p5, \[sp, #1, mul vl\]
+** ldr p6, \[sp, #2, mul vl\]
+** ldr p7, \[sp, #3, mul vl\]
+** ldr p8, \[sp, #4, mul vl\]
+** ldr p9, \[sp, #5, mul vl\]
+** ldr p10, \[sp, #6, mul vl\]
+** ldr p11, \[sp, #7, mul vl\]
+** ldr p12, \[sp, #8, mul vl\]
+** ldr p13, \[sp, #9, mul vl\]
+** ldr p14, \[sp, #10, mul vl\]
+** ldr p15, \[sp, #11, mul vl\]
+** addsvl sp, sp, #18
+** add sp, sp, #?16
+** ret
+*/
+[[arm::locally_streaming]] void
+n_ls_sve_pcs (__SVBool_t x)
+{
+ asm ("");
+}
+
+/*
+** n_ls_v0:
+** addsvl sp, sp, #-1
+** ...
+** smstart sm
+** add x[0-9]+, [^\n]+
+** smstop sm
+** ...
+** addsvl sp, sp, #1
+** ...
+*/
+#define TEST(VN) __SVInt32_t VN; asm ("" :: "r" (&VN));
+[[arm::locally_streaming]] void
+n_ls_v0 ()
+{
+ TEST (v0);
+}
+
+/*
+** n_ls_v32:
+** addsvl sp, sp, #-32
+** ...
+** smstart sm
+** ...
+** smstop sm
+** ...
+** rdsvl (x[0-9]+), #1
+** lsl (x[0-9]+), \1, #?5
+** add sp, sp, \2
+** ...
+*/
+[[arm::locally_streaming]] void
+n_ls_v32 ()
+{
+ TEST (v0);
+ TEST (v1);
+ TEST (v2);
+ TEST (v3);
+ TEST (v4);
+ TEST (v5);
+ TEST (v6);
+ TEST (v7);
+ TEST (v8);
+ TEST (v9);
+ TEST (v10);
+ TEST (v11);
+ TEST (v12);
+ TEST (v13);
+ TEST (v14);
+ TEST (v15);
+ TEST (v16);
+ TEST (v17);
+ TEST (v18);
+ TEST (v19);
+ TEST (v20);
+ TEST (v21);
+ TEST (v22);
+ TEST (v23);
+ TEST (v24);
+ TEST (v25);
+ TEST (v26);
+ TEST (v27);
+ TEST (v28);
+ TEST (v29);
+ TEST (v30);
+ TEST (v31);
+}
+
+/*
+** n_ls_v33:
+** rdsvl (x[0-9]+), #1
+** mov (x[0-9]+), #?33
+** mul (x[0-9]+), (?:\1, \2|\2, \1)
+** sub sp, sp, \3
+** ...
+** smstart sm
+** ...
+** smstop sm
+** ...
+** rdsvl (x[0-9]+), #1
+** mov (x[0-9]+), #?33
+** mul (x[0-9]+), (?:\4, \5|\5, \4)
+** add sp, sp, \6
+** ...
+*/
+[[arm::locally_streaming]] void
+n_ls_v33 ()
+{
+ TEST (v0);
+ TEST (v1);
+ TEST (v2);
+ TEST (v3);
+ TEST (v4);
+ TEST (v5);
+ TEST (v6);
+ TEST (v7);
+ TEST (v8);
+ TEST (v9);
+ TEST (v10);
+ TEST (v11);
+ TEST (v12);
+ TEST (v13);
+ TEST (v14);
+ TEST (v15);
+ TEST (v16);
+ TEST (v17);
+ TEST (v18);
+ TEST (v19);
+ TEST (v20);
+ TEST (v21);
+ TEST (v22);
+ TEST (v23);
+ TEST (v24);
+ TEST (v25);
+ TEST (v26);
+ TEST (v27);
+ TEST (v28);
+ TEST (v29);
+ TEST (v30);
+ TEST (v31);
+ TEST (v32);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/locally_streaming_2.c b/gcc/testsuite/gcc.target/aarch64/sme/locally_streaming_2.c
new file mode 100644
index 0000000..0eba993
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/locally_streaming_2.c
@@ -0,0 +1,177 @@
+// { dg-options "-O -fomit-frame-pointer" }
+// { dg-final { check-function-bodies "**" "" } }
+
+#include <arm_neon.h>
+#include <arm_sve.h>
+
+/*
+** test_d0:
+** ...
+** smstart sm
+** ...
+** fmov x10, d0
+** smstop sm
+** fmov d0, x10
+** ...
+*/
+[[arm::locally_streaming]] double
+test_d0 ()
+{
+ asm ("");
+ return 1.0f;
+}
+
+/*
+** test_d0_vec:
+** ...
+** smstart sm
+** ...
+** (
+** fmov x10, d0
+** |
+** umov x10, v0.d\[0\]
+** )
+** smstop sm
+** fmov d0, x10
+** ...
+*/
+[[arm::locally_streaming]] int8x8_t
+test_d0_vec ()
+{
+ asm ("");
+ return (int8x8_t) {};
+}
+
+/*
+** test_q0:
+** ...
+** smstart sm
+** ...
+** str q0, \[sp, #?-16\]!
+** smstop sm
+** ldr q0, \[sp\], #?16
+** ...
+*/
+[[arm::locally_streaming]] int8x16_t
+test_q0 ()
+{
+ asm ("");
+ return (int8x16_t) {};
+}
+
+/*
+** test_q1:
+** ...
+** smstart sm
+** ...
+** stp q0, q1, \[sp, #?-32\]!
+** smstop sm
+** ldp q0, q1, \[sp\], #?32
+** ...
+*/
+[[arm::locally_streaming]] int8x16x2_t
+test_q1 ()
+{
+ asm ("");
+ return (int8x16x2_t) {};
+}
+
+/*
+** test_q2:
+** ...
+** smstart sm
+** ...
+** stp q0, q1, \[sp, #?-48\]!
+** str q2, \[sp, #?32\]
+** smstop sm
+** ldr q2, \[sp, #?32\]
+** ldp q0, q1, \[sp\], #?48
+** ...
+*/
+[[arm::locally_streaming]] int8x16x3_t
+test_q2 ()
+{
+ asm ("");
+ return (int8x16x3_t) {};
+}
+
+/*
+** test_q3:
+** ...
+** smstart sm
+** ...
+** stp q0, q1, \[sp, #?-64\]!
+** stp q2, q3, \[sp, #?32\]
+** smstop sm
+** ldp q2, q3, \[sp, #?32\]
+** ldp q0, q1, \[sp\], #?64
+** ...
+*/
+[[arm::locally_streaming]] int8x16x4_t
+test_q3 ()
+{
+ asm ("");
+ return (int8x16x4_t) {};
+}
+
+/*
+** test_z0:
+** ...
+** smstart sm
+** mov z0\.b, #0
+** addvl sp, sp, #-1
+** str z0, \[sp\]
+** smstop sm
+** ldr z0, \[sp\]
+** addvl sp, sp, #1
+** ...
+*/
+[[arm::locally_streaming]] svint8_t
+test_z0 ()
+{
+ asm ("");
+ return (svint8_t) {};
+}
+
+/*
+** test_z3:
+** ...
+** smstart sm
+** ...
+** addvl sp, sp, #-4
+** str z0, \[sp\]
+** str z1, \[sp, #1, mul vl\]
+** str z2, \[sp, #2, mul vl\]
+** str z3, \[sp, #3, mul vl\]
+** smstop sm
+** ldr z0, \[sp\]
+** ldr z1, \[sp, #1, mul vl\]
+** ldr z2, \[sp, #2, mul vl\]
+** ldr z3, \[sp, #3, mul vl\]
+** ...
+*/
+[[arm::locally_streaming]] svint8x4_t
+test_z3 ()
+{
+ asm ("");
+ return (svint8x4_t) {};
+}
+
+/*
+** test_p0:
+** ...
+** smstart sm
+** pfalse p0\.b
+** addvl sp, sp, #-1
+** str p0, \[sp\]
+** smstop sm
+** ldr p0, \[sp\]
+** addvl sp, sp, #1
+** ...
+*/
+[[arm::locally_streaming]] svbool_t
+test_p0 ()
+{
+ asm ("");
+ return (svbool_t) {};
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/locally_streaming_3.c b/gcc/testsuite/gcc.target/aarch64/sme/locally_streaming_3.c
new file mode 100644
index 0000000..2bdea6a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/locally_streaming_3.c
@@ -0,0 +1,273 @@
+// { dg-options "-O -fomit-frame-pointer" }
+// { dg-final { check-function-bodies "**" "" } }
+
+#include <arm_neon.h>
+#include <arm_sve.h>
+
+/*
+** test_d0:
+** ...
+** fmov x10, d0
+** smstart sm
+** fmov d0, x10
+** smstop sm
+** ...
+*/
+[[arm::locally_streaming]] void
+test_d0 (double d0)
+{
+ asm ("");
+}
+
+/*
+** test_d7:
+** ...
+** fmov x10, d0
+** fmov x11, d1
+** fmov x12, d2
+** fmov x13, d3
+** fmov x14, d4
+** fmov x15, d5
+** fmov x16, d6
+** fmov x17, d7
+** smstart sm
+** fmov d0, x10
+** fmov d1, x11
+** fmov d2, x12
+** fmov d3, x13
+** fmov d4, x14
+** fmov d5, x15
+** fmov d6, x16
+** fmov d7, x17
+** smstop sm
+** ...
+*/
+[[arm::locally_streaming]] void
+test_d7 (double d0, double d1, double d2, double d3,
+ double d4, double d5, double d6, double d7)
+{
+ asm ("");
+}
+
+/*
+** test_d0_vec:
+** ...
+** (
+** fmov x10, d0
+** |
+** umov x10, v0.d\[0\]
+** )
+** smstart sm
+** fmov d0, x10
+** smstop sm
+** ...
+*/
+[[arm::locally_streaming]] void
+test_d0_vec (int8x8_t d0)
+{
+ asm ("");
+}
+
+/*
+** test_d7_vec:
+** ...
+** (
+** fmov x10, d0
+** fmov x11, d1
+** fmov x12, d2
+** fmov x13, d3
+** fmov x14, d4
+** fmov x15, d5
+** fmov x16, d6
+** fmov x17, d7
+** |
+** umov x10, v0.d\[0\]
+** umov x11, v1.d\[0\]
+** umov x12, v2.d\[0\]
+** umov x13, v3.d\[0\]
+** umov x14, v4.d\[0\]
+** umov x15, v5.d\[0\]
+** umov x16, v6.d\[0\]
+** umov x17, v7.d\[0\]
+** )
+** smstart sm
+** fmov d0, x10
+** fmov d1, x11
+** fmov d2, x12
+** fmov d3, x13
+** fmov d4, x14
+** fmov d5, x15
+** fmov d6, x16
+** fmov d7, x17
+** smstop sm
+** ...
+*/
+[[arm::locally_streaming]] void
+test_d7_vec (int8x8_t d0, int8x8_t d1, int8x8_t d2, int8x8_t d3,
+ int8x8_t d4, int8x8_t d5, int8x8_t d6, int8x8_t d7)
+{
+ asm ("");
+}
+
+/*
+** test_q0:
+** ...
+** str q0, \[sp, #?-16\]!
+** smstart sm
+** ldr q0, \[sp\], #?16
+** smstop sm
+** ...
+*/
+[[arm::locally_streaming]] void
+test_q0 (int8x16_t q0)
+{
+ asm ("");
+}
+
+/*
+** test_q7:
+** ...
+** stp q0, q1, \[sp, #?-128\]!
+** stp q2, q3, \[sp, #?32\]
+** stp q4, q5, \[sp, #?64\]
+** stp q6, q7, \[sp, #?96\]
+** smstart sm
+** ldp q2, q3, \[sp, #?32\]
+** ldp q4, q5, \[sp, #?64\]
+** ldp q6, q7, \[sp, #?96\]
+** ldp q0, q1, \[sp\], #?128
+** smstop sm
+** ...
+*/
+[[arm::locally_streaming]] void
+test_q7 (int8x16x4_t q0, int8x16x4_t q4)
+{
+ asm ("");
+}
+
+/*
+** test_z0:
+** ...
+** addvl sp, sp, #-1
+** str z0, \[sp\]
+** smstart sm
+** ldr z0, \[sp\]
+** addvl sp, sp, #1
+** smstop sm
+** ...
+*/
+[[arm::locally_streaming]] void
+test_z0 (svint8_t z0)
+{
+ asm ("");
+}
+
+/*
+** test_z7:
+** ...
+** addvl sp, sp, #-8
+** str z0, \[sp\]
+** str z1, \[sp, #1, mul vl\]
+** str z2, \[sp, #2, mul vl\]
+** str z3, \[sp, #3, mul vl\]
+** str z4, \[sp, #4, mul vl\]
+** str z5, \[sp, #5, mul vl\]
+** str z6, \[sp, #6, mul vl\]
+** str z7, \[sp, #7, mul vl\]
+** smstart sm
+** ldr z0, \[sp\]
+** ldr z1, \[sp, #1, mul vl\]
+** ldr z2, \[sp, #2, mul vl\]
+** ldr z3, \[sp, #3, mul vl\]
+** ldr z4, \[sp, #4, mul vl\]
+** ldr z5, \[sp, #5, mul vl\]
+** ldr z6, \[sp, #6, mul vl\]
+** ldr z7, \[sp, #7, mul vl\]
+** addvl sp, sp, #8
+** smstop sm
+** ...
+*/
+[[arm::locally_streaming]] void
+test_z7 (svint8x4_t z0, svint8x4_t z4)
+{
+ asm ("");
+}
+
+/*
+** test_p0:
+** ...
+** addvl sp, sp, #-1
+** str p0, \[sp\]
+** smstart sm
+** ldr p0, \[sp\]
+** addvl sp, sp, #1
+** smstop sm
+** ...
+*/
+[[arm::locally_streaming]] void
+test_p0 (svbool_t p0)
+{
+ asm ("");
+}
+
+/*
+** test_p3:
+** ...
+** addvl sp, sp, #-1
+** str p0, \[sp\]
+** str p1, \[sp, #1, mul vl\]
+** str p2, \[sp, #2, mul vl\]
+** str p3, \[sp, #3, mul vl\]
+** smstart sm
+** ldr p0, \[sp\]
+** ldr p1, \[sp, #1, mul vl\]
+** ldr p2, \[sp, #2, mul vl\]
+** ldr p3, \[sp, #3, mul vl\]
+** addvl sp, sp, #1
+** smstop sm
+** ...
+*/
+[[arm::locally_streaming]] void
+test_p3 (svbool_t p0, svbool_t p1, svbool_t p2, svbool_t p3)
+{
+ asm ("");
+}
+
+/*
+** test_mixed:
+** ...
+** addvl sp, sp, #-3
+** str p0, \[sp\]
+** str p1, \[sp, #1, mul vl\]
+** str p2, \[sp, #2, mul vl\]
+** str p3, \[sp, #3, mul vl\]
+** str z3, \[sp, #1, mul vl\]
+** str z7, \[sp, #2, mul vl\]
+** stp q2, q6, \[sp, #?-32\]!
+** fmov w10, s0
+** fmov x11, d1
+** fmov w12, s4
+** fmov x13, d5
+** smstart sm
+** fmov s0, w10
+** fmov d1, x11
+** fmov s4, w12
+** fmov d5, x13
+** ldp q2, q6, \[sp\], #?32
+** ldr p0, \[sp\]
+** ldr p1, \[sp, #1, mul vl\]
+** ldr p2, \[sp, #2, mul vl\]
+** ldr p3, \[sp, #3, mul vl\]
+** ldr z3, \[sp, #1, mul vl\]
+** ldr z7, \[sp, #2, mul vl\]
+** addvl sp, sp, #3
+** smstop sm
+** ...
+*/
+[[arm::locally_streaming]] void
+test_mixed (float s0, double d1, float32x4_t q2, svfloat32_t z3,
+ float s4, double d5, float64x2_t q6, svfloat64_t z7,
+ svbool_t p0, svbool_t p1, svbool_t p2, svbool_t p3)
+{
+ asm ("");
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/locally_streaming_4.c b/gcc/testsuite/gcc.target/aarch64/sme/locally_streaming_4.c
new file mode 100644
index 0000000..42adeb1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/locally_streaming_4.c
@@ -0,0 +1,145 @@
+// { dg-options "-O -fomit-frame-pointer" }
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include <arm_neon.h>
+#include <arm_sve.h>
+
+/*
+** test_d0:
+** ...
+** smstart sm
+** ...
+** fmov x10, d0
+** smstop sm
+** fmov d0, x10
+** ...
+** smstart sm
+** ...
+** smstop sm
+** ...
+*/
+void consume_d0 (double d0);
+
+__arm_locally_streaming void
+test_d0 ()
+{
+ asm ("");
+ consume_d0 (1.0);
+ asm ("");
+}
+
+/*
+** test_d7:
+** ...
+** fmov x10, d0
+** fmov x11, d1
+** fmov x12, d2
+** fmov x13, d3
+** fmov x14, d4
+** fmov x15, d5
+** fmov x16, d6
+** fmov x17, d7
+** smstop sm
+** fmov d0, x10
+** fmov d1, x11
+** fmov d2, x12
+** fmov d3, x13
+** fmov d4, x14
+** fmov d5, x15
+** fmov d6, x16
+** fmov d7, x17
+** ...
+*/
+void consume_d7 (double d0, double d1, double d2, double d3,
+ double d4, double d5, double d6, double d7);
+__arm_locally_streaming void
+test_d7 ()
+{
+ asm ("");
+ consume_d7 (1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0);
+ asm ("");
+}
+
+/*
+** test_q7:
+** ...
+** stp q0, q1, \[sp, #?-128\]!
+** stp q2, q3, \[sp, #?32\]
+** stp q4, q5, \[sp, #?64\]
+** stp q6, q7, \[sp, #?96\]
+** smstop sm
+** ldp q2, q3, \[sp, #?32\]
+** ldp q4, q5, \[sp, #?64\]
+** ldp q6, q7, \[sp, #?96\]
+** ldp q0, q1, \[sp\], #?128
+** ...
+*/
+void consume_q7 (int8x16x4_t q0, int8x16x4_t q4);
+
+__arm_locally_streaming void
+test_q7 (int8x16x4_t *ptr)
+{
+ asm ("");
+ consume_q7 (ptr[0], ptr[1]);
+ asm ("");
+}
+
+/*
+** test_z7:
+** ...
+** addvl sp, sp, #-8
+** str z0, \[sp\]
+** str z1, \[sp, #1, mul vl\]
+** str z2, \[sp, #2, mul vl\]
+** str z3, \[sp, #3, mul vl\]
+** str z4, \[sp, #4, mul vl\]
+** str z5, \[sp, #5, mul vl\]
+** str z6, \[sp, #6, mul vl\]
+** str z7, \[sp, #7, mul vl\]
+** smstop sm
+** ldr z0, \[sp\]
+** ldr z1, \[sp, #1, mul vl\]
+** ldr z2, \[sp, #2, mul vl\]
+** ldr z3, \[sp, #3, mul vl\]
+** ldr z4, \[sp, #4, mul vl\]
+** ldr z5, \[sp, #5, mul vl\]
+** ldr z6, \[sp, #6, mul vl\]
+** ldr z7, \[sp, #7, mul vl\]
+** addvl sp, sp, #8
+** ...
+*/
+void consume_z7 (svint8x4_t z0, svint8x4_t z4);
+
+__arm_locally_streaming void
+test_z7 (svint8x4_t *ptr1, svint8x4_t *ptr2)
+{
+ asm ("");
+ consume_z7 (*ptr1, *ptr2);
+ asm ("");
+}
+
+/*
+** test_p3:
+** ...
+** addvl sp, sp, #-1
+** str p0, \[sp\]
+** str p1, \[sp, #1, mul vl\]
+** str p2, \[sp, #2, mul vl\]
+** str p3, \[sp, #3, mul vl\]
+** smstop sm
+** ldr p0, \[sp\]
+** ldr p1, \[sp, #1, mul vl\]
+** ldr p2, \[sp, #2, mul vl\]
+** ldr p3, \[sp, #3, mul vl\]
+** addvl sp, sp, #1
+** ...
+*/
+void consume_p3 (svbool_t p0, svbool_t p1, svbool_t p2, svbool_t p3);
+
+__arm_locally_streaming void
+test_p3 (svbool_t *ptr1, svbool_t *ptr2, svbool_t *ptr3, svbool_t *ptr4)
+{
+ asm ("");
+ consume_p3 (*ptr1, *ptr2, *ptr3, *ptr4);
+ asm ("");
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_1.c b/gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_1.c
new file mode 100644
index 0000000..4e3869f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_1.c
@@ -0,0 +1,58 @@
+/* { dg-options "-O2 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+void run(void (*)());
+
+/*
+** foo:
+** ...
+** mrs x16, svcr
+** ...
+** str x16, (.*)
+** ...
+** ldr x16, \1
+** tbz x16, 0, .*
+** smstop sm
+** bl __clear_cache
+** ldr x16, \1
+** tbz x16, 0, .*
+** smstart sm
+** add x0, .*
+** ldr x16, \1
+** tbz x16, 0, .*
+** smstop sm
+** bl run
+** ldr x16, \1
+** tbz x16, 0, .*
+** smstart sm
+** mov w0, 1
+** ...
+** ret
+** ldr x16, \1
+** tbz x16, 0, .*
+** smstart sm
+** mov w0, 0
+** ...
+*/
+int
+foo (int *ptr) __arm_streaming_compatible
+{
+ __label__ failure;
+
+ void bar () { *ptr += 1; goto failure; }
+ run (bar);
+ return 1;
+
+failure:
+ return 0;
+}
+
+// { dg-final { scan-assembler {\tstp\tx19, x20,} } }
+// { dg-final { scan-assembler {\tstp\tx21, x22,} } }
+// { dg-final { scan-assembler {\tstp\tx23, x24,} } }
+// { dg-final { scan-assembler {\tstp\tx25, x26,} } }
+// { dg-final { scan-assembler {\tstp\tx27, x28,} } }
+// { dg-final { scan-assembler {\tstp\td8, d9,} } }
+// { dg-final { scan-assembler {\tstp\td10, d11,} } }
+// { dg-final { scan-assembler {\tstp\td12, d13,} } }
+// { dg-final { scan-assembler {\tstp\td14, d15,} } }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_2.c b/gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_2.c
new file mode 100644
index 0000000..2a2db72
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_2.c
@@ -0,0 +1,44 @@
+/* { dg-options "-O2 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+void run(void (*)());
+
+/*
+** foo:
+** ...
+** smstop sm
+** bl __clear_cache
+** smstart sm
+** add x0, .*
+** smstop sm
+** bl run
+** smstart sm
+** mov w0, 1
+** ...
+** ret
+** smstart sm
+** mov w0, 0
+** ...
+*/
+int
+foo (int *ptr) __arm_streaming
+{
+ __label__ failure;
+
+ void bar () { *ptr += 1; goto failure; }
+ run (bar);
+ return 1;
+
+failure:
+ return 0;
+}
+
+// { dg-final { scan-assembler {\tstp\tx19, x20,} } }
+// { dg-final { scan-assembler {\tstp\tx21, x22,} } }
+// { dg-final { scan-assembler {\tstp\tx23, x24,} } }
+// { dg-final { scan-assembler {\tstp\tx25, x26,} } }
+// { dg-final { scan-assembler {\tstp\tx27, x28,} } }
+// { dg-final { scan-assembler {\tstp\td8, d9,} } }
+// { dg-final { scan-assembler {\tstp\td10, d11,} } }
+// { dg-final { scan-assembler {\tstp\td12, d13,} } }
+// { dg-final { scan-assembler {\tstp\td14, d15,} } }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_3.c b/gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_3.c
new file mode 100644
index 0000000..022b040
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_3.c
@@ -0,0 +1,46 @@
+/* { dg-options "-O2 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+void run(void (*)());
+
+/*
+** foo:
+** ...
+** smstart sm
+** ...
+** smstop sm
+** bl __clear_cache
+** smstart sm
+** add x0, .*
+** smstop sm
+** bl run
+** smstart sm
+** mov w0, 1
+** ...
+** smstart sm
+** mov w0, 0
+** smstop sm
+** ...
+*/
+__arm_locally_streaming int
+foo (int *ptr)
+{
+ __label__ failure;
+
+ void bar () { *ptr += 1; goto failure; }
+ run (bar);
+ return 1;
+
+failure:
+ return 0;
+}
+
+// { dg-final { scan-assembler {\tstp\tx19, x20,} } }
+// { dg-final { scan-assembler {\tstp\tx21, x22,} } }
+// { dg-final { scan-assembler {\tstp\tx23, x24,} } }
+// { dg-final { scan-assembler {\tstp\tx25, x26,} } }
+// { dg-final { scan-assembler {\tstp\tx27, x28,} } }
+// { dg-final { scan-assembler {\tstp\td8, d9,} } }
+// { dg-final { scan-assembler {\tstp\td10, d11,} } }
+// { dg-final { scan-assembler {\tstp\td12, d13,} } }
+// { dg-final { scan-assembler {\tstp\td14, d15,} } }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_4.c b/gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_4.c
new file mode 100644
index 0000000..0446076
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_4.c
@@ -0,0 +1,25 @@
+/* { dg-options "-O2 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+void run(void (*)());
+
+/*
+** bar.0:
+** ...
+** smstart sm
+** ...
+** smstop sm
+** br x[0-9]+
+*/
+int
+foo (int *ptr)
+{
+ __label__ failure;
+
+ __arm_locally_streaming void bar () { *ptr += 1; goto failure; }
+ run (bar);
+ return 1;
+
+failure:
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_5.c b/gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_5.c
new file mode 100644
index 0000000..4246aec
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_5.c
@@ -0,0 +1,26 @@
+/* { dg-options "-O2 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+void run(void (*)() __arm_streaming);
+
+/*
+** bar.0:
+** ...
+** smstop sm
+** br x[0-9]+
+*/
+int
+foo (int *ptr)
+{
+ __label__ failure;
+
+ void bar () __arm_streaming { *ptr += 1; goto failure; }
+ run (bar);
+ return 1;
+
+failure:
+ return 0;
+}
+
+// { dg-final { scan-assembler-not {smstart\t} } }
+// { dg-final { scan-assembler-not {mrs\t} } }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_6.c b/gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_6.c
new file mode 100644
index 0000000..151e2f2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_6.c
@@ -0,0 +1,31 @@
+/* { dg-options "-O2 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+void run(void (*)() __arm_streaming_compatible);
+
+/*
+** bar.0:
+** ...
+** mrs x16, svcr
+** ...
+** str x16, (.*)
+** ...
+** ldr x16, \1
+** tbz x16, 0, .*
+** smstop sm
+** br x[0-9]+
+*/
+int
+foo (int *ptr)
+{
+ __label__ failure;
+
+ void bar () __arm_streaming_compatible { *ptr += 1; goto failure; }
+ run (bar);
+ return 1;
+
+failure:
+ return 0;
+}
+
+// { dg-final { scan-assembler-not {smstart\t} } }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_7.c b/gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_7.c
new file mode 100644
index 0000000..9cc3ad5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_7.c
@@ -0,0 +1,25 @@
+/* { dg-options "-O2 -fno-schedule-insns -fno-schedule-insns2" } */
+
+void run(void (*)() __arm_inout("za"));
+void callee () __arm_inout("za");
+
+int
+foo (int *ptr)
+{
+ __label__ failure;
+
+ void bar () __arm_inout("za")
+ {
+ callee ();
+ *ptr += 1;
+ goto failure;
+ }
+ run (bar);
+ return 1;
+
+failure:
+ return 0;
+}
+
+// { dg-final { scan-assembler-not {\tsmstart\t} } }
+// { dg-final { scan-assembler-not {\tsmstop\t} } }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/sibcall_1.c b/gcc/testsuite/gcc.target/aarch64/sme/sibcall_1.c
new file mode 100644
index 0000000..c7530de
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/sibcall_1.c
@@ -0,0 +1,45 @@
+/* { dg-options "-O2" } */
+
+void sc_callee () [[arm::streaming_compatible]];
+void s_callee () [[arm::streaming]];
+void n_callee ();
+
+[[arm::locally_streaming]] __attribute__((noipa)) void
+sc_ls_callee () [[arm::streaming_compatible]] {}
+[[arm::locally_streaming]] __attribute__((noipa)) void
+n_ls_callee () {}
+
+void
+sc_to_sc () [[arm::streaming_compatible]]
+{
+ sc_callee ();
+}
+/* { dg-final { scan-assembler {\tb\tsc_callee} } } */
+
+void
+sc_to_s () [[arm::streaming_compatible]]
+{
+ s_callee ();
+}
+/* { dg-final { scan-assembler {\tbl\ts_callee} } } */
+
+void
+sc_to_n () [[arm::streaming_compatible]]
+{
+ n_callee ();
+}
+/* { dg-final { scan-assembler {\tbl\tn_callee} } } */
+
+void
+sc_to_sc_ls () [[arm::streaming_compatible]]
+{
+ sc_ls_callee ();
+}
+/* { dg-final { scan-assembler {\tb\tsc_ls_callee} } } */
+
+void
+sc_to_n_ls () [[arm::streaming_compatible]]
+{
+ n_ls_callee ();
+}
+/* { dg-final { scan-assembler {\tbl\tn_ls_callee} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/sibcall_2.c b/gcc/testsuite/gcc.target/aarch64/sme/sibcall_2.c
new file mode 100644
index 0000000..8d1c8a9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/sibcall_2.c
@@ -0,0 +1,45 @@
+/* { dg-options "-O2" } */
+
+void sc_callee () [[arm::streaming_compatible]];
+void s_callee () [[arm::streaming]];
+void n_callee ();
+
+[[arm::locally_streaming]] __attribute__((noipa)) void
+sc_ls_callee () [[arm::streaming_compatible]] {}
+[[arm::locally_streaming]] __attribute__((noipa)) void
+n_ls_callee () {}
+
+void
+s_to_sc () [[arm::streaming]]
+{
+ sc_callee ();
+}
+/* { dg-final { scan-assembler {\tb\tsc_callee} } } */
+
+void
+s_to_s () [[arm::streaming]]
+{
+ s_callee ();
+}
+/* { dg-final { scan-assembler {\tb\ts_callee} } } */
+
+void
+s_to_n () [[arm::streaming]]
+{
+ n_callee ();
+}
+/* { dg-final { scan-assembler {\tbl\tn_callee} } } */
+
+void
+s_to_sc_ls () [[arm::streaming]]
+{
+ sc_ls_callee ();
+}
+/* { dg-final { scan-assembler {\tb\tsc_ls_callee} } } */
+
+void
+s_to_n_ls () [[arm::streaming]]
+{
+ n_ls_callee ();
+}
+/* { dg-final { scan-assembler {\tbl\tn_ls_callee} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/sibcall_3.c b/gcc/testsuite/gcc.target/aarch64/sme/sibcall_3.c
new file mode 100644
index 0000000..2ae937f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/sibcall_3.c
@@ -0,0 +1,45 @@
+/* { dg-options "-O2" } */
+
+void sc_callee () [[arm::streaming_compatible]];
+void s_callee () [[arm::streaming]];
+void n_callee ();
+
+[[arm::locally_streaming]] __attribute__((noipa)) void
+sc_ls_callee () [[arm::streaming_compatible]] {}
+[[arm::locally_streaming]] __attribute__((noipa)) void
+n_ls_callee () {}
+
+void
+n_to_sc ()
+{
+ sc_callee ();
+}
+/* { dg-final { scan-assembler {\tb\tsc_callee} } } */
+
+void
+n_to_s ()
+{
+ s_callee ();
+}
+/* { dg-final { scan-assembler {\tbl\ts_callee} } } */
+
+void
+n_to_n ()
+{
+ n_callee ();
+}
+/* { dg-final { scan-assembler {\tb\tn_callee} } } */
+
+void
+n_to_sc_ls ()
+{
+ sc_ls_callee ();
+}
+/* { dg-final { scan-assembler {\tb\tsc_ls_callee} } } */
+
+void
+n_to_n_ls ()
+{
+ n_ls_callee ();
+}
+/* { dg-final { scan-assembler {\tb\tn_ls_callee} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/sibcall_4.c b/gcc/testsuite/gcc.target/aarch64/sme/sibcall_4.c
new file mode 100644
index 0000000..6935a1b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/sibcall_4.c
@@ -0,0 +1,45 @@
+/* { dg-options "-O2" } */
+
+void sc_callee () [[arm::streaming_compatible]];
+void s_callee () [[arm::streaming]];
+void n_callee ();
+
+[[arm::locally_streaming]] __attribute__((noipa)) void
+sc_ls_callee () [[arm::streaming_compatible]] {}
+[[arm::locally_streaming]] __attribute__((noipa)) void
+n_ls_callee () {}
+
+[[arm::locally_streaming]] void
+sc_to_sc () [[arm::streaming_compatible]]
+{
+ sc_callee ();
+}
+/* { dg-final { scan-assembler {\tb\tsc_callee} } } */
+
+[[arm::locally_streaming]] void
+sc_to_s () [[arm::streaming_compatible]]
+{
+ s_callee ();
+}
+/* { dg-final { scan-assembler {\tbl\ts_callee} } } */
+
+[[arm::locally_streaming]] void
+sc_to_n () [[arm::streaming_compatible]]
+{
+ n_callee ();
+}
+/* { dg-final { scan-assembler {\tbl\tn_callee} } } */
+
+[[arm::locally_streaming]] void
+sc_to_sc_ls () [[arm::streaming_compatible]]
+{
+ sc_ls_callee ();
+}
+/* { dg-final { scan-assembler {\tb\tsc_ls_callee} } } */
+
+[[arm::locally_streaming]] void
+sc_to_n_ls () [[arm::streaming_compatible]]
+{
+ n_ls_callee ();
+}
+/* { dg-final { scan-assembler {\tbl\tn_ls_callee} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/sibcall_5.c b/gcc/testsuite/gcc.target/aarch64/sme/sibcall_5.c
new file mode 100644
index 0000000..7aaf58d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/sibcall_5.c
@@ -0,0 +1,45 @@
+/* { dg-options "-O2" } */
+
+void sc_callee () [[arm::streaming_compatible]];
+void s_callee () [[arm::streaming]];
+void n_callee ();
+
+[[arm::locally_streaming]] __attribute__((noipa)) void
+sc_ls_callee () [[arm::streaming_compatible]] {}
+[[arm::locally_streaming]] __attribute__((noipa)) void
+n_ls_callee () {}
+
+[[arm::locally_streaming]] void
+n_to_sc ()
+{
+ sc_callee ();
+}
+/* { dg-final { scan-assembler {\tb\tsc_callee} } } */
+
+[[arm::locally_streaming]] void
+n_to_s ()
+{
+ s_callee ();
+}
+/* { dg-final { scan-assembler {\tbl\ts_callee} } } */
+
+[[arm::locally_streaming]] void
+n_to_n ()
+{
+ n_callee ();
+}
+/* { dg-final { scan-assembler {\tb\tn_callee} } } */
+
+[[arm::locally_streaming]] void
+n_to_sc_ls ()
+{
+ sc_ls_callee ();
+}
+/* { dg-final { scan-assembler {\tb\tsc_ls_callee} } } */
+
+[[arm::locally_streaming]] void
+n_to_n_ls ()
+{
+ n_ls_callee ();
+}
+/* { dg-final { scan-assembler {\tb\tn_ls_callee} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/sibcall_6.c b/gcc/testsuite/gcc.target/aarch64/sme/sibcall_6.c
new file mode 100644
index 0000000..e568edb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/sibcall_6.c
@@ -0,0 +1,26 @@
+/* { dg-options "-O2" } */
+
+void shared_callee () [[arm::inout("za")]];
+[[arm::new("za")]] __attribute__((noipa)) void new_callee () {}
+void normal_callee ();
+
+void
+shared_to_shared () [[arm::inout("za")]]
+{
+ shared_callee ();
+}
+/* { dg-final { scan-assembler {\tb\tshared_callee} } } */
+
+void
+shared_to_new () [[arm::inout("za")]]
+{
+ new_callee ();
+}
+/* { dg-final { scan-assembler {\tbl\tnew_callee} } } */
+
+void
+shared_to_normal () [[arm::inout("za")]]
+{
+ normal_callee ();
+}
+/* { dg-final { scan-assembler {\tbl\tnormal_callee} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/sibcall_7.c b/gcc/testsuite/gcc.target/aarch64/sme/sibcall_7.c
new file mode 100644
index 0000000..a5f576d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/sibcall_7.c
@@ -0,0 +1,26 @@
+/* { dg-options "-O2" } */
+
+void shared_callee () [[arm::inout("za")]];
+[[arm::new("za")]] __attribute__((noipa)) void new_callee () {}
+void normal_callee ();
+
+[[arm::new("za")]] void
+new_to_shared ()
+{
+ shared_callee ();
+}
+/* { dg-final { scan-assembler {\tbl\tshared_callee} } } */
+
+[[arm::new("za")]] void
+new_to_new ()
+{
+ new_callee ();
+}
+/* { dg-final { scan-assembler {\tb\tnew_callee} } } */
+
+[[arm::new("za")]] void
+new_to_normal ()
+{
+ normal_callee ();
+}
+/* { dg-final { scan-assembler {\tb\tnormal_callee} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/sibcall_8.c b/gcc/testsuite/gcc.target/aarch64/sme/sibcall_8.c
new file mode 100644
index 0000000..33370f7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/sibcall_8.c
@@ -0,0 +1,19 @@
+/* { dg-options "-O2" } */
+
+void shared_callee () [[arm::inout("za")]];
+[[arm::new("za")]] __attribute__((noipa)) void new_callee () {}
+void normal_callee ();
+
+void
+normal_to_new ()
+{
+ new_callee ();
+}
+/* { dg-final { scan-assembler {\tb\tnew_callee} } } */
+
+void
+normal_to_normal ()
+{
+ normal_callee ();
+}
+/* { dg-final { scan-assembler {\tb\tnormal_callee} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/streaming_mode_1.c b/gcc/testsuite/gcc.target/aarch64/sme/streaming_mode_1.c
new file mode 100644
index 0000000..8874b05
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/streaming_mode_1.c
@@ -0,0 +1,130 @@
+// { dg-options "" }
+
+void sc_a () [[arm::streaming_compatible]];
+void sc_a (); // { dg-error "conflicting types" }
+
+void sc_b ();
+void sc_b () [[arm::streaming_compatible]]; // { dg-error "conflicting types" }
+
+void sc_c () [[arm::streaming_compatible]];
+void sc_c () {} // Inherits attribute from declaration (confusingly).
+
+void sc_d ();
+void sc_d () [[arm::streaming_compatible]] {} // { dg-error "conflicting types" }
+
+void sc_e () [[arm::streaming_compatible]] {}
+void sc_e (); // { dg-error "conflicting types" }
+
+void sc_f () {}
+void sc_f () [[arm::streaming_compatible]]; // { dg-error "conflicting types" }
+
+extern void (*sc_g) ();
+extern void (*sc_g) () [[arm::streaming_compatible]]; // { dg-error "conflicting types" }
+
+extern void (*sc_h) () [[arm::streaming_compatible]];
+extern void (*sc_h) (); // { dg-error "conflicting types" }
+
+//----------------------------------------------------------------------------
+
+void s_a () [[arm::streaming]];
+void s_a (); // { dg-error "conflicting types" }
+
+void s_b ();
+void s_b () [[arm::streaming]]; // { dg-error "conflicting types" }
+
+void s_c () [[arm::streaming]];
+void s_c () {} // Inherits attribute from declaration (confusingly).
+
+void s_d ();
+void s_d () [[arm::streaming]] {} // { dg-error "conflicting types" }
+
+void s_e () [[arm::streaming]] {}
+void s_e (); // { dg-error "conflicting types" }
+
+void s_f () {}
+void s_f () [[arm::streaming]]; // { dg-error "conflicting types" }
+
+extern void (*s_g) ();
+extern void (*s_g) () [[arm::streaming]]; // { dg-error "conflicting types" }
+
+extern void (*s_h) () [[arm::streaming]];
+extern void (*s_h) (); // { dg-error "conflicting types" }
+
+//----------------------------------------------------------------------------
+
+void mixed_a () [[arm::streaming]];
+void mixed_a () [[arm::streaming_compatible]]; // { dg-error "conflicting types" }
+
+void mixed_b () [[arm::streaming_compatible]];
+void mixed_b () [[arm::streaming]]; // { dg-error "conflicting types" }
+
+void mixed_c () [[arm::streaming]];
+void mixed_c () [[arm::streaming_compatible]] {} // { dg-error "conflicting types" }
+
+void mixed_d () [[arm::streaming_compatible]];
+void mixed_d () [[arm::streaming]] {} // { dg-error "conflicting types" }
+
+void mixed_e () [[arm::streaming]] {}
+void mixed_e () [[arm::streaming_compatible]]; // { dg-error "conflicting types" }
+
+void mixed_f () [[arm::streaming_compatible]] {}
+void mixed_f () [[arm::streaming]]; // { dg-error "conflicting types" }
+
+extern void (*mixed_g) () [[arm::streaming_compatible]];
+extern void (*mixed_g) () [[arm::streaming]]; // { dg-error "conflicting types" }
+
+extern void (*mixed_h) () [[arm::streaming]];
+extern void (*mixed_h) () [[arm::streaming_compatible]]; // { dg-error "conflicting types" }
+
+//----------------------------------------------------------------------------
+
+void contradiction_1 () [[arm::streaming, arm::streaming_compatible]]; // { dg-warning "conflicts with attribute" }
+void contradiction_2 () [[arm::streaming_compatible, arm::streaming]]; // { dg-warning "conflicts with attribute" }
+
+int [[arm::streaming_compatible]] int_attr; // { dg-warning "only applies to function types" }
+void [[arm::streaming_compatible]] ret_attr (); // { dg-warning "only applies to function types" }
+void *[[arm::streaming]] ptr_attr; // { dg-warning "only applies to function types" }
+
+typedef void s_callback () [[arm::streaming]];
+typedef void sc_callback () [[arm::streaming_compatible]];
+
+typedef void contradiction_callback_1 () [[arm::streaming, arm::streaming_compatible]]; // { dg-warning "conflicts with attribute" }
+typedef void contradiction_callback_2 () [[arm::streaming_compatible, arm::streaming]]; // { dg-warning "conflicts with attribute" }
+
+void (*contradiction_callback_ptr_1) () [[arm::streaming, arm::streaming_compatible]]; // { dg-warning "conflicts with attribute" }
+void (*contradiction_callback_ptr_2) () [[arm::streaming_compatible, arm::streaming]]; // { dg-warning "conflicts with attribute" }
+
+struct s {
+ void (*contradiction_callback_ptr_1) () [[arm::streaming, arm::streaming_compatible]]; // { dg-warning "conflicts with attribute" }
+ void (*contradiction_callback_ptr_2) () [[arm::streaming_compatible, arm::streaming]]; // { dg-warning "conflicts with attribute" }
+};
+
+//----------------------------------------------------------------------------
+
+void keyword_ok_1 () __arm_streaming;
+void keyword_ok_1 () __arm_streaming;
+
+void keyword_ok_2 () __arm_streaming;
+void keyword_ok_2 () [[arm::streaming]];
+
+void keyword_ok_3 () [[arm::streaming]];
+void keyword_ok_3 () __arm_streaming;
+
+void keyword_ok_4 () __arm_streaming [[arm::streaming]];
+
+void keyword_ok_5 () __arm_streaming_compatible;
+void keyword_ok_5 () [[arm::streaming_compatible]];
+
+//----------------------------------------------------------------------------
+
+void keyword_contradiction_1 () __arm_streaming;
+void keyword_contradiction_1 (); // { dg-error "conflicting types" }
+
+void keyword_contradiction_2 ();
+void keyword_contradiction_2 () __arm_streaming; // { dg-error "conflicting types" }
+
+void keyword_contradiction_3 () __arm_streaming;
+void keyword_contradiction_3 () [[arm::streaming_compatible]]; // { dg-error "conflicting types" }
+
+void keyword_contradiction_4 () [[arm::streaming_compatible]];
+void keyword_contradiction_4 () __arm_streaming; // { dg-error "conflicting types" }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/streaming_mode_2.c b/gcc/testsuite/gcc.target/aarch64/sme/streaming_mode_2.c
new file mode 100644
index 0000000..e8be0f8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/streaming_mode_2.c
@@ -0,0 +1,25 @@
+// { dg-options "" }
+
+void sc_fn () [[arm::streaming_compatible]];
+void s_fn () [[arm::streaming]];
+void ns_fn ();
+
+void (*sc_fn_ptr) () [[arm::streaming_compatible]];
+void (*s_fn_ptr) () [[arm::streaming]];
+void (*ns_fn_ptr) ();
+
+void
+f ()
+{
+ sc_fn_ptr = sc_fn;
+ sc_fn_ptr = s_fn; // { dg-error "incompatible pointer type" }
+ sc_fn_ptr = ns_fn; // { dg-error "incompatible pointer type" }
+
+ s_fn_ptr = sc_fn; // { dg-error "incompatible pointer type" }
+ s_fn_ptr = s_fn;
+ s_fn_ptr = ns_fn; // { dg-error "incompatible pointer type" }
+
+ ns_fn_ptr = sc_fn; // { dg-error "incompatible pointer type" }
+ ns_fn_ptr = s_fn; // { dg-error "incompatible pointer type" }
+ ns_fn_ptr = ns_fn;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/streaming_mode_3.c b/gcc/testsuite/gcc.target/aarch64/sme/streaming_mode_3.c
new file mode 100644
index 0000000..45ec923
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/streaming_mode_3.c
@@ -0,0 +1,63 @@
+// { dg-options "" }
+
+#pragma GCC target "+nosme"
+
+void sc_a () [[arm::streaming_compatible]] {}
+void s_a () [[arm::streaming]] {} // { dg-error "streaming functions require the ISA extension 'sme'" }
+void ns_a () {}
+
+void sc_b () [[arm::streaming_compatible]] {}
+void ns_b () {}
+void s_b () [[arm::streaming]] {} // { dg-error "streaming functions require the ISA extension 'sme'" }
+
+void sc_c () [[arm::streaming_compatible]] {}
+void sc_d () [[arm::streaming_compatible]] {}
+
+void s_c () [[arm::streaming]] {} // { dg-error "streaming functions require the ISA extension 'sme'" }
+void s_d () [[arm::streaming]] {} // { dg-error "streaming functions require the ISA extension 'sme'" }
+
+void ns_c () {}
+void ns_d () {}
+
+void sc_e () [[arm::streaming_compatible]];
+void s_e () [[arm::streaming]];
+void ns_e ();
+
+#pragma GCC target "+sme"
+
+void sc_f () [[arm::streaming_compatible]] {}
+void s_f () [[arm::streaming]] {}
+void ns_f () {}
+
+void sc_g () [[arm::streaming_compatible]] {}
+void ns_g () {}
+void s_g () [[arm::streaming]] {}
+
+void sc_h () [[arm::streaming_compatible]] {}
+void sc_i () [[arm::streaming_compatible]] {}
+
+void s_h () [[arm::streaming]] {}
+void s_i () [[arm::streaming]] {}
+
+void ns_h () {}
+void ns_i () {}
+
+void sc_j () [[arm::streaming_compatible]];
+void s_j () [[arm::streaming]];
+void ns_j ();
+
+#pragma GCC target "+sme"
+
+void sc_k () [[arm::streaming_compatible]] {}
+
+#pragma GCC target "+nosme"
+#pragma GCC target "+sme"
+
+void s_k () [[arm::streaming]] {}
+
+#pragma GCC target "+nosme"
+#pragma GCC target "+sme"
+
+void ns_k () {}
+
+#pragma GCC target "+nosme"
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/streaming_mode_4.c b/gcc/testsuite/gcc.target/aarch64/sme/streaming_mode_4.c
new file mode 100644
index 0000000..50e92f2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/streaming_mode_4.c
@@ -0,0 +1,22 @@
+// { dg-options "-mgeneral-regs-only" }
+
+void sc_a () [[arm::streaming_compatible]] {}
+void s_a () [[arm::streaming]] {} // { dg-error "streaming functions require the ISA extension 'sme'" }
+void ns_a () {}
+
+void sc_b () [[arm::streaming_compatible]] {}
+void ns_b () {}
+void s_b () [[arm::streaming]] {} // { dg-error "streaming functions require the ISA extension 'sme'" }
+
+void sc_c () [[arm::streaming_compatible]] {}
+void sc_d () [[arm::streaming_compatible]] {}
+
+void s_c () [[arm::streaming]] {} // { dg-error "streaming functions require the ISA extension 'sme'" }
+void s_d () [[arm::streaming]] {} // { dg-error "streaming functions require the ISA extension 'sme'" }
+
+void ns_c () {}
+void ns_d () {}
+
+void sc_e () [[arm::streaming_compatible]];
+void s_e () [[arm::streaming]];
+void ns_e ();
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/za_state_1.c b/gcc/testsuite/gcc.target/aarch64/sme/za_state_1.c
new file mode 100644
index 0000000..856880e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/za_state_1.c
@@ -0,0 +1,154 @@
+// { dg-options "" }
+
+void shared_a () [[arm::inout("za")]];
+void shared_a (); // { dg-error "conflicting types" }
+
+void shared_b ();
+void shared_b () [[arm::inout("za")]]; // { dg-error "conflicting types" }
+
+void shared_c () [[arm::inout("za")]];
+void shared_c () {} // Inherits attribute from declaration (confusingly).
+
+void shared_d ();
+void shared_d () [[arm::inout("za")]] {} // { dg-error "conflicting types" }
+
+void shared_e () [[arm::inout("za")]] {}
+void shared_e (); // { dg-error "conflicting types" }
+
+void shared_f () {}
+void shared_f () [[arm::inout("za")]]; // { dg-error "conflicting types" }
+
+extern void (*shared_g) ();
+extern void (*shared_g) () [[arm::inout("za")]]; // { dg-error "conflicting types" }
+
+extern void (*shared_h) () [[arm::inout("za")]];
+extern void (*shared_h) (); // { dg-error "conflicting types" }
+
+//----------------------------------------------------------------------------
+
+void preserved_a () [[arm::preserves("za")]];
+void preserved_a (); // { dg-error "conflicting types" }
+
+void preserved_b ();
+void preserved_b () [[arm::preserves("za")]]; // { dg-error "conflicting types" }
+
+void preserved_c () [[arm::preserves("za")]];
+void preserved_c () {} // Inherits attribute from declaration (confusingly).
+
+void preserved_d ();
+void preserved_d () [[arm::preserves("za")]] {} // { dg-error "conflicting types" }
+
+void preserved_e () [[arm::preserves("za")]] {}
+void preserved_e (); // { dg-error "conflicting types" }
+
+void preserved_f () {}
+void preserved_f () [[arm::preserves("za")]]; // { dg-error "conflicting types" }
+
+extern void (*preserved_g) ();
+extern void (*preserved_g) () [[arm::preserves("za")]]; // { dg-error "conflicting types" }
+
+extern void (*preserved_h) () [[arm::preserves("za")]];
+extern void (*preserved_h) (); // { dg-error "conflicting types" }
+
+//----------------------------------------------------------------------------
+
+void replicated_1 () [[arm::in("za", "za"), arm::in("za")]];
+void replicated_2 () [[arm::out("za", "za"), arm::out("za")]];
+void replicated_3 () [[arm::inout("za", "za"), arm::inout("za")]];
+void replicated_4 () [[arm::preserves("za", "za"), arm::preserves("za")]];
+
+//----------------------------------------------------------------------------
+
+void invalid_1 () [[arm::in]]; // { dg-error "wrong number of arguments" }
+void invalid_2 () [[arm::in()]]; // { dg-error "parentheses must be omitted" }
+ // { dg-error "wrong number of arguments" "" { target *-*-* } .-1 }
+void invalid_3 () [[arm::in("")]]; // { dg-error "unrecognized state string ''" }
+void invalid_4 () [[arm::in("foo")]]; // { dg-error "unrecognized state string 'foo'" }
+void invalid_5 () [[arm::in(42)]]; // { dg-error "the arguments to 'in' must be constant strings" }
+void invalid_6 () [[arm::in(*(int *)0 ? "za" : "za")]]; // { dg-error "the arguments to 'in' must be constant strings" }
+
+//----------------------------------------------------------------------------
+
+void mixed_a () [[arm::preserves("za")]];
+void mixed_a () [[arm::inout("za")]]; // { dg-error "conflicting types" }
+
+void mixed_b () [[arm::inout("za")]];
+void mixed_b () [[arm::preserves("za")]]; // { dg-error "conflicting types" }
+
+void mixed_c () [[arm::preserves("za")]];
+void mixed_c () [[arm::in("za")]] {} // { dg-error "conflicting types" }
+
+void mixed_d () [[arm::inout("za")]];
+void mixed_d () [[arm::in("za")]] {} // { dg-error "conflicting types" }
+
+void mixed_e () [[arm::out("za")]] {}
+void mixed_e () [[arm::in("za")]]; // { dg-error "conflicting types" }
+
+void mixed_f () [[arm::inout("za")]] {}
+void mixed_f () [[arm::out("za")]]; // { dg-error "conflicting types" }
+
+extern void (*mixed_g) () [[arm::in("za")]];
+extern void (*mixed_g) () [[arm::preserves("za")]]; // { dg-error "conflicting types" }
+
+extern void (*mixed_h) () [[arm::preserves("za")]];
+extern void (*mixed_h) () [[arm::out("za")]]; // { dg-error "conflicting types" }
+
+//----------------------------------------------------------------------------
+
+void contradiction_1 () [[arm::preserves("za"), arm::inout("za")]]; // { dg-error "inconsistent attributes for state 'za'" }
+void contradiction_2 () [[arm::inout("za"), arm::preserves("za")]]; // { dg-error "inconsistent attributes for state 'za'" }
+
+int [[arm::inout("za")]] int_attr; // { dg-warning "only applies to function types" }
+void *[[arm::preserves("za")]] ptr_attr; // { dg-warning "only applies to function types" }
+
+typedef void preserved_callback () [[arm::preserves("za")]];
+typedef void shared_callback () [[arm::inout("za")]];
+
+void (*preserved_callback_ptr) () [[arm::preserves("za")]];
+void (*shared_callback_ptr) () [[arm::inout("za")]];
+
+typedef void contradiction_callback_1 () [[arm::preserves("za"), arm::inout("za")]]; // { dg-error "inconsistent attributes for state 'za'" }
+typedef void contradiction_callback_2 () [[arm::inout("za"), arm::preserves("za")]]; // { dg-error "inconsistent attributes for state 'za'" }
+
+void (*contradiction_callback_ptr_1) () [[arm::preserves("za"), arm::inout("za")]]; // { dg-error "inconsistent attributes for state 'za'" }
+void (*contradiction_callback_ptr_2) () [[arm::inout("za"), arm::preserves("za")]]; // { dg-error "inconsistent attributes for state 'za'" }
+
+struct s {
+ void (*contradiction_callback_ptr_1) () [[arm::preserves("za"), arm::inout("za")]]; // { dg-error "inconsistent attributes for state 'za'" }
+ void (*contradiction_callback_ptr_2) () [[arm::inout("za"), arm::preserves("za")]]; // { dg-error "inconsistent attributes for state 'za'" }
+};
+
+//----------------------------------------------------------------------------
+
+void keyword_ok_1 () __arm_inout("za");
+void keyword_ok_1 () __arm_inout("za");
+
+void keyword_ok_2 () __arm_in("za");
+void keyword_ok_2 () [[arm::in("za")]];
+
+void keyword_ok_3 () [[arm::out("za")]];
+void keyword_ok_3 () __arm_out("za");
+
+void keyword_ok_4 () __arm_inout("za") [[arm::inout("za")]];
+
+void keyword_ok_5 () __arm_preserves("za");
+void keyword_ok_5 () [[arm::preserves("za")]];
+
+__arm_new("za") void keyword_ok_6 () {}
+
+//----------------------------------------------------------------------------
+
+void keyword_conflict_1 () __arm_inout("za");
+void keyword_conflict_1 (); // { dg-error "conflicting types" }
+
+void keyword_conflict_2 ();
+void keyword_conflict_2 () __arm_inout("za"); // { dg-error "conflicting types" }
+
+void keyword_conflict_3 () __arm_inout("za");
+void keyword_conflict_3 () [[arm::preserves("za")]]; // { dg-error "conflicting types" }
+
+void keyword_conflict_4 () [[arm::preserves("za")]];
+void keyword_conflict_4 () __arm_inout("za"); // { dg-error "conflicting types" }
+
+__arm_new("za") void keyword_conflict_5 () __arm_inout("za") {} // { dg-error "cannot create a new 'za' scope since 'za' is shared with callers" }
+__arm_new("za") void keyword_conflict_6 () __arm_preserves("za") {} // { dg-error "cannot create a new 'za' scope since 'za' is shared with callers" }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/za_state_2.c b/gcc/testsuite/gcc.target/aarch64/sme/za_state_2.c
new file mode 100644
index 0000000..572ff30
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/za_state_2.c
@@ -0,0 +1,73 @@
+// { dg-options "" }
+
+[[arm::new("za")]] void new_za_a ();
+void new_za_a ();
+
+void new_za_b ();
+[[arm::new("za")]] void new_za_b ();
+
+[[arm::new("za")]] void new_za_c ();
+void new_za_c () {}
+
+void new_za_d ();
+[[arm::new("za")]] void new_za_d () {}
+
+[[arm::new("za")]] void new_za_e () {}
+void new_za_e ();
+
+void new_za_f () {}
+[[arm::new("za")]] void new_za_f (); // { dg-error "cannot apply attribute 'new' to 'new_za_f' after the function has been defined" }
+
+//----------------------------------------------------------------------------
+
+[[arm::new("za")]] void shared_a ();
+void shared_a () [[arm::inout("za")]]; // { dg-error "conflicting types" }
+
+void shared_b () [[arm::inout("za")]];
+[[arm::new("za")]] void shared_b (); // { dg-error "conflicting types" }
+
+[[arm::new("za")]] void shared_c ();
+void shared_c () [[arm::in("za")]] {} // { dg-error "conflicting types" }
+
+void shared_d () [[arm::in("za")]];
+[[arm::new("za")]] void shared_d () {} // { dg-error "cannot create a new 'za' scope since 'za' is shared with callers" }
+
+[[arm::new("za")]] void shared_e () {}
+void shared_e () [[arm::out("za")]]; // { dg-error "conflicting types" }
+
+void shared_f () [[arm::out("za")]] {}
+[[arm::new("za")]] void shared_f (); // { dg-error "conflicting types" }
+
+[[arm::new("za")]] void shared_g () {}
+void shared_g () [[arm::preserves("za")]]; // { dg-error "conflicting types" }
+
+void shared_h () [[arm::preserves("za")]] {}
+[[arm::new("za")]] void shared_h (); // { dg-error "conflicting types" }
+
+//----------------------------------------------------------------------------
+
+[[arm::new("za")]] void contradiction_1 () [[arm::inout("za")]]; // { dg-error "cannot create a new 'za' scope since 'za' is shared with callers" }
+void contradiction_2 [[arm::new("za")]] () [[arm::inout("za")]]; // { dg-error "cannot create a new 'za' scope since 'za' is shared with callers" }
+[[arm::new("za")]] void contradiction_3 () [[arm::preserves("za")]]; // { dg-error "cannot create a new 'za' scope since 'za' is shared with callers" }
+void contradiction_4 [[arm::new("za")]] () [[arm::preserves("za")]]; // { dg-error "cannot create a new 'za' scope since 'za' is shared with callers" }
+
+int [[arm::new("za")]] int_attr; // { dg-warning "does not apply to types" }
+[[arm::new("za")]] int int_var_attr; // { dg-error "applies only to function definitions" }
+typedef void new_za_callback () [[arm::new("za")]]; // { dg-warning "does not apply to types" }
+[[arm::new("za")]] void (*new_za_var_callback) (); // { dg-error "applies only to function definitions" }
+
+//----------------------------------------------------------------------------
+
+[[arm::new("za")]] void complementary_1 () [[arm::streaming]] {}
+void complementary_2 [[arm::new("za")]] () [[arm::streaming]] {}
+[[arm::new("za")]] void complementary_3 () [[arm::streaming_compatible]] {}
+void complementary_4 [[arm::new("za")]] () [[arm::streaming_compatible]] {}
+
+//----------------------------------------------------------------------------
+
+#pragma GCC target "+nosme"
+
+[[arm::new("za")]] void bereft_1 ();
+[[arm::new("za")]] void bereft_2 () {} // { dg-error "functions with SME state require the ISA extension 'sme'" }
+void bereft_3 () [[arm::inout("za")]];
+void bereft_4 () [[arm::inout("za")]] {} // { dg-error "functions with SME state require the ISA extension 'sme'" }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/za_state_3.c b/gcc/testsuite/gcc.target/aarch64/sme/za_state_3.c
new file mode 100644
index 0000000..203f6ae
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/za_state_3.c
@@ -0,0 +1,31 @@
+// { dg-options "" }
+
+void normal_callee ();
+void in_callee () [[arm::in("za")]];
+void out_callee () [[arm::out("za")]];
+void inout_callee () [[arm::inout("za")]];
+void preserves_callee () [[arm::preserves("za")]];
+
+struct callbacks {
+ void (*normal_ptr) ();
+ void (*in_ptr) () [[arm::in("za")]];
+ void (*out_ptr) () [[arm::out("za")]];
+ void (*inout_ptr) () [[arm::inout("za")]];
+ void (*preserves_ptr) () [[arm::preserves("za")]];
+};
+
+void
+normal_caller (struct callbacks *c)
+{
+ normal_callee ();
+ in_callee (); // { dg-error {call to a function that shares 'za' state from a function that has no 'za' state} }
+ out_callee (); // { dg-error {call to a function that shares 'za' state from a function that has no 'za' state} }
+ inout_callee (); // { dg-error {call to a function that shares 'za' state from a function that has no 'za' state} }
+ preserves_callee (); // { dg-error {call to a function that shares SME state from a function that has no SME state} }
+
+ c->normal_ptr ();
+ c->in_ptr (); // { dg-error {call to a function that shares 'za' state from a function that has no 'za' state} }
+ c->out_ptr (); // { dg-error {call to a function that shares 'za' state from a function that has no 'za' state} }
+ c->inout_ptr (); // { dg-error {call to a function that shares 'za' state from a function that has no 'za' state} }
+ c->preserves_ptr (); // { dg-error {call to a function that shares SME state from a function that has no SME state} }
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/za_state_4.c b/gcc/testsuite/gcc.target/aarch64/sme/za_state_4.c
new file mode 100644
index 0000000..cec0abf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/za_state_4.c
@@ -0,0 +1,585 @@
+// { dg-options "-O -fno-optimize-sibling-calls" }
+// { dg-final { check-function-bodies "**" "" } }
+
+void private_za();
+void out_za() __arm_out("za");
+void in_za() __arm_in("za");
+void inout_za() __arm_inout("za");
+void preserves_za() __arm_preserves("za");
+
+/*
+** test1:
+** ret
+*/
+__arm_new("za") void test1()
+{
+}
+
+/*
+** test2:
+** ldr w0, \[x0\]
+** ret
+*/
+__arm_new("za") int test2(int *ptr)
+{
+ return *ptr;
+}
+
+/*
+** test3:
+** stp [^\n]+
+** mov x29, sp
+** bl private_za
+** (
+** mov w0, 0
+** ldp [^\n]+
+** |
+** ldp [^\n]+
+** mov w0, 0
+** )
+** ret
+*/
+__arm_new("za") int test3()
+{
+ private_za();
+ return 0;
+}
+
+/*
+** test4:
+** ...
+** mrs x0, tpidr2_el0
+** cbz x0, [^\n]+
+** bl __arm_tpidr2_save
+** msr tpidr2_el0, xzr
+** zero { za }
+** smstart za
+** bl in_za
+** smstop za
+** ldp [^\n]+
+** ret
+*/
+__arm_new("za") void test4()
+{
+ in_za(); // Uses zeroed contents.
+}
+
+/*
+** test5:
+** ...
+** mrs x0, tpidr2_el0
+** cbz x0, [^\n]+
+** bl __arm_tpidr2_save
+** msr tpidr2_el0, xzr
+** smstop za
+** bl private_za
+** smstart za
+** bl out_za
+** bl in_za
+** smstop za
+** bl private_za
+** ldp [^\n]+
+** ret
+*/
+__arm_new("za") void test5()
+{
+ private_za();
+ out_za();
+ in_za();
+ private_za();
+}
+
+// Despite the long test, there shouldn't be too much scope for variation
+// here. The point is both to test correctness and code quality.
+/*
+** test6:
+** stp [^\n]+
+** mov x29, sp
+** mrs x0, tpidr2_el0
+** cbz x0, [^\n]+
+** bl __arm_tpidr2_save
+** msr tpidr2_el0, xzr
+** smstart za
+** bl out_za
+** rdsvl (x[0-9]+), #1
+** mul (x[0-9]+), \1, \1
+** sub sp, sp, \2
+** mov (x[0-9]+), sp
+** stp \3, \1, \[x29, #?16\]
+** add (x[0-9]+), x29, #?16
+** msr tpidr2_el0, \4
+** bl private_za
+** (
+** add (x[0-9]+), x29, #?16
+** mrs (x[0-9]+), tpidr2_el0
+** cbnz \6, [^\n]+
+** smstart za
+** mov x0, \5
+** |
+** add x0, x29, #?16
+** mrs (x[0-9]+), tpidr2_el0
+** cbnz \6, [^\n]+
+** smstart za
+** )
+** bl __arm_tpidr2_restore
+** msr tpidr2_el0, xzr
+** bl in_za
+** smstop za
+** mov sp, x29
+** ldp [^\n]+
+** ret
+*/
+__arm_new("za") void test6()
+{
+ out_za();
+ private_za();
+ in_za();
+}
+
+// Rely on previous tests for the part leading up to the smstart.
+/*
+** test7:
+** ...
+** smstart za
+** bl out_za
+** bl in_za
+** smstop za
+** bl private_za
+** smstart za
+** bl out_za
+** bl in_za
+** smstop za
+** ldp [^\n]+
+** ret
+*/
+__arm_new("za") void test7()
+{
+ out_za();
+ in_za();
+ private_za();
+ out_za();
+ in_za();
+}
+
+/*
+** test8:
+** ...
+** smstart za
+** bl out_za
+** bl in_za
+** smstop za
+** bl private_za
+** smstart za
+** bl out_za
+** bl in_za
+** smstop za
+** bl private_za
+** ldp [^\n]+
+** ret
+*/
+__arm_new("za") void test8()
+{
+ out_za();
+ in_za();
+ private_za();
+ out_za();
+ in_za();
+ private_za();
+}
+
+/*
+** test9:
+** ...
+** msr tpidr2_el0, x[0-9]+
+** bl private_za
+** bl private_za
+** bl private_za
+** bl private_za
+** add x[0-9]+, x29, #?16
+** mrs x[0-9]+, tpidr2_el0
+** ...
+*/
+__arm_new("za") void test9()
+{
+ out_za();
+ private_za();
+ private_za();
+ private_za();
+ private_za();
+ in_za();
+}
+
+/*
+** test10:
+** ldr (w[0-9]+), \[x0\]
+** cbz \1, [^\n]+
+** ldr [^\n]+
+** add [^\n]+
+** str [^\n]+
+** ret
+** ...
+*/
+__arm_new("za") void test10(volatile int *ptr)
+{
+ if (__builtin_expect (*ptr != 0, 1))
+ *ptr = *ptr + 1;
+ else
+ inout_za();
+}
+
+/*
+** test11:
+** ...
+** ldr w[0-9]+, [^\n]+
+** add (w[0-9]+), [^\n]+
+** str \1, [^\n]+
+** ...
+** ret
+** mrs x[0-9]+, tpidr2_el0
+** ...
+** smstart za
+** bl inout_za
+** ldr (w[0-9]+), [^\n]+
+** cbnz \2, [^\n]+
+** smstop za
+** ...
+*/
+__arm_new("za") void test11(volatile int *ptr)
+{
+ if (__builtin_expect (*ptr == 0, 0))
+ do
+ inout_za();
+ while (*ptr);
+ else
+ *ptr += 1;
+}
+
+__arm_new("za") void test12(volatile int *ptr)
+{
+ do
+ {
+ inout_za();
+ private_za();
+ }
+ while (*ptr);
+ out_za();
+ in_za();
+}
+
+/*
+** test13:
+** stp [^\n]+
+** ...
+** stp [^\n]+
+** ...
+** bl __arm_tpidr2_save
+** ...
+** msr tpidr2_el0, x[0-9]+
+** bl private_za
+** ...
+** mrs x[0-9]+, tpidr2_el0
+** ...
+** bl inout_za
+** ...
+** msr tpidr2_el0, x[0-9]+
+** ...
+** bl private_za
+** ...
+** cbnz [^\n]+
+** smstart za
+** msr tpidr2_el0, xzr
+** bl out_za
+** bl in_za
+** ...
+** smstop za
+** ...
+*/
+__arm_new("za") void test13(volatile int *ptr)
+{
+ do
+ {
+ private_za();
+ inout_za();
+ private_za();
+ }
+ while (*ptr);
+ out_za();
+ in_za();
+}
+
+/*
+** test14:
+** ...
+** bl __arm_tpidr2_save
+** ...
+** smstart za
+** bl inout_za
+** ldr [^\n]+
+** cbnz [^\n]+
+** bl out_za
+** bl in_za
+** smstop za
+** ...
+*/
+__arm_new("za") void test14(volatile int *ptr)
+{
+ do
+ inout_za();
+ while (*ptr);
+ out_za();
+ in_za();
+}
+
+/*
+** test15:
+** ...
+** bl __arm_tpidr2_save
+** ...
+** smstart za
+** bl out_za
+** bl in_za
+** ldr [^\n]+
+** cbnz [^\n]+
+** smstop za
+** bl private_za
+** ldr [^\n]+
+** ldp [^\n]+
+** ret
+*/
+__arm_new("za") void test15(volatile int *ptr)
+{
+ do
+ {
+ out_za();
+ in_za();
+ }
+ while (*ptr);
+ private_za();
+}
+
+/*
+** test16:
+** ...
+** bl __arm_tpidr2_save
+** ...
+** smstart za
+** b [^\n]+
+-- loop:
+** ...
+** mrs x[0-9]+, tpidr2_el0
+** ...
+** msr tpidr2_el0, xzr
+-- loop_entry:
+** bl inout_za
+** ...
+** msr tpidr2_el0, x[0-9]+
+** bl private_za
+** ldr [^\n]+
+** cbnz [^\n]+
+** msr tpidr2_el0, xzr
+** smstop za
+** bl private_za
+** ...
+*/
+__arm_new("za") void test16(volatile int *ptr)
+{
+ do
+ {
+ inout_za();
+ private_za();
+ }
+ while (*ptr);
+ private_za();
+}
+
+/*
+** test17:
+** ...
+** bl private_za
+** ldr [^\n]+
+** cbnz [^\n]+
+** ...
+** msr tpidr2_el0, xzr
+** ...
+** smstop za
+** ...
+*/
+__arm_new("za") void test17(volatile int *ptr)
+{
+ do
+ {
+ inout_za();
+ private_za();
+ }
+ while (*ptr);
+}
+
+/*
+** test18:
+** ldr w[0-9]+, [^\n]+
+** cbnz w[0-9]+, [^\n]+
+** ret
+** ...
+** smstop za
+** bl private_za
+** ...
+*/
+__arm_new("za") void test18(volatile int *ptr)
+{
+ if (__builtin_expect (*ptr, 0))
+ {
+ out_za();
+ in_za();
+ private_za();
+ }
+}
+
+/*
+** test19:
+** ...
+** ldr w[0-9]+, [^\n]+
+** cbz w[0-9]+, [^\n]+
+** mrs x[0-9]+, tpidr2_el0
+** ...
+** smstop za
+** bl private_za
+** ...
+*/
+__arm_new("za") void test19(volatile int *ptr)
+{
+ if (__builtin_expect (*ptr != 0, 1))
+ private_za();
+ else
+ do
+ {
+ inout_za();
+ private_za();
+ }
+ while (*ptr);
+}
+
+/*
+** test20:
+** ...
+** bl a20
+** (?:(?!x0).)*
+** bl b20
+** ...
+** mov ([wx][0-9]+), [wx]0
+** ...
+** bl __arm_tpidr2_restore
+** ...
+** mov [wx]0, \1
+** ...
+** bl c20
+** ...
+*/
+__arm_new("za") void test20()
+{
+ extern int a20() __arm_inout("za");
+ extern int b20(int);
+ extern void c20(int) __arm_inout("za");
+ c20(b20(a20()));
+}
+
+/*
+** test21:
+** ...
+** bl a21
+** (?:(?!x0).)*
+** bl b21
+** ...
+** mov (x[0-9]+), x0
+** ...
+** bl __arm_tpidr2_restore
+** ...
+** mov x0, \1
+** ...
+** bl c21
+** ...
+*/
+__arm_new("za") void test21()
+{
+ extern __UINT64_TYPE__ a21() __arm_inout("za");
+ extern __UINT64_TYPE__ b21(__UINT64_TYPE__);
+ extern void c21(__UINT64_TYPE__) __arm_inout("za");
+ c21(b21(a21()));
+}
+
+/*
+** test22:
+** (?:(?!rdsvl).)*
+** rdsvl x[0-9]+, #1
+** (?:(?!rdsvl).)*
+*/
+__arm_new("za") void test22(volatile int *ptr)
+{
+ inout_za();
+ if (*ptr)
+ *ptr += 1;
+ else
+ private_za();
+ private_za();
+ in_za();
+}
+
+/*
+** test23:
+** (?:(?!__arm_tpidr2_save).)*
+** bl __arm_tpidr2_save
+** (?:(?!__arm_tpidr2_save).)*
+*/
+__arm_new("za") void test23(volatile int *ptr)
+{
+ if (*ptr)
+ *ptr += 1;
+ else
+ inout_za();
+ inout_za();
+}
+
+/*
+** test24:
+** ...
+** bl in_za
+** ...
+** incb x1
+** ...
+** bl out_za
+** bl inout_za
+** ...
+** msr tpidr2_el0, x[0-9]+
+** ...
+** bl private_za
+** ...
+** mrs x[0-9]+, tpidr2_el0
+** ...
+** incb x1
+** ...
+** msr tpidr2_el0, x[0-9]+
+** ...
+** bl private_za
+** ...
+** mrs x[0-9]+, tpidr2_el0
+** ...
+** incb x1
+** ...
+** smstop za
+** ...
+** bl private_za
+** ...
+** ret
+*/
+__arm_new("za") void test24()
+{
+ in_za();
+ asm ("incb\tx1" ::: "x1", "za");
+ out_za();
+ inout_za();
+ private_za();
+ asm ("incb\tx1" ::: "x1", "za");
+ private_za();
+ asm ("incb\tx1" ::: "x1", "za");
+ in_za();
+ private_za();
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/za_state_5.c b/gcc/testsuite/gcc.target/aarch64/sme/za_state_5.c
new file mode 100644
index 0000000..d54840d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/za_state_5.c
@@ -0,0 +1,595 @@
+// { dg-options "-O2 -fno-optimize-sibling-calls" }
+// { dg-final { check-function-bodies "**" "" } }
+
+void private_za();
+void out_za() __arm_out("za");
+void in_za() __arm_in("za");
+void inout_za() __arm_inout("za");
+void preserves_za() __arm_preserves("za");
+
+/*
+** test1:
+** ret
+*/
+void test1() __arm_inout("za")
+{
+}
+
+/*
+** test2:
+** ldr w0, \[x0\]
+** ret
+*/
+int test2(int *ptr) __arm_inout("za")
+{
+ return *ptr;
+}
+
+/*
+** test3:
+** ...
+** sub sp, sp, x[0-9]+
+** ...
+** msr tpidr2_el0, x[0-9]+
+** ...
+** bl private_za
+** ...
+** mrs x[0-9]+, tpidr2_el0
+** ...
+** smstart za
+** ...
+** bl __arm_tpidr2_restore
+** ...
+** msr tpidr2_el0, xzr
+** ...
+*/
+int test3() __arm_inout("za")
+{
+ private_za();
+ return 0;
+}
+
+/*
+** test4:
+** stp [^\n]+
+** [^\n]+
+** bl in_za
+** ldp [^\n]+
+** ret
+*/
+void test4() __arm_inout("za")
+{
+ in_za();
+}
+
+/*
+** test5:
+** ...
+** smstop za
+** ...
+** bl private_za
+** smstart za
+** bl out_za
+** bl in_za
+** ...
+** sub sp, sp, x[0-9]+
+** ...
+** msr tpidr2_el0, x[0-9]+
+** ...
+** bl private_za
+** ...
+** mrs x[0-9]+, tpidr2_el0
+** ...
+** smstart za
+** ...
+** bl __arm_tpidr2_restore
+** ...
+** msr tpidr2_el0, xzr
+** ...
+*/
+void test5() __arm_inout("za")
+{
+ private_za();
+ out_za();
+ in_za();
+ private_za();
+}
+
+/*
+** test6:
+** ...
+** bl out_za
+** ...
+** sub sp, sp, x[0-9]+
+** ...
+** msr tpidr2_el0, x[0-9]+
+** ...
+** bl private_za
+** ...
+** mrs x[0-9]+, tpidr2_el0
+** ...
+** smstart za
+** ...
+** bl __arm_tpidr2_restore
+** ...
+** msr tpidr2_el0, xzr
+** ...
+** bl in_za
+** ...
+*/
+void test6() __arm_inout("za")
+{
+ out_za();
+ private_za();
+ in_za();
+}
+
+/*
+** test7:
+** stp [^\n]+
+** [^\n]+
+** bl out_za
+** bl in_za
+** smstop za
+** bl private_za
+** smstart za
+** bl out_za
+** bl in_za
+** ldp [^\n]+
+** ret
+*/
+void test7() __arm_inout("za")
+{
+ out_za();
+ in_za();
+ private_za();
+ out_za();
+ in_za();
+}
+
+/*
+** test8:
+** stp [^\n]+
+** [^\n]+
+** bl out_za
+** bl in_za
+** smstop za
+** bl private_za
+** smstart za
+** bl out_za
+** bl in_za
+** ...
+** sub sp, sp, x[0-9]+
+** ...
+** msr tpidr2_el0, x[0-9]+
+** ...
+** bl private_za
+** ...
+** mrs x[0-9]+, tpidr2_el0
+** ...
+** smstart za
+** ...
+** bl __arm_tpidr2_restore
+** ...
+** msr tpidr2_el0, xzr
+** ...
+** ret
+*/
+void test8() __arm_inout("za")
+{
+ out_za();
+ in_za();
+ private_za();
+ out_za();
+ in_za();
+ private_za();
+}
+
+/*
+** test9:
+** stp [^\n]+
+** [^\n]+
+** bl out_za
+** ...
+** msr tpidr2_el0, x[0-9]+
+** bl private_za
+** bl private_za
+** bl private_za
+** bl private_za
+** ...
+** mrs x[0-9]+, tpidr2_el0
+** ...
+** smstart za
+** ...
+** bl __arm_tpidr2_restore
+** ...
+** msr tpidr2_el0, xzr
+** ...
+*/
+void test9() __arm_inout("za")
+{
+ out_za();
+ private_za();
+ private_za();
+ private_za();
+ private_za();
+ in_za();
+}
+
+/*
+** test10:
+** ldr (w[0-9]+), \[x0\]
+** cbz \1, [^\n]+
+** ldr [^\n]+
+** add [^\n]+
+** str [^\n]+
+** ret
+** ...
+*/
+void test10(volatile int *ptr) __arm_inout("za")
+{
+ if (__builtin_expect (*ptr != 0, 1))
+ *ptr = *ptr + 1;
+ else
+ inout_za();
+}
+
+/*
+** test11:
+** (?!.*(\t__arm|\tza|tpidr2_el0)).*
+*/
+void test11(volatile int *ptr) __arm_inout("za")
+{
+ if (__builtin_expect (*ptr == 0, 0))
+ do
+ inout_za();
+ while (*ptr);
+ else
+ *ptr += 1;
+}
+
+void test12(volatile int *ptr) __arm_inout("za")
+{
+ do
+ {
+ inout_za();
+ private_za();
+ }
+ while (*ptr);
+ out_za();
+ in_za();
+}
+
+/*
+** test13:
+** stp [^\n]+
+** ...
+** stp [^\n]+
+** ...
+-- loop:
+** mrs x[0-9]+, tpidr2_el0
+** ...
+** smstart za
+** ...
+** bl __arm_tpidr2_restore
+** ...
+** msr tpidr2_el0, xzr
+** bl inout_za
+** ...
+** msr tpidr2_el0, x[0-9]+
+** ...
+** bl private_za
+** ldr [^\n]+
+** cbnz [^\n]+
+** smstart za
+** msr tpidr2_el0, xzr
+** bl out_za
+** bl in_za
+** [^\n]+
+** [^\n]+
+** ldp [^\n]+
+** ret
+*/
+void test13(volatile int *ptr) __arm_inout("za")
+{
+ do
+ {
+ private_za();
+ inout_za();
+ private_za();
+ }
+ while (*ptr);
+ out_za();
+ in_za();
+}
+
+/*
+** test14:
+** ...
+** bl inout_za
+** ldr [^\n]+
+** cbnz [^\n]+
+** bl out_za
+** bl in_za
+** ...
+*/
+void test14(volatile int *ptr) __arm_inout("za")
+{
+ do
+ inout_za();
+ while (*ptr);
+ out_za();
+ in_za();
+}
+
+/*
+** test15:
+** ...
+** bl out_za
+** bl in_za
+** ldr [^\n]+
+** cbnz [^\n]+
+** ...
+** stp [^\n]+
+** ...
+** msr tpidr2_el0, [^\n]+
+** ...
+** bl private_za
+** ...
+** mrs x[0-9]+, tpidr2_el0
+** ...
+** bl __arm_tpidr2_restore
+** ...
+** msr tpidr2_el0, xzr
+** ...
+*/
+void test15(volatile int *ptr) __arm_inout("za")
+{
+ do
+ {
+ out_za();
+ in_za();
+ }
+ while (*ptr);
+ private_za();
+}
+
+/*
+** test16:
+** stp [^\n]+
+** ...
+** stp [^\n]+
+** ...
+** b [^\n]+
+-- loop:
+** ...
+** mrs x[0-9]+, tpidr2_el0
+** ...
+** msr tpidr2_el0, xzr
+-- loop_entry:
+** bl inout_za
+** ...
+** msr tpidr2_el0, x[0-9]+
+** ...
+** bl private_za
+** ...
+** bl private_za
+** ...
+** mrs x[0-9]+, tpidr2_el0
+** ...
+** bl __arm_tpidr2_restore
+** ...
+** msr tpidr2_el0, xzr
+** ...
+*/
+void test16(volatile int *ptr) __arm_inout("za")
+{
+ do
+ {
+ inout_za();
+ private_za();
+ }
+ while (*ptr);
+ private_za();
+}
+
+/*
+** test17:
+** ...
+-- loop:
+** bl inout_za
+** ...
+** msr tpidr2_el0, x[0-9]+
+** ...
+** bl private_za
+** ...
+** mrs x[0-9]+, tpidr2_el0
+** ...
+** smstart za
+** ...
+** bl __arm_tpidr2_restore
+** ...
+** msr tpidr2_el0, xzr
+** ...
+** cbnz [^\n]+
+** [^\n]+
+** [^\n]+
+** ldp [^\n]+
+** ret
+*/
+void test17(volatile int *ptr) __arm_inout("za")
+{
+ do
+ {
+ inout_za();
+ private_za();
+ while (*ptr)
+ ptr += 1;
+ }
+ while (*ptr);
+}
+
+/*
+** test18:
+** ldr w[0-9]+, [^\n]+
+** cbnz w[0-9]+, [^\n]+
+** ret
+** ...
+** bl out_za
+** bl in_za
+** ...
+** msr tpidr2_el0, x[0-9]+
+** ...
+** bl private_za
+** ...
+** mrs x[0-9]+, tpidr2_el0
+** ...
+** bl __arm_tpidr2_restore
+** ...
+** msr tpidr2_el0, xzr
+** ...
+*/
+void test18(volatile int *ptr) __arm_inout("za")
+{
+ if (__builtin_expect (*ptr, 0))
+ {
+ out_za();
+ in_za();
+ private_za();
+ }
+}
+
+void test19(volatile int *ptr) __arm_inout("za")
+{
+ if (__builtin_expect (*ptr != 0, 1))
+ private_za();
+ else
+ do
+ {
+ inout_za();
+ private_za();
+ }
+ while (*ptr);
+}
+
+/*
+** test20:
+** ...
+** bl a20
+** (?:(?!x0).)*
+** bl b20
+** ...
+** mov ([wx][0-9]+), [wx]0
+** ...
+** bl __arm_tpidr2_restore
+** ...
+** mov [wx]0, \1
+** ...
+** bl c20
+** ...
+*/
+void test20() __arm_inout("za")
+{
+ extern int a20() __arm_inout("za");
+ extern int b20(int);
+ extern void c20(int) __arm_inout("za");
+ c20(b20(a20()));
+}
+
+/*
+** test21:
+** ...
+** bl a21
+** (?:(?!x0).)*
+** bl b21
+** ...
+** mov (x[0-9]+), x0
+** ...
+** bl __arm_tpidr2_restore
+** ...
+** mov x0, \1
+** ...
+** bl c21
+** ...
+*/
+void test21() __arm_inout("za")
+{
+ extern __UINT64_TYPE__ a21() __arm_inout("za");
+ extern __UINT64_TYPE__ b21(__UINT64_TYPE__);
+ extern void c21(__UINT64_TYPE__) __arm_inout("za");
+ c21(b21(a21()));
+}
+
+/*
+** test22:
+** (?:(?!rdsvl).)*
+** rdsvl x[0-9]+, #1
+** (?:(?!rdsvl).)*
+*/
+void test22(volatile int *ptr) __arm_inout("za")
+{
+ inout_za();
+ if (*ptr)
+ *ptr += 1;
+ else
+ private_za();
+ private_za();
+ in_za();
+}
+
+void test23(volatile int *ptr) __arm_inout("za")
+{
+ if (*ptr)
+ *ptr += 1;
+ else
+ inout_za();
+ inout_za();
+}
+
+/*
+** test24:
+** ...
+** bl in_za
+** ...
+** incb x1
+** ...
+** bl out_za
+** bl inout_za
+** ...
+** msr tpidr2_el0, x[0-9]+
+** ...
+** bl private_za
+** ...
+** mrs x[0-9]+, tpidr2_el0
+** ...
+** incb x1
+** ...
+** msr tpidr2_el0, x[0-9]+
+** ...
+** bl private_za
+** ...
+** mrs x[0-9]+, tpidr2_el0
+** ...
+** incb x1
+** ...
+** msr tpidr2_el0, x[0-9]+
+** ...
+** bl private_za
+** ...
+** mrs x[0-9]+, tpidr2_el0
+** ...
+** ret
+*/
+void test24() __arm_inout("za")
+{
+ in_za();
+ asm ("incb\tx1" ::: "x1", "za");
+ out_za();
+ inout_za();
+ private_za();
+ asm ("incb\tx1" ::: "x1", "za");
+ private_za();
+ asm ("incb\tx1" ::: "x1", "za");
+ in_za();
+ private_za();
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/za_state_6.c b/gcc/testsuite/gcc.target/aarch64/sme/za_state_6.c
new file mode 100644
index 0000000..d5b226a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/za_state_6.c
@@ -0,0 +1,23 @@
+// { dg-options "-O -fno-optimize-sibling-calls -fomit-frame-pointer" }
+
+void private_za();
+void out_za() __arm_out("za");
+void in_za() __arm_in("za");
+
+__arm_new("za") void test20(volatile int *ptr)
+{
+ if (*ptr)
+ out_za();
+ else
+ *ptr += 1;
+ *ptr += 1;
+ if (*ptr)
+ in_za();
+ else
+ *ptr += 1;
+}
+
+// { dg-final { scan-assembler {\tbl\t__arm_tpidr2_save\n} } }
+// { dg-final { scan-assembler {\tsmstart\tza\n} } }
+// { dg-final { scan-assembler {\tsmstop\tza\n} } }
+// { dg-final { scan-assembler-not {\tsub\tsp, sp, x[0-9]+\n} } }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/zt0_state_1.c b/gcc/testsuite/gcc.target/aarch64/sme/zt0_state_1.c
new file mode 100644
index 0000000..05da587
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/zt0_state_1.c
@@ -0,0 +1,65 @@
+// { dg-options "" }
+
+#pragma GCC target "+sme2"
+
+void share_za_zt0_a() __arm_inout("za", "zt0");
+void share_za_zt0_b() __arm_inout("za", "zt0");
+
+void share_za_preserve_zt0() __arm_inout("za") __arm_preserves("zt0");
+void share_zt0_preserve_za() __arm_inout("zt0") __arm_preserves("za");
+
+__arm_new("za", "zt0") void new_za_zt0_a() {
+ share_za_zt0_a();
+ share_za_zt0_b();
+}
+
+__arm_new("zt0", "za") void new_za_zt0_b() {
+ share_za_zt0_a();
+ share_za_zt0_b();
+}
+
+__arm_new("zt0") void new_za_zt0_c();
+__arm_new("za") void new_za_zt0_c() {
+ share_za_zt0_a();
+ share_za_zt0_b();
+}
+
+__arm_new("za") void new_za_zt0_d();
+__arm_new("zt0") void new_za_zt0_d() {
+ share_za_zt0_a();
+ share_za_zt0_b();
+}
+
+__arm_new("zt0", "za") void new_za_zt0_e();
+void new_za_zt0_e() {
+ share_za_zt0_a();
+ share_za_zt0_b();
+}
+
+__arm_new("zt0") void new_zt0_a() {
+ share_za_zt0_a(); // { dg-error {call to a function that shares 'za' state from a function that has no 'za' state} }
+}
+
+__arm_new("zt0") void new_zt0_b();
+void new_zt0_b() {
+ share_za_preserve_zt0(); // { dg-error {call to a function that shares 'za' state from a function that has no 'za' state} }
+}
+
+__arm_new("zt0") void new_zt0_c();
+void new_zt0_c() {
+ share_zt0_preserve_za();
+}
+
+__arm_new("za") void new_za_a() {
+ share_za_zt0_a(); // { dg-error {call to a function that shares 'zt0' state from a function that has no 'zt0' state} }
+}
+
+__arm_new("za") void new_za_b();
+void new_za_b() {
+ share_za_preserve_zt0();
+}
+
+__arm_new("za") void new_za_c();
+void new_za_c() {
+ share_zt0_preserve_za(); // { dg-error {call to a function that shares 'zt0' state from a function that has no 'zt0' state} }
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/zt0_state_2.c b/gcc/testsuite/gcc.target/aarch64/sme/zt0_state_2.c
new file mode 100644
index 0000000..17cd844
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/zt0_state_2.c
@@ -0,0 +1,31 @@
+// { dg-options "" }
+
+void invalid_a() __arm_inout("za");
+void invalid_a() __arm_inout("za", "zt0"); // { dg-error {conflicting types} }
+
+void invalid_b() __arm_inout("za", "zt0");
+void invalid_b() __arm_inout("zt0"); // { dg-error {conflicting types} }
+
+void invalid_c() __arm_in("zt0") __arm_inout("za");
+void invalid_c() __arm_inout("zt0", "za"); // { dg-error {conflicting types} }
+
+void invalid_d() __arm_inout("zt0");
+void invalid_d() __arm_out("zt0"); // { dg-error {conflicting types} }
+
+void invalid_e() __arm_in("zt0");
+void invalid_e() __arm_out("zt0"); // { dg-error {conflicting types} }
+
+void invalid_f() __arm_in("zt0");
+void invalid_f() __arm_preserves("zt0"); // { dg-error {conflicting types} }
+
+void valid_a() __arm_inout("zt0") __arm_inout("za");
+void valid_a() __arm_inout("zt0", "za");
+
+void valid_b() __arm_inout("za") __arm_inout("zt0");
+void valid_b() __arm_inout("zt0") __arm_inout("za");
+
+void valid_c() __arm_inout("za", "zt0");
+void valid_c() __arm_inout("zt0", "za");
+
+void valid_d() __arm_inout("zt0", "za");
+void valid_d() __arm_inout("za", "zt0");
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/zt0_state_3.c b/gcc/testsuite/gcc.target/aarch64/sme/zt0_state_3.c
new file mode 100644
index 0000000..2489ea2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/zt0_state_3.c
@@ -0,0 +1,6 @@
+// { dg-options "" }
+
+#pragma GCC target "+sme2"
+
+void foo() __arm_inout("zt0");
+void bar() __arm_inout("za", "zt0") { foo(); } // { dg-message {call to a function that shares state other than 'za' from a function that has 'za' state} }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/zt0_state_4.c b/gcc/testsuite/gcc.target/aarch64/sme/zt0_state_4.c
new file mode 100644
index 0000000..2999900
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/zt0_state_4.c
@@ -0,0 +1,53 @@
+// { dg-options "-O -fomit-frame-pointer -fno-optimize-sibling-calls" }
+// { dg-final { check-function-bodies "**" "" } }
+
+#pragma GCC target "+sme2"
+
+void inout_za() __arm_inout("za");
+void inout_za_zt0() __arm_inout("za", "zt0");
+
+void inout_za_out_zt0() __arm_inout("za") __arm_out("zt0");
+void inout_za_in_zt0() __arm_inout("za") __arm_in("zt0");
+
+/*
+** test1:
+** str x30, \[sp, #?-16\]!
+** bl inout_za_zt0
+** ldr x30, \[sp\], #?16
+** ret
+*/
+void test1() __arm_inout("za", "zt0")
+{
+ inout_za_zt0();
+}
+
+/*
+** test2:
+** ...
+** str zt0, \[(?:x[0-9]+|sp)\]
+** ...
+** bl inout_za
+** ...
+** ldr zt0, \[(?:x[0-9]+|sp)\]
+** ...
+** ret
+*/
+void test2() __arm_inout("za", "zt0")
+{
+ inout_za();
+}
+
+/*
+** test3:
+** ...
+** bl inout_za
+** bl inout_za_out_zt0
+** [^\n]+
+** ret
+*/
+void test3() __arm_inout("za", "zt0")
+{
+ inout_za_in_zt0();
+ inout_za();
+ inout_za_out_zt0();
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/zt0_state_5.c b/gcc/testsuite/gcc.target/aarch64/sme/zt0_state_5.c
new file mode 100644
index 0000000..e18b395
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/zt0_state_5.c
@@ -0,0 +1,260 @@
+// { dg-options "-O -fno-optimize-sibling-calls" }
+// { dg-final { check-function-bodies "**" "" } }
+
+#pragma GCC target "+sme2"
+
+void private_zt0();
+void out_zt0() __arm_out("zt0");
+void in_zt0() __arm_in("zt0");
+void inout_zt0() __arm_inout("zt0");
+void preserves_zt0() __arm_preserves("zt0");
+
+/*
+** test1:
+** ret
+*/
+__arm_new("zt0") void test1()
+{
+}
+
+/*
+** test2:
+** ldr w0, \[x0\]
+** ret
+*/
+__arm_new("zt0") int test2(int *ptr)
+{
+ return *ptr;
+}
+
+/*
+** test3:
+** stp [^\n]+
+** mov x29, sp
+** bl private_zt0
+** (
+** mov w0, 0
+** ldp [^\n]+
+** |
+** ldp [^\n]+
+** mov w0, 0
+** )
+** ret
+*/
+__arm_new("zt0") int test3()
+{
+ private_zt0();
+ return 0;
+}
+
+/*
+** test4:
+** ...
+** mrs x0, tpidr2_el0
+** cbz x0, [^\n]+
+** bl __arm_tpidr2_save
+** msr tpidr2_el0, xzr
+** smstart za
+** bl in_zt0
+** smstop za
+** ldp [^\n]+
+** ret
+*/
+__arm_new("zt0") void test4()
+{
+ in_zt0(); // Uses zeroed contents.
+}
+
+/*
+** test5:
+** ...
+** mrs x0, tpidr2_el0
+** cbz x0, [^\n]+
+** bl __arm_tpidr2_save
+** msr tpidr2_el0, xzr
+** smstop za
+** bl private_zt0
+** smstart za
+** bl out_zt0
+** bl in_zt0
+** ...
+** smstop za
+** bl private_zt0
+** ldp [^\n]+
+** ret
+*/
+__arm_new("zt0") void test5()
+{
+ private_zt0();
+ out_zt0();
+ in_zt0();
+ private_zt0();
+}
+
+// Despite the long test, there shouldn't be too much scope for variation
+// here. The point is both to test correctness and code quality.
+/*
+** test6:
+** stp [^\n]+
+** mov x29, sp
+** mrs x0, tpidr2_el0
+** cbz x0, [^\n]+
+** bl __arm_tpidr2_save
+** msr tpidr2_el0, xzr
+** smstart za
+** bl out_zt0
+** ...
+** str zt0, [^\n]+
+** smstop za
+** bl private_zt0
+** smstart za
+** ...
+** ldr zt0, [^\n]+
+** bl in_zt0
+** smstop za
+** ldp [^\n]+
+** ret
+*/
+__arm_new("zt0") void test6()
+{
+ out_zt0();
+ private_zt0();
+ in_zt0();
+}
+
+// Rely on previous tests for the part leading up to the smstart.
+/*
+** test7:
+** ...
+** smstart za
+** bl out_zt0
+** bl in_zt0
+** ...
+** smstop za
+** bl private_zt0
+** smstart za
+** bl out_zt0
+** bl in_zt0
+** smstop za
+** ldp [^\n]+
+** ret
+*/
+__arm_new("zt0") void test7()
+{
+ out_zt0();
+ in_zt0();
+ private_zt0();
+ out_zt0();
+ in_zt0();
+}
+
+/*
+** test8:
+** ...
+** smstart za
+** bl out_zt0
+** bl in_zt0
+** ...
+** smstop za
+** bl private_zt0
+** smstart za
+** bl out_zt0
+** bl in_zt0
+** ...
+** smstop za
+** bl private_zt0
+** ldp [^\n]+
+** ret
+*/
+__arm_new("zt0") void test8()
+{
+ out_zt0();
+ in_zt0();
+ private_zt0();
+ out_zt0();
+ in_zt0();
+ private_zt0();
+}
+
+/*
+** test9:
+** ...
+** str zt0, [^\n]+
+** smstop za
+** bl private_zt0
+** bl private_zt0
+** bl private_zt0
+** bl private_zt0
+** smstart za
+** ...
+** ldr zt0, [^\n]+
+** bl in_zt0
+** smstop za
+** ...
+*/
+__arm_new("zt0") void test9()
+{
+ out_zt0();
+ private_zt0();
+ private_zt0();
+ private_zt0();
+ private_zt0();
+ in_zt0();
+}
+
+/*
+** test10:
+** ldr (w[0-9]+), \[x0\]
+** cbz \1, [^\n]+
+** ldr [^\n]+
+** add [^\n]+
+** str [^\n]+
+** ret
+** ...
+*/
+__arm_new("zt0") void test10(volatile int *ptr)
+{
+ if (__builtin_expect (*ptr != 0, 1))
+ *ptr = *ptr + 1;
+ else
+ inout_zt0();
+}
+
+/*
+** test11:
+** ...
+** ldr w[0-9]+, [^\n]+
+** add (w[0-9]+), [^\n]+
+** str \1, [^\n]+
+** ...
+** ret
+** mrs x[0-9]+, tpidr2_el0
+** ...
+** smstart za
+** bl inout_zt0
+** ldr (w[0-9]+), [^\n]+
+** cbnz \2, [^\n]+
+** smstop za
+** ...
+*/
+__arm_new("zt0") void test11(volatile int *ptr)
+{
+ if (__builtin_expect (*ptr == 0, 0))
+ do
+ inout_zt0();
+ while (*ptr);
+ else
+ *ptr += 1;
+}
+
+__arm_new("zt0") void test12(volatile int *ptr)
+{
+ do
+ {
+ inout_zt0();
+ private_zt0();
+ }
+ while (*ptr);
+ out_zt0();
+ in_zt0();
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme/zt0_state_6.c b/gcc/testsuite/gcc.target/aarch64/sme/zt0_state_6.c
new file mode 100644
index 0000000..c62a804
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme/zt0_state_6.c
@@ -0,0 +1,54 @@
+// { dg-options "-O -fomit-frame-pointer -fno-optimize-sibling-calls" }
+// { dg-final { check-function-bodies "**" "" } }
+
+#pragma GCC target "+sme2"
+
+void inout_zt0() __arm_inout("zt0");
+void out_zt0() __arm_out("zt0");
+void normal();
+
+/*
+** test1:
+** str x30, \[sp, #?-16\]!
+** bl inout_zt0
+** ldr x30, \[sp\], #?16
+** ret
+*/
+void test1() __arm_inout("zt0")
+{
+ inout_zt0();
+}
+
+/*
+** test2:
+** str x30, \[sp, #?-80\]!
+** add (x[0-9]+), sp, #?16
+** str zt0, \[\1\]
+** smstop za
+** bl normal
+** smstart za
+** add (x[0-9]+), sp, #?16
+** ldr zt0, \[\1\]
+** ldr x30, \[sp\], #?80
+** ret
+*/
+void test2() __arm_inout("zt0")
+{
+ normal();
+}
+
+/*
+** test3:
+** ...
+** smstop za
+** bl normal
+** smstart za
+** bl out_zt0
+** ldr [^\n]+
+** ret
+*/
+void test3() __arm_inout("zt0")
+{
+ normal();
+ out_zt0();
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/aarch64-sme2-acle-asm.exp b/gcc/testsuite/gcc.target/aarch64/sme2/aarch64-sme2-acle-asm.exp
new file mode 100644
index 0000000..5b8cfe4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/aarch64-sme2-acle-asm.exp
@@ -0,0 +1,81 @@
+# Assembly-based regression-test driver for the SME2 ACLE.
+# Copyright (C) 2009-2023 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>. */
+
+# GCC testsuite that uses the `dg.exp' driver.
+
+# Exit immediately if this isn't an AArch64 target.
+if {![istarget aarch64*-*-*] } {
+ return
+}
+
+# Load support procs.
+load_lib gcc-dg.exp
+
+# Initialize `dg'.
+dg-init
+
+# Force SME2 if we're not testing it already.
+if { [check_effective_target_aarch64_sme2] } {
+ set sme2_flags ""
+} else {
+ set sme2_flags "-march=armv9-a+sme2"
+}
+
+# Turn off any codegen tweaks by default that may affect expected assembly.
+# Tests relying on those should turn them on explicitly.
+set sme2_flags "$sme2_flags -mtune=generic -moverride=tune=none"
+
+global gcc_runtest_parallelize_limit_minor
+if { [info exists gcc_runtest_parallelize_limit_minor] } {
+ set old_limit_minor $gcc_runtest_parallelize_limit_minor
+ set gcc_runtest_parallelize_limit_minor 1
+}
+
+torture-init
+set-torture-options {
+ "-std=c90 -O0 -g"
+ "-std=c99 -Og -g"
+ "-std=c11 -Os -g"
+ "-std=c23 -O2 -fno-schedule-insns -fno-schedule-insns2 -DCHECK_ASM --save-temps"
+ "-std=gnu90 -O3 -g"
+ "-std=gnu23 -Ofast -g"
+} {
+ "-DTEST_FULL"
+ "-DTEST_OVERLOADS"
+}
+
+# Main loop.
+set files [glob -nocomplain $srcdir/$subdir/acle-asm/*.c]
+set save-dg-do-what-default ${dg-do-what-default}
+if { [check_effective_target_aarch64_asm_sme2_ok] } {
+ set dg-do-what-default assemble
+} else {
+ set dg-do-what-default compile
+}
+gcc-dg-runtest [lsort $files] "" "$sme2_flags -fno-ipa-icf"
+set dg-do-what-default ${save-dg-do-what-default}
+
+torture-finish
+
+if { [info exists gcc_runtest_parallelize_limit_minor] } {
+ set gcc_runtest_parallelize_limit_minor $old_limit_minor
+}
+
+# All done.
+dg-finish
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s16_x2.c
new file mode 100644
index 0000000..d193bfd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s16_x2.c
@@ -0,0 +1,115 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_z24_z24_z0:
+** add {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z0, svint16x2_t, svint16_t, z24,
+ svadd_single_s16_x2 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** |
+** add {z28\.h - z29\.h}, {z28\.h - z29\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z28_z0, svint16x2_t, svint16_t, z24,
+ svadd_single_s16_x2 (z28, z0),
+ svadd (z28, z0))
+
+/*
+** add_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** add {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z1_z0, svint16x2_t, svint16_t, z24,
+ svadd_single_s16_x2 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z1_z24_z0:
+** add {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z24_z0, svint16x2_t, svint16_t, z1,
+ svadd_single_s16_x2 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z1_z0, svint16x2_t, svint16_t, z1,
+ svadd_single_s16_x2 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z18_z18_z0:
+** add {z18\.h - z19\.h}, {z18\.h - z19\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (add_z18_z18_z0, svint16x2_t, svint16_t, z18,
+ svadd_single_s16_x2 (z18, z0),
+ svadd (z18, z0))
+
+/*
+** add_awkward:
+** ...
+** add ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (add_awkward, svint16x2_t, svint16_t,
+ z0_res = svadd_single_s16_x2 (z1, z0),
+ z0_res = svadd (z1, z0))
+
+/*
+** add_z0_z0_z15:
+** ...
+** add {z0\.h - z1\.h}, {z0\.h - z1\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (add_z0_z0_z15, svint16x2_t, svint16_t,
+ z0 = svadd_single_s16_x2 (z0, z15),
+ z0 = svadd (z0, z15))
+
+/*
+** add_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** add {z24\.h - z25\.h}, {z24\.h - z25\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z16, svint16x2_t, svint16_t, z24,
+ svadd_single_s16_x2 (z24, z16),
+ svadd (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s16_x4.c
new file mode 100644
index 0000000..ae7fd4f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s16_x4.c
@@ -0,0 +1,125 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_z24_z24_z0:
+** add {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z0, svint16x4_t, svint16_t, z24,
+ svadd_single_s16_x4 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** |
+** add {z28\.h - z31\.h}, {z28\.h - z31\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z28_z0, svint16x4_t, svint16_t, z24,
+ svadd_single_s16_x4 (z28, z0),
+ svadd (z28, z0))
+
+/*
+** add_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z1_z0, svint16x4_t, svint16_t, z24,
+ svadd_single_s16_x4 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z1_z24_z0:
+** add {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z24_z0, svint16x4_t, svint16_t, z1,
+ svadd_single_s16_x4 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z1_z0, svint16x4_t, svint16_t, z1,
+ svadd_single_s16_x4 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z18_z18_z0, svint16x4_t, svint16_t, z18,
+ svadd_single_s16_x4 (z18, z0),
+ svadd (z18, z0))
+
+/*
+** add_awkward:
+** ...
+** add ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (add_awkward, svint16x4_t, svint16_t,
+ z0_res = svadd_single_s16_x4 (z1, z0),
+ z0_res = svadd (z1, z0))
+
+/*
+** add_z0_z0_z15:
+** ...
+** add {z0\.h - z3\.h}, {z0\.h - z3\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (add_z0_z0_z15, svint16x4_t, svint16_t,
+ z0 = svadd_single_s16_x4 (z0, z15),
+ z0 = svadd (z0, z15))
+
+/*
+** add_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** add {z24\.h - z27\.h}, {z24\.h - z27\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z16, svint16x4_t, svint16_t, z24,
+ svadd_single_s16_x4 (z24, z16),
+ svadd (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s32_x2.c
new file mode 100644
index 0000000..86fa39c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s32_x2.c
@@ -0,0 +1,115 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_z24_z24_z0:
+** add {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z0, svint32x2_t, svint32_t, z24,
+ svadd_single_s32_x2 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** |
+** add {z28\.s - z29\.s}, {z28\.s - z29\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z28_z0, svint32x2_t, svint32_t, z24,
+ svadd_single_s32_x2 (z28, z0),
+ svadd (z28, z0))
+
+/*
+** add_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** add {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z1_z0, svint32x2_t, svint32_t, z24,
+ svadd_single_s32_x2 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z1_z24_z0:
+** add {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z24_z0, svint32x2_t, svint32_t, z1,
+ svadd_single_s32_x2 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z1_z0, svint32x2_t, svint32_t, z1,
+ svadd_single_s32_x2 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z18_z18_z0:
+** add {z18\.s - z19\.s}, {z18\.s - z19\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (add_z18_z18_z0, svint32x2_t, svint32_t, z18,
+ svadd_single_s32_x2 (z18, z0),
+ svadd (z18, z0))
+
+/*
+** add_awkward:
+** ...
+** add ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (add_awkward, svint32x2_t, svint32_t,
+ z0_res = svadd_single_s32_x2 (z1, z0),
+ z0_res = svadd (z1, z0))
+
+/*
+** add_z0_z0_z15:
+** ...
+** add {z0\.s - z1\.s}, {z0\.s - z1\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (add_z0_z0_z15, svint32x2_t, svint32_t,
+ z0 = svadd_single_s32_x2 (z0, z15),
+ z0 = svadd (z0, z15))
+
+/*
+** add_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** add {z24\.s - z25\.s}, {z24\.s - z25\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z16, svint32x2_t, svint32_t, z24,
+ svadd_single_s32_x2 (z24, z16),
+ svadd (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s32_x4.c
new file mode 100644
index 0000000..75eadeb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s32_x4.c
@@ -0,0 +1,125 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_z24_z24_z0:
+** add {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z0, svint32x4_t, svint32_t, z24,
+ svadd_single_s32_x4 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** |
+** add {z28\.s - z31\.s}, {z28\.s - z31\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z28_z0, svint32x4_t, svint32_t, z24,
+ svadd_single_s32_x4 (z28, z0),
+ svadd (z28, z0))
+
+/*
+** add_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z1_z0, svint32x4_t, svint32_t, z24,
+ svadd_single_s32_x4 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z1_z24_z0:
+** add {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z24_z0, svint32x4_t, svint32_t, z1,
+ svadd_single_s32_x4 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z1_z0, svint32x4_t, svint32_t, z1,
+ svadd_single_s32_x4 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z18_z18_z0, svint32x4_t, svint32_t, z18,
+ svadd_single_s32_x4 (z18, z0),
+ svadd (z18, z0))
+
+/*
+** add_awkward:
+** ...
+** add ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (add_awkward, svint32x4_t, svint32_t,
+ z0_res = svadd_single_s32_x4 (z1, z0),
+ z0_res = svadd (z1, z0))
+
+/*
+** add_z0_z0_z15:
+** ...
+** add {z0\.s - z3\.s}, {z0\.s - z3\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (add_z0_z0_z15, svint32x4_t, svint32_t,
+ z0 = svadd_single_s32_x4 (z0, z15),
+ z0 = svadd (z0, z15))
+
+/*
+** add_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** add {z24\.s - z27\.s}, {z24\.s - z27\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z16, svint32x4_t, svint32_t, z24,
+ svadd_single_s32_x4 (z24, z16),
+ svadd (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s64_x2.c
new file mode 100644
index 0000000..9d51064
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s64_x2.c
@@ -0,0 +1,115 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_z24_z24_z0:
+** add {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z0, svint64x2_t, svint64_t, z24,
+ svadd_single_s64_x2 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** |
+** add {z28\.d - z29\.d}, {z28\.d - z29\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z28_z0, svint64x2_t, svint64_t, z24,
+ svadd_single_s64_x2 (z28, z0),
+ svadd (z28, z0))
+
+/*
+** add_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** add {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z1_z0, svint64x2_t, svint64_t, z24,
+ svadd_single_s64_x2 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z1_z24_z0:
+** add {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z24_z0, svint64x2_t, svint64_t, z1,
+ svadd_single_s64_x2 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z1_z0, svint64x2_t, svint64_t, z1,
+ svadd_single_s64_x2 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z18_z18_z0:
+** add {z18\.d - z19\.d}, {z18\.d - z19\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (add_z18_z18_z0, svint64x2_t, svint64_t, z18,
+ svadd_single_s64_x2 (z18, z0),
+ svadd (z18, z0))
+
+/*
+** add_awkward:
+** ...
+** add ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (add_awkward, svint64x2_t, svint64_t,
+ z0_res = svadd_single_s64_x2 (z1, z0),
+ z0_res = svadd (z1, z0))
+
+/*
+** add_z0_z0_z15:
+** ...
+** add {z0\.d - z1\.d}, {z0\.d - z1\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (add_z0_z0_z15, svint64x2_t, svint64_t,
+ z0 = svadd_single_s64_x2 (z0, z15),
+ z0 = svadd (z0, z15))
+
+/*
+** add_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** add {z24\.d - z25\.d}, {z24\.d - z25\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z16, svint64x2_t, svint64_t, z24,
+ svadd_single_s64_x2 (z24, z16),
+ svadd (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s64_x4.c
new file mode 100644
index 0000000..ac5e959
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s64_x4.c
@@ -0,0 +1,125 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_z24_z24_z0:
+** add {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z0, svint64x4_t, svint64_t, z24,
+ svadd_single_s64_x4 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** |
+** add {z28\.d - z31\.d}, {z28\.d - z31\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z28_z0, svint64x4_t, svint64_t, z24,
+ svadd_single_s64_x4 (z28, z0),
+ svadd (z28, z0))
+
+/*
+** add_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z1_z0, svint64x4_t, svint64_t, z24,
+ svadd_single_s64_x4 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z1_z24_z0:
+** add {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z24_z0, svint64x4_t, svint64_t, z1,
+ svadd_single_s64_x4 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z1_z0, svint64x4_t, svint64_t, z1,
+ svadd_single_s64_x4 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z18_z18_z0, svint64x4_t, svint64_t, z18,
+ svadd_single_s64_x4 (z18, z0),
+ svadd (z18, z0))
+
+/*
+** add_awkward:
+** ...
+** add ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (add_awkward, svint64x4_t, svint64_t,
+ z0_res = svadd_single_s64_x4 (z1, z0),
+ z0_res = svadd (z1, z0))
+
+/*
+** add_z0_z0_z15:
+** ...
+** add {z0\.d - z3\.d}, {z0\.d - z3\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (add_z0_z0_z15, svint64x4_t, svint64_t,
+ z0 = svadd_single_s64_x4 (z0, z15),
+ z0 = svadd (z0, z15))
+
+/*
+** add_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** add {z24\.d - z27\.d}, {z24\.d - z27\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z16, svint64x4_t, svint64_t, z24,
+ svadd_single_s64_x4 (z24, z16),
+ svadd (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s8_x2.c
new file mode 100644
index 0000000..5ac04c0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s8_x2.c
@@ -0,0 +1,115 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_z24_z24_z0:
+** add {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z0, svint8x2_t, svint8_t, z24,
+ svadd_single_s8_x2 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** |
+** add {z28\.b - z29\.b}, {z28\.b - z29\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z28_z0, svint8x2_t, svint8_t, z24,
+ svadd_single_s8_x2 (z28, z0),
+ svadd (z28, z0))
+
+/*
+** add_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** add {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z1_z0, svint8x2_t, svint8_t, z24,
+ svadd_single_s8_x2 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z1_z24_z0:
+** add {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z24_z0, svint8x2_t, svint8_t, z1,
+ svadd_single_s8_x2 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.b - z[0-9]+\.b}), \1, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z1_z0, svint8x2_t, svint8_t, z1,
+ svadd_single_s8_x2 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z18_z18_z0:
+** add {z18\.b - z19\.b}, {z18\.b - z19\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (add_z18_z18_z0, svint8x2_t, svint8_t, z18,
+ svadd_single_s8_x2 (z18, z0),
+ svadd (z18, z0))
+
+/*
+** add_awkward:
+** ...
+** add ({z[0-9]+\.b - z[0-9]+\.b}), \1, z[0-9]+\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (add_awkward, svint8x2_t, svint8_t,
+ z0_res = svadd_single_s8_x2 (z1, z0),
+ z0_res = svadd (z1, z0))
+
+/*
+** add_z0_z0_z15:
+** ...
+** add {z0\.b - z1\.b}, {z0\.b - z1\.b}, z15\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (add_z0_z0_z15, svint8x2_t, svint8_t,
+ z0 = svadd_single_s8_x2 (z0, z15),
+ z0 = svadd (z0, z15))
+
+/*
+** add_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** add {z24\.b - z25\.b}, {z24\.b - z25\.b}, \1\.b
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z16, svint8x2_t, svint8_t, z24,
+ svadd_single_s8_x2 (z24, z16),
+ svadd (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s8_x4.c
new file mode 100644
index 0000000..df91a6e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_s8_x4.c
@@ -0,0 +1,125 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_z24_z24_z0:
+** add {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z0, svint8x4_t, svint8_t, z24,
+ svadd_single_s8_x4 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** |
+** add {z28\.b - z31\.b}, {z28\.b - z31\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z28_z0, svint8x4_t, svint8_t, z24,
+ svadd_single_s8_x4 (z28, z0),
+ svadd (z28, z0))
+
+/*
+** add_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z1_z0, svint8x4_t, svint8_t, z24,
+ svadd_single_s8_x4 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z1_z24_z0:
+** add {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z24_z0, svint8x4_t, svint8_t, z1,
+ svadd_single_s8_x4 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.b - z[0-9]+\.b}), \1, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z1_z0, svint8x4_t, svint8_t, z1,
+ svadd_single_s8_x4 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.b - z[0-9]+\.b}), \1, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z18_z18_z0, svint8x4_t, svint8_t, z18,
+ svadd_single_s8_x4 (z18, z0),
+ svadd (z18, z0))
+
+/*
+** add_awkward:
+** ...
+** add ({z[0-9]+\.b - z[0-9]+\.b}), \1, z[0-9]+\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (add_awkward, svint8x4_t, svint8_t,
+ z0_res = svadd_single_s8_x4 (z1, z0),
+ z0_res = svadd (z1, z0))
+
+/*
+** add_z0_z0_z15:
+** ...
+** add {z0\.b - z3\.b}, {z0\.b - z3\.b}, z15\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (add_z0_z0_z15, svint8x4_t, svint8_t,
+ z0 = svadd_single_s8_x4 (z0, z15),
+ z0 = svadd (z0, z15))
+
+/*
+** add_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** add {z24\.b - z27\.b}, {z24\.b - z27\.b}, \1\.b
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z16, svint8x4_t, svint8_t, z24,
+ svadd_single_s8_x4 (z24, z16),
+ svadd (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u16_x2.c
new file mode 100644
index 0000000..06866f6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u16_x2.c
@@ -0,0 +1,115 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_z24_z24_z0:
+** add {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z0, svuint16x2_t, svuint16_t, z24,
+ svadd_single_u16_x2 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** |
+** add {z28\.h - z29\.h}, {z28\.h - z29\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z28_z0, svuint16x2_t, svuint16_t, z24,
+ svadd_single_u16_x2 (z28, z0),
+ svadd (z28, z0))
+
+/*
+** add_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** add {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z1_z0, svuint16x2_t, svuint16_t, z24,
+ svadd_single_u16_x2 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z1_z24_z0:
+** add {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z24_z0, svuint16x2_t, svuint16_t, z1,
+ svadd_single_u16_x2 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z1_z0, svuint16x2_t, svuint16_t, z1,
+ svadd_single_u16_x2 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z18_z18_z0:
+** add {z18\.h - z19\.h}, {z18\.h - z19\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (add_z18_z18_z0, svuint16x2_t, svuint16_t, z18,
+ svadd_single_u16_x2 (z18, z0),
+ svadd (z18, z0))
+
+/*
+** add_awkward:
+** ...
+** add ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (add_awkward, svuint16x2_t, svuint16_t,
+ z0_res = svadd_single_u16_x2 (z1, z0),
+ z0_res = svadd (z1, z0))
+
+/*
+** add_z0_z0_z15:
+** ...
+** add {z0\.h - z1\.h}, {z0\.h - z1\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (add_z0_z0_z15, svuint16x2_t, svuint16_t,
+ z0 = svadd_single_u16_x2 (z0, z15),
+ z0 = svadd (z0, z15))
+
+/*
+** add_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** add {z24\.h - z25\.h}, {z24\.h - z25\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z16, svuint16x2_t, svuint16_t, z24,
+ svadd_single_u16_x2 (z24, z16),
+ svadd (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u16_x4.c
new file mode 100644
index 0000000..a00959f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u16_x4.c
@@ -0,0 +1,125 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_z24_z24_z0:
+** add {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z0, svuint16x4_t, svuint16_t, z24,
+ svadd_single_u16_x4 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** |
+** add {z28\.h - z31\.h}, {z28\.h - z31\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z28_z0, svuint16x4_t, svuint16_t, z24,
+ svadd_single_u16_x4 (z28, z0),
+ svadd (z28, z0))
+
+/*
+** add_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z1_z0, svuint16x4_t, svuint16_t, z24,
+ svadd_single_u16_x4 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z1_z24_z0:
+** add {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z24_z0, svuint16x4_t, svuint16_t, z1,
+ svadd_single_u16_x4 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z1_z0, svuint16x4_t, svuint16_t, z1,
+ svadd_single_u16_x4 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z18_z18_z0, svuint16x4_t, svuint16_t, z18,
+ svadd_single_u16_x4 (z18, z0),
+ svadd (z18, z0))
+
+/*
+** add_awkward:
+** ...
+** add ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (add_awkward, svuint16x4_t, svuint16_t,
+ z0_res = svadd_single_u16_x4 (z1, z0),
+ z0_res = svadd (z1, z0))
+
+/*
+** add_z0_z0_z15:
+** ...
+** add {z0\.h - z3\.h}, {z0\.h - z3\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (add_z0_z0_z15, svuint16x4_t, svuint16_t,
+ z0 = svadd_single_u16_x4 (z0, z15),
+ z0 = svadd (z0, z15))
+
+/*
+** add_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** add {z24\.h - z27\.h}, {z24\.h - z27\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z16, svuint16x4_t, svuint16_t, z24,
+ svadd_single_u16_x4 (z24, z16),
+ svadd (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u32_x2.c
new file mode 100644
index 0000000..6672a6a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u32_x2.c
@@ -0,0 +1,115 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_z24_z24_z0:
+** add {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z0, svuint32x2_t, svuint32_t, z24,
+ svadd_single_u32_x2 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** |
+** add {z28\.s - z29\.s}, {z28\.s - z29\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z28_z0, svuint32x2_t, svuint32_t, z24,
+ svadd_single_u32_x2 (z28, z0),
+ svadd (z28, z0))
+
+/*
+** add_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** add {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z1_z0, svuint32x2_t, svuint32_t, z24,
+ svadd_single_u32_x2 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z1_z24_z0:
+** add {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z24_z0, svuint32x2_t, svuint32_t, z1,
+ svadd_single_u32_x2 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z1_z0, svuint32x2_t, svuint32_t, z1,
+ svadd_single_u32_x2 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z18_z18_z0:
+** add {z18\.s - z19\.s}, {z18\.s - z19\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (add_z18_z18_z0, svuint32x2_t, svuint32_t, z18,
+ svadd_single_u32_x2 (z18, z0),
+ svadd (z18, z0))
+
+/*
+** add_awkward:
+** ...
+** add ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (add_awkward, svuint32x2_t, svuint32_t,
+ z0_res = svadd_single_u32_x2 (z1, z0),
+ z0_res = svadd (z1, z0))
+
+/*
+** add_z0_z0_z15:
+** ...
+** add {z0\.s - z1\.s}, {z0\.s - z1\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (add_z0_z0_z15, svuint32x2_t, svuint32_t,
+ z0 = svadd_single_u32_x2 (z0, z15),
+ z0 = svadd (z0, z15))
+
+/*
+** add_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** add {z24\.s - z25\.s}, {z24\.s - z25\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z16, svuint32x2_t, svuint32_t, z24,
+ svadd_single_u32_x2 (z24, z16),
+ svadd (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u32_x4.c
new file mode 100644
index 0000000..7e3a718
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u32_x4.c
@@ -0,0 +1,125 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_z24_z24_z0:
+** add {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z0, svuint32x4_t, svuint32_t, z24,
+ svadd_single_u32_x4 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** |
+** add {z28\.s - z31\.s}, {z28\.s - z31\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z28_z0, svuint32x4_t, svuint32_t, z24,
+ svadd_single_u32_x4 (z28, z0),
+ svadd (z28, z0))
+
+/*
+** add_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z1_z0, svuint32x4_t, svuint32_t, z24,
+ svadd_single_u32_x4 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z1_z24_z0:
+** add {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z24_z0, svuint32x4_t, svuint32_t, z1,
+ svadd_single_u32_x4 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z1_z0, svuint32x4_t, svuint32_t, z1,
+ svadd_single_u32_x4 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z18_z18_z0, svuint32x4_t, svuint32_t, z18,
+ svadd_single_u32_x4 (z18, z0),
+ svadd (z18, z0))
+
+/*
+** add_awkward:
+** ...
+** add ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (add_awkward, svuint32x4_t, svuint32_t,
+ z0_res = svadd_single_u32_x4 (z1, z0),
+ z0_res = svadd (z1, z0))
+
+/*
+** add_z0_z0_z15:
+** ...
+** add {z0\.s - z3\.s}, {z0\.s - z3\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (add_z0_z0_z15, svuint32x4_t, svuint32_t,
+ z0 = svadd_single_u32_x4 (z0, z15),
+ z0 = svadd (z0, z15))
+
+/*
+** add_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** add {z24\.s - z27\.s}, {z24\.s - z27\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z16, svuint32x4_t, svuint32_t, z24,
+ svadd_single_u32_x4 (z24, z16),
+ svadd (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u64_x2.c
new file mode 100644
index 0000000..6800d14
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u64_x2.c
@@ -0,0 +1,115 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_z24_z24_z0:
+** add {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z0, svuint64x2_t, svuint64_t, z24,
+ svadd_single_u64_x2 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** |
+** add {z28\.d - z29\.d}, {z28\.d - z29\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z28_z0, svuint64x2_t, svuint64_t, z24,
+ svadd_single_u64_x2 (z28, z0),
+ svadd (z28, z0))
+
+/*
+** add_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** add {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z1_z0, svuint64x2_t, svuint64_t, z24,
+ svadd_single_u64_x2 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z1_z24_z0:
+** add {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z24_z0, svuint64x2_t, svuint64_t, z1,
+ svadd_single_u64_x2 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z1_z0, svuint64x2_t, svuint64_t, z1,
+ svadd_single_u64_x2 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z18_z18_z0:
+** add {z18\.d - z19\.d}, {z18\.d - z19\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (add_z18_z18_z0, svuint64x2_t, svuint64_t, z18,
+ svadd_single_u64_x2 (z18, z0),
+ svadd (z18, z0))
+
+/*
+** add_awkward:
+** ...
+** add ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (add_awkward, svuint64x2_t, svuint64_t,
+ z0_res = svadd_single_u64_x2 (z1, z0),
+ z0_res = svadd (z1, z0))
+
+/*
+** add_z0_z0_z15:
+** ...
+** add {z0\.d - z1\.d}, {z0\.d - z1\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (add_z0_z0_z15, svuint64x2_t, svuint64_t,
+ z0 = svadd_single_u64_x2 (z0, z15),
+ z0 = svadd (z0, z15))
+
+/*
+** add_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** add {z24\.d - z25\.d}, {z24\.d - z25\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z16, svuint64x2_t, svuint64_t, z24,
+ svadd_single_u64_x2 (z24, z16),
+ svadd (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u64_x4.c
new file mode 100644
index 0000000..91ced4c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u64_x4.c
@@ -0,0 +1,125 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_z24_z24_z0:
+** add {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z0, svuint64x4_t, svuint64_t, z24,
+ svadd_single_u64_x4 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** |
+** add {z28\.d - z31\.d}, {z28\.d - z31\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z28_z0, svuint64x4_t, svuint64_t, z24,
+ svadd_single_u64_x4 (z28, z0),
+ svadd (z28, z0))
+
+/*
+** add_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z1_z0, svuint64x4_t, svuint64_t, z24,
+ svadd_single_u64_x4 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z1_z24_z0:
+** add {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z24_z0, svuint64x4_t, svuint64_t, z1,
+ svadd_single_u64_x4 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z1_z0, svuint64x4_t, svuint64_t, z1,
+ svadd_single_u64_x4 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z18_z18_z0, svuint64x4_t, svuint64_t, z18,
+ svadd_single_u64_x4 (z18, z0),
+ svadd (z18, z0))
+
+/*
+** add_awkward:
+** ...
+** add ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (add_awkward, svuint64x4_t, svuint64_t,
+ z0_res = svadd_single_u64_x4 (z1, z0),
+ z0_res = svadd (z1, z0))
+
+/*
+** add_z0_z0_z15:
+** ...
+** add {z0\.d - z3\.d}, {z0\.d - z3\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (add_z0_z0_z15, svuint64x4_t, svuint64_t,
+ z0 = svadd_single_u64_x4 (z0, z15),
+ z0 = svadd (z0, z15))
+
+/*
+** add_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** add {z24\.d - z27\.d}, {z24\.d - z27\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z16, svuint64x4_t, svuint64_t, z24,
+ svadd_single_u64_x4 (z24, z16),
+ svadd (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u8_x2.c
new file mode 100644
index 0000000..d726009
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u8_x2.c
@@ -0,0 +1,115 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_z24_z24_z0:
+** add {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z0, svuint8x2_t, svuint8_t, z24,
+ svadd_single_u8_x2 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** |
+** add {z28\.b - z29\.b}, {z28\.b - z29\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z28_z0, svuint8x2_t, svuint8_t, z24,
+ svadd_single_u8_x2 (z28, z0),
+ svadd (z28, z0))
+
+/*
+** add_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** add {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z1_z0, svuint8x2_t, svuint8_t, z24,
+ svadd_single_u8_x2 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z1_z24_z0:
+** add {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z24_z0, svuint8x2_t, svuint8_t, z1,
+ svadd_single_u8_x2 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.b - z[0-9]+\.b}), \1, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z1_z0, svuint8x2_t, svuint8_t, z1,
+ svadd_single_u8_x2 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z18_z18_z0:
+** add {z18\.b - z19\.b}, {z18\.b - z19\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (add_z18_z18_z0, svuint8x2_t, svuint8_t, z18,
+ svadd_single_u8_x2 (z18, z0),
+ svadd (z18, z0))
+
+/*
+** add_awkward:
+** ...
+** add ({z[0-9]+\.b - z[0-9]+\.b}), \1, z[0-9]+\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (add_awkward, svuint8x2_t, svuint8_t,
+ z0_res = svadd_single_u8_x2 (z1, z0),
+ z0_res = svadd (z1, z0))
+
+/*
+** add_z0_z0_z15:
+** ...
+** add {z0\.b - z1\.b}, {z0\.b - z1\.b}, z15\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (add_z0_z0_z15, svuint8x2_t, svuint8_t,
+ z0 = svadd_single_u8_x2 (z0, z15),
+ z0 = svadd (z0, z15))
+
+/*
+** add_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** add {z24\.b - z25\.b}, {z24\.b - z25\.b}, \1\.b
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z16, svuint8x2_t, svuint8_t, z24,
+ svadd_single_u8_x2 (z24, z16),
+ svadd (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u8_x4.c
new file mode 100644
index 0000000..11fa766
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_u8_x4.c
@@ -0,0 +1,125 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_z24_z24_z0:
+** add {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z0, svuint8x4_t, svuint8_t, z24,
+ svadd_single_u8_x4 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** |
+** add {z28\.b - z31\.b}, {z28\.b - z31\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z28_z0, svuint8x4_t, svuint8_t, z24,
+ svadd_single_u8_x4 (z28, z0),
+ svadd (z28, z0))
+
+/*
+** add_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z1_z0, svuint8x4_t, svuint8_t, z24,
+ svadd_single_u8_x4 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z1_z24_z0:
+** add {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z24_z0, svuint8x4_t, svuint8_t, z1,
+ svadd_single_u8_x4 (z24, z0),
+ svadd (z24, z0))
+
+/*
+** add_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.b - z[0-9]+\.b}), \1, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z1_z1_z0, svuint8x4_t, svuint8_t, z1,
+ svadd_single_u8_x4 (z1, z0),
+ svadd (z1, z0))
+
+/*
+** add_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add ({z[0-9]+\.b - z[0-9]+\.b}), \1, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (add_z18_z18_z0, svuint8x4_t, svuint8_t, z18,
+ svadd_single_u8_x4 (z18, z0),
+ svadd (z18, z0))
+
+/*
+** add_awkward:
+** ...
+** add ({z[0-9]+\.b - z[0-9]+\.b}), \1, z[0-9]+\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (add_awkward, svuint8x4_t, svuint8_t,
+ z0_res = svadd_single_u8_x4 (z1, z0),
+ z0_res = svadd (z1, z0))
+
+/*
+** add_z0_z0_z15:
+** ...
+** add {z0\.b - z3\.b}, {z0\.b - z3\.b}, z15\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (add_z0_z0_z15, svuint8x4_t, svuint8_t,
+ z0 = svadd_single_u8_x4 (z0, z15),
+ z0 = svadd (z0, z15))
+
+/*
+** add_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** add {z24\.b - z27\.b}, {z24\.b - z27\.b}, \1\.b
+** ret
+*/
+TEST_XN_SINGLE (add_z24_z24_z16, svuint8x4_t, svuint8_t, z24,
+ svadd_single_u8_x4 (z24, z16),
+ svadd (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za32_s32_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za32_s32_vg1x2.c
new file mode 100644
index 0000000..19db69d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za32_s32_vg1x2.c
@@ -0,0 +1,180 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_write_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_0_z0_z0, svint32x2_t,
+ svadd_write_za32_s32_vg1x2 (0, z0, z0),
+ svadd_write_za32_vg1x2 (0, z0, z0))
+
+/*
+** add_write_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w0_z0_z0, svint32x2_t,
+ svadd_write_za32_s32_vg1x2 (w0, z0, z0),
+ svadd_write_za32_vg1x2 (w0, z0, z0))
+
+/*
+** add_write_w8_z0_z4:
+** add za\.s\[w8, 0, vgx2\], {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z0_z4, svint32x2_t,
+ svadd_write_za32_s32_vg1x2 (w8, z0, z4),
+ svadd_write_za32_vg1x2 (w8, z0, z4))
+
+/*
+** add_write_w8_z4_z18:
+** add za\.s\[w8, 0, vgx2\], {z4\.s - z5\.s}, {z18\.s - z19\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z4_z18, svint32x2_t,
+ svadd_write_za32_s32_vg1x2 (w8, z4, z18),
+ svadd_write_za32_vg1x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** add_write_w8_z23_z0:
+** ...
+** add za\.s\[w8, 0, vgx2\], [^\n]+, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z23_z0, svint32x2_t,
+ svadd_write_za32_s32_vg1x2 (w8, z23, z0),
+ svadd_write_za32_vg1x2 (w8, z23, z0))
+
+/*
+** add_write_w8_z18_z23:
+** ...
+** add za\.s\[w8, 0, vgx2\], {z18\.s - z19\.s}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z18_z23, svint32x2_t,
+ svadd_write_za32_s32_vg1x2 (w8, z18, z23),
+ svadd_write_za32_vg1x2 (w8, z18, z23))
+
+/*
+** add_write_w8_z4_z28:
+** add za\.s\[w8, 0, vgx2\], {z4\.s - z5\.s}, {z28\.s - z29\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z4_z28, svint32x2_t,
+ svadd_write_za32_s32_vg1x2 (w8, z4, z28),
+ svadd_write_za32_vg1x2 (w8, z4, z28))
+
+/*
+** add_write_w8p7_z4_z0:
+** add za\.s\[w8, 7, vgx2\], {z4\.s - z5\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8p7_z4_z0, svint32x2_t,
+ svadd_write_za32_s32_vg1x2 (w8 + 7, z4, z0),
+ svadd_write_za32_vg1x2 (w8 + 7, z4, z0))
+
+/*
+** add_write_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.s\[\1, 0, vgx2\], {z4\.s - z5\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8p8_z4_z4, svint32x2_t,
+ svadd_write_za32_s32_vg1x2 (w8 + 8, z4, z4),
+ svadd_write_za32_vg1x2 (w8 + 8, z4, z4))
+
+/*
+** add_write_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** add za\.s\[\1, 0, vgx2\], {z4\.s - z5\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8m1_z4_z0, svint32x2_t,
+ svadd_write_za32_s32_vg1x2 (w8 - 1, z4, z0),
+ svadd_write_za32_vg1x2 (w8 - 1, z4, z0))
+
+/*
+** add_write_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_0_z1_z0, svint32x2_t, svint32_t,
+ svadd_write_single_za32_s32_vg1x2 (0, z1, z0),
+ svadd_write_za32_vg1x2 (0, z1, z0))
+
+/*
+** add_write_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w0_z1_z0, svint32x2_t, svint32_t,
+ svadd_write_single_za32_s32_vg1x2 (w0, z1, z0),
+ svadd_write_za32_vg1x2 (w0, z1, z0))
+
+/*
+** add_write_single_w8_z1_z0:
+** add za\.s\[w8, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8_z1_z0, svint32x2_t, svint32_t,
+ svadd_write_single_za32_s32_vg1x2 (w8, z1, z0),
+ svadd_write_za32_vg1x2 (w8, z1, z0))
+
+/*
+** add_write_single_w8p7_z1_z0:
+** add za\.s\[w8, 7, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8p7_z1_z0, svint32x2_t, svint32_t,
+ svadd_write_single_za32_s32_vg1x2 (w8 + 7, z1, z0),
+ svadd_write_za32_vg1x2 (w8 + 7, z1, z0))
+
+/*
+** add_write_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8p8_z1_z0, svint32x2_t, svint32_t,
+ svadd_write_single_za32_s32_vg1x2 (w8 + 8, z1, z0),
+ svadd_write_za32_vg1x2 (w8 + 8, z1, z0))
+
+/*
+** add_write_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** add za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w0m1_z1_z0, svint32x2_t, svint32_t,
+ svadd_write_single_za32_s32_vg1x2 (w0 - 1, z1, z0),
+ svadd_write_za32_vg1x2 (w0 - 1, z1, z0))
+
+/*
+** add_write_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** add za\.s\[w8, 0, vgx2\], {z0\.s - z1\.s}, z15\.s
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (add_write_single_w8_z0_z15, svint32x2_t, svint32_t,
+ svadd_write_single_za32_s32_vg1x2 (w8, z0, z15),
+ svadd_write_za32_vg1x2 (w8, z0, z15))
+
+/*
+** add_write_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** add za\.s\[w8, 0, vgx2\], {z20\.s - z21\.s}, \1\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8_z20_z16, svint32x2_t, svint32_t,
+ svadd_write_single_za32_s32_vg1x2 (w8, z20, z16),
+ svadd_write_za32_vg1x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za32_s32_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za32_s32_vg1x4.c
new file mode 100644
index 0000000..40d0153
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za32_s32_vg1x4.c
@@ -0,0 +1,172 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_write_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_0_z0_z0, svint32x4_t,
+ svadd_write_za32_s32_vg1x4 (0, z0, z0),
+ svadd_write_za32_vg1x4 (0, z0, z0))
+
+/*
+** add_write_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w0_z0_z0, svint32x4_t,
+ svadd_write_za32_s32_vg1x4 (w0, z0, z0),
+ svadd_write_za32_vg1x4 (w0, z0, z0))
+
+/*
+** add_write_w8_z0_z4:
+** add za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z0_z4, svint32x4_t,
+ svadd_write_za32_s32_vg1x4 (w8, z0, z4),
+ svadd_write_za32_vg1x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** add_write_w8_z0_z18:
+** ...
+** add za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z0_z18, svint32x4_t,
+ svadd_write_za32_s32_vg1x4 (w8, z0, z18),
+ svadd_write_za32_vg1x4 (w8, z0, z18))
+
+/*
+** add_write_w8_z18_z28:
+** ...
+** add za\.s\[w8, 0, vgx4\], [^\n]+, {z28\.s - z31\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z18_z28, svint32x4_t,
+ svadd_write_za32_s32_vg1x4 (w8, z18, z28),
+ svadd_write_za32_vg1x4 (w8, z18, z28))
+
+/*
+** add_write_w8_z28_z23:
+** ...
+** add za\.s\[w8, 0, vgx4\], {z28\.s - z31\.s}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z28_z23, svint32x4_t,
+ svadd_write_za32_s32_vg1x4 (w8, z28, z23),
+ svadd_write_za32_vg1x4 (w8, z28, z23))
+
+/*
+** add_write_w8p7_z4_z0:
+** add za\.s\[w8, 7, vgx4\], {z4\.s - z7\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8p7_z4_z0, svint32x4_t,
+ svadd_write_za32_s32_vg1x4 (w8 + 7, z4, z0),
+ svadd_write_za32_vg1x4 (w8 + 7, z4, z0))
+
+/*
+** add_write_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.s\[\1, 0, vgx4\], {z4\.s - z7\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8p8_z4_z4, svint32x4_t,
+ svadd_write_za32_s32_vg1x4 (w8 + 8, z4, z4),
+ svadd_write_za32_vg1x4 (w8 + 8, z4, z4))
+
+/*
+** add_write_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** add za\.s\[\1, 0, vgx4\], {z4\.s - z7\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8m1_z4_z0, svint32x4_t,
+ svadd_write_za32_s32_vg1x4 (w8 - 1, z4, z0),
+ svadd_write_za32_vg1x4 (w8 - 1, z4, z0))
+
+/*
+** add_write_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_0_z1_z0, svint32x4_t, svint32_t,
+ svadd_write_single_za32_s32_vg1x4 (0, z1, z0),
+ svadd_write_za32_vg1x4 (0, z1, z0))
+
+/*
+** add_write_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w0_z1_z0, svint32x4_t, svint32_t,
+ svadd_write_single_za32_s32_vg1x4 (w0, z1, z0),
+ svadd_write_za32_vg1x4 (w0, z1, z0))
+
+/*
+** add_write_single_w8_z1_z0:
+** add za\.s\[w8, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8_z1_z0, svint32x4_t, svint32_t,
+ svadd_write_single_za32_s32_vg1x4 (w8, z1, z0),
+ svadd_write_za32_vg1x4 (w8, z1, z0))
+
+/*
+** add_write_single_w8p7_z1_z0:
+** add za\.s\[w8, 7, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8p7_z1_z0, svint32x4_t, svint32_t,
+ svadd_write_single_za32_s32_vg1x4 (w8 + 7, z1, z0),
+ svadd_write_za32_vg1x4 (w8 + 7, z1, z0))
+
+/*
+** add_write_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8p8_z1_z0, svint32x4_t, svint32_t,
+ svadd_write_single_za32_s32_vg1x4 (w8 + 8, z1, z0),
+ svadd_write_za32_vg1x4 (w8 + 8, z1, z0))
+
+/*
+** add_write_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** add za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w0m1_z1_z0, svint32x4_t, svint32_t,
+ svadd_write_single_za32_s32_vg1x4 (w0 - 1, z1, z0),
+ svadd_write_za32_vg1x4 (w0 - 1, z1, z0))
+
+/*
+** add_write_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** add za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}, z15\.s
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (add_write_single_w8_z0_z15, svint32x4_t, svint32_t,
+ svadd_write_single_za32_s32_vg1x4 (w8, z0, z15),
+ svadd_write_za32_vg1x4 (w8, z0, z15))
+
+/*
+** add_write_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** add za\.s\[w8, 0, vgx4\], {z20\.s - z23\.s}, \1\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8_z20_z16, svint32x4_t, svint32_t,
+ svadd_write_single_za32_s32_vg1x4 (w8, z20, z16),
+ svadd_write_za32_vg1x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za32_u32_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za32_u32_vg1x2.c
new file mode 100644
index 0000000..65851f1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za32_u32_vg1x2.c
@@ -0,0 +1,180 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_write_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_0_z0_z0, svuint32x2_t,
+ svadd_write_za32_u32_vg1x2 (0, z0, z0),
+ svadd_write_za32_vg1x2 (0, z0, z0))
+
+/*
+** add_write_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w0_z0_z0, svuint32x2_t,
+ svadd_write_za32_u32_vg1x2 (w0, z0, z0),
+ svadd_write_za32_vg1x2 (w0, z0, z0))
+
+/*
+** add_write_w8_z0_z4:
+** add za\.s\[w8, 0, vgx2\], {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z0_z4, svuint32x2_t,
+ svadd_write_za32_u32_vg1x2 (w8, z0, z4),
+ svadd_write_za32_vg1x2 (w8, z0, z4))
+
+/*
+** add_write_w8_z4_z18:
+** add za\.s\[w8, 0, vgx2\], {z4\.s - z5\.s}, {z18\.s - z19\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z4_z18, svuint32x2_t,
+ svadd_write_za32_u32_vg1x2 (w8, z4, z18),
+ svadd_write_za32_vg1x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** add_write_w8_z23_z0:
+** ...
+** add za\.s\[w8, 0, vgx2\], [^\n]+, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z23_z0, svuint32x2_t,
+ svadd_write_za32_u32_vg1x2 (w8, z23, z0),
+ svadd_write_za32_vg1x2 (w8, z23, z0))
+
+/*
+** add_write_w8_z18_z23:
+** ...
+** add za\.s\[w8, 0, vgx2\], {z18\.s - z19\.s}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z18_z23, svuint32x2_t,
+ svadd_write_za32_u32_vg1x2 (w8, z18, z23),
+ svadd_write_za32_vg1x2 (w8, z18, z23))
+
+/*
+** add_write_w8_z4_z28:
+** add za\.s\[w8, 0, vgx2\], {z4\.s - z5\.s}, {z28\.s - z29\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z4_z28, svuint32x2_t,
+ svadd_write_za32_u32_vg1x2 (w8, z4, z28),
+ svadd_write_za32_vg1x2 (w8, z4, z28))
+
+/*
+** add_write_w8p7_z4_z0:
+** add za\.s\[w8, 7, vgx2\], {z4\.s - z5\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8p7_z4_z0, svuint32x2_t,
+ svadd_write_za32_u32_vg1x2 (w8 + 7, z4, z0),
+ svadd_write_za32_vg1x2 (w8 + 7, z4, z0))
+
+/*
+** add_write_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.s\[\1, 0, vgx2\], {z4\.s - z5\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8p8_z4_z4, svuint32x2_t,
+ svadd_write_za32_u32_vg1x2 (w8 + 8, z4, z4),
+ svadd_write_za32_vg1x2 (w8 + 8, z4, z4))
+
+/*
+** add_write_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** add za\.s\[\1, 0, vgx2\], {z4\.s - z5\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8m1_z4_z0, svuint32x2_t,
+ svadd_write_za32_u32_vg1x2 (w8 - 1, z4, z0),
+ svadd_write_za32_vg1x2 (w8 - 1, z4, z0))
+
+/*
+** add_write_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_0_z1_z0, svuint32x2_t, svuint32_t,
+ svadd_write_single_za32_u32_vg1x2 (0, z1, z0),
+ svadd_write_za32_vg1x2 (0, z1, z0))
+
+/*
+** add_write_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w0_z1_z0, svuint32x2_t, svuint32_t,
+ svadd_write_single_za32_u32_vg1x2 (w0, z1, z0),
+ svadd_write_za32_vg1x2 (w0, z1, z0))
+
+/*
+** add_write_single_w8_z1_z0:
+** add za\.s\[w8, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8_z1_z0, svuint32x2_t, svuint32_t,
+ svadd_write_single_za32_u32_vg1x2 (w8, z1, z0),
+ svadd_write_za32_vg1x2 (w8, z1, z0))
+
+/*
+** add_write_single_w8p7_z1_z0:
+** add za\.s\[w8, 7, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8p7_z1_z0, svuint32x2_t, svuint32_t,
+ svadd_write_single_za32_u32_vg1x2 (w8 + 7, z1, z0),
+ svadd_write_za32_vg1x2 (w8 + 7, z1, z0))
+
+/*
+** add_write_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8p8_z1_z0, svuint32x2_t, svuint32_t,
+ svadd_write_single_za32_u32_vg1x2 (w8 + 8, z1, z0),
+ svadd_write_za32_vg1x2 (w8 + 8, z1, z0))
+
+/*
+** add_write_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** add za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w0m1_z1_z0, svuint32x2_t, svuint32_t,
+ svadd_write_single_za32_u32_vg1x2 (w0 - 1, z1, z0),
+ svadd_write_za32_vg1x2 (w0 - 1, z1, z0))
+
+/*
+** add_write_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** add za\.s\[w8, 0, vgx2\], {z0\.s - z1\.s}, z15\.s
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (add_write_single_w8_z0_z15, svuint32x2_t, svuint32_t,
+ svadd_write_single_za32_u32_vg1x2 (w8, z0, z15),
+ svadd_write_za32_vg1x2 (w8, z0, z15))
+
+/*
+** add_write_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** add za\.s\[w8, 0, vgx2\], {z20\.s - z21\.s}, \1\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8_z20_z16, svuint32x2_t, svuint32_t,
+ svadd_write_single_za32_u32_vg1x2 (w8, z20, z16),
+ svadd_write_za32_vg1x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za32_u32_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za32_u32_vg1x4.c
new file mode 100644
index 0000000..747d9c1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za32_u32_vg1x4.c
@@ -0,0 +1,172 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_write_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_0_z0_z0, svuint32x4_t,
+ svadd_write_za32_u32_vg1x4 (0, z0, z0),
+ svadd_write_za32_vg1x4 (0, z0, z0))
+
+/*
+** add_write_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w0_z0_z0, svuint32x4_t,
+ svadd_write_za32_u32_vg1x4 (w0, z0, z0),
+ svadd_write_za32_vg1x4 (w0, z0, z0))
+
+/*
+** add_write_w8_z0_z4:
+** add za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z0_z4, svuint32x4_t,
+ svadd_write_za32_u32_vg1x4 (w8, z0, z4),
+ svadd_write_za32_vg1x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** add_write_w8_z0_z18:
+** ...
+** add za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z0_z18, svuint32x4_t,
+ svadd_write_za32_u32_vg1x4 (w8, z0, z18),
+ svadd_write_za32_vg1x4 (w8, z0, z18))
+
+/*
+** add_write_w8_z18_z28:
+** ...
+** add za\.s\[w8, 0, vgx4\], [^\n]+, {z28\.s - z31\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z18_z28, svuint32x4_t,
+ svadd_write_za32_u32_vg1x4 (w8, z18, z28),
+ svadd_write_za32_vg1x4 (w8, z18, z28))
+
+/*
+** add_write_w8_z28_z23:
+** ...
+** add za\.s\[w8, 0, vgx4\], {z28\.s - z31\.s}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z28_z23, svuint32x4_t,
+ svadd_write_za32_u32_vg1x4 (w8, z28, z23),
+ svadd_write_za32_vg1x4 (w8, z28, z23))
+
+/*
+** add_write_w8p7_z4_z0:
+** add za\.s\[w8, 7, vgx4\], {z4\.s - z7\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8p7_z4_z0, svuint32x4_t,
+ svadd_write_za32_u32_vg1x4 (w8 + 7, z4, z0),
+ svadd_write_za32_vg1x4 (w8 + 7, z4, z0))
+
+/*
+** add_write_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.s\[\1, 0, vgx4\], {z4\.s - z7\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8p8_z4_z4, svuint32x4_t,
+ svadd_write_za32_u32_vg1x4 (w8 + 8, z4, z4),
+ svadd_write_za32_vg1x4 (w8 + 8, z4, z4))
+
+/*
+** add_write_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** add za\.s\[\1, 0, vgx4\], {z4\.s - z7\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_write_w8m1_z4_z0, svuint32x4_t,
+ svadd_write_za32_u32_vg1x4 (w8 - 1, z4, z0),
+ svadd_write_za32_vg1x4 (w8 - 1, z4, z0))
+
+/*
+** add_write_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_0_z1_z0, svuint32x4_t, svuint32_t,
+ svadd_write_single_za32_u32_vg1x4 (0, z1, z0),
+ svadd_write_za32_vg1x4 (0, z1, z0))
+
+/*
+** add_write_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w0_z1_z0, svuint32x4_t, svuint32_t,
+ svadd_write_single_za32_u32_vg1x4 (w0, z1, z0),
+ svadd_write_za32_vg1x4 (w0, z1, z0))
+
+/*
+** add_write_single_w8_z1_z0:
+** add za\.s\[w8, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8_z1_z0, svuint32x4_t, svuint32_t,
+ svadd_write_single_za32_u32_vg1x4 (w8, z1, z0),
+ svadd_write_za32_vg1x4 (w8, z1, z0))
+
+/*
+** add_write_single_w8p7_z1_z0:
+** add za\.s\[w8, 7, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8p7_z1_z0, svuint32x4_t, svuint32_t,
+ svadd_write_single_za32_u32_vg1x4 (w8 + 7, z1, z0),
+ svadd_write_za32_vg1x4 (w8 + 7, z1, z0))
+
+/*
+** add_write_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8p8_z1_z0, svuint32x4_t, svuint32_t,
+ svadd_write_single_za32_u32_vg1x4 (w8 + 8, z1, z0),
+ svadd_write_za32_vg1x4 (w8 + 8, z1, z0))
+
+/*
+** add_write_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** add za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w0m1_z1_z0, svuint32x4_t, svuint32_t,
+ svadd_write_single_za32_u32_vg1x4 (w0 - 1, z1, z0),
+ svadd_write_za32_vg1x4 (w0 - 1, z1, z0))
+
+/*
+** add_write_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** add za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}, z15\.s
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (add_write_single_w8_z0_z15, svuint32x4_t, svuint32_t,
+ svadd_write_single_za32_u32_vg1x4 (w8, z0, z15),
+ svadd_write_za32_vg1x4 (w8, z0, z15))
+
+/*
+** add_write_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** add za\.s\[w8, 0, vgx4\], {z20\.s - z23\.s}, \1\.s
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8_z20_z16, svuint32x4_t, svuint32_t,
+ svadd_write_single_za32_u32_vg1x4 (w8, z20, z16),
+ svadd_write_za32_vg1x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za64_s64_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za64_s64_vg1x2.c
new file mode 100644
index 0000000..05e6d30
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za64_s64_vg1x2.c
@@ -0,0 +1,182 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** add_write_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_0_z0_z0, svint64x2_t,
+ svadd_write_za64_s64_vg1x2 (0, z0, z0),
+ svadd_write_za64_vg1x2 (0, z0, z0))
+
+/*
+** add_write_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w0_z0_z0, svint64x2_t,
+ svadd_write_za64_s64_vg1x2 (w0, z0, z0),
+ svadd_write_za64_vg1x2 (w0, z0, z0))
+
+/*
+** add_write_w8_z0_z4:
+** add za\.d\[w8, 0, vgx2\], {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z0_z4, svint64x2_t,
+ svadd_write_za64_s64_vg1x2 (w8, z0, z4),
+ svadd_write_za64_vg1x2 (w8, z0, z4))
+
+/*
+** add_write_w8_z4_z18:
+** add za\.d\[w8, 0, vgx2\], {z4\.d - z5\.d}, {z18\.d - z19\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z4_z18, svint64x2_t,
+ svadd_write_za64_s64_vg1x2 (w8, z4, z18),
+ svadd_write_za64_vg1x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** add_write_w8_z23_z0:
+** ...
+** add za\.d\[w8, 0, vgx2\], [^\n]+, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z23_z0, svint64x2_t,
+ svadd_write_za64_s64_vg1x2 (w8, z23, z0),
+ svadd_write_za64_vg1x2 (w8, z23, z0))
+
+/*
+** add_write_w8_z18_z23:
+** ...
+** add za\.d\[w8, 0, vgx2\], {z18\.d - z19\.d}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z18_z23, svint64x2_t,
+ svadd_write_za64_s64_vg1x2 (w8, z18, z23),
+ svadd_write_za64_vg1x2 (w8, z18, z23))
+
+/*
+** add_write_w8_z4_z28:
+** add za\.d\[w8, 0, vgx2\], {z4\.d - z5\.d}, {z28\.d - z29\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z4_z28, svint64x2_t,
+ svadd_write_za64_s64_vg1x2 (w8, z4, z28),
+ svadd_write_za64_vg1x2 (w8, z4, z28))
+
+/*
+** add_write_w8p7_z4_z0:
+** add za\.d\[w8, 7, vgx2\], {z4\.d - z5\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8p7_z4_z0, svint64x2_t,
+ svadd_write_za64_s64_vg1x2 (w8 + 7, z4, z0),
+ svadd_write_za64_vg1x2 (w8 + 7, z4, z0))
+
+/*
+** add_write_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.d\[\1, 0, vgx2\], {z4\.d - z5\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8p8_z4_z4, svint64x2_t,
+ svadd_write_za64_s64_vg1x2 (w8 + 8, z4, z4),
+ svadd_write_za64_vg1x2 (w8 + 8, z4, z4))
+
+/*
+** add_write_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** add za\.d\[\1, 0, vgx2\], {z4\.d - z5\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8m1_z4_z0, svint64x2_t,
+ svadd_write_za64_s64_vg1x2 (w8 - 1, z4, z0),
+ svadd_write_za64_vg1x2 (w8 - 1, z4, z0))
+
+/*
+** add_write_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_0_z1_z0, svint64x2_t, svint64_t,
+ svadd_write_single_za64_s64_vg1x2 (0, z1, z0),
+ svadd_write_za64_vg1x2 (0, z1, z0))
+
+/*
+** add_write_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w0_z1_z0, svint64x2_t, svint64_t,
+ svadd_write_single_za64_s64_vg1x2 (w0, z1, z0),
+ svadd_write_za64_vg1x2 (w0, z1, z0))
+
+/*
+** add_write_single_w8_z1_z0:
+** add za\.d\[w8, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8_z1_z0, svint64x2_t, svint64_t,
+ svadd_write_single_za64_s64_vg1x2 (w8, z1, z0),
+ svadd_write_za64_vg1x2 (w8, z1, z0))
+
+/*
+** add_write_single_w8p7_z1_z0:
+** add za\.d\[w8, 7, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8p7_z1_z0, svint64x2_t, svint64_t,
+ svadd_write_single_za64_s64_vg1x2 (w8 + 7, z1, z0),
+ svadd_write_za64_vg1x2 (w8 + 7, z1, z0))
+
+/*
+** add_write_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8p8_z1_z0, svint64x2_t, svint64_t,
+ svadd_write_single_za64_s64_vg1x2 (w8 + 8, z1, z0),
+ svadd_write_za64_vg1x2 (w8 + 8, z1, z0))
+
+/*
+** add_write_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** add za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w0m1_z1_z0, svint64x2_t, svint64_t,
+ svadd_write_single_za64_s64_vg1x2 (w0 - 1, z1, z0),
+ svadd_write_za64_vg1x2 (w0 - 1, z1, z0))
+
+/*
+** add_write_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** add za\.d\[w8, 0, vgx2\], {z0\.d - z1\.d}, z15\.d
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (add_write_single_w8_z0_z15, svint64x2_t, svint64_t,
+ svadd_write_single_za64_s64_vg1x2 (w8, z0, z15),
+ svadd_write_za64_vg1x2 (w8, z0, z15))
+
+/*
+** add_write_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** add za\.d\[w8, 0, vgx2\], {z20\.d - z21\.d}, \1\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8_z20_z16, svint64x2_t, svint64_t,
+ svadd_write_single_za64_s64_vg1x2 (w8, z20, z16),
+ svadd_write_za64_vg1x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za64_s64_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za64_s64_vg1x4.c
new file mode 100644
index 0000000..1c69490
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za64_s64_vg1x4.c
@@ -0,0 +1,174 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** add_write_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_0_z0_z0, svint64x4_t,
+ svadd_write_za64_s64_vg1x4 (0, z0, z0),
+ svadd_write_za64_vg1x4 (0, z0, z0))
+
+/*
+** add_write_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w0_z0_z0, svint64x4_t,
+ svadd_write_za64_s64_vg1x4 (w0, z0, z0),
+ svadd_write_za64_vg1x4 (w0, z0, z0))
+
+/*
+** add_write_w8_z0_z4:
+** add za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z0_z4, svint64x4_t,
+ svadd_write_za64_s64_vg1x4 (w8, z0, z4),
+ svadd_write_za64_vg1x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** add_write_w8_z0_z18:
+** ...
+** add za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z0_z18, svint64x4_t,
+ svadd_write_za64_s64_vg1x4 (w8, z0, z18),
+ svadd_write_za64_vg1x4 (w8, z0, z18))
+
+/*
+** add_write_w8_z18_z28:
+** ...
+** add za\.d\[w8, 0, vgx4\], [^\n]+, {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z18_z28, svint64x4_t,
+ svadd_write_za64_s64_vg1x4 (w8, z18, z28),
+ svadd_write_za64_vg1x4 (w8, z18, z28))
+
+/*
+** add_write_w8_z28_z23:
+** ...
+** add za\.d\[w8, 0, vgx4\], {z28\.d - z31\.d}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z28_z23, svint64x4_t,
+ svadd_write_za64_s64_vg1x4 (w8, z28, z23),
+ svadd_write_za64_vg1x4 (w8, z28, z23))
+
+/*
+** add_write_w8p7_z4_z0:
+** add za\.d\[w8, 7, vgx4\], {z4\.d - z7\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8p7_z4_z0, svint64x4_t,
+ svadd_write_za64_s64_vg1x4 (w8 + 7, z4, z0),
+ svadd_write_za64_vg1x4 (w8 + 7, z4, z0))
+
+/*
+** add_write_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.d\[\1, 0, vgx4\], {z4\.d - z7\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8p8_z4_z4, svint64x4_t,
+ svadd_write_za64_s64_vg1x4 (w8 + 8, z4, z4),
+ svadd_write_za64_vg1x4 (w8 + 8, z4, z4))
+
+/*
+** add_write_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** add za\.d\[\1, 0, vgx4\], {z4\.d - z7\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8m1_z4_z0, svint64x4_t,
+ svadd_write_za64_s64_vg1x4 (w8 - 1, z4, z0),
+ svadd_write_za64_vg1x4 (w8 - 1, z4, z0))
+
+/*
+** add_write_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_0_z1_z0, svint64x4_t, svint64_t,
+ svadd_write_single_za64_s64_vg1x4 (0, z1, z0),
+ svadd_write_za64_vg1x4 (0, z1, z0))
+
+/*
+** add_write_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w0_z1_z0, svint64x4_t, svint64_t,
+ svadd_write_single_za64_s64_vg1x4 (w0, z1, z0),
+ svadd_write_za64_vg1x4 (w0, z1, z0))
+
+/*
+** add_write_single_w8_z1_z0:
+** add za\.d\[w8, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8_z1_z0, svint64x4_t, svint64_t,
+ svadd_write_single_za64_s64_vg1x4 (w8, z1, z0),
+ svadd_write_za64_vg1x4 (w8, z1, z0))
+
+/*
+** add_write_single_w8p7_z1_z0:
+** add za\.d\[w8, 7, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8p7_z1_z0, svint64x4_t, svint64_t,
+ svadd_write_single_za64_s64_vg1x4 (w8 + 7, z1, z0),
+ svadd_write_za64_vg1x4 (w8 + 7, z1, z0))
+
+/*
+** add_write_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8p8_z1_z0, svint64x4_t, svint64_t,
+ svadd_write_single_za64_s64_vg1x4 (w8 + 8, z1, z0),
+ svadd_write_za64_vg1x4 (w8 + 8, z1, z0))
+
+/*
+** add_write_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** add za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w0m1_z1_z0, svint64x4_t, svint64_t,
+ svadd_write_single_za64_s64_vg1x4 (w0 - 1, z1, z0),
+ svadd_write_za64_vg1x4 (w0 - 1, z1, z0))
+
+/*
+** add_write_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** add za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}, z15\.d
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (add_write_single_w8_z0_z15, svint64x4_t, svint64_t,
+ svadd_write_single_za64_s64_vg1x4 (w8, z0, z15),
+ svadd_write_za64_vg1x4 (w8, z0, z15))
+
+/*
+** add_write_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** add za\.d\[w8, 0, vgx4\], {z20\.d - z23\.d}, \1\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8_z20_z16, svint64x4_t, svint64_t,
+ svadd_write_single_za64_s64_vg1x4 (w8, z20, z16),
+ svadd_write_za64_vg1x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za64_u64_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za64_u64_vg1x2.c
new file mode 100644
index 0000000..1c31401
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za64_u64_vg1x2.c
@@ -0,0 +1,182 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** add_write_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_0_z0_z0, svuint64x2_t,
+ svadd_write_za64_u64_vg1x2 (0, z0, z0),
+ svadd_write_za64_vg1x2 (0, z0, z0))
+
+/*
+** add_write_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w0_z0_z0, svuint64x2_t,
+ svadd_write_za64_u64_vg1x2 (w0, z0, z0),
+ svadd_write_za64_vg1x2 (w0, z0, z0))
+
+/*
+** add_write_w8_z0_z4:
+** add za\.d\[w8, 0, vgx2\], {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z0_z4, svuint64x2_t,
+ svadd_write_za64_u64_vg1x2 (w8, z0, z4),
+ svadd_write_za64_vg1x2 (w8, z0, z4))
+
+/*
+** add_write_w8_z4_z18:
+** add za\.d\[w8, 0, vgx2\], {z4\.d - z5\.d}, {z18\.d - z19\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z4_z18, svuint64x2_t,
+ svadd_write_za64_u64_vg1x2 (w8, z4, z18),
+ svadd_write_za64_vg1x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** add_write_w8_z23_z0:
+** ...
+** add za\.d\[w8, 0, vgx2\], [^\n]+, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z23_z0, svuint64x2_t,
+ svadd_write_za64_u64_vg1x2 (w8, z23, z0),
+ svadd_write_za64_vg1x2 (w8, z23, z0))
+
+/*
+** add_write_w8_z18_z23:
+** ...
+** add za\.d\[w8, 0, vgx2\], {z18\.d - z19\.d}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z18_z23, svuint64x2_t,
+ svadd_write_za64_u64_vg1x2 (w8, z18, z23),
+ svadd_write_za64_vg1x2 (w8, z18, z23))
+
+/*
+** add_write_w8_z4_z28:
+** add za\.d\[w8, 0, vgx2\], {z4\.d - z5\.d}, {z28\.d - z29\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z4_z28, svuint64x2_t,
+ svadd_write_za64_u64_vg1x2 (w8, z4, z28),
+ svadd_write_za64_vg1x2 (w8, z4, z28))
+
+/*
+** add_write_w8p7_z4_z0:
+** add za\.d\[w8, 7, vgx2\], {z4\.d - z5\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8p7_z4_z0, svuint64x2_t,
+ svadd_write_za64_u64_vg1x2 (w8 + 7, z4, z0),
+ svadd_write_za64_vg1x2 (w8 + 7, z4, z0))
+
+/*
+** add_write_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.d\[\1, 0, vgx2\], {z4\.d - z5\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8p8_z4_z4, svuint64x2_t,
+ svadd_write_za64_u64_vg1x2 (w8 + 8, z4, z4),
+ svadd_write_za64_vg1x2 (w8 + 8, z4, z4))
+
+/*
+** add_write_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** add za\.d\[\1, 0, vgx2\], {z4\.d - z5\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8m1_z4_z0, svuint64x2_t,
+ svadd_write_za64_u64_vg1x2 (w8 - 1, z4, z0),
+ svadd_write_za64_vg1x2 (w8 - 1, z4, z0))
+
+/*
+** add_write_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_0_z1_z0, svuint64x2_t, svuint64_t,
+ svadd_write_single_za64_u64_vg1x2 (0, z1, z0),
+ svadd_write_za64_vg1x2 (0, z1, z0))
+
+/*
+** add_write_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w0_z1_z0, svuint64x2_t, svuint64_t,
+ svadd_write_single_za64_u64_vg1x2 (w0, z1, z0),
+ svadd_write_za64_vg1x2 (w0, z1, z0))
+
+/*
+** add_write_single_w8_z1_z0:
+** add za\.d\[w8, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8_z1_z0, svuint64x2_t, svuint64_t,
+ svadd_write_single_za64_u64_vg1x2 (w8, z1, z0),
+ svadd_write_za64_vg1x2 (w8, z1, z0))
+
+/*
+** add_write_single_w8p7_z1_z0:
+** add za\.d\[w8, 7, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8p7_z1_z0, svuint64x2_t, svuint64_t,
+ svadd_write_single_za64_u64_vg1x2 (w8 + 7, z1, z0),
+ svadd_write_za64_vg1x2 (w8 + 7, z1, z0))
+
+/*
+** add_write_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8p8_z1_z0, svuint64x2_t, svuint64_t,
+ svadd_write_single_za64_u64_vg1x2 (w8 + 8, z1, z0),
+ svadd_write_za64_vg1x2 (w8 + 8, z1, z0))
+
+/*
+** add_write_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** add za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w0m1_z1_z0, svuint64x2_t, svuint64_t,
+ svadd_write_single_za64_u64_vg1x2 (w0 - 1, z1, z0),
+ svadd_write_za64_vg1x2 (w0 - 1, z1, z0))
+
+/*
+** add_write_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** add za\.d\[w8, 0, vgx2\], {z0\.d - z1\.d}, z15\.d
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (add_write_single_w8_z0_z15, svuint64x2_t, svuint64_t,
+ svadd_write_single_za64_u64_vg1x2 (w8, z0, z15),
+ svadd_write_za64_vg1x2 (w8, z0, z15))
+
+/*
+** add_write_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** add za\.d\[w8, 0, vgx2\], {z20\.d - z21\.d}, \1\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8_z20_z16, svuint64x2_t, svuint64_t,
+ svadd_write_single_za64_u64_vg1x2 (w8, z20, z16),
+ svadd_write_za64_vg1x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za64_u64_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za64_u64_vg1x4.c
new file mode 100644
index 0000000..8574e6c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_write_za64_u64_vg1x4.c
@@ -0,0 +1,174 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** add_write_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_0_z0_z0, svuint64x4_t,
+ svadd_write_za64_u64_vg1x4 (0, z0, z0),
+ svadd_write_za64_vg1x4 (0, z0, z0))
+
+/*
+** add_write_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w0_z0_z0, svuint64x4_t,
+ svadd_write_za64_u64_vg1x4 (w0, z0, z0),
+ svadd_write_za64_vg1x4 (w0, z0, z0))
+
+/*
+** add_write_w8_z0_z4:
+** add za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z0_z4, svuint64x4_t,
+ svadd_write_za64_u64_vg1x4 (w8, z0, z4),
+ svadd_write_za64_vg1x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** add_write_w8_z0_z18:
+** ...
+** add za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z0_z18, svuint64x4_t,
+ svadd_write_za64_u64_vg1x4 (w8, z0, z18),
+ svadd_write_za64_vg1x4 (w8, z0, z18))
+
+/*
+** add_write_w8_z18_z28:
+** ...
+** add za\.d\[w8, 0, vgx4\], [^\n]+, {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z18_z28, svuint64x4_t,
+ svadd_write_za64_u64_vg1x4 (w8, z18, z28),
+ svadd_write_za64_vg1x4 (w8, z18, z28))
+
+/*
+** add_write_w8_z28_z23:
+** ...
+** add za\.d\[w8, 0, vgx4\], {z28\.d - z31\.d}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_write_w8_z28_z23, svuint64x4_t,
+ svadd_write_za64_u64_vg1x4 (w8, z28, z23),
+ svadd_write_za64_vg1x4 (w8, z28, z23))
+
+/*
+** add_write_w8p7_z4_z0:
+** add za\.d\[w8, 7, vgx4\], {z4\.d - z7\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8p7_z4_z0, svuint64x4_t,
+ svadd_write_za64_u64_vg1x4 (w8 + 7, z4, z0),
+ svadd_write_za64_vg1x4 (w8 + 7, z4, z0))
+
+/*
+** add_write_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.d\[\1, 0, vgx4\], {z4\.d - z7\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8p8_z4_z4, svuint64x4_t,
+ svadd_write_za64_u64_vg1x4 (w8 + 8, z4, z4),
+ svadd_write_za64_vg1x4 (w8 + 8, z4, z4))
+
+/*
+** add_write_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** add za\.d\[\1, 0, vgx4\], {z4\.d - z7\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_write_w8m1_z4_z0, svuint64x4_t,
+ svadd_write_za64_u64_vg1x4 (w8 - 1, z4, z0),
+ svadd_write_za64_vg1x4 (w8 - 1, z4, z0))
+
+/*
+** add_write_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_0_z1_z0, svuint64x4_t, svuint64_t,
+ svadd_write_single_za64_u64_vg1x4 (0, z1, z0),
+ svadd_write_za64_vg1x4 (0, z1, z0))
+
+/*
+** add_write_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w0_z1_z0, svuint64x4_t, svuint64_t,
+ svadd_write_single_za64_u64_vg1x4 (w0, z1, z0),
+ svadd_write_za64_vg1x4 (w0, z1, z0))
+
+/*
+** add_write_single_w8_z1_z0:
+** add za\.d\[w8, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8_z1_z0, svuint64x4_t, svuint64_t,
+ svadd_write_single_za64_u64_vg1x4 (w8, z1, z0),
+ svadd_write_za64_vg1x4 (w8, z1, z0))
+
+/*
+** add_write_single_w8p7_z1_z0:
+** add za\.d\[w8, 7, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8p7_z1_z0, svuint64x4_t, svuint64_t,
+ svadd_write_single_za64_u64_vg1x4 (w8 + 7, z1, z0),
+ svadd_write_za64_vg1x4 (w8 + 7, z1, z0))
+
+/*
+** add_write_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8p8_z1_z0, svuint64x4_t, svuint64_t,
+ svadd_write_single_za64_u64_vg1x4 (w8 + 8, z1, z0),
+ svadd_write_za64_vg1x4 (w8 + 8, z1, z0))
+
+/*
+** add_write_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** add za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w0m1_z1_z0, svuint64x4_t, svuint64_t,
+ svadd_write_single_za64_u64_vg1x4 (w0 - 1, z1, z0),
+ svadd_write_za64_vg1x4 (w0 - 1, z1, z0))
+
+/*
+** add_write_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** add za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}, z15\.d
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (add_write_single_w8_z0_z15, svuint64x4_t, svuint64_t,
+ svadd_write_single_za64_u64_vg1x4 (w8, z0, z15),
+ svadd_write_za64_vg1x4 (w8, z0, z15))
+
+/*
+** add_write_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** add za\.d\[w8, 0, vgx4\], {z20\.d - z23\.d}, \1\.d
+** ret
+*/
+TEST_ZA_SINGLE (add_write_single_w8_z20_z16, svuint64x4_t, svuint64_t,
+ svadd_write_single_za64_u64_vg1x4 (w8, z20, z16),
+ svadd_write_za64_vg1x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_f32_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_f32_vg1x2.c
new file mode 100644
index 0000000..25e7217
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_f32_vg1x2.c
@@ -0,0 +1,122 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fadd za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_0_z0, svfloat32x2_t,
+ svadd_za32_f32_vg1x2 (0, z0),
+ svadd_za32_vg1x2 (0, z0))
+
+/*
+** add_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** fadd za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w0_z0, svfloat32x2_t,
+ svadd_za32_f32_vg1x2 (w0, z0),
+ svadd_za32_vg1x2 (w0, z0))
+
+/*
+** add_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** fadd za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w7_z0, svfloat32x2_t,
+ svadd_za32_f32_vg1x2 (w7, z0),
+ svadd_za32_vg1x2 (w7, z0))
+
+/*
+** add_w8_z0:
+** fadd za\.s\[w8, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8_z0, svfloat32x2_t,
+ svadd_za32_f32_vg1x2 (w8, z0),
+ svadd_za32_vg1x2 (w8, z0))
+
+/*
+** add_w11_z0:
+** fadd za\.s\[w11, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w11_z0, svfloat32x2_t,
+ svadd_za32_f32_vg1x2 (w11, z0),
+ svadd_za32_vg1x2 (w11, z0))
+
+
+/*
+** add_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** fadd za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w12_z0, svfloat32x2_t,
+ svadd_za32_f32_vg1x2 (w12, z0),
+ svadd_za32_vg1x2 (w12, z0))
+
+/*
+** add_w8p7_z0:
+** fadd za\.s\[w8, 7, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8p7_z0, svfloat32x2_t,
+ svadd_za32_f32_vg1x2 (w8 + 7, z0),
+ svadd_za32_vg1x2 (w8 + 7, z0))
+
+/*
+** add_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** fadd za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8p8_z0, svfloat32x2_t,
+ svadd_za32_f32_vg1x2 (w8 + 8, z0),
+ svadd_za32_vg1x2 (w8 + 8, z0))
+
+/*
+** add_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fadd za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8m1_z0, svfloat32x2_t,
+ svadd_za32_f32_vg1x2 (w8 - 1, z0),
+ svadd_za32_vg1x2 (w8 - 1, z0))
+
+/*
+** add_w8_z18:
+** fadd za\.s\[w8, 0, vgx2\], {z18\.s - z19\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8_z18, svfloat32x2_t,
+ svadd_za32_f32_vg1x2 (w8, z18),
+ svadd_za32_vg1x2 (w8, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** add_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** fadd za\.s\[w8, 0, vgx2\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_w8_z23, svfloat32x2_t,
+ svadd_za32_f32_vg1x2 (w8, z23),
+ svadd_za32_vg1x2 (w8, z23))
+
+/*
+** add_w8_z28:
+** fadd za\.s\[w8, 0, vgx2\], {z28\.s - z29\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8_z28, svfloat32x2_t,
+ svadd_za32_f32_vg1x2 (w8, z28),
+ svadd_za32_vg1x2 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_f32_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_f32_vg1x4.c
new file mode 100644
index 0000000..3fb9da0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_f32_vg1x4.c
@@ -0,0 +1,137 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fadd za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_0_z0, svfloat32x4_t,
+ svadd_za32_f32_vg1x4 (0, z0),
+ svadd_za32_vg1x4 (0, z0))
+
+/*
+** add_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** fadd za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w0_z0, svfloat32x4_t,
+ svadd_za32_f32_vg1x4 (w0, z0),
+ svadd_za32_vg1x4 (w0, z0))
+
+/*
+** add_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** fadd za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w7_z0, svfloat32x4_t,
+ svadd_za32_f32_vg1x4 (w7, z0),
+ svadd_za32_vg1x4 (w7, z0))
+
+/*
+** add_w8_z0:
+** fadd za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8_z0, svfloat32x4_t,
+ svadd_za32_f32_vg1x4 (w8, z0),
+ svadd_za32_vg1x4 (w8, z0))
+
+/*
+** add_w11_z0:
+** fadd za\.s\[w11, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w11_z0, svfloat32x4_t,
+ svadd_za32_f32_vg1x4 (w11, z0),
+ svadd_za32_vg1x4 (w11, z0))
+
+
+/*
+** add_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** fadd za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w12_z0, svfloat32x4_t,
+ svadd_za32_f32_vg1x4 (w12, z0),
+ svadd_za32_vg1x4 (w12, z0))
+
+/*
+** add_w8p7_z0:
+** fadd za\.s\[w8, 7, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8p7_z0, svfloat32x4_t,
+ svadd_za32_f32_vg1x4 (w8 + 7, z0),
+ svadd_za32_vg1x4 (w8 + 7, z0))
+
+/*
+** add_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** fadd za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8p8_z0, svfloat32x4_t,
+ svadd_za32_f32_vg1x4 (w8 + 8, z0),
+ svadd_za32_vg1x4 (w8 + 8, z0))
+
+/*
+** add_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fadd za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8m1_z0, svfloat32x4_t,
+ svadd_za32_f32_vg1x4 (w8 - 1, z0),
+ svadd_za32_vg1x4 (w8 - 1, z0))
+
+/*
+** add_w8_z4:
+** fadd za\.s\[w8, 0, vgx4\], {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8_z4, svfloat32x4_t,
+ svadd_za32_f32_vg1x4 (w8, z4),
+ svadd_za32_vg1x4 (w8, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** add_w8_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fadd za\.s\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_w8_z18, svfloat32x4_t,
+ svadd_za32_f32_vg1x4 (w8, z18),
+ svadd_za32_vg1x4 (w8, z18))
+
+/*
+** add_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fadd za\.s\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_w8_z23, svfloat32x4_t,
+ svadd_za32_f32_vg1x4 (w8, z23),
+ svadd_za32_vg1x4 (w8, z23))
+
+/*
+** add_w8_z28:
+** fadd za\.s\[w8, 0, vgx4\], {z28\.s - z31\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8_z28, svfloat32x4_t,
+ svadd_za32_f32_vg1x4 (w8, z28),
+ svadd_za32_vg1x4 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_s32_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_s32_vg1x2.c
new file mode 100644
index 0000000..424a88aa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_s32_vg1x2.c
@@ -0,0 +1,122 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_0_z0, svint32x2_t,
+ svadd_za32_s32_vg1x2 (0, z0),
+ svadd_za32_vg1x2 (0, z0))
+
+/*
+** add_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w0_z0, svint32x2_t,
+ svadd_za32_s32_vg1x2 (w0, z0),
+ svadd_za32_vg1x2 (w0, z0))
+
+/*
+** add_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** add za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w7_z0, svint32x2_t,
+ svadd_za32_s32_vg1x2 (w7, z0),
+ svadd_za32_vg1x2 (w7, z0))
+
+/*
+** add_w8_z0:
+** add za\.s\[w8, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8_z0, svint32x2_t,
+ svadd_za32_s32_vg1x2 (w8, z0),
+ svadd_za32_vg1x2 (w8, z0))
+
+/*
+** add_w11_z0:
+** add za\.s\[w11, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w11_z0, svint32x2_t,
+ svadd_za32_s32_vg1x2 (w11, z0),
+ svadd_za32_vg1x2 (w11, z0))
+
+
+/*
+** add_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** add za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w12_z0, svint32x2_t,
+ svadd_za32_s32_vg1x2 (w12, z0),
+ svadd_za32_vg1x2 (w12, z0))
+
+/*
+** add_w8p7_z0:
+** add za\.s\[w8, 7, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8p7_z0, svint32x2_t,
+ svadd_za32_s32_vg1x2 (w8 + 7, z0),
+ svadd_za32_vg1x2 (w8 + 7, z0))
+
+/*
+** add_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8p8_z0, svint32x2_t,
+ svadd_za32_s32_vg1x2 (w8 + 8, z0),
+ svadd_za32_vg1x2 (w8 + 8, z0))
+
+/*
+** add_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** add za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8m1_z0, svint32x2_t,
+ svadd_za32_s32_vg1x2 (w8 - 1, z0),
+ svadd_za32_vg1x2 (w8 - 1, z0))
+
+/*
+** add_w8_z18:
+** add za\.s\[w8, 0, vgx2\], {z18\.s - z19\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8_z18, svint32x2_t,
+ svadd_za32_s32_vg1x2 (w8, z18),
+ svadd_za32_vg1x2 (w8, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** add_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** add za\.s\[w8, 0, vgx2\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_w8_z23, svint32x2_t,
+ svadd_za32_s32_vg1x2 (w8, z23),
+ svadd_za32_vg1x2 (w8, z23))
+
+/*
+** add_w8_z28:
+** add za\.s\[w8, 0, vgx2\], {z28\.s - z29\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8_z28, svint32x2_t,
+ svadd_za32_s32_vg1x2 (w8, z28),
+ svadd_za32_vg1x2 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_s32_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_s32_vg1x4.c
new file mode 100644
index 0000000..40b6a39
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_s32_vg1x4.c
@@ -0,0 +1,137 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_0_z0, svint32x4_t,
+ svadd_za32_s32_vg1x4 (0, z0),
+ svadd_za32_vg1x4 (0, z0))
+
+/*
+** add_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w0_z0, svint32x4_t,
+ svadd_za32_s32_vg1x4 (w0, z0),
+ svadd_za32_vg1x4 (w0, z0))
+
+/*
+** add_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** add za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w7_z0, svint32x4_t,
+ svadd_za32_s32_vg1x4 (w7, z0),
+ svadd_za32_vg1x4 (w7, z0))
+
+/*
+** add_w8_z0:
+** add za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8_z0, svint32x4_t,
+ svadd_za32_s32_vg1x4 (w8, z0),
+ svadd_za32_vg1x4 (w8, z0))
+
+/*
+** add_w11_z0:
+** add za\.s\[w11, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w11_z0, svint32x4_t,
+ svadd_za32_s32_vg1x4 (w11, z0),
+ svadd_za32_vg1x4 (w11, z0))
+
+
+/*
+** add_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** add za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w12_z0, svint32x4_t,
+ svadd_za32_s32_vg1x4 (w12, z0),
+ svadd_za32_vg1x4 (w12, z0))
+
+/*
+** add_w8p7_z0:
+** add za\.s\[w8, 7, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8p7_z0, svint32x4_t,
+ svadd_za32_s32_vg1x4 (w8 + 7, z0),
+ svadd_za32_vg1x4 (w8 + 7, z0))
+
+/*
+** add_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8p8_z0, svint32x4_t,
+ svadd_za32_s32_vg1x4 (w8 + 8, z0),
+ svadd_za32_vg1x4 (w8 + 8, z0))
+
+/*
+** add_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** add za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8m1_z0, svint32x4_t,
+ svadd_za32_s32_vg1x4 (w8 - 1, z0),
+ svadd_za32_vg1x4 (w8 - 1, z0))
+
+/*
+** add_w8_z4:
+** add za\.s\[w8, 0, vgx4\], {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8_z4, svint32x4_t,
+ svadd_za32_s32_vg1x4 (w8, z4),
+ svadd_za32_vg1x4 (w8, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** add_w8_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add za\.s\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_w8_z18, svint32x4_t,
+ svadd_za32_s32_vg1x4 (w8, z18),
+ svadd_za32_vg1x4 (w8, z18))
+
+/*
+** add_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add za\.s\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_w8_z23, svint32x4_t,
+ svadd_za32_s32_vg1x4 (w8, z23),
+ svadd_za32_vg1x4 (w8, z23))
+
+/*
+** add_w8_z28:
+** add za\.s\[w8, 0, vgx4\], {z28\.s - z31\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8_z28, svint32x4_t,
+ svadd_za32_s32_vg1x4 (w8, z28),
+ svadd_za32_vg1x4 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_u32_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_u32_vg1x2.c
new file mode 100644
index 0000000..80db23d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_u32_vg1x2.c
@@ -0,0 +1,122 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_0_z0, svuint32x2_t,
+ svadd_za32_u32_vg1x2 (0, z0),
+ svadd_za32_vg1x2 (0, z0))
+
+/*
+** add_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w0_z0, svuint32x2_t,
+ svadd_za32_u32_vg1x2 (w0, z0),
+ svadd_za32_vg1x2 (w0, z0))
+
+/*
+** add_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** add za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w7_z0, svuint32x2_t,
+ svadd_za32_u32_vg1x2 (w7, z0),
+ svadd_za32_vg1x2 (w7, z0))
+
+/*
+** add_w8_z0:
+** add za\.s\[w8, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8_z0, svuint32x2_t,
+ svadd_za32_u32_vg1x2 (w8, z0),
+ svadd_za32_vg1x2 (w8, z0))
+
+/*
+** add_w11_z0:
+** add za\.s\[w11, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w11_z0, svuint32x2_t,
+ svadd_za32_u32_vg1x2 (w11, z0),
+ svadd_za32_vg1x2 (w11, z0))
+
+
+/*
+** add_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** add za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w12_z0, svuint32x2_t,
+ svadd_za32_u32_vg1x2 (w12, z0),
+ svadd_za32_vg1x2 (w12, z0))
+
+/*
+** add_w8p7_z0:
+** add za\.s\[w8, 7, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8p7_z0, svuint32x2_t,
+ svadd_za32_u32_vg1x2 (w8 + 7, z0),
+ svadd_za32_vg1x2 (w8 + 7, z0))
+
+/*
+** add_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8p8_z0, svuint32x2_t,
+ svadd_za32_u32_vg1x2 (w8 + 8, z0),
+ svadd_za32_vg1x2 (w8 + 8, z0))
+
+/*
+** add_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** add za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8m1_z0, svuint32x2_t,
+ svadd_za32_u32_vg1x2 (w8 - 1, z0),
+ svadd_za32_vg1x2 (w8 - 1, z0))
+
+/*
+** add_w8_z18:
+** add za\.s\[w8, 0, vgx2\], {z18\.s - z19\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8_z18, svuint32x2_t,
+ svadd_za32_u32_vg1x2 (w8, z18),
+ svadd_za32_vg1x2 (w8, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** add_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** add za\.s\[w8, 0, vgx2\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_w8_z23, svuint32x2_t,
+ svadd_za32_u32_vg1x2 (w8, z23),
+ svadd_za32_vg1x2 (w8, z23))
+
+/*
+** add_w8_z28:
+** add za\.s\[w8, 0, vgx2\], {z28\.s - z29\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8_z28, svuint32x2_t,
+ svadd_za32_u32_vg1x2 (w8, z28),
+ svadd_za32_vg1x2 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_u32_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_u32_vg1x4.c
new file mode 100644
index 0000000..185b6b0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za32_u32_vg1x4.c
@@ -0,0 +1,137 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** add_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_0_z0, svuint32x4_t,
+ svadd_za32_u32_vg1x4 (0, z0),
+ svadd_za32_vg1x4 (0, z0))
+
+/*
+** add_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w0_z0, svuint32x4_t,
+ svadd_za32_u32_vg1x4 (w0, z0),
+ svadd_za32_vg1x4 (w0, z0))
+
+/*
+** add_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** add za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w7_z0, svuint32x4_t,
+ svadd_za32_u32_vg1x4 (w7, z0),
+ svadd_za32_vg1x4 (w7, z0))
+
+/*
+** add_w8_z0:
+** add za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8_z0, svuint32x4_t,
+ svadd_za32_u32_vg1x4 (w8, z0),
+ svadd_za32_vg1x4 (w8, z0))
+
+/*
+** add_w11_z0:
+** add za\.s\[w11, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w11_z0, svuint32x4_t,
+ svadd_za32_u32_vg1x4 (w11, z0),
+ svadd_za32_vg1x4 (w11, z0))
+
+
+/*
+** add_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** add za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w12_z0, svuint32x4_t,
+ svadd_za32_u32_vg1x4 (w12, z0),
+ svadd_za32_vg1x4 (w12, z0))
+
+/*
+** add_w8p7_z0:
+** add za\.s\[w8, 7, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8p7_z0, svuint32x4_t,
+ svadd_za32_u32_vg1x4 (w8 + 7, z0),
+ svadd_za32_vg1x4 (w8 + 7, z0))
+
+/*
+** add_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8p8_z0, svuint32x4_t,
+ svadd_za32_u32_vg1x4 (w8 + 8, z0),
+ svadd_za32_vg1x4 (w8 + 8, z0))
+
+/*
+** add_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** add za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8m1_z0, svuint32x4_t,
+ svadd_za32_u32_vg1x4 (w8 - 1, z0),
+ svadd_za32_vg1x4 (w8 - 1, z0))
+
+/*
+** add_w8_z4:
+** add za\.s\[w8, 0, vgx4\], {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8_z4, svuint32x4_t,
+ svadd_za32_u32_vg1x4 (w8, z4),
+ svadd_za32_vg1x4 (w8, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** add_w8_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add za\.s\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_w8_z18, svuint32x4_t,
+ svadd_za32_u32_vg1x4 (w8, z18),
+ svadd_za32_vg1x4 (w8, z18))
+
+/*
+** add_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add za\.s\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_w8_z23, svuint32x4_t,
+ svadd_za32_u32_vg1x4 (w8, z23),
+ svadd_za32_vg1x4 (w8, z23))
+
+/*
+** add_w8_z28:
+** add za\.s\[w8, 0, vgx4\], {z28\.s - z31\.s}
+** ret
+*/
+TEST_ZA_XN (add_w8_z28, svuint32x4_t,
+ svadd_za32_u32_vg1x4 (w8, z28),
+ svadd_za32_vg1x4 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_f64_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_f64_vg1x2.c
new file mode 100644
index 0000000..5c9c228
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_f64_vg1x2.c
@@ -0,0 +1,126 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#pragma GCC target "+sme-f64f64"
+
+#include "test_sme2_acle.h"
+
+/*
+** add_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fadd za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_0_z0, svfloat64x2_t,
+ svadd_za64_f64_vg1x2 (0, z0),
+ svadd_za64_vg1x2 (0, z0))
+
+/*
+** add_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** fadd za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w0_z0, svfloat64x2_t,
+ svadd_za64_f64_vg1x2 (w0, z0),
+ svadd_za64_vg1x2 (w0, z0))
+
+/*
+** add_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** fadd za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w7_z0, svfloat64x2_t,
+ svadd_za64_f64_vg1x2 (w7, z0),
+ svadd_za64_vg1x2 (w7, z0))
+
+/*
+** add_w8_z0:
+** fadd za\.d\[w8, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8_z0, svfloat64x2_t,
+ svadd_za64_f64_vg1x2 (w8, z0),
+ svadd_za64_vg1x2 (w8, z0))
+
+/*
+** add_w11_z0:
+** fadd za\.d\[w11, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w11_z0, svfloat64x2_t,
+ svadd_za64_f64_vg1x2 (w11, z0),
+ svadd_za64_vg1x2 (w11, z0))
+
+
+/*
+** add_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** fadd za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w12_z0, svfloat64x2_t,
+ svadd_za64_f64_vg1x2 (w12, z0),
+ svadd_za64_vg1x2 (w12, z0))
+
+/*
+** add_w8p7_z0:
+** fadd za\.d\[w8, 7, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8p7_z0, svfloat64x2_t,
+ svadd_za64_f64_vg1x2 (w8 + 7, z0),
+ svadd_za64_vg1x2 (w8 + 7, z0))
+
+/*
+** add_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** fadd za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8p8_z0, svfloat64x2_t,
+ svadd_za64_f64_vg1x2 (w8 + 8, z0),
+ svadd_za64_vg1x2 (w8 + 8, z0))
+
+/*
+** add_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fadd za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8m1_z0, svfloat64x2_t,
+ svadd_za64_f64_vg1x2 (w8 - 1, z0),
+ svadd_za64_vg1x2 (w8 - 1, z0))
+
+/*
+** add_w8_z18:
+** fadd za\.d\[w8, 0, vgx2\], {z18\.d - z19\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8_z18, svfloat64x2_t,
+ svadd_za64_f64_vg1x2 (w8, z18),
+ svadd_za64_vg1x2 (w8, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** add_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** fadd za\.d\[w8, 0, vgx2\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_w8_z23, svfloat64x2_t,
+ svadd_za64_f64_vg1x2 (w8, z23),
+ svadd_za64_vg1x2 (w8, z23))
+
+/*
+** add_w8_z28:
+** fadd za\.d\[w8, 0, vgx2\], {z28\.d - z29\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8_z28, svfloat64x2_t,
+ svadd_za64_f64_vg1x2 (w8, z28),
+ svadd_za64_vg1x2 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_f64_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_f64_vg1x4.c
new file mode 100644
index 0000000..adc2086
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_f64_vg1x4.c
@@ -0,0 +1,141 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#pragma GCC target "+sme-f64f64"
+
+#include "test_sme2_acle.h"
+
+/*
+** add_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fadd za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_0_z0, svfloat64x4_t,
+ svadd_za64_f64_vg1x4 (0, z0),
+ svadd_za64_vg1x4 (0, z0))
+
+/*
+** add_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** fadd za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w0_z0, svfloat64x4_t,
+ svadd_za64_f64_vg1x4 (w0, z0),
+ svadd_za64_vg1x4 (w0, z0))
+
+/*
+** add_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** fadd za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w7_z0, svfloat64x4_t,
+ svadd_za64_f64_vg1x4 (w7, z0),
+ svadd_za64_vg1x4 (w7, z0))
+
+/*
+** add_w8_z0:
+** fadd za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8_z0, svfloat64x4_t,
+ svadd_za64_f64_vg1x4 (w8, z0),
+ svadd_za64_vg1x4 (w8, z0))
+
+/*
+** add_w11_z0:
+** fadd za\.d\[w11, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w11_z0, svfloat64x4_t,
+ svadd_za64_f64_vg1x4 (w11, z0),
+ svadd_za64_vg1x4 (w11, z0))
+
+
+/*
+** add_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** fadd za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w12_z0, svfloat64x4_t,
+ svadd_za64_f64_vg1x4 (w12, z0),
+ svadd_za64_vg1x4 (w12, z0))
+
+/*
+** add_w8p7_z0:
+** fadd za\.d\[w8, 7, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8p7_z0, svfloat64x4_t,
+ svadd_za64_f64_vg1x4 (w8 + 7, z0),
+ svadd_za64_vg1x4 (w8 + 7, z0))
+
+/*
+** add_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** fadd za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8p8_z0, svfloat64x4_t,
+ svadd_za64_f64_vg1x4 (w8 + 8, z0),
+ svadd_za64_vg1x4 (w8 + 8, z0))
+
+/*
+** add_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fadd za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8m1_z0, svfloat64x4_t,
+ svadd_za64_f64_vg1x4 (w8 - 1, z0),
+ svadd_za64_vg1x4 (w8 - 1, z0))
+
+/*
+** add_w8_z4:
+** fadd za\.d\[w8, 0, vgx4\], {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8_z4, svfloat64x4_t,
+ svadd_za64_f64_vg1x4 (w8, z4),
+ svadd_za64_vg1x4 (w8, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** add_w8_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fadd za\.d\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_w8_z18, svfloat64x4_t,
+ svadd_za64_f64_vg1x4 (w8, z18),
+ svadd_za64_vg1x4 (w8, z18))
+
+/*
+** add_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fadd za\.d\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_w8_z23, svfloat64x4_t,
+ svadd_za64_f64_vg1x4 (w8, z23),
+ svadd_za64_vg1x4 (w8, z23))
+
+/*
+** add_w8_z28:
+** fadd za\.d\[w8, 0, vgx4\], {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8_z28, svfloat64x4_t,
+ svadd_za64_f64_vg1x4 (w8, z28),
+ svadd_za64_vg1x4 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_s64_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_s64_vg1x2.c
new file mode 100644
index 0000000..13aa886
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_s64_vg1x2.c
@@ -0,0 +1,124 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** add_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_0_z0, svint64x2_t,
+ svadd_za64_s64_vg1x2 (0, z0),
+ svadd_za64_vg1x2 (0, z0))
+
+/*
+** add_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w0_z0, svint64x2_t,
+ svadd_za64_s64_vg1x2 (w0, z0),
+ svadd_za64_vg1x2 (w0, z0))
+
+/*
+** add_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** add za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w7_z0, svint64x2_t,
+ svadd_za64_s64_vg1x2 (w7, z0),
+ svadd_za64_vg1x2 (w7, z0))
+
+/*
+** add_w8_z0:
+** add za\.d\[w8, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8_z0, svint64x2_t,
+ svadd_za64_s64_vg1x2 (w8, z0),
+ svadd_za64_vg1x2 (w8, z0))
+
+/*
+** add_w11_z0:
+** add za\.d\[w11, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w11_z0, svint64x2_t,
+ svadd_za64_s64_vg1x2 (w11, z0),
+ svadd_za64_vg1x2 (w11, z0))
+
+
+/*
+** add_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** add za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w12_z0, svint64x2_t,
+ svadd_za64_s64_vg1x2 (w12, z0),
+ svadd_za64_vg1x2 (w12, z0))
+
+/*
+** add_w8p7_z0:
+** add za\.d\[w8, 7, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8p7_z0, svint64x2_t,
+ svadd_za64_s64_vg1x2 (w8 + 7, z0),
+ svadd_za64_vg1x2 (w8 + 7, z0))
+
+/*
+** add_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8p8_z0, svint64x2_t,
+ svadd_za64_s64_vg1x2 (w8 + 8, z0),
+ svadd_za64_vg1x2 (w8 + 8, z0))
+
+/*
+** add_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** add za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8m1_z0, svint64x2_t,
+ svadd_za64_s64_vg1x2 (w8 - 1, z0),
+ svadd_za64_vg1x2 (w8 - 1, z0))
+
+/*
+** add_w8_z18:
+** add za\.d\[w8, 0, vgx2\], {z18\.d - z19\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8_z18, svint64x2_t,
+ svadd_za64_s64_vg1x2 (w8, z18),
+ svadd_za64_vg1x2 (w8, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** add_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** add za\.d\[w8, 0, vgx2\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_w8_z23, svint64x2_t,
+ svadd_za64_s64_vg1x2 (w8, z23),
+ svadd_za64_vg1x2 (w8, z23))
+
+/*
+** add_w8_z28:
+** add za\.d\[w8, 0, vgx2\], {z28\.d - z29\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8_z28, svint64x2_t,
+ svadd_za64_s64_vg1x2 (w8, z28),
+ svadd_za64_vg1x2 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_s64_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_s64_vg1x4.c
new file mode 100644
index 0000000..7b3366c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_s64_vg1x4.c
@@ -0,0 +1,139 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** add_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_0_z0, svint64x4_t,
+ svadd_za64_s64_vg1x4 (0, z0),
+ svadd_za64_vg1x4 (0, z0))
+
+/*
+** add_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w0_z0, svint64x4_t,
+ svadd_za64_s64_vg1x4 (w0, z0),
+ svadd_za64_vg1x4 (w0, z0))
+
+/*
+** add_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** add za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w7_z0, svint64x4_t,
+ svadd_za64_s64_vg1x4 (w7, z0),
+ svadd_za64_vg1x4 (w7, z0))
+
+/*
+** add_w8_z0:
+** add za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8_z0, svint64x4_t,
+ svadd_za64_s64_vg1x4 (w8, z0),
+ svadd_za64_vg1x4 (w8, z0))
+
+/*
+** add_w11_z0:
+** add za\.d\[w11, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w11_z0, svint64x4_t,
+ svadd_za64_s64_vg1x4 (w11, z0),
+ svadd_za64_vg1x4 (w11, z0))
+
+
+/*
+** add_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** add za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w12_z0, svint64x4_t,
+ svadd_za64_s64_vg1x4 (w12, z0),
+ svadd_za64_vg1x4 (w12, z0))
+
+/*
+** add_w8p7_z0:
+** add za\.d\[w8, 7, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8p7_z0, svint64x4_t,
+ svadd_za64_s64_vg1x4 (w8 + 7, z0),
+ svadd_za64_vg1x4 (w8 + 7, z0))
+
+/*
+** add_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8p8_z0, svint64x4_t,
+ svadd_za64_s64_vg1x4 (w8 + 8, z0),
+ svadd_za64_vg1x4 (w8 + 8, z0))
+
+/*
+** add_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** add za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8m1_z0, svint64x4_t,
+ svadd_za64_s64_vg1x4 (w8 - 1, z0),
+ svadd_za64_vg1x4 (w8 - 1, z0))
+
+/*
+** add_w8_z4:
+** add za\.d\[w8, 0, vgx4\], {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8_z4, svint64x4_t,
+ svadd_za64_s64_vg1x4 (w8, z4),
+ svadd_za64_vg1x4 (w8, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** add_w8_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add za\.d\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_w8_z18, svint64x4_t,
+ svadd_za64_s64_vg1x4 (w8, z18),
+ svadd_za64_vg1x4 (w8, z18))
+
+/*
+** add_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add za\.d\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_w8_z23, svint64x4_t,
+ svadd_za64_s64_vg1x4 (w8, z23),
+ svadd_za64_vg1x4 (w8, z23))
+
+/*
+** add_w8_z28:
+** add za\.d\[w8, 0, vgx4\], {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8_z28, svint64x4_t,
+ svadd_za64_s64_vg1x4 (w8, z28),
+ svadd_za64_vg1x4 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_u64_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_u64_vg1x2.c
new file mode 100644
index 0000000..2c68a0e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_u64_vg1x2.c
@@ -0,0 +1,124 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** add_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_0_z0, svuint64x2_t,
+ svadd_za64_u64_vg1x2 (0, z0),
+ svadd_za64_vg1x2 (0, z0))
+
+/*
+** add_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w0_z0, svuint64x2_t,
+ svadd_za64_u64_vg1x2 (w0, z0),
+ svadd_za64_vg1x2 (w0, z0))
+
+/*
+** add_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** add za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w7_z0, svuint64x2_t,
+ svadd_za64_u64_vg1x2 (w7, z0),
+ svadd_za64_vg1x2 (w7, z0))
+
+/*
+** add_w8_z0:
+** add za\.d\[w8, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8_z0, svuint64x2_t,
+ svadd_za64_u64_vg1x2 (w8, z0),
+ svadd_za64_vg1x2 (w8, z0))
+
+/*
+** add_w11_z0:
+** add za\.d\[w11, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w11_z0, svuint64x2_t,
+ svadd_za64_u64_vg1x2 (w11, z0),
+ svadd_za64_vg1x2 (w11, z0))
+
+
+/*
+** add_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** add za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w12_z0, svuint64x2_t,
+ svadd_za64_u64_vg1x2 (w12, z0),
+ svadd_za64_vg1x2 (w12, z0))
+
+/*
+** add_w8p7_z0:
+** add za\.d\[w8, 7, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8p7_z0, svuint64x2_t,
+ svadd_za64_u64_vg1x2 (w8 + 7, z0),
+ svadd_za64_vg1x2 (w8 + 7, z0))
+
+/*
+** add_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8p8_z0, svuint64x2_t,
+ svadd_za64_u64_vg1x2 (w8 + 8, z0),
+ svadd_za64_vg1x2 (w8 + 8, z0))
+
+/*
+** add_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** add za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8m1_z0, svuint64x2_t,
+ svadd_za64_u64_vg1x2 (w8 - 1, z0),
+ svadd_za64_vg1x2 (w8 - 1, z0))
+
+/*
+** add_w8_z18:
+** add za\.d\[w8, 0, vgx2\], {z18\.d - z19\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8_z18, svuint64x2_t,
+ svadd_za64_u64_vg1x2 (w8, z18),
+ svadd_za64_vg1x2 (w8, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** add_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** add za\.d\[w8, 0, vgx2\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_w8_z23, svuint64x2_t,
+ svadd_za64_u64_vg1x2 (w8, z23),
+ svadd_za64_vg1x2 (w8, z23))
+
+/*
+** add_w8_z28:
+** add za\.d\[w8, 0, vgx2\], {z28\.d - z29\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8_z28, svuint64x2_t,
+ svadd_za64_u64_vg1x2 (w8, z28),
+ svadd_za64_vg1x2 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_u64_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_u64_vg1x4.c
new file mode 100644
index 0000000..249e888
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/add_za64_u64_vg1x4.c
@@ -0,0 +1,139 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** add_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** add za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_0_z0, svuint64x4_t,
+ svadd_za64_u64_vg1x4 (0, z0),
+ svadd_za64_vg1x4 (0, z0))
+
+/*
+** add_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** add za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w0_z0, svuint64x4_t,
+ svadd_za64_u64_vg1x4 (w0, z0),
+ svadd_za64_vg1x4 (w0, z0))
+
+/*
+** add_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** add za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w7_z0, svuint64x4_t,
+ svadd_za64_u64_vg1x4 (w7, z0),
+ svadd_za64_vg1x4 (w7, z0))
+
+/*
+** add_w8_z0:
+** add za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8_z0, svuint64x4_t,
+ svadd_za64_u64_vg1x4 (w8, z0),
+ svadd_za64_vg1x4 (w8, z0))
+
+/*
+** add_w11_z0:
+** add za\.d\[w11, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w11_z0, svuint64x4_t,
+ svadd_za64_u64_vg1x4 (w11, z0),
+ svadd_za64_vg1x4 (w11, z0))
+
+
+/*
+** add_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** add za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w12_z0, svuint64x4_t,
+ svadd_za64_u64_vg1x4 (w12, z0),
+ svadd_za64_vg1x4 (w12, z0))
+
+/*
+** add_w8p7_z0:
+** add za\.d\[w8, 7, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8p7_z0, svuint64x4_t,
+ svadd_za64_u64_vg1x4 (w8 + 7, z0),
+ svadd_za64_vg1x4 (w8 + 7, z0))
+
+/*
+** add_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** add za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8p8_z0, svuint64x4_t,
+ svadd_za64_u64_vg1x4 (w8 + 8, z0),
+ svadd_za64_vg1x4 (w8 + 8, z0))
+
+/*
+** add_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** add za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8m1_z0, svuint64x4_t,
+ svadd_za64_u64_vg1x4 (w8 - 1, z0),
+ svadd_za64_vg1x4 (w8 - 1, z0))
+
+/*
+** add_w8_z4:
+** add za\.d\[w8, 0, vgx4\], {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8_z4, svuint64x4_t,
+ svadd_za64_u64_vg1x4 (w8, z4),
+ svadd_za64_vg1x4 (w8, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** add_w8_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add za\.d\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_w8_z18, svuint64x4_t,
+ svadd_za64_u64_vg1x4 (w8, z18),
+ svadd_za64_vg1x4 (w8, z18))
+
+/*
+** add_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** add za\.d\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (add_w8_z23, svuint64x4_t,
+ svadd_za64_u64_vg1x4 (w8, z23),
+ svadd_za64_vg1x4 (w8, z23))
+
+/*
+** add_w8_z28:
+** add za\.d\[w8, 0, vgx4\], {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (add_w8_z28, svuint64x4_t,
+ svadd_za64_u64_vg1x4 (w8, z28),
+ svadd_za64_vg1x4 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bfmlslb_f32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bfmlslb_f32.c
new file mode 100644
index 0000000..f67316c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bfmlslb_f32.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** bfmlalb_f32_tied1:
+** bfmlalb z0\.s, z4\.h, z5\.h
+** ret
+*/
+TEST_DUAL_Z (bfmlalb_f32_tied1, svfloat32_t, svbfloat16_t,
+ z0 = svbfmlalb_f32 (z0, z4, z5),
+ z0 = svbfmlalb (z0, z4, z5))
+
+/*
+** bfmlalb_f32_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z4
+** bfmlalb z0\.s, \1\.h, z1\.h
+** ret
+*/
+TEST_DUAL_Z_REV (bfmlalb_f32_tied2, svfloat32_t, svbfloat16_t,
+ z0_res = svbfmlalb_f32 (z4, z0, z1),
+ z0_res = svbfmlalb (z4, z0, z1))
+
+/*
+** bfmlalb_f32_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z4
+** bfmlalb z0\.s, z1\.h, \1\.h
+** ret
+*/
+TEST_DUAL_Z_REV (bfmlalb_f32_tied3, svfloat32_t, svbfloat16_t,
+ z0_res = svbfmlalb_f32 (z4, z1, z0),
+ z0_res = svbfmlalb (z4, z1, z0))
+
+/*
+** bfmlalb_f32_untied:
+** movprfx z0, z1
+** bfmlalb z0\.s, z4\.h, z5\.h
+** ret
+*/
+TEST_DUAL_Z (bfmlalb_f32_untied, svfloat32_t, svbfloat16_t,
+ z0 = svbfmlalb_f32 (z1, z4, z5),
+ z0 = svbfmlalb (z1, z4, z5))
+
+/*
+** bfmlalb_h7_f32_tied1:
+** mov (z[0-9]+\.h), h7
+** bfmlalb z0\.s, z4\.h, \1
+** ret
+*/
+TEST_DUAL_ZD (bfmlalb_h7_f32_tied1, svfloat32_t, svbfloat16_t, bfloat16_t,
+ z0 = svbfmlalb_n_f32 (z0, z4, d7),
+ z0 = svbfmlalb (z0, z4, d7))
+
+/*
+** bfmlalb_h7_f32_untied:
+** mov (z[0-9]+\.h), h7
+** movprfx z0, z1
+** bfmlalb z0\.s, z4\.h, \1
+** ret
+*/
+TEST_DUAL_ZD (bfmlalb_h7_f32_untied, svfloat32_t, svbfloat16_t, bfloat16_t,
+ z0 = svbfmlalb_n_f32 (z1, z4, d7),
+ z0 = svbfmlalb (z1, z4, d7))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bfmlslb_lane_f32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bfmlslb_lane_f32.c
new file mode 100644
index 0000000..91a7a18
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bfmlslb_lane_f32.c
@@ -0,0 +1,84 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** bfmlslb_lane_0_f32_tied1:
+** bfmlslb z0\.s, z4\.h, z5\.h\[0\]
+** ret
+*/
+TEST_DUAL_Z (bfmlslb_lane_0_f32_tied1, svfloat32_t, svbfloat16_t,
+ z0 = svbfmlslb_lane_f32 (z0, z4, z5, 0),
+ z0 = svbfmlslb_lane (z0, z4, z5, 0))
+
+/*
+** bfmlslb_lane_0_f32_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z4
+** bfmlslb z0\.s, \1\.h, z1\.h\[0\]
+** ret
+*/
+TEST_DUAL_Z_REV (bfmlslb_lane_0_f32_tied2, svfloat32_t, svbfloat16_t,
+ z0_res = svbfmlslb_lane_f32 (z4, z0, z1, 0),
+ z0_res = svbfmlslb_lane (z4, z0, z1, 0))
+
+/*
+** bfmlslb_lane_0_f32_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z4
+** bfmlslb z0\.s, z1\.h, \1\.h\[0\]
+** ret
+*/
+TEST_DUAL_Z_REV (bfmlslb_lane_0_f32_tied3, svfloat32_t, svbfloat16_t,
+ z0_res = svbfmlslb_lane_f32 (z4, z1, z0, 0),
+ z0_res = svbfmlslb_lane (z4, z1, z0, 0))
+
+/*
+** bfmlslb_lane_0_f32_untied:
+** movprfx z0, z1
+** bfmlslb z0\.s, z4\.h, z5\.h\[0\]
+** ret
+*/
+TEST_DUAL_Z (bfmlslb_lane_0_f32_untied, svfloat32_t, svbfloat16_t,
+ z0 = svbfmlslb_lane_f32 (z1, z4, z5, 0),
+ z0 = svbfmlslb_lane (z1, z4, z5, 0))
+
+/*
+** bfmlslb_lane_1_f32:
+** bfmlslb z0\.s, z4\.h, z5\.h\[1\]
+** ret
+*/
+TEST_DUAL_Z (bfmlslb_lane_1_f32, svfloat32_t, svbfloat16_t,
+ z0 = svbfmlslb_lane_f32 (z0, z4, z5, 1),
+ z0 = svbfmlslb_lane (z0, z4, z5, 1))
+
+/*
+** bfmlslb_lane_7_f32:
+** bfmlslb z0\.s, z4\.h, z5\.h\[7\]
+** ret
+*/
+TEST_DUAL_Z (bfmlslb_lane_7_f32, svfloat32_t, svbfloat16_t,
+ z0 = svbfmlslb_lane_f32 (z0, z4, z5, 7),
+ z0 = svbfmlslb_lane (z0, z4, z5, 7))
+
+/*
+** bfmlslb_lane_z8_f32:
+** str d8, \[sp, -16\]!
+** mov (z[0-7])\.d, z8\.d
+** bfmlslb z0\.s, z1\.h, \1\.h\[1\]
+** ldr d8, \[sp\], 16
+** ret
+*/
+TEST_DUAL_LANE_REG (bfmlslb_lane_z8_f32, svfloat32_t, svbfloat16_t, z8,
+ z0 = svbfmlslb_lane_f32 (z0, z1, z8, 1),
+ z0 = svbfmlslb_lane (z0, z1, z8, 1))
+
+/*
+** bfmlslb_lane_z16_f32:
+** mov (z[0-7])\.d, z16\.d
+** bfmlslb z0\.s, z1\.h, \1\.h\[1\]
+** ret
+*/
+TEST_DUAL_LANE_REG (bfmlslb_lane_z16_f32, svfloat32_t, svbfloat16_t, z16,
+ z0 = svbfmlslb_lane_f32 (z0, z1, z16, 1),
+ z0 = svbfmlslb_lane (z0, z1, z16, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bfmlslt_f32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bfmlslt_f32.c
new file mode 100644
index 0000000..bc6b7a7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bfmlslt_f32.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** bfmlslt_f32_tied1:
+** bfmlslt z0\.s, z4\.h, z5\.h
+** ret
+*/
+TEST_DUAL_Z (bfmlslt_f32_tied1, svfloat32_t, svbfloat16_t,
+ z0 = svbfmlslt_f32 (z0, z4, z5),
+ z0 = svbfmlslt (z0, z4, z5))
+
+/*
+** bfmlslt_f32_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z4
+** bfmlslt z0\.s, \1\.h, z1\.h
+** ret
+*/
+TEST_DUAL_Z_REV (bfmlslt_f32_tied2, svfloat32_t, svbfloat16_t,
+ z0_res = svbfmlslt_f32 (z4, z0, z1),
+ z0_res = svbfmlslt (z4, z0, z1))
+
+/*
+** bfmlslt_f32_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z4
+** bfmlslt z0\.s, z1\.h, \1\.h
+** ret
+*/
+TEST_DUAL_Z_REV (bfmlslt_f32_tied3, svfloat32_t, svbfloat16_t,
+ z0_res = svbfmlslt_f32 (z4, z1, z0),
+ z0_res = svbfmlslt (z4, z1, z0))
+
+/*
+** bfmlslt_f32_untied:
+** movprfx z0, z1
+** bfmlslt z0\.s, z4\.h, z5\.h
+** ret
+*/
+TEST_DUAL_Z (bfmlslt_f32_untied, svfloat32_t, svbfloat16_t,
+ z0 = svbfmlslt_f32 (z1, z4, z5),
+ z0 = svbfmlslt (z1, z4, z5))
+
+/*
+** bfmlslt_h7_f32_tied1:
+** mov (z[0-9]+\.h), h7
+** bfmlslt z0\.s, z4\.h, \1
+** ret
+*/
+TEST_DUAL_ZD (bfmlslt_h7_f32_tied1, svfloat32_t, svbfloat16_t, bfloat16_t,
+ z0 = svbfmlslt_n_f32 (z0, z4, d7),
+ z0 = svbfmlslt (z0, z4, d7))
+
+/*
+** bfmlslt_h7_f32_untied:
+** mov (z[0-9]+\.h), h7
+** movprfx z0, z1
+** bfmlslt z0\.s, z4\.h, \1
+** ret
+*/
+TEST_DUAL_ZD (bfmlslt_h7_f32_untied, svfloat32_t, svbfloat16_t, bfloat16_t,
+ z0 = svbfmlslt_n_f32 (z1, z4, d7),
+ z0 = svbfmlslt (z1, z4, d7))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bfmlslt_lane_f32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bfmlslt_lane_f32.c
new file mode 100644
index 0000000..1c93011
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bfmlslt_lane_f32.c
@@ -0,0 +1,84 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** bfmlslt_lane_0_f32_tied1:
+** bfmlslt z0\.s, z4\.h, z5\.h\[0\]
+** ret
+*/
+TEST_DUAL_Z (bfmlslt_lane_0_f32_tied1, svfloat32_t, svbfloat16_t,
+ z0 = svbfmlslt_lane_f32 (z0, z4, z5, 0),
+ z0 = svbfmlslt_lane (z0, z4, z5, 0))
+
+/*
+** bfmlslt_lane_0_f32_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z4
+** bfmlslt z0\.s, \1\.h, z1\.h\[0\]
+** ret
+*/
+TEST_DUAL_Z_REV (bfmlslt_lane_0_f32_tied2, svfloat32_t, svbfloat16_t,
+ z0_res = svbfmlslt_lane_f32 (z4, z0, z1, 0),
+ z0_res = svbfmlslt_lane (z4, z0, z1, 0))
+
+/*
+** bfmlslt_lane_0_f32_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z4
+** bfmlslt z0\.s, z1\.h, \1\.h\[0\]
+** ret
+*/
+TEST_DUAL_Z_REV (bfmlslt_lane_0_f32_tied3, svfloat32_t, svbfloat16_t,
+ z0_res = svbfmlslt_lane_f32 (z4, z1, z0, 0),
+ z0_res = svbfmlslt_lane (z4, z1, z0, 0))
+
+/*
+** bfmlslt_lane_0_f32_untied:
+** movprfx z0, z1
+** bfmlslt z0\.s, z4\.h, z5\.h\[0\]
+** ret
+*/
+TEST_DUAL_Z (bfmlslt_lane_0_f32_untied, svfloat32_t, svbfloat16_t,
+ z0 = svbfmlslt_lane_f32 (z1, z4, z5, 0),
+ z0 = svbfmlslt_lane (z1, z4, z5, 0))
+
+/*
+** bfmlslt_lane_1_f32:
+** bfmlslt z0\.s, z4\.h, z5\.h\[1\]
+** ret
+*/
+TEST_DUAL_Z (bfmlslt_lane_1_f32, svfloat32_t, svbfloat16_t,
+ z0 = svbfmlslt_lane_f32 (z0, z4, z5, 1),
+ z0 = svbfmlslt_lane (z0, z4, z5, 1))
+
+/*
+** bfmlslt_lane_7_f32:
+** bfmlslt z0\.s, z4\.h, z5\.h\[7\]
+** ret
+*/
+TEST_DUAL_Z (bfmlslt_lane_7_f32, svfloat32_t, svbfloat16_t,
+ z0 = svbfmlslt_lane_f32 (z0, z4, z5, 7),
+ z0 = svbfmlslt_lane (z0, z4, z5, 7))
+
+/*
+** bfmlslt_lane_z8_f32:
+** str d8, \[sp, -16\]!
+** mov (z[0-7])\.d, z8\.d
+** bfmlslt z0\.s, z1\.h, \1\.h\[1\]
+** ldr d8, \[sp\], 16
+** ret
+*/
+TEST_DUAL_LANE_REG (bfmlslt_lane_z8_f32, svfloat32_t, svbfloat16_t, z8,
+ z0 = svbfmlslt_lane_f32 (z0, z1, z8, 1),
+ z0 = svbfmlslt_lane (z0, z1, z8, 1))
+
+/*
+** bfmlslt_lane_z16_f32:
+** mov (z[0-7])\.d, z16\.d
+** bfmlslt z0\.s, z1\.h, \1\.h\[1\]
+** ret
+*/
+TEST_DUAL_LANE_REG (bfmlslt_lane_z16_f32, svfloat32_t, svbfloat16_t, z16,
+ z0 = svbfmlslt_lane_f32 (z0, z1, z16, 1),
+ z0 = svbfmlslt_lane (z0, z1, z16, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bmopa_za32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bmopa_za32.c
new file mode 100644
index 0000000..b88a817
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bmopa_za32.c
@@ -0,0 +1,30 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** bmopa_za32_u32_0_p0_p1_z0_z1:
+** bmopa za0\.s, p0/m, p1/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_ZA (bmopa_za32_u32_0_p0_p1_z0_z1, svuint32_t,
+ svbmopa_za32_u32_m (0, p0, p1, z0, z1),
+ svbmopa_za32_m (0, p0, p1, z0, z1))
+
+/*
+** bmopa_za32_u32_0_p1_p0_z1_z0:
+** bmopa za0\.s, p1/m, p0/m, z1\.s, z0\.s
+** ret
+*/
+TEST_UNIFORM_ZA (bmopa_za32_u32_0_p1_p0_z1_z0, svuint32_t,
+ svbmopa_za32_u32_m (0, p1, p0, z1, z0),
+ svbmopa_za32_m (0, p1, p0, z1, z0))
+
+/*
+** bmopa_za32_u32_3_p0_p1_z0_z1:
+** bmopa za3\.s, p0/m, p1/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_ZA (bmopa_za32_u32_3_p0_p1_z0_z1, svuint32_t,
+ svbmopa_za32_u32_m (3, p0, p1, z0, z1),
+ svbmopa_za32_m (3, p0, p1, z0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bmops_za32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bmops_za32.c
new file mode 100644
index 0000000..dda120c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/bmops_za32.c
@@ -0,0 +1,30 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** bmops_za32_u32_0_p0_p1_z0_z1:
+** bmops za0\.s, p0/m, p1/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_ZA (bmops_za32_u32_0_p0_p1_z0_z1, svuint32_t,
+ svbmops_za32_u32_m (0, p0, p1, z0, z1),
+ svbmops_za32_m (0, p0, p1, z0, z1))
+
+/*
+** bmops_za32_u32_0_p1_p0_z1_z0:
+** bmops za0\.s, p1/m, p0/m, z1\.s, z0\.s
+** ret
+*/
+TEST_UNIFORM_ZA (bmops_za32_u32_0_p1_p0_z1_z0, svuint32_t,
+ svbmops_za32_u32_m (0, p1, p0, z1, z0),
+ svbmops_za32_m (0, p1, p0, z1, z0))
+
+/*
+** bmops_za32_u32_3_p0_p1_z0_z1:
+** bmops za3\.s, p0/m, p1/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_ZA (bmops_za32_u32_3_p0_p1_z0_z1, svuint32_t,
+ svbmops_za32_u32_m (3, p0, p1, z0, z1),
+ svbmops_za32_m (3, p0, p1, z0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f16.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f16.c
new file mode 100644
index 0000000..7918a9c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f16.c
@@ -0,0 +1,42 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_f16_tied1:
+** fclamp z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (clamp_f16_tied1, svfloat16_t,
+ z0 = svclamp_f16 (z0, z1, z2),
+ z0 = svclamp (z0, z1, z2))
+
+/*
+** clamp_f16_tied2:
+** fclamp z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (clamp_f16_tied2, svfloat16_t,
+ z0 = svclamp_f16 (z1, z0, z2),
+ z0 = svclamp (z1, z0, z2))
+
+/*
+** clamp_f16_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** fclamp z0\.h, z2\.h, \1\.h
+** ret
+*/
+TEST_UNIFORM_Z (clamp_f16_tied3, svfloat16_t,
+ z0 = svclamp_f16 (z1, z2, z0),
+ z0 = svclamp (z1, z2, z0))
+
+/*
+** clamp_f16_untied:
+** movprfx z0, z1
+** fclamp z0\.h, z2\.h, z3\.h
+** ret
+*/
+TEST_UNIFORM_Z (clamp_f16_untied, svfloat16_t,
+ z0 = svclamp_f16 (z1, z2, z3),
+ z0 = svclamp (z1, z2, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f16_x2.c
new file mode 100644
index 0000000..c63294b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f16_x2.c
@@ -0,0 +1,94 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_z24_z24_z0_z5:
+** fclamp {z24\.h - z25\.h}, z0\.h, z5\.h
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z24_z0_z5, svfloat16x2_t, svfloat16_t, z24,
+ svclamp_single_f16_x2 (z24, z0, z5),
+ svclamp (z24, z0, z5))
+
+/*
+** clamp_z24_z28_z5_z7:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fclamp {z24\.h - z25\.h}, z5\.h, z7\.h
+** |
+** fclamp {z28\.h - z29\.h}, z5\.h, z7\.h
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z28_z5_z7, svfloat16x2_t, svfloat16_t, z24,
+ svclamp_single_f16_x2 (z28, z5, z7),
+ svclamp (z28, z5, z7))
+
+/*
+** clamp_z24_z1_z7_z16:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** fclamp {z24\.h - z25\.h}, z7\.h, z16\.h
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z1_z7_z16, svfloat16x2_t, svfloat16_t, z24,
+ svclamp_single_f16_x2 (z1, z7, z16),
+ svclamp (z1, z7, z16))
+
+/*
+** clamp_z1_z24_z16_z23:
+** fclamp {z24\.h - z25\.h}, z16\.h, z23\.h
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z24_z16_z23, svfloat16x2_t, svfloat16_t, z1,
+ svclamp_single_f16_x2 (z24, z16, z23),
+ svclamp (z24, z16, z23))
+
+/*
+** clamp_z1_z1_z23_z0:
+** mov [^\n]+
+** mov [^\n]+
+** fclamp {z[0-9]+\.h - z[0-9]+\.h}, z23\.h, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z1_z23_z0, svfloat16x2_t, svfloat16_t, z1,
+ svclamp_single_f16_x2 (z1, z23, z0),
+ svclamp (z1, z23, z0))
+
+/*
+** clamp_z18_z18_z0_z23:
+** fclamp {z18\.h - z19\.h}, z0\.h, z23\.h
+** ret
+*/
+TEST_XN_SINGLE (clamp_z18_z18_z0_z23, svfloat16x2_t, svfloat16_t, z18,
+ svclamp_single_f16_x2 (z18, z0, z23),
+ svclamp (z18, z0, z23))
+
+/*
+** clamp_awkward:
+** ...
+** fclamp {z[0-9]+\.h - z[0-9]+\.h}, z[0-9]+\.h, z3\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (clamp_awkward, svfloat16x2_t, svfloat16_t,
+ z0_res = svclamp_single_f16_x2 (z1, z0, zn),
+ z0_res = svclamp (z1, z0, zn))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f16_x4.c
new file mode 100644
index 0000000..7487aa0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f16_x4.c
@@ -0,0 +1,104 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_z24_z24_z0_z5:
+** fclamp {z24\.h - z27\.h}, z0\.h, z5\.h
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z24_z0_z5, svfloat16x4_t, svfloat16_t, z24,
+ svclamp_single_f16_x4 (z24, z0, z5),
+ svclamp (z24, z0, z5))
+
+/*
+** clamp_z24_z28_z5_z7:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fclamp {z24\.h - z27\.h}, z5\.h, z7\.h
+** |
+** fclamp {z28\.h - z31\.h}, z5\.h, z7\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z28_z5_z7, svfloat16x4_t, svfloat16_t, z24,
+ svclamp_single_f16_x4 (z28, z5, z7),
+ svclamp (z28, z5, z7))
+
+/*
+** clamp_z24_z1_z7_z16:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fclamp {z24\.h - z27\.h}, z7\.h, z16\.h
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z1_z7_z16, svfloat16x4_t, svfloat16_t, z24,
+ svclamp_single_f16_x4 (z1, z7, z16),
+ svclamp (z1, z7, z16))
+
+/*
+** clamp_z1_z24_z16_z23:
+** fclamp {z24\.h - z27\.h}, z16\.h, z23\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z24_z16_z23, svfloat16x4_t, svfloat16_t, z1,
+ svclamp_single_f16_x4 (z24, z16, z23),
+ svclamp (z24, z16, z23))
+
+/*
+** clamp_z1_z1_z23_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fclamp {z[0-9]+\.h - z[0-9]+\.h}, z23\.h, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z1_z23_z0, svfloat16x4_t, svfloat16_t, z1,
+ svclamp_single_f16_x4 (z1, z23, z0),
+ svclamp (z1, z23, z0))
+
+/*
+** clamp_z18_z18_z16_z5:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fclamp {z[0-9]+\.h - z[0-9]+\.h}, z16\.h, z5\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z18_z18_z16_z5, svfloat16x4_t, svfloat16_t, z18,
+ svclamp_single_f16_x4 (z18, z16, z5),
+ svclamp (z18, z16, z5))
+
+/*
+** clamp_awkward:
+** ...
+** fclamp {z[0-9]+\.h - z[0-9]+\.h}, z[0-9]+\.h, z5\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (clamp_awkward, svfloat16x4_t, svfloat16_t,
+ z0_res = svclamp_single_f16_x4 (z1, z0, zn),
+ z0_res = svclamp (z1, z0, zn))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f32.c
new file mode 100644
index 0000000..7c6cff3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f32.c
@@ -0,0 +1,42 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_f32_tied1:
+** fclamp z0\.s, z1\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (clamp_f32_tied1, svfloat32_t,
+ z0 = svclamp_f32 (z0, z1, z2),
+ z0 = svclamp (z0, z1, z2))
+
+/*
+** clamp_f32_tied2:
+** fclamp z0\.s, z1\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (clamp_f32_tied2, svfloat32_t,
+ z0 = svclamp_f32 (z1, z0, z2),
+ z0 = svclamp (z1, z0, z2))
+
+/*
+** clamp_f32_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** fclamp z0\.s, z2\.s, \1\.s
+** ret
+*/
+TEST_UNIFORM_Z (clamp_f32_tied3, svfloat32_t,
+ z0 = svclamp_f32 (z1, z2, z0),
+ z0 = svclamp (z1, z2, z0))
+
+/*
+** clamp_f32_untied:
+** movprfx z0, z1
+** fclamp z0\.s, z2\.s, z3\.s
+** ret
+*/
+TEST_UNIFORM_Z (clamp_f32_untied, svfloat32_t,
+ z0 = svclamp_f32 (z1, z2, z3),
+ z0 = svclamp (z1, z2, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f32_x2.c
new file mode 100644
index 0000000..dd8eb62
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f32_x2.c
@@ -0,0 +1,94 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_z24_z24_z0_z5:
+** fclamp {z24\.s - z25\.s}, z0\.s, z5\.s
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z24_z0_z5, svfloat32x2_t, svfloat32_t, z24,
+ svclamp_single_f32_x2 (z24, z0, z5),
+ svclamp (z24, z0, z5))
+
+/*
+** clamp_z24_z28_z5_z7:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fclamp {z24\.s - z25\.s}, z5\.s, z7\.s
+** |
+** fclamp {z28\.s - z29\.s}, z5\.s, z7\.s
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z28_z5_z7, svfloat32x2_t, svfloat32_t, z24,
+ svclamp_single_f32_x2 (z28, z5, z7),
+ svclamp (z28, z5, z7))
+
+/*
+** clamp_z24_z1_z7_z16:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** fclamp {z24\.s - z25\.s}, z7\.s, z16\.s
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z1_z7_z16, svfloat32x2_t, svfloat32_t, z24,
+ svclamp_single_f32_x2 (z1, z7, z16),
+ svclamp (z1, z7, z16))
+
+/*
+** clamp_z1_z24_z16_z23:
+** fclamp {z24\.s - z25\.s}, z16\.s, z23\.s
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z24_z16_z23, svfloat32x2_t, svfloat32_t, z1,
+ svclamp_single_f32_x2 (z24, z16, z23),
+ svclamp (z24, z16, z23))
+
+/*
+** clamp_z1_z1_z23_z0:
+** mov [^\n]+
+** mov [^\n]+
+** fclamp {z[0-9]+\.s - z[0-9]+\.s}, z23\.s, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z1_z23_z0, svfloat32x2_t, svfloat32_t, z1,
+ svclamp_single_f32_x2 (z1, z23, z0),
+ svclamp (z1, z23, z0))
+
+/*
+** clamp_z18_z18_z0_z23:
+** fclamp {z18\.s - z19\.s}, z0\.s, z23\.s
+** ret
+*/
+TEST_XN_SINGLE (clamp_z18_z18_z0_z23, svfloat32x2_t, svfloat32_t, z18,
+ svclamp_single_f32_x2 (z18, z0, z23),
+ svclamp (z18, z0, z23))
+
+/*
+** clamp_awkward:
+** ...
+** fclamp {z[0-9]+\.s - z[0-9]+\.s}, z[0-9]+\.s, z3\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (clamp_awkward, svfloat32x2_t, svfloat32_t,
+ z0_res = svclamp_single_f32_x2 (z1, z0, zn),
+ z0_res = svclamp (z1, z0, zn))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f32_x4.c
new file mode 100644
index 0000000..29d73f4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f32_x4.c
@@ -0,0 +1,104 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_z24_z24_z0_z5:
+** fclamp {z24\.s - z27\.s}, z0\.s, z5\.s
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z24_z0_z5, svfloat32x4_t, svfloat32_t, z24,
+ svclamp_single_f32_x4 (z24, z0, z5),
+ svclamp (z24, z0, z5))
+
+/*
+** clamp_z24_z28_z5_z7:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fclamp {z24\.s - z27\.s}, z5\.s, z7\.s
+** |
+** fclamp {z28\.s - z31\.s}, z5\.s, z7\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z28_z5_z7, svfloat32x4_t, svfloat32_t, z24,
+ svclamp_single_f32_x4 (z28, z5, z7),
+ svclamp (z28, z5, z7))
+
+/*
+** clamp_z24_z1_z7_z16:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fclamp {z24\.s - z27\.s}, z7\.s, z16\.s
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z1_z7_z16, svfloat32x4_t, svfloat32_t, z24,
+ svclamp_single_f32_x4 (z1, z7, z16),
+ svclamp (z1, z7, z16))
+
+/*
+** clamp_z1_z24_z16_z23:
+** fclamp {z24\.s - z27\.s}, z16\.s, z23\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z24_z16_z23, svfloat32x4_t, svfloat32_t, z1,
+ svclamp_single_f32_x4 (z24, z16, z23),
+ svclamp (z24, z16, z23))
+
+/*
+** clamp_z1_z1_z23_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fclamp {z[0-9]+\.s - z[0-9]+\.s}, z23\.s, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z1_z23_z0, svfloat32x4_t, svfloat32_t, z1,
+ svclamp_single_f32_x4 (z1, z23, z0),
+ svclamp (z1, z23, z0))
+
+/*
+** clamp_z18_z18_z16_z5:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fclamp {z[0-9]+\.s - z[0-9]+\.s}, z16\.s, z5\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z18_z18_z16_z5, svfloat32x4_t, svfloat32_t, z18,
+ svclamp_single_f32_x4 (z18, z16, z5),
+ svclamp (z18, z16, z5))
+
+/*
+** clamp_awkward:
+** ...
+** fclamp {z[0-9]+\.s - z[0-9]+\.s}, z[0-9]+\.s, z5\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (clamp_awkward, svfloat32x4_t, svfloat32_t,
+ z0_res = svclamp_single_f32_x4 (z1, z0, zn),
+ z0_res = svclamp (z1, z0, zn))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f64.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f64.c
new file mode 100644
index 0000000..599f218
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f64.c
@@ -0,0 +1,42 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_f64_tied1:
+** fclamp z0\.d, z1\.d, z2\.d
+** ret
+*/
+TEST_UNIFORM_Z (clamp_f64_tied1, svfloat64_t,
+ z0 = svclamp_f64 (z0, z1, z2),
+ z0 = svclamp (z0, z1, z2))
+
+/*
+** clamp_f64_tied2:
+** fclamp z0\.d, z1\.d, z2\.d
+** ret
+*/
+TEST_UNIFORM_Z (clamp_f64_tied2, svfloat64_t,
+ z0 = svclamp_f64 (z1, z0, z2),
+ z0 = svclamp (z1, z0, z2))
+
+/*
+** clamp_f64_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** fclamp z0\.d, z2\.d, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (clamp_f64_tied3, svfloat64_t,
+ z0 = svclamp_f64 (z1, z2, z0),
+ z0 = svclamp (z1, z2, z0))
+
+/*
+** clamp_f64_untied:
+** movprfx z0, z1
+** fclamp z0\.d, z2\.d, z3\.d
+** ret
+*/
+TEST_UNIFORM_Z (clamp_f64_untied, svfloat64_t,
+ z0 = svclamp_f64 (z1, z2, z3),
+ z0 = svclamp (z1, z2, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f64_x2.c
new file mode 100644
index 0000000..ca9e996
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f64_x2.c
@@ -0,0 +1,94 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_z24_z24_z0_z5:
+** fclamp {z24\.d - z25\.d}, z0\.d, z5\.d
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z24_z0_z5, svfloat64x2_t, svfloat64_t, z24,
+ svclamp_single_f64_x2 (z24, z0, z5),
+ svclamp (z24, z0, z5))
+
+/*
+** clamp_z24_z28_z5_z7:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fclamp {z24\.d - z25\.d}, z5\.d, z7\.d
+** |
+** fclamp {z28\.d - z29\.d}, z5\.d, z7\.d
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z28_z5_z7, svfloat64x2_t, svfloat64_t, z24,
+ svclamp_single_f64_x2 (z28, z5, z7),
+ svclamp (z28, z5, z7))
+
+/*
+** clamp_z24_z1_z7_z16:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** fclamp {z24\.d - z25\.d}, z7\.d, z16\.d
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z1_z7_z16, svfloat64x2_t, svfloat64_t, z24,
+ svclamp_single_f64_x2 (z1, z7, z16),
+ svclamp (z1, z7, z16))
+
+/*
+** clamp_z1_z24_z16_z23:
+** fclamp {z24\.d - z25\.d}, z16\.d, z23\.d
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z24_z16_z23, svfloat64x2_t, svfloat64_t, z1,
+ svclamp_single_f64_x2 (z24, z16, z23),
+ svclamp (z24, z16, z23))
+
+/*
+** clamp_z1_z1_z23_z0:
+** mov [^\n]+
+** mov [^\n]+
+** fclamp {z[0-9]+\.d - z[0-9]+\.d}, z23\.d, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z1_z23_z0, svfloat64x2_t, svfloat64_t, z1,
+ svclamp_single_f64_x2 (z1, z23, z0),
+ svclamp (z1, z23, z0))
+
+/*
+** clamp_z18_z18_z0_z23:
+** fclamp {z18\.d - z19\.d}, z0\.d, z23\.d
+** ret
+*/
+TEST_XN_SINGLE (clamp_z18_z18_z0_z23, svfloat64x2_t, svfloat64_t, z18,
+ svclamp_single_f64_x2 (z18, z0, z23),
+ svclamp (z18, z0, z23))
+
+/*
+** clamp_awkward:
+** ...
+** fclamp {z[0-9]+\.d - z[0-9]+\.d}, z[0-9]+\.d, z3\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (clamp_awkward, svfloat64x2_t, svfloat64_t,
+ z0_res = svclamp_single_f64_x2 (z1, z0, zn),
+ z0_res = svclamp (z1, z0, zn))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f64_x4.c
new file mode 100644
index 0000000..c2773e2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_f64_x4.c
@@ -0,0 +1,104 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_z24_z24_z0_z5:
+** fclamp {z24\.d - z27\.d}, z0\.d, z5\.d
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z24_z0_z5, svfloat64x4_t, svfloat64_t, z24,
+ svclamp_single_f64_x4 (z24, z0, z5),
+ svclamp (z24, z0, z5))
+
+/*
+** clamp_z24_z28_z5_z7:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fclamp {z24\.d - z27\.d}, z5\.d, z7\.d
+** |
+** fclamp {z28\.d - z31\.d}, z5\.d, z7\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z28_z5_z7, svfloat64x4_t, svfloat64_t, z24,
+ svclamp_single_f64_x4 (z28, z5, z7),
+ svclamp (z28, z5, z7))
+
+/*
+** clamp_z24_z1_z7_z16:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fclamp {z24\.d - z27\.d}, z7\.d, z16\.d
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z1_z7_z16, svfloat64x4_t, svfloat64_t, z24,
+ svclamp_single_f64_x4 (z1, z7, z16),
+ svclamp (z1, z7, z16))
+
+/*
+** clamp_z1_z24_z16_z23:
+** fclamp {z24\.d - z27\.d}, z16\.d, z23\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z24_z16_z23, svfloat64x4_t, svfloat64_t, z1,
+ svclamp_single_f64_x4 (z24, z16, z23),
+ svclamp (z24, z16, z23))
+
+/*
+** clamp_z1_z1_z23_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fclamp {z[0-9]+\.d - z[0-9]+\.d}, z23\.d, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z1_z23_z0, svfloat64x4_t, svfloat64_t, z1,
+ svclamp_single_f64_x4 (z1, z23, z0),
+ svclamp (z1, z23, z0))
+
+/*
+** clamp_z18_z18_z16_z5:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fclamp {z[0-9]+\.d - z[0-9]+\.d}, z16\.d, z5\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z18_z18_z16_z5, svfloat64x4_t, svfloat64_t, z18,
+ svclamp_single_f64_x4 (z18, z16, z5),
+ svclamp (z18, z16, z5))
+
+/*
+** clamp_awkward:
+** ...
+** fclamp {z[0-9]+\.d - z[0-9]+\.d}, z[0-9]+\.d, z5\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (clamp_awkward, svfloat64x4_t, svfloat64_t,
+ z0_res = svclamp_single_f64_x4 (z1, z0, zn),
+ z0_res = svclamp (z1, z0, zn))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s16_x2.c
new file mode 100644
index 0000000..401a298
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s16_x2.c
@@ -0,0 +1,94 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_z24_z24_z0_z5:
+** sclamp {z24\.h - z25\.h}, z0\.h, z5\.h
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z24_z0_z5, svint16x2_t, svint16_t, z24,
+ svclamp_single_s16_x2 (z24, z0, z5),
+ svclamp (z24, z0, z5))
+
+/*
+** clamp_z24_z28_z5_z7:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z24\.h - z25\.h}, z5\.h, z7\.h
+** |
+** sclamp {z28\.h - z29\.h}, z5\.h, z7\.h
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z28_z5_z7, svint16x2_t, svint16_t, z24,
+ svclamp_single_s16_x2 (z28, z5, z7),
+ svclamp (z28, z5, z7))
+
+/*
+** clamp_z24_z1_z7_z16:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** sclamp {z24\.h - z25\.h}, z7\.h, z16\.h
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z1_z7_z16, svint16x2_t, svint16_t, z24,
+ svclamp_single_s16_x2 (z1, z7, z16),
+ svclamp (z1, z7, z16))
+
+/*
+** clamp_z1_z24_z16_z23:
+** sclamp {z24\.h - z25\.h}, z16\.h, z23\.h
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z24_z16_z23, svint16x2_t, svint16_t, z1,
+ svclamp_single_s16_x2 (z24, z16, z23),
+ svclamp (z24, z16, z23))
+
+/*
+** clamp_z1_z1_z23_z0:
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z[0-9]+\.h - z[0-9]+\.h}, z23\.h, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z1_z23_z0, svint16x2_t, svint16_t, z1,
+ svclamp_single_s16_x2 (z1, z23, z0),
+ svclamp (z1, z23, z0))
+
+/*
+** clamp_z18_z18_z0_z23:
+** sclamp {z18\.h - z19\.h}, z0\.h, z23\.h
+** ret
+*/
+TEST_XN_SINGLE (clamp_z18_z18_z0_z23, svint16x2_t, svint16_t, z18,
+ svclamp_single_s16_x2 (z18, z0, z23),
+ svclamp (z18, z0, z23))
+
+/*
+** clamp_awkward:
+** ...
+** sclamp {z[0-9]+\.h - z[0-9]+\.h}, z[0-9]+\.h, z3\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (clamp_awkward, svint16x2_t, svint16_t,
+ z0_res = svclamp_single_s16_x2 (z1, z0, zn),
+ z0_res = svclamp (z1, z0, zn))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s16_x4.c
new file mode 100644
index 0000000..96c87db
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s16_x4.c
@@ -0,0 +1,104 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_z24_z24_z0_z5:
+** sclamp {z24\.h - z27\.h}, z0\.h, z5\.h
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z24_z0_z5, svint16x4_t, svint16_t, z24,
+ svclamp_single_s16_x4 (z24, z0, z5),
+ svclamp (z24, z0, z5))
+
+/*
+** clamp_z24_z28_z5_z7:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z24\.h - z27\.h}, z5\.h, z7\.h
+** |
+** sclamp {z28\.h - z31\.h}, z5\.h, z7\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z28_z5_z7, svint16x4_t, svint16_t, z24,
+ svclamp_single_s16_x4 (z28, z5, z7),
+ svclamp (z28, z5, z7))
+
+/*
+** clamp_z24_z1_z7_z16:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z24\.h - z27\.h}, z7\.h, z16\.h
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z1_z7_z16, svint16x4_t, svint16_t, z24,
+ svclamp_single_s16_x4 (z1, z7, z16),
+ svclamp (z1, z7, z16))
+
+/*
+** clamp_z1_z24_z16_z23:
+** sclamp {z24\.h - z27\.h}, z16\.h, z23\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z24_z16_z23, svint16x4_t, svint16_t, z1,
+ svclamp_single_s16_x4 (z24, z16, z23),
+ svclamp (z24, z16, z23))
+
+/*
+** clamp_z1_z1_z23_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z[0-9]+\.h - z[0-9]+\.h}, z23\.h, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z1_z23_z0, svint16x4_t, svint16_t, z1,
+ svclamp_single_s16_x4 (z1, z23, z0),
+ svclamp (z1, z23, z0))
+
+/*
+** clamp_z18_z18_z16_z5:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z[0-9]+\.h - z[0-9]+\.h}, z16\.h, z5\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z18_z18_z16_z5, svint16x4_t, svint16_t, z18,
+ svclamp_single_s16_x4 (z18, z16, z5),
+ svclamp (z18, z16, z5))
+
+/*
+** clamp_awkward:
+** ...
+** sclamp {z[0-9]+\.h - z[0-9]+\.h}, z[0-9]+\.h, z5\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (clamp_awkward, svint16x4_t, svint16_t,
+ z0_res = svclamp_single_s16_x4 (z1, z0, zn),
+ z0_res = svclamp (z1, z0, zn))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s32_x2.c
new file mode 100644
index 0000000..1a50b85
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s32_x2.c
@@ -0,0 +1,94 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_z24_z24_z0_z5:
+** sclamp {z24\.s - z25\.s}, z0\.s, z5\.s
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z24_z0_z5, svint32x2_t, svint32_t, z24,
+ svclamp_single_s32_x2 (z24, z0, z5),
+ svclamp (z24, z0, z5))
+
+/*
+** clamp_z24_z28_z5_z7:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z24\.s - z25\.s}, z5\.s, z7\.s
+** |
+** sclamp {z28\.s - z29\.s}, z5\.s, z7\.s
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z28_z5_z7, svint32x2_t, svint32_t, z24,
+ svclamp_single_s32_x2 (z28, z5, z7),
+ svclamp (z28, z5, z7))
+
+/*
+** clamp_z24_z1_z7_z16:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** sclamp {z24\.s - z25\.s}, z7\.s, z16\.s
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z1_z7_z16, svint32x2_t, svint32_t, z24,
+ svclamp_single_s32_x2 (z1, z7, z16),
+ svclamp (z1, z7, z16))
+
+/*
+** clamp_z1_z24_z16_z23:
+** sclamp {z24\.s - z25\.s}, z16\.s, z23\.s
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z24_z16_z23, svint32x2_t, svint32_t, z1,
+ svclamp_single_s32_x2 (z24, z16, z23),
+ svclamp (z24, z16, z23))
+
+/*
+** clamp_z1_z1_z23_z0:
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z[0-9]+\.s - z[0-9]+\.s}, z23\.s, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z1_z23_z0, svint32x2_t, svint32_t, z1,
+ svclamp_single_s32_x2 (z1, z23, z0),
+ svclamp (z1, z23, z0))
+
+/*
+** clamp_z18_z18_z0_z23:
+** sclamp {z18\.s - z19\.s}, z0\.s, z23\.s
+** ret
+*/
+TEST_XN_SINGLE (clamp_z18_z18_z0_z23, svint32x2_t, svint32_t, z18,
+ svclamp_single_s32_x2 (z18, z0, z23),
+ svclamp (z18, z0, z23))
+
+/*
+** clamp_awkward:
+** ...
+** sclamp {z[0-9]+\.s - z[0-9]+\.s}, z[0-9]+\.s, z3\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (clamp_awkward, svint32x2_t, svint32_t,
+ z0_res = svclamp_single_s32_x2 (z1, z0, zn),
+ z0_res = svclamp (z1, z0, zn))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s32_x4.c
new file mode 100644
index 0000000..8f6a0d3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s32_x4.c
@@ -0,0 +1,104 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_z24_z24_z0_z5:
+** sclamp {z24\.s - z27\.s}, z0\.s, z5\.s
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z24_z0_z5, svint32x4_t, svint32_t, z24,
+ svclamp_single_s32_x4 (z24, z0, z5),
+ svclamp (z24, z0, z5))
+
+/*
+** clamp_z24_z28_z5_z7:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z24\.s - z27\.s}, z5\.s, z7\.s
+** |
+** sclamp {z28\.s - z31\.s}, z5\.s, z7\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z28_z5_z7, svint32x4_t, svint32_t, z24,
+ svclamp_single_s32_x4 (z28, z5, z7),
+ svclamp (z28, z5, z7))
+
+/*
+** clamp_z24_z1_z7_z16:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z24\.s - z27\.s}, z7\.s, z16\.s
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z1_z7_z16, svint32x4_t, svint32_t, z24,
+ svclamp_single_s32_x4 (z1, z7, z16),
+ svclamp (z1, z7, z16))
+
+/*
+** clamp_z1_z24_z16_z23:
+** sclamp {z24\.s - z27\.s}, z16\.s, z23\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z24_z16_z23, svint32x4_t, svint32_t, z1,
+ svclamp_single_s32_x4 (z24, z16, z23),
+ svclamp (z24, z16, z23))
+
+/*
+** clamp_z1_z1_z23_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z[0-9]+\.s - z[0-9]+\.s}, z23\.s, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z1_z23_z0, svint32x4_t, svint32_t, z1,
+ svclamp_single_s32_x4 (z1, z23, z0),
+ svclamp (z1, z23, z0))
+
+/*
+** clamp_z18_z18_z16_z5:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z[0-9]+\.s - z[0-9]+\.s}, z16\.s, z5\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z18_z18_z16_z5, svint32x4_t, svint32_t, z18,
+ svclamp_single_s32_x4 (z18, z16, z5),
+ svclamp (z18, z16, z5))
+
+/*
+** clamp_awkward:
+** ...
+** sclamp {z[0-9]+\.s - z[0-9]+\.s}, z[0-9]+\.s, z5\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (clamp_awkward, svint32x4_t, svint32_t,
+ z0_res = svclamp_single_s32_x4 (z1, z0, zn),
+ z0_res = svclamp (z1, z0, zn))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s64_x2.c
new file mode 100644
index 0000000..6accce5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s64_x2.c
@@ -0,0 +1,94 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_z24_z24_z0_z5:
+** sclamp {z24\.d - z25\.d}, z0\.d, z5\.d
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z24_z0_z5, svint64x2_t, svint64_t, z24,
+ svclamp_single_s64_x2 (z24, z0, z5),
+ svclamp (z24, z0, z5))
+
+/*
+** clamp_z24_z28_z5_z7:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z24\.d - z25\.d}, z5\.d, z7\.d
+** |
+** sclamp {z28\.d - z29\.d}, z5\.d, z7\.d
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z28_z5_z7, svint64x2_t, svint64_t, z24,
+ svclamp_single_s64_x2 (z28, z5, z7),
+ svclamp (z28, z5, z7))
+
+/*
+** clamp_z24_z1_z7_z16:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** sclamp {z24\.d - z25\.d}, z7\.d, z16\.d
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z1_z7_z16, svint64x2_t, svint64_t, z24,
+ svclamp_single_s64_x2 (z1, z7, z16),
+ svclamp (z1, z7, z16))
+
+/*
+** clamp_z1_z24_z16_z23:
+** sclamp {z24\.d - z25\.d}, z16\.d, z23\.d
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z24_z16_z23, svint64x2_t, svint64_t, z1,
+ svclamp_single_s64_x2 (z24, z16, z23),
+ svclamp (z24, z16, z23))
+
+/*
+** clamp_z1_z1_z23_z0:
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z[0-9]+\.d - z[0-9]+\.d}, z23\.d, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z1_z23_z0, svint64x2_t, svint64_t, z1,
+ svclamp_single_s64_x2 (z1, z23, z0),
+ svclamp (z1, z23, z0))
+
+/*
+** clamp_z18_z18_z0_z23:
+** sclamp {z18\.d - z19\.d}, z0\.d, z23\.d
+** ret
+*/
+TEST_XN_SINGLE (clamp_z18_z18_z0_z23, svint64x2_t, svint64_t, z18,
+ svclamp_single_s64_x2 (z18, z0, z23),
+ svclamp (z18, z0, z23))
+
+/*
+** clamp_awkward:
+** ...
+** sclamp {z[0-9]+\.d - z[0-9]+\.d}, z[0-9]+\.d, z3\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (clamp_awkward, svint64x2_t, svint64_t,
+ z0_res = svclamp_single_s64_x2 (z1, z0, zn),
+ z0_res = svclamp (z1, z0, zn))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s64_x4.c
new file mode 100644
index 0000000..fbc0658
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s64_x4.c
@@ -0,0 +1,104 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_z24_z24_z0_z5:
+** sclamp {z24\.d - z27\.d}, z0\.d, z5\.d
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z24_z0_z5, svint64x4_t, svint64_t, z24,
+ svclamp_single_s64_x4 (z24, z0, z5),
+ svclamp (z24, z0, z5))
+
+/*
+** clamp_z24_z28_z5_z7:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z24\.d - z27\.d}, z5\.d, z7\.d
+** |
+** sclamp {z28\.d - z31\.d}, z5\.d, z7\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z28_z5_z7, svint64x4_t, svint64_t, z24,
+ svclamp_single_s64_x4 (z28, z5, z7),
+ svclamp (z28, z5, z7))
+
+/*
+** clamp_z24_z1_z7_z16:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z24\.d - z27\.d}, z7\.d, z16\.d
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z1_z7_z16, svint64x4_t, svint64_t, z24,
+ svclamp_single_s64_x4 (z1, z7, z16),
+ svclamp (z1, z7, z16))
+
+/*
+** clamp_z1_z24_z16_z23:
+** sclamp {z24\.d - z27\.d}, z16\.d, z23\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z24_z16_z23, svint64x4_t, svint64_t, z1,
+ svclamp_single_s64_x4 (z24, z16, z23),
+ svclamp (z24, z16, z23))
+
+/*
+** clamp_z1_z1_z23_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z[0-9]+\.d - z[0-9]+\.d}, z23\.d, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z1_z23_z0, svint64x4_t, svint64_t, z1,
+ svclamp_single_s64_x4 (z1, z23, z0),
+ svclamp (z1, z23, z0))
+
+/*
+** clamp_z18_z18_z16_z5:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z[0-9]+\.d - z[0-9]+\.d}, z16\.d, z5\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z18_z18_z16_z5, svint64x4_t, svint64_t, z18,
+ svclamp_single_s64_x4 (z18, z16, z5),
+ svclamp (z18, z16, z5))
+
+/*
+** clamp_awkward:
+** ...
+** sclamp {z[0-9]+\.d - z[0-9]+\.d}, z[0-9]+\.d, z5\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (clamp_awkward, svint64x4_t, svint64_t,
+ z0_res = svclamp_single_s64_x4 (z1, z0, zn),
+ z0_res = svclamp (z1, z0, zn))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s8_x2.c
new file mode 100644
index 0000000..fc9151b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s8_x2.c
@@ -0,0 +1,94 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_z24_z24_z0_z5:
+** sclamp {z24\.b - z25\.b}, z0\.b, z5\.b
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z24_z0_z5, svint8x2_t, svint8_t, z24,
+ svclamp_single_s8_x2 (z24, z0, z5),
+ svclamp (z24, z0, z5))
+
+/*
+** clamp_z24_z28_z5_z7:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z24\.b - z25\.b}, z5\.b, z7\.b
+** |
+** sclamp {z28\.b - z29\.b}, z5\.b, z7\.b
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z28_z5_z7, svint8x2_t, svint8_t, z24,
+ svclamp_single_s8_x2 (z28, z5, z7),
+ svclamp (z28, z5, z7))
+
+/*
+** clamp_z24_z1_z7_z16:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** sclamp {z24\.b - z25\.b}, z7\.b, z16\.b
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z1_z7_z16, svint8x2_t, svint8_t, z24,
+ svclamp_single_s8_x2 (z1, z7, z16),
+ svclamp (z1, z7, z16))
+
+/*
+** clamp_z1_z24_z16_z23:
+** sclamp {z24\.b - z25\.b}, z16\.b, z23\.b
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z24_z16_z23, svint8x2_t, svint8_t, z1,
+ svclamp_single_s8_x2 (z24, z16, z23),
+ svclamp (z24, z16, z23))
+
+/*
+** clamp_z1_z1_z23_z0:
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z[0-9]+\.b - z[0-9]+\.b}, z23\.b, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z1_z23_z0, svint8x2_t, svint8_t, z1,
+ svclamp_single_s8_x2 (z1, z23, z0),
+ svclamp (z1, z23, z0))
+
+/*
+** clamp_z18_z18_z0_z23:
+** sclamp {z18\.b - z19\.b}, z0\.b, z23\.b
+** ret
+*/
+TEST_XN_SINGLE (clamp_z18_z18_z0_z23, svint8x2_t, svint8_t, z18,
+ svclamp_single_s8_x2 (z18, z0, z23),
+ svclamp (z18, z0, z23))
+
+/*
+** clamp_awkward:
+** ...
+** sclamp {z[0-9]+\.b - z[0-9]+\.b}, z[0-9]+\.b, z3\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (clamp_awkward, svint8x2_t, svint8_t,
+ z0_res = svclamp_single_s8_x2 (z1, z0, zn),
+ z0_res = svclamp (z1, z0, zn))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s8_x4.c
new file mode 100644
index 0000000..ce1ad02
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_s8_x4.c
@@ -0,0 +1,104 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_z24_z24_z0_z5:
+** sclamp {z24\.b - z27\.b}, z0\.b, z5\.b
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z24_z0_z5, svint8x4_t, svint8_t, z24,
+ svclamp_single_s8_x4 (z24, z0, z5),
+ svclamp (z24, z0, z5))
+
+/*
+** clamp_z24_z28_z5_z7:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z24\.b - z27\.b}, z5\.b, z7\.b
+** |
+** sclamp {z28\.b - z31\.b}, z5\.b, z7\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z28_z5_z7, svint8x4_t, svint8_t, z24,
+ svclamp_single_s8_x4 (z28, z5, z7),
+ svclamp (z28, z5, z7))
+
+/*
+** clamp_z24_z1_z7_z16:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z24\.b - z27\.b}, z7\.b, z16\.b
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z1_z7_z16, svint8x4_t, svint8_t, z24,
+ svclamp_single_s8_x4 (z1, z7, z16),
+ svclamp (z1, z7, z16))
+
+/*
+** clamp_z1_z24_z16_z23:
+** sclamp {z24\.b - z27\.b}, z16\.b, z23\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z24_z16_z23, svint8x4_t, svint8_t, z1,
+ svclamp_single_s8_x4 (z24, z16, z23),
+ svclamp (z24, z16, z23))
+
+/*
+** clamp_z1_z1_z23_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z[0-9]+\.b - z[0-9]+\.b}, z23\.b, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z1_z23_z0, svint8x4_t, svint8_t, z1,
+ svclamp_single_s8_x4 (z1, z23, z0),
+ svclamp (z1, z23, z0))
+
+/*
+** clamp_z18_z18_z16_z5:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sclamp {z[0-9]+\.b - z[0-9]+\.b}, z16\.b, z5\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z18_z18_z16_z5, svint8x4_t, svint8_t, z18,
+ svclamp_single_s8_x4 (z18, z16, z5),
+ svclamp (z18, z16, z5))
+
+/*
+** clamp_awkward:
+** ...
+** sclamp {z[0-9]+\.b - z[0-9]+\.b}, z[0-9]+\.b, z5\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (clamp_awkward, svint8x4_t, svint8_t,
+ z0_res = svclamp_single_s8_x4 (z1, z0, zn),
+ z0_res = svclamp (z1, z0, zn))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u16_x2.c
new file mode 100644
index 0000000..50ed0a1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u16_x2.c
@@ -0,0 +1,94 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_z24_z24_z0_z5:
+** uclamp {z24\.h - z25\.h}, z0\.h, z5\.h
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z24_z0_z5, svuint16x2_t, svuint16_t, z24,
+ svclamp_single_u16_x2 (z24, z0, z5),
+ svclamp (z24, z0, z5))
+
+/*
+** clamp_z24_z28_z5_z7:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z24\.h - z25\.h}, z5\.h, z7\.h
+** |
+** uclamp {z28\.h - z29\.h}, z5\.h, z7\.h
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z28_z5_z7, svuint16x2_t, svuint16_t, z24,
+ svclamp_single_u16_x2 (z28, z5, z7),
+ svclamp (z28, z5, z7))
+
+/*
+** clamp_z24_z1_z7_z16:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** uclamp {z24\.h - z25\.h}, z7\.h, z16\.h
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z1_z7_z16, svuint16x2_t, svuint16_t, z24,
+ svclamp_single_u16_x2 (z1, z7, z16),
+ svclamp (z1, z7, z16))
+
+/*
+** clamp_z1_z24_z16_z23:
+** uclamp {z24\.h - z25\.h}, z16\.h, z23\.h
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z24_z16_z23, svuint16x2_t, svuint16_t, z1,
+ svclamp_single_u16_x2 (z24, z16, z23),
+ svclamp (z24, z16, z23))
+
+/*
+** clamp_z1_z1_z23_z0:
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z[0-9]+\.h - z[0-9]+\.h}, z23\.h, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z1_z23_z0, svuint16x2_t, svuint16_t, z1,
+ svclamp_single_u16_x2 (z1, z23, z0),
+ svclamp (z1, z23, z0))
+
+/*
+** clamp_z18_z18_z0_z23:
+** uclamp {z18\.h - z19\.h}, z0\.h, z23\.h
+** ret
+*/
+TEST_XN_SINGLE (clamp_z18_z18_z0_z23, svuint16x2_t, svuint16_t, z18,
+ svclamp_single_u16_x2 (z18, z0, z23),
+ svclamp (z18, z0, z23))
+
+/*
+** clamp_awkward:
+** ...
+** uclamp {z[0-9]+\.h - z[0-9]+\.h}, z[0-9]+\.h, z3\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (clamp_awkward, svuint16x2_t, svuint16_t,
+ z0_res = svclamp_single_u16_x2 (z1, z0, zn),
+ z0_res = svclamp (z1, z0, zn))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u16_x4.c
new file mode 100644
index 0000000..ca3e65b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u16_x4.c
@@ -0,0 +1,104 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_z24_z24_z0_z5:
+** uclamp {z24\.h - z27\.h}, z0\.h, z5\.h
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z24_z0_z5, svuint16x4_t, svuint16_t, z24,
+ svclamp_single_u16_x4 (z24, z0, z5),
+ svclamp (z24, z0, z5))
+
+/*
+** clamp_z24_z28_z5_z7:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z24\.h - z27\.h}, z5\.h, z7\.h
+** |
+** uclamp {z28\.h - z31\.h}, z5\.h, z7\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z28_z5_z7, svuint16x4_t, svuint16_t, z24,
+ svclamp_single_u16_x4 (z28, z5, z7),
+ svclamp (z28, z5, z7))
+
+/*
+** clamp_z24_z1_z7_z16:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z24\.h - z27\.h}, z7\.h, z16\.h
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z1_z7_z16, svuint16x4_t, svuint16_t, z24,
+ svclamp_single_u16_x4 (z1, z7, z16),
+ svclamp (z1, z7, z16))
+
+/*
+** clamp_z1_z24_z16_z23:
+** uclamp {z24\.h - z27\.h}, z16\.h, z23\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z24_z16_z23, svuint16x4_t, svuint16_t, z1,
+ svclamp_single_u16_x4 (z24, z16, z23),
+ svclamp (z24, z16, z23))
+
+/*
+** clamp_z1_z1_z23_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z[0-9]+\.h - z[0-9]+\.h}, z23\.h, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z1_z23_z0, svuint16x4_t, svuint16_t, z1,
+ svclamp_single_u16_x4 (z1, z23, z0),
+ svclamp (z1, z23, z0))
+
+/*
+** clamp_z18_z18_z16_z5:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z[0-9]+\.h - z[0-9]+\.h}, z16\.h, z5\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z18_z18_z16_z5, svuint16x4_t, svuint16_t, z18,
+ svclamp_single_u16_x4 (z18, z16, z5),
+ svclamp (z18, z16, z5))
+
+/*
+** clamp_awkward:
+** ...
+** uclamp {z[0-9]+\.h - z[0-9]+\.h}, z[0-9]+\.h, z5\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (clamp_awkward, svuint16x4_t, svuint16_t,
+ z0_res = svclamp_single_u16_x4 (z1, z0, zn),
+ z0_res = svclamp (z1, z0, zn))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u32_x2.c
new file mode 100644
index 0000000..2494df2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u32_x2.c
@@ -0,0 +1,94 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_z24_z24_z0_z5:
+** uclamp {z24\.s - z25\.s}, z0\.s, z5\.s
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z24_z0_z5, svuint32x2_t, svuint32_t, z24,
+ svclamp_single_u32_x2 (z24, z0, z5),
+ svclamp (z24, z0, z5))
+
+/*
+** clamp_z24_z28_z5_z7:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z24\.s - z25\.s}, z5\.s, z7\.s
+** |
+** uclamp {z28\.s - z29\.s}, z5\.s, z7\.s
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z28_z5_z7, svuint32x2_t, svuint32_t, z24,
+ svclamp_single_u32_x2 (z28, z5, z7),
+ svclamp (z28, z5, z7))
+
+/*
+** clamp_z24_z1_z7_z16:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** uclamp {z24\.s - z25\.s}, z7\.s, z16\.s
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z1_z7_z16, svuint32x2_t, svuint32_t, z24,
+ svclamp_single_u32_x2 (z1, z7, z16),
+ svclamp (z1, z7, z16))
+
+/*
+** clamp_z1_z24_z16_z23:
+** uclamp {z24\.s - z25\.s}, z16\.s, z23\.s
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z24_z16_z23, svuint32x2_t, svuint32_t, z1,
+ svclamp_single_u32_x2 (z24, z16, z23),
+ svclamp (z24, z16, z23))
+
+/*
+** clamp_z1_z1_z23_z0:
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z[0-9]+\.s - z[0-9]+\.s}, z23\.s, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z1_z23_z0, svuint32x2_t, svuint32_t, z1,
+ svclamp_single_u32_x2 (z1, z23, z0),
+ svclamp (z1, z23, z0))
+
+/*
+** clamp_z18_z18_z0_z23:
+** uclamp {z18\.s - z19\.s}, z0\.s, z23\.s
+** ret
+*/
+TEST_XN_SINGLE (clamp_z18_z18_z0_z23, svuint32x2_t, svuint32_t, z18,
+ svclamp_single_u32_x2 (z18, z0, z23),
+ svclamp (z18, z0, z23))
+
+/*
+** clamp_awkward:
+** ...
+** uclamp {z[0-9]+\.s - z[0-9]+\.s}, z[0-9]+\.s, z3\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (clamp_awkward, svuint32x2_t, svuint32_t,
+ z0_res = svclamp_single_u32_x2 (z1, z0, zn),
+ z0_res = svclamp (z1, z0, zn))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u32_x4.c
new file mode 100644
index 0000000..a02d9dd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u32_x4.c
@@ -0,0 +1,104 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_z24_z24_z0_z5:
+** uclamp {z24\.s - z27\.s}, z0\.s, z5\.s
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z24_z0_z5, svuint32x4_t, svuint32_t, z24,
+ svclamp_single_u32_x4 (z24, z0, z5),
+ svclamp (z24, z0, z5))
+
+/*
+** clamp_z24_z28_z5_z7:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z24\.s - z27\.s}, z5\.s, z7\.s
+** |
+** uclamp {z28\.s - z31\.s}, z5\.s, z7\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z28_z5_z7, svuint32x4_t, svuint32_t, z24,
+ svclamp_single_u32_x4 (z28, z5, z7),
+ svclamp (z28, z5, z7))
+
+/*
+** clamp_z24_z1_z7_z16:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z24\.s - z27\.s}, z7\.s, z16\.s
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z1_z7_z16, svuint32x4_t, svuint32_t, z24,
+ svclamp_single_u32_x4 (z1, z7, z16),
+ svclamp (z1, z7, z16))
+
+/*
+** clamp_z1_z24_z16_z23:
+** uclamp {z24\.s - z27\.s}, z16\.s, z23\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z24_z16_z23, svuint32x4_t, svuint32_t, z1,
+ svclamp_single_u32_x4 (z24, z16, z23),
+ svclamp (z24, z16, z23))
+
+/*
+** clamp_z1_z1_z23_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z[0-9]+\.s - z[0-9]+\.s}, z23\.s, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z1_z23_z0, svuint32x4_t, svuint32_t, z1,
+ svclamp_single_u32_x4 (z1, z23, z0),
+ svclamp (z1, z23, z0))
+
+/*
+** clamp_z18_z18_z16_z5:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z[0-9]+\.s - z[0-9]+\.s}, z16\.s, z5\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z18_z18_z16_z5, svuint32x4_t, svuint32_t, z18,
+ svclamp_single_u32_x4 (z18, z16, z5),
+ svclamp (z18, z16, z5))
+
+/*
+** clamp_awkward:
+** ...
+** uclamp {z[0-9]+\.s - z[0-9]+\.s}, z[0-9]+\.s, z5\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (clamp_awkward, svuint32x4_t, svuint32_t,
+ z0_res = svclamp_single_u32_x4 (z1, z0, zn),
+ z0_res = svclamp (z1, z0, zn))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u64_x2.c
new file mode 100644
index 0000000..b827ee4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u64_x2.c
@@ -0,0 +1,94 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_z24_z24_z0_z5:
+** uclamp {z24\.d - z25\.d}, z0\.d, z5\.d
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z24_z0_z5, svuint64x2_t, svuint64_t, z24,
+ svclamp_single_u64_x2 (z24, z0, z5),
+ svclamp (z24, z0, z5))
+
+/*
+** clamp_z24_z28_z5_z7:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z24\.d - z25\.d}, z5\.d, z7\.d
+** |
+** uclamp {z28\.d - z29\.d}, z5\.d, z7\.d
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z28_z5_z7, svuint64x2_t, svuint64_t, z24,
+ svclamp_single_u64_x2 (z28, z5, z7),
+ svclamp (z28, z5, z7))
+
+/*
+** clamp_z24_z1_z7_z16:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** uclamp {z24\.d - z25\.d}, z7\.d, z16\.d
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z1_z7_z16, svuint64x2_t, svuint64_t, z24,
+ svclamp_single_u64_x2 (z1, z7, z16),
+ svclamp (z1, z7, z16))
+
+/*
+** clamp_z1_z24_z16_z23:
+** uclamp {z24\.d - z25\.d}, z16\.d, z23\.d
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z24_z16_z23, svuint64x2_t, svuint64_t, z1,
+ svclamp_single_u64_x2 (z24, z16, z23),
+ svclamp (z24, z16, z23))
+
+/*
+** clamp_z1_z1_z23_z0:
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z[0-9]+\.d - z[0-9]+\.d}, z23\.d, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z1_z23_z0, svuint64x2_t, svuint64_t, z1,
+ svclamp_single_u64_x2 (z1, z23, z0),
+ svclamp (z1, z23, z0))
+
+/*
+** clamp_z18_z18_z0_z23:
+** uclamp {z18\.d - z19\.d}, z0\.d, z23\.d
+** ret
+*/
+TEST_XN_SINGLE (clamp_z18_z18_z0_z23, svuint64x2_t, svuint64_t, z18,
+ svclamp_single_u64_x2 (z18, z0, z23),
+ svclamp (z18, z0, z23))
+
+/*
+** clamp_awkward:
+** ...
+** uclamp {z[0-9]+\.d - z[0-9]+\.d}, z[0-9]+\.d, z3\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (clamp_awkward, svuint64x2_t, svuint64_t,
+ z0_res = svclamp_single_u64_x2 (z1, z0, zn),
+ z0_res = svclamp (z1, z0, zn))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u64_x4.c
new file mode 100644
index 0000000..f27c9ab
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u64_x4.c
@@ -0,0 +1,104 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_z24_z24_z0_z5:
+** uclamp {z24\.d - z27\.d}, z0\.d, z5\.d
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z24_z0_z5, svuint64x4_t, svuint64_t, z24,
+ svclamp_single_u64_x4 (z24, z0, z5),
+ svclamp (z24, z0, z5))
+
+/*
+** clamp_z24_z28_z5_z7:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z24\.d - z27\.d}, z5\.d, z7\.d
+** |
+** uclamp {z28\.d - z31\.d}, z5\.d, z7\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z28_z5_z7, svuint64x4_t, svuint64_t, z24,
+ svclamp_single_u64_x4 (z28, z5, z7),
+ svclamp (z28, z5, z7))
+
+/*
+** clamp_z24_z1_z7_z16:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z24\.d - z27\.d}, z7\.d, z16\.d
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z1_z7_z16, svuint64x4_t, svuint64_t, z24,
+ svclamp_single_u64_x4 (z1, z7, z16),
+ svclamp (z1, z7, z16))
+
+/*
+** clamp_z1_z24_z16_z23:
+** uclamp {z24\.d - z27\.d}, z16\.d, z23\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z24_z16_z23, svuint64x4_t, svuint64_t, z1,
+ svclamp_single_u64_x4 (z24, z16, z23),
+ svclamp (z24, z16, z23))
+
+/*
+** clamp_z1_z1_z23_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z[0-9]+\.d - z[0-9]+\.d}, z23\.d, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z1_z23_z0, svuint64x4_t, svuint64_t, z1,
+ svclamp_single_u64_x4 (z1, z23, z0),
+ svclamp (z1, z23, z0))
+
+/*
+** clamp_z18_z18_z16_z5:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z[0-9]+\.d - z[0-9]+\.d}, z16\.d, z5\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z18_z18_z16_z5, svuint64x4_t, svuint64_t, z18,
+ svclamp_single_u64_x4 (z18, z16, z5),
+ svclamp (z18, z16, z5))
+
+/*
+** clamp_awkward:
+** ...
+** uclamp {z[0-9]+\.d - z[0-9]+\.d}, z[0-9]+\.d, z5\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (clamp_awkward, svuint64x4_t, svuint64_t,
+ z0_res = svclamp_single_u64_x4 (z1, z0, zn),
+ z0_res = svclamp (z1, z0, zn))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u8_x2.c
new file mode 100644
index 0000000..27f6b1d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u8_x2.c
@@ -0,0 +1,94 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_z24_z24_z0_z5:
+** uclamp {z24\.b - z25\.b}, z0\.b, z5\.b
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z24_z0_z5, svuint8x2_t, svuint8_t, z24,
+ svclamp_single_u8_x2 (z24, z0, z5),
+ svclamp (z24, z0, z5))
+
+/*
+** clamp_z24_z28_z5_z7:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z24\.b - z25\.b}, z5\.b, z7\.b
+** |
+** uclamp {z28\.b - z29\.b}, z5\.b, z7\.b
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z28_z5_z7, svuint8x2_t, svuint8_t, z24,
+ svclamp_single_u8_x2 (z28, z5, z7),
+ svclamp (z28, z5, z7))
+
+/*
+** clamp_z24_z1_z7_z16:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** uclamp {z24\.b - z25\.b}, z7\.b, z16\.b
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z1_z7_z16, svuint8x2_t, svuint8_t, z24,
+ svclamp_single_u8_x2 (z1, z7, z16),
+ svclamp (z1, z7, z16))
+
+/*
+** clamp_z1_z24_z16_z23:
+** uclamp {z24\.b - z25\.b}, z16\.b, z23\.b
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z24_z16_z23, svuint8x2_t, svuint8_t, z1,
+ svclamp_single_u8_x2 (z24, z16, z23),
+ svclamp (z24, z16, z23))
+
+/*
+** clamp_z1_z1_z23_z0:
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z[0-9]+\.b - z[0-9]+\.b}, z23\.b, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z1_z23_z0, svuint8x2_t, svuint8_t, z1,
+ svclamp_single_u8_x2 (z1, z23, z0),
+ svclamp (z1, z23, z0))
+
+/*
+** clamp_z18_z18_z0_z23:
+** uclamp {z18\.b - z19\.b}, z0\.b, z23\.b
+** ret
+*/
+TEST_XN_SINGLE (clamp_z18_z18_z0_z23, svuint8x2_t, svuint8_t, z18,
+ svclamp_single_u8_x2 (z18, z0, z23),
+ svclamp (z18, z0, z23))
+
+/*
+** clamp_awkward:
+** ...
+** uclamp {z[0-9]+\.b - z[0-9]+\.b}, z[0-9]+\.b, z3\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (clamp_awkward, svuint8x2_t, svuint8_t,
+ z0_res = svclamp_single_u8_x2 (z1, z0, zn),
+ z0_res = svclamp (z1, z0, zn))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u8_x4.c
new file mode 100644
index 0000000..1e04634
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/clamp_u8_x4.c
@@ -0,0 +1,104 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** clamp_z24_z24_z0_z5:
+** uclamp {z24\.b - z27\.b}, z0\.b, z5\.b
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z24_z0_z5, svuint8x4_t, svuint8_t, z24,
+ svclamp_single_u8_x4 (z24, z0, z5),
+ svclamp (z24, z0, z5))
+
+/*
+** clamp_z24_z28_z5_z7:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z24\.b - z27\.b}, z5\.b, z7\.b
+** |
+** uclamp {z28\.b - z31\.b}, z5\.b, z7\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z28_z5_z7, svuint8x4_t, svuint8_t, z24,
+ svclamp_single_u8_x4 (z28, z5, z7),
+ svclamp (z28, z5, z7))
+
+/*
+** clamp_z24_z1_z7_z16:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z24\.b - z27\.b}, z7\.b, z16\.b
+** ret
+*/
+TEST_XN_SINGLE (clamp_z24_z1_z7_z16, svuint8x4_t, svuint8_t, z24,
+ svclamp_single_u8_x4 (z1, z7, z16),
+ svclamp (z1, z7, z16))
+
+/*
+** clamp_z1_z24_z16_z23:
+** uclamp {z24\.b - z27\.b}, z16\.b, z23\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z24_z16_z23, svuint8x4_t, svuint8_t, z1,
+ svclamp_single_u8_x4 (z24, z16, z23),
+ svclamp (z24, z16, z23))
+
+/*
+** clamp_z1_z1_z23_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z[0-9]+\.b - z[0-9]+\.b}, z23\.b, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z1_z1_z23_z0, svuint8x4_t, svuint8_t, z1,
+ svclamp_single_u8_x4 (z1, z23, z0),
+ svclamp (z1, z23, z0))
+
+/*
+** clamp_z18_z18_z16_z5:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uclamp {z[0-9]+\.b - z[0-9]+\.b}, z16\.b, z5\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (clamp_z18_z18_z16_z5, svuint8x4_t, svuint8_t, z18,
+ svclamp_single_u8_x4 (z18, z16, z5),
+ svclamp (z18, z16, z5))
+
+/*
+** clamp_awkward:
+** ...
+** uclamp {z[0-9]+\.b - z[0-9]+\.b}, z[0-9]+\.b, z5\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (clamp_awkward, svuint8x4_t, svuint8_t,
+ z0_res = svclamp_single_u8_x4 (z1, z0, zn),
+ z0_res = svclamp (z1, z0, zn))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cntp_c16.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cntp_c16.c
new file mode 100644
index 0000000..2206206
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cntp_c16.c
@@ -0,0 +1,39 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** cntp_x0_pn0_2:
+** cntp x0, pn0\.h, vlx2
+** ret
+*/
+TEST_COUNT_PN (cntp_x0_pn0_2,
+ x0 = svcntp_c16 (pn0, 2),
+ x0 = svcntp_c16 (pn0, 2))
+
+/*
+** cntp_x15_pn7_4:
+** cntp x15, pn7\.h, vlx4
+** ret
+*/
+TEST_COUNT_PN (cntp_x15_pn7_4,
+ x15 = svcntp_c16 (pn7, 4),
+ x15 = svcntp_c16 (pn7, 4))
+
+/*
+** cntp_x17_pn8_2:
+** cntp x17, pn8\.h, vlx2
+** ret
+*/
+TEST_COUNT_PN (cntp_x17_pn8_2,
+ x17 = svcntp_c16 (pn8, 2),
+ x17 = svcntp_c16 (pn8, 2))
+
+/*
+** cntp_x0_pn15_4:
+** cntp x0, pn15\.h, vlx4
+** ret
+*/
+TEST_COUNT_PN (cntp_x0_pn15_4,
+ x0 = svcntp_c16 (pn15, 4),
+ x0 = svcntp_c16 (pn15, 4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cntp_c32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cntp_c32.c
new file mode 100644
index 0000000..86d15c6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cntp_c32.c
@@ -0,0 +1,39 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** cntp_x0_pn0_2:
+** cntp x0, pn0\.s, vlx2
+** ret
+*/
+TEST_COUNT_PN (cntp_x0_pn0_2,
+ x0 = svcntp_c32 (pn0, 2),
+ x0 = svcntp_c32 (pn0, 2))
+
+/*
+** cntp_x15_pn7_4:
+** cntp x15, pn7\.s, vlx4
+** ret
+*/
+TEST_COUNT_PN (cntp_x15_pn7_4,
+ x15 = svcntp_c32 (pn7, 4),
+ x15 = svcntp_c32 (pn7, 4))
+
+/*
+** cntp_x17_pn8_2:
+** cntp x17, pn8\.s, vlx2
+** ret
+*/
+TEST_COUNT_PN (cntp_x17_pn8_2,
+ x17 = svcntp_c32 (pn8, 2),
+ x17 = svcntp_c32 (pn8, 2))
+
+/*
+** cntp_x0_pn15_4:
+** cntp x0, pn15\.s, vlx4
+** ret
+*/
+TEST_COUNT_PN (cntp_x0_pn15_4,
+ x0 = svcntp_c32 (pn15, 4),
+ x0 = svcntp_c32 (pn15, 4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cntp_c64.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cntp_c64.c
new file mode 100644
index 0000000..d56e676
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cntp_c64.c
@@ -0,0 +1,39 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** cntp_x0_pn0_2:
+** cntp x0, pn0\.d, vlx2
+** ret
+*/
+TEST_COUNT_PN (cntp_x0_pn0_2,
+ x0 = svcntp_c64 (pn0, 2),
+ x0 = svcntp_c64 (pn0, 2))
+
+/*
+** cntp_x15_pn7_4:
+** cntp x15, pn7\.d, vlx4
+** ret
+*/
+TEST_COUNT_PN (cntp_x15_pn7_4,
+ x15 = svcntp_c64 (pn7, 4),
+ x15 = svcntp_c64 (pn7, 4))
+
+/*
+** cntp_x17_pn8_2:
+** cntp x17, pn8\.d, vlx2
+** ret
+*/
+TEST_COUNT_PN (cntp_x17_pn8_2,
+ x17 = svcntp_c64 (pn8, 2),
+ x17 = svcntp_c64 (pn8, 2))
+
+/*
+** cntp_x0_pn15_4:
+** cntp x0, pn15\.d, vlx4
+** ret
+*/
+TEST_COUNT_PN (cntp_x0_pn15_4,
+ x0 = svcntp_c64 (pn15, 4),
+ x0 = svcntp_c64 (pn15, 4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cntp_c8.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cntp_c8.c
new file mode 100644
index 0000000..35ce2d6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cntp_c8.c
@@ -0,0 +1,39 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** cntp_x0_pn0_2:
+** cntp x0, pn0\.b, vlx2
+** ret
+*/
+TEST_COUNT_PN (cntp_x0_pn0_2,
+ x0 = svcntp_c8 (pn0, 2),
+ x0 = svcntp_c8 (pn0, 2))
+
+/*
+** cntp_x15_pn7_4:
+** cntp x15, pn7\.b, vlx4
+** ret
+*/
+TEST_COUNT_PN (cntp_x15_pn7_4,
+ x15 = svcntp_c8 (pn7, 4),
+ x15 = svcntp_c8 (pn7, 4))
+
+/*
+** cntp_x17_pn8_2:
+** cntp x17, pn8\.b, vlx2
+** ret
+*/
+TEST_COUNT_PN (cntp_x17_pn8_2,
+ x17 = svcntp_c8 (pn8, 2),
+ x17 = svcntp_c8 (pn8, 2))
+
+/*
+** cntp_x0_pn15_4:
+** cntp x0, pn15\.b, vlx4
+** ret
+*/
+TEST_COUNT_PN (cntp_x0_pn15_4,
+ x0 = svcntp_c8 (pn15, 4),
+ x0 = svcntp_c8 (pn15, 4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_bf16_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_bf16_f32_x2.c
new file mode 100644
index 0000000..639991a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_bf16_f32_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** cvt_z0_z0:
+** bfcvt z0\.h, {z0\.s - z1\.s}
+** ret
+*/
+TEST_X2_NARROW (cvt_z0_z0, svfloat32x2_t, svbfloat16_t,
+ z0_res = svcvt_bf16_f32_x2 (z0),
+ z0_res = svcvt_bf16 (z0))
+
+/*
+** cvt_z0_z6:
+** bfcvt z0\.h, {z6\.s - z7\.s}
+** ret
+*/
+TEST_X2_NARROW (cvt_z0_z6, svfloat32x2_t, svbfloat16_t,
+ z0_res = svcvt_bf16_f32_x2 (z6),
+ z0_res = svcvt_bf16 (z6))
+
+/*
+** cvt_z0_z29:
+** mov [^\n]+
+** mov [^\n]+
+** bfcvt z0\.h, [^\n]+
+** ret
+*/
+TEST_X2_NARROW (cvt_z0_z29, svfloat32x2_t, svbfloat16_t,
+ z0_res = svcvt_bf16_f32_x2 (z29),
+ z0_res = svcvt_bf16 (z29))
+
+/*
+** cvt_z5_z0:
+** bfcvt z5\.h, {z0\.s - z1\.s}
+** ret
+*/
+TEST_X2_NARROW (cvt_z5_z0, svfloat32x2_t, svbfloat16_t,
+ z5 = svcvt_bf16_f32_x2 (z0),
+ z5 = svcvt_bf16 (z0))
+
+/*
+** cvt_z22_z16:
+** bfcvt z22\.h, {z16\.s - z17\.s}
+** ret
+*/
+TEST_X2_NARROW (cvt_z22_z16, svfloat32x2_t, svbfloat16_t,
+ z22 = svcvt_bf16_f32_x2 (z16),
+ z22 = svcvt_bf16 (z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_f16_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_f16_f32_x2.c
new file mode 100644
index 0000000..35f8c1c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_f16_f32_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** cvt_z0_z0:
+** fcvt z0\.h, {z0\.s - z1\.s}
+** ret
+*/
+TEST_X2_NARROW (cvt_z0_z0, svfloat32x2_t, svfloat16_t,
+ z0_res = svcvt_f16_f32_x2 (z0),
+ z0_res = svcvt_f16 (z0))
+
+/*
+** cvt_z0_z6:
+** fcvt z0\.h, {z6\.s - z7\.s}
+** ret
+*/
+TEST_X2_NARROW (cvt_z0_z6, svfloat32x2_t, svfloat16_t,
+ z0_res = svcvt_f16_f32_x2 (z6),
+ z0_res = svcvt_f16 (z6))
+
+/*
+** cvt_z0_z29:
+** mov [^\n]+
+** mov [^\n]+
+** fcvt z0\.h, [^\n]+
+** ret
+*/
+TEST_X2_NARROW (cvt_z0_z29, svfloat32x2_t, svfloat16_t,
+ z0_res = svcvt_f16_f32_x2 (z29),
+ z0_res = svcvt_f16 (z29))
+
+/*
+** cvt_z5_z0:
+** fcvt z5\.h, {z0\.s - z1\.s}
+** ret
+*/
+TEST_X2_NARROW (cvt_z5_z0, svfloat32x2_t, svfloat16_t,
+ z5 = svcvt_f16_f32_x2 (z0),
+ z5 = svcvt_f16 (z0))
+
+/*
+** cvt_z22_z16:
+** fcvt z22\.h, {z16\.s - z17\.s}
+** ret
+*/
+TEST_X2_NARROW (cvt_z22_z16, svfloat32x2_t, svfloat16_t,
+ z22 = svcvt_f16_f32_x2 (z16),
+ z22 = svcvt_f16 (z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_f32_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_f32_s32_x2.c
new file mode 100644
index 0000000..3e39512
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_f32_s32_x2.c
@@ -0,0 +1,43 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** cvt_z0_z4:
+** scvtf {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_DUAL_XN (cvt_z0_z4, svfloat32x2_t, svint32x2_t, z0,
+ svcvt_f32_s32_x2 (z4),
+ svcvt_f32 (z4))
+
+/*
+** cvt_z4_z0:
+** scvtf {z4\.s - z5\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_DUAL_XN (cvt_z4_z0, svint32x2_t, svfloat32x2_t, z4,
+ svcvt_f32_s32_x2 (z0),
+ svcvt_f32 (z0))
+
+/*
+** cvt_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** scvtf {z18\.s - z19\.s}, [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z18_z23, svfloat32x2_t, svint32x2_t, z18,
+ svcvt_f32_s32_x2 (z23),
+ svcvt_f32 (z23))
+
+/*
+** cvt_z23_z28:
+** scvtf [^\n]+, {z28\.s - z29\.s}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z23_z28, svint32x2_t, svfloat32x2_t, z23,
+ svcvt_f32_s32_x2 (z28),
+ svcvt_f32 (z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_f32_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_f32_s32_x4.c
new file mode 100644
index 0000000..ae3d582
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_f32_s32_x4.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** cvt_z0_z4:
+** scvtf {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_DUAL_XN (cvt_z0_z4, svfloat32x4_t, svint32x4_t, z0,
+ svcvt_f32_s32_x4 (z4),
+ svcvt_f32 (z4))
+
+/*
+** cvt_z4_z0:
+** scvtf {z4\.s - z7\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_DUAL_XN (cvt_z4_z0, svint32x4_t, svfloat32x4_t, z4,
+ svcvt_f32_s32_x4 (z0),
+ svcvt_f32 (z0))
+
+/*
+** cvt_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** scvtf {z4\.s - z7\.s}, [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z4_z18, svint32x4_t, svfloat32x4_t, z4,
+ svcvt_f32_s32_x4 (z18),
+ svcvt_f32 (z18))
+
+/*
+** cvt_z28_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** scvtf {z28\.s - z31\.s}, [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z28_z23, svfloat32x4_t, svint32x4_t, z28,
+ svcvt_f32_s32_x4 (z23),
+ svcvt_f32 (z23))
+
+/*
+** cvt_z23_z28:
+** scvtf [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z23_z28, svint32x4_t, svfloat32x4_t, z23,
+ svcvt_f32_s32_x4 (z28),
+ svcvt_f32 (z28))
+
+/*
+** cvt_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** scvtf {z[^\n]+}, {z.*}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z23_z18, svint32x4_t, svfloat32x4_t, z23,
+ svcvt_f32_s32_x4 (z18),
+ svcvt_f32 (z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_f32_u32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_f32_u32_x2.c
new file mode 100644
index 0000000..da23d1f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_f32_u32_x2.c
@@ -0,0 +1,43 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** cvt_z0_z4:
+** ucvtf {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_DUAL_XN (cvt_z0_z4, svfloat32x2_t, svuint32x2_t, z0,
+ svcvt_f32_u32_x2 (z4),
+ svcvt_f32 (z4))
+
+/*
+** cvt_z4_z0:
+** ucvtf {z4\.s - z5\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_DUAL_XN (cvt_z4_z0, svuint32x2_t, svfloat32x2_t, z4,
+ svcvt_f32_u32_x2 (z0),
+ svcvt_f32 (z0))
+
+/*
+** cvt_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** ucvtf {z18\.s - z19\.s}, [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z18_z23, svfloat32x2_t, svuint32x2_t, z18,
+ svcvt_f32_u32_x2 (z23),
+ svcvt_f32 (z23))
+
+/*
+** cvt_z23_z28:
+** ucvtf [^\n]+, {z28\.s - z29\.s}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z23_z28, svuint32x2_t, svfloat32x2_t, z23,
+ svcvt_f32_u32_x2 (z28),
+ svcvt_f32 (z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_f32_u32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_f32_u32_x4.c
new file mode 100644
index 0000000..6302981
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_f32_u32_x4.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** cvt_z0_z4:
+** ucvtf {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_DUAL_XN (cvt_z0_z4, svfloat32x4_t, svuint32x4_t, z0,
+ svcvt_f32_u32_x4 (z4),
+ svcvt_f32 (z4))
+
+/*
+** cvt_z4_z0:
+** ucvtf {z4\.s - z7\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_DUAL_XN (cvt_z4_z0, svuint32x4_t, svfloat32x4_t, z4,
+ svcvt_f32_u32_x4 (z0),
+ svcvt_f32 (z0))
+
+/*
+** cvt_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ucvtf {z4\.s - z7\.s}, [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z4_z18, svuint32x4_t, svfloat32x4_t, z4,
+ svcvt_f32_u32_x4 (z18),
+ svcvt_f32 (z18))
+
+/*
+** cvt_z28_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ucvtf {z28\.s - z31\.s}, [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z28_z23, svfloat32x4_t, svuint32x4_t, z28,
+ svcvt_f32_u32_x4 (z23),
+ svcvt_f32 (z23))
+
+/*
+** cvt_z23_z28:
+** ucvtf [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z23_z28, svuint32x4_t, svfloat32x4_t, z23,
+ svcvt_f32_u32_x4 (z28),
+ svcvt_f32 (z28))
+
+/*
+** cvt_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ucvtf {z[^\n]+}, {z.*}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z23_z18, svuint32x4_t, svfloat32x4_t, z23,
+ svcvt_f32_u32_x4 (z18),
+ svcvt_f32 (z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_s32_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_s32_f32_x2.c
new file mode 100644
index 0000000..935d7db
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_s32_f32_x2.c
@@ -0,0 +1,43 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** cvt_z0_z4:
+** fcvtzs {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_DUAL_XN (cvt_z0_z4, svint32x2_t, svfloat32x2_t, z0,
+ svcvt_s32_f32_x2 (z4),
+ svcvt_s32 (z4))
+
+/*
+** cvt_z4_z0:
+** fcvtzs {z4\.s - z5\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_DUAL_XN (cvt_z4_z0, svfloat32x2_t, svint32x2_t, z4,
+ svcvt_s32_f32_x2 (z0),
+ svcvt_s32 (z0))
+
+/*
+** cvt_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** fcvtzs {z18\.s - z19\.s}, [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z18_z23, svint32x2_t, svfloat32x2_t, z18,
+ svcvt_s32_f32_x2 (z23),
+ svcvt_s32 (z23))
+
+/*
+** cvt_z23_z28:
+** fcvtzs [^\n]+, {z28\.s - z29\.s}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z23_z28, svfloat32x2_t, svint32x2_t, z23,
+ svcvt_s32_f32_x2 (z28),
+ svcvt_s32 (z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_s32_f32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_s32_f32_x4.c
new file mode 100644
index 0000000..45b90d5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_s32_f32_x4.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** cvt_z0_z4:
+** fcvtzs {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_DUAL_XN (cvt_z0_z4, svint32x4_t, svfloat32x4_t, z0,
+ svcvt_s32_f32_x4 (z4),
+ svcvt_s32 (z4))
+
+/*
+** cvt_z4_z0:
+** fcvtzs {z4\.s - z7\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_DUAL_XN (cvt_z4_z0, svfloat32x4_t, svint32x4_t, z4,
+ svcvt_s32_f32_x4 (z0),
+ svcvt_s32 (z0))
+
+/*
+** cvt_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fcvtzs {z4\.s - z7\.s}, [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z4_z18, svfloat32x4_t, svint32x4_t, z4,
+ svcvt_s32_f32_x4 (z18),
+ svcvt_s32 (z18))
+
+/*
+** cvt_z28_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fcvtzs {z28\.s - z31\.s}, [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z28_z23, svint32x4_t, svfloat32x4_t, z28,
+ svcvt_s32_f32_x4 (z23),
+ svcvt_s32 (z23))
+
+/*
+** cvt_z23_z28:
+** fcvtzs [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z23_z28, svfloat32x4_t, svint32x4_t, z23,
+ svcvt_s32_f32_x4 (z28),
+ svcvt_s32 (z28))
+
+/*
+** cvt_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fcvtzs {z[^\n]+}, {z.*}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z23_z18, svfloat32x4_t, svint32x4_t, z23,
+ svcvt_s32_f32_x4 (z18),
+ svcvt_s32 (z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_u32_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_u32_f32_x2.c
new file mode 100644
index 0000000..ad57a78
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_u32_f32_x2.c
@@ -0,0 +1,43 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** cvt_z0_z4:
+** fcvtzu {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_DUAL_XN (cvt_z0_z4, svuint32x2_t, svfloat32x2_t, z0,
+ svcvt_u32_f32_x2 (z4),
+ svcvt_u32 (z4))
+
+/*
+** cvt_z4_z0:
+** fcvtzu {z4\.s - z5\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_DUAL_XN (cvt_z4_z0, svfloat32x2_t, svuint32x2_t, z4,
+ svcvt_u32_f32_x2 (z0),
+ svcvt_u32 (z0))
+
+/*
+** cvt_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** fcvtzu {z18\.s - z19\.s}, [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z18_z23, svuint32x2_t, svfloat32x2_t, z18,
+ svcvt_u32_f32_x2 (z23),
+ svcvt_u32 (z23))
+
+/*
+** cvt_z23_z28:
+** fcvtzu [^\n]+, {z28\.s - z29\.s}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z23_z28, svfloat32x2_t, svuint32x2_t, z23,
+ svcvt_u32_f32_x2 (z28),
+ svcvt_u32 (z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_u32_f32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_u32_f32_x4.c
new file mode 100644
index 0000000..29a140c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvt_u32_f32_x4.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** cvt_z0_z4:
+** fcvtzu {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_DUAL_XN (cvt_z0_z4, svuint32x4_t, svfloat32x4_t, z0,
+ svcvt_u32_f32_x4 (z4),
+ svcvt_u32 (z4))
+
+/*
+** cvt_z4_z0:
+** fcvtzu {z4\.s - z7\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_DUAL_XN (cvt_z4_z0, svfloat32x4_t, svuint32x4_t, z4,
+ svcvt_u32_f32_x4 (z0),
+ svcvt_u32 (z0))
+
+/*
+** cvt_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fcvtzu {z4\.s - z7\.s}, [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z4_z18, svfloat32x4_t, svuint32x4_t, z4,
+ svcvt_u32_f32_x4 (z18),
+ svcvt_u32 (z18))
+
+/*
+** cvt_z28_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fcvtzu {z28\.s - z31\.s}, [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z28_z23, svuint32x4_t, svfloat32x4_t, z28,
+ svcvt_u32_f32_x4 (z23),
+ svcvt_u32 (z23))
+
+/*
+** cvt_z23_z28:
+** fcvtzu [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z23_z28, svfloat32x4_t, svuint32x4_t, z23,
+ svcvt_u32_f32_x4 (z28),
+ svcvt_u32 (z28))
+
+/*
+** cvt_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fcvtzu {z[^\n]+}, {z.*}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (cvt_z23_z18, svfloat32x4_t, svuint32x4_t, z23,
+ svcvt_u32_f32_x4 (z18),
+ svcvt_u32 (z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvtn_bf16_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvtn_bf16_f32_x2.c
new file mode 100644
index 0000000..8974bed
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvtn_bf16_f32_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** cvtn_z0_z0:
+** bfcvtn z0\.h, {z0\.s - z1\.s}
+** ret
+*/
+TEST_X2_NARROW (cvtn_z0_z0, svfloat32x2_t, svbfloat16_t,
+ z0_res = svcvtn_bf16_f32_x2 (z0),
+ z0_res = svcvtn_bf16 (z0))
+
+/*
+** cvtn_z0_z6:
+** bfcvtn z0\.h, {z6\.s - z7\.s}
+** ret
+*/
+TEST_X2_NARROW (cvtn_z0_z6, svfloat32x2_t, svbfloat16_t,
+ z0_res = svcvtn_bf16_f32_x2 (z6),
+ z0_res = svcvtn_bf16 (z6))
+
+/*
+** cvtn_z0_z29:
+** mov [^\n]+
+** mov [^\n]+
+** bfcvtn z0\.h, [^\n]+
+** ret
+*/
+TEST_X2_NARROW (cvtn_z0_z29, svfloat32x2_t, svbfloat16_t,
+ z0_res = svcvtn_bf16_f32_x2 (z29),
+ z0_res = svcvtn_bf16 (z29))
+
+/*
+** cvtn_z5_z0:
+** bfcvtn z5\.h, {z0\.s - z1\.s}
+** ret
+*/
+TEST_X2_NARROW (cvtn_z5_z0, svfloat32x2_t, svbfloat16_t,
+ z5 = svcvtn_bf16_f32_x2 (z0),
+ z5 = svcvtn_bf16 (z0))
+
+/*
+** cvtn_z22_z16:
+** bfcvtn z22\.h, {z16\.s - z17\.s}
+** ret
+*/
+TEST_X2_NARROW (cvtn_z22_z16, svfloat32x2_t, svbfloat16_t,
+ z22 = svcvtn_bf16_f32_x2 (z16),
+ z22 = svcvtn_bf16 (z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvtn_f16_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvtn_f16_f32_x2.c
new file mode 100644
index 0000000..6693d38
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/cvtn_f16_f32_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** cvtn_z0_z0:
+** fcvtn z0\.h, {z0\.s - z1\.s}
+** ret
+*/
+TEST_X2_NARROW (cvtn_z0_z0, svfloat32x2_t, svfloat16_t,
+ z0_res = svcvtn_f16_f32_x2 (z0),
+ z0_res = svcvtn_f16 (z0))
+
+/*
+** cvtn_z0_z6:
+** fcvtn z0\.h, {z6\.s - z7\.s}
+** ret
+*/
+TEST_X2_NARROW (cvtn_z0_z6, svfloat32x2_t, svfloat16_t,
+ z0_res = svcvtn_f16_f32_x2 (z6),
+ z0_res = svcvtn_f16 (z6))
+
+/*
+** cvtn_z0_z29:
+** mov [^\n]+
+** mov [^\n]+
+** fcvtn z0\.h, [^\n]+
+** ret
+*/
+TEST_X2_NARROW (cvtn_z0_z29, svfloat32x2_t, svfloat16_t,
+ z0_res = svcvtn_f16_f32_x2 (z29),
+ z0_res = svcvtn_f16 (z29))
+
+/*
+** cvtn_z5_z0:
+** fcvtn z5\.h, {z0\.s - z1\.s}
+** ret
+*/
+TEST_X2_NARROW (cvtn_z5_z0, svfloat32x2_t, svfloat16_t,
+ z5 = svcvtn_f16_f32_x2 (z0),
+ z5 = svcvtn_f16 (z0))
+
+/*
+** cvtn_z22_z16:
+** fcvtn z22\.h, {z16\.s - z17\.s}
+** ret
+*/
+TEST_X2_NARROW (cvtn_z22_z16, svfloat32x2_t, svfloat16_t,
+ z22 = svcvtn_f16_f32_x2 (z16),
+ z22 = svcvtn_f16 (z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_f32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_f32.c
new file mode 100644
index 0000000..815aada
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_f32.c
@@ -0,0 +1,44 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_f32_tied1:
+** fdot z0\.s, z4\.h, z5\.h
+** ret
+*/
+TEST_DUAL_Z (dot_f32_tied1, svfloat32_t, svfloat16_t,
+ z0 = svdot_f32_f16 (z0, z4, z5),
+ z0 = svdot (z0, z4, z5))
+
+/*
+** dot_f32_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z4
+** fdot z0\.s, \1\.h, z1\.h
+** ret
+*/
+TEST_DUAL_Z_REV (dot_f32_tied2, svfloat32_t, svfloat16_t,
+ z0_res = svdot_f32_f16 (z4, z0, z1),
+ z0_res = svdot (z4, z0, z1))
+
+/*
+** dot_f32_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z4
+** fdot z0\.s, z1\.h, \1\.h
+** ret
+*/
+TEST_DUAL_Z_REV (dot_f32_tied3, svfloat32_t, svfloat16_t,
+ z0_res = svdot_f32_f16 (z4, z1, z0),
+ z0_res = svdot (z4, z1, z0))
+
+/*
+** dot_f32_untied:
+** movprfx z0, z1
+** fdot z0\.s, z4\.h, z5\.h
+** ret
+*/
+TEST_DUAL_Z (dot_f32_untied, svfloat32_t, svfloat16_t,
+ z0 = svdot_f32_f16 (z1, z4, z5),
+ z0 = svdot (z1, z4, z5))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_f32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_f32.c
new file mode 100644
index 0000000..263b21e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_f32.c
@@ -0,0 +1,93 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_lane_0_f32_tied1:
+** fdot z0\.s, z4\.h, z5\.h\[0\]
+** ret
+*/
+TEST_DUAL_Z (dot_lane_0_f32_tied1, svfloat32_t, svfloat16_t,
+ z0 = svdot_lane_f32_f16 (z0, z4, z5, 0),
+ z0 = svdot_lane (z0, z4, z5, 0))
+
+/*
+** dot_lane_0_f32_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z4
+** fdot z0\.s, \1\.h, z1\.h\[0\]
+** ret
+*/
+TEST_DUAL_Z_REV (dot_lane_0_f32_tied2, svfloat32_t, svfloat16_t,
+ z0_res = svdot_lane_f32_f16 (z4, z0, z1, 0),
+ z0_res = svdot_lane (z4, z0, z1, 0))
+
+/*
+** dot_lane_0_f32_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z4
+** fdot z0\.s, z1\.h, \1\.h\[0\]
+** ret
+*/
+TEST_DUAL_Z_REV (dot_lane_0_f32_tied3, svfloat32_t, svfloat16_t,
+ z0_res = svdot_lane_f32_f16 (z4, z1, z0, 0),
+ z0_res = svdot_lane (z4, z1, z0, 0))
+
+/*
+** dot_lane_0_f32_untied:
+** movprfx z0, z1
+** fdot z0\.s, z4\.h, z5\.h\[0\]
+** ret
+*/
+TEST_DUAL_Z (dot_lane_0_f32_untied, svfloat32_t, svfloat16_t,
+ z0 = svdot_lane_f32_f16 (z1, z4, z5, 0),
+ z0 = svdot_lane (z1, z4, z5, 0))
+
+/*
+** dot_lane_1_f32:
+** fdot z0\.s, z4\.h, z5\.h\[1\]
+** ret
+*/
+TEST_DUAL_Z (dot_lane_1_f32, svfloat32_t, svfloat16_t,
+ z0 = svdot_lane_f32_f16 (z0, z4, z5, 1),
+ z0 = svdot_lane (z0, z4, z5, 1))
+
+/*
+** dot_lane_2_f32:
+** fdot z0\.s, z4\.h, z5\.h\[2\]
+** ret
+*/
+TEST_DUAL_Z (dot_lane_2_f32, svfloat32_t, svfloat16_t,
+ z0 = svdot_lane_f32_f16 (z0, z4, z5, 2),
+ z0 = svdot_lane (z0, z4, z5, 2))
+
+/*
+** dot_lane_3_f32:
+** fdot z0\.s, z4\.h, z5\.h\[3\]
+** ret
+*/
+TEST_DUAL_Z (dot_lane_3_f32, svfloat32_t, svfloat16_t,
+ z0 = svdot_lane_f32_f16 (z0, z4, z5, 3),
+ z0 = svdot_lane (z0, z4, z5, 3))
+
+/*
+** dot_lane_z8_f32:
+** str d8, \[sp, -16\]!
+** mov (z[0-7])\.d, z8\.d
+** fdot z0\.s, z1\.h, \1\.h\[1\]
+** ldr d8, \[sp\], 16
+** ret
+*/
+TEST_DUAL_LANE_REG (dot_lane_z8_f32, svfloat32_t, svfloat16_t, z8,
+ z0 = svdot_lane_f32_f16 (z0, z1, z8, 1),
+ z0 = svdot_lane (z0, z1, z8, 1))
+
+/*
+** dot_lane_z16_f32:
+** mov (z[0-7])\.d, z16\.d
+** fdot z0\.s, z1\.h, \1\.h\[1\]
+** ret
+*/
+TEST_DUAL_LANE_REG (dot_lane_z16_f32, svfloat32_t, svfloat16_t, z16,
+ z0 = svdot_lane_f32_f16 (z0, z1, z16, 1),
+ z0 = svdot_lane (z0, z1, z16, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_s32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_s32.c
new file mode 100644
index 0000000..58abbea
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_s32.c
@@ -0,0 +1,93 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_lane_0_s32_tied1:
+** sdot z0\.s, z4\.h, z5\.h\[0\]
+** ret
+*/
+TEST_DUAL_Z (dot_lane_0_s32_tied1, svint32_t, svint16_t,
+ z0 = svdot_lane_s32_s16 (z0, z4, z5, 0),
+ z0 = svdot_lane (z0, z4, z5, 0))
+
+/*
+** dot_lane_0_s32_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z4
+** sdot z0\.s, \1\.h, z1\.h\[0\]
+** ret
+*/
+TEST_DUAL_Z_REV (dot_lane_0_s32_tied2, svint32_t, svint16_t,
+ z0_res = svdot_lane_s32_s16 (z4, z0, z1, 0),
+ z0_res = svdot_lane (z4, z0, z1, 0))
+
+/*
+** dot_lane_0_s32_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z4
+** sdot z0\.s, z1\.h, \1\.h\[0\]
+** ret
+*/
+TEST_DUAL_Z_REV (dot_lane_0_s32_tied3, svint32_t, svint16_t,
+ z0_res = svdot_lane_s32_s16 (z4, z1, z0, 0),
+ z0_res = svdot_lane (z4, z1, z0, 0))
+
+/*
+** dot_lane_0_s32_untied:
+** movprfx z0, z1
+** sdot z0\.s, z4\.h, z5\.h\[0\]
+** ret
+*/
+TEST_DUAL_Z (dot_lane_0_s32_untied, svint32_t, svint16_t,
+ z0 = svdot_lane_s32_s16 (z1, z4, z5, 0),
+ z0 = svdot_lane (z1, z4, z5, 0))
+
+/*
+** dot_lane_1_s32:
+** sdot z0\.s, z4\.h, z5\.h\[1\]
+** ret
+*/
+TEST_DUAL_Z (dot_lane_1_s32, svint32_t, svint16_t,
+ z0 = svdot_lane_s32_s16 (z0, z4, z5, 1),
+ z0 = svdot_lane (z0, z4, z5, 1))
+
+/*
+** dot_lane_2_s32:
+** sdot z0\.s, z4\.h, z5\.h\[2\]
+** ret
+*/
+TEST_DUAL_Z (dot_lane_2_s32, svint32_t, svint16_t,
+ z0 = svdot_lane_s32_s16 (z0, z4, z5, 2),
+ z0 = svdot_lane (z0, z4, z5, 2))
+
+/*
+** dot_lane_3_s32:
+** sdot z0\.s, z4\.h, z5\.h\[3\]
+** ret
+*/
+TEST_DUAL_Z (dot_lane_3_s32, svint32_t, svint16_t,
+ z0 = svdot_lane_s32_s16 (z0, z4, z5, 3),
+ z0 = svdot_lane (z0, z4, z5, 3))
+
+/*
+** dot_lane_z8_s32:
+** str d8, \[sp, -16\]!
+** mov (z[0-7])\.d, z8\.d
+** sdot z0\.s, z1\.h, \1\.h\[1\]
+** ldr d8, \[sp\], 16
+** ret
+*/
+TEST_DUAL_LANE_REG (dot_lane_z8_s32, svint32_t, svint16_t, z8,
+ z0 = svdot_lane_s32_s16 (z0, z1, z8, 1),
+ z0 = svdot_lane (z0, z1, z8, 1))
+
+/*
+** dot_lane_z16_s32:
+** mov (z[0-7])\.d, z16\.d
+** sdot z0\.s, z1\.h, \1\.h\[1\]
+** ret
+*/
+TEST_DUAL_LANE_REG (dot_lane_z16_s32, svint32_t, svint16_t, z16,
+ z0 = svdot_lane_s32_s16 (z0, z1, z16, 1),
+ z0 = svdot_lane (z0, z1, z16, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_u32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_u32.c
new file mode 100644
index 0000000..2cf9a14
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_u32.c
@@ -0,0 +1,93 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_lane_0_u32_tied1:
+** udot z0\.s, z4\.h, z5\.h\[0\]
+** ret
+*/
+TEST_DUAL_Z (dot_lane_0_u32_tied1, svuint32_t, svuint16_t,
+ z0 = svdot_lane_u32_u16 (z0, z4, z5, 0),
+ z0 = svdot_lane (z0, z4, z5, 0))
+
+/*
+** dot_lane_0_u32_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z4
+** udot z0\.s, \1\.h, z1\.h\[0\]
+** ret
+*/
+TEST_DUAL_Z_REV (dot_lane_0_u32_tied2, svuint32_t, svuint16_t,
+ z0_res = svdot_lane_u32_u16 (z4, z0, z1, 0),
+ z0_res = svdot_lane (z4, z0, z1, 0))
+
+/*
+** dot_lane_0_u32_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z4
+** udot z0\.s, z1\.h, \1\.h\[0\]
+** ret
+*/
+TEST_DUAL_Z_REV (dot_lane_0_u32_tied3, svuint32_t, svuint16_t,
+ z0_res = svdot_lane_u32_u16 (z4, z1, z0, 0),
+ z0_res = svdot_lane (z4, z1, z0, 0))
+
+/*
+** dot_lane_0_u32_untied:
+** movprfx z0, z1
+** udot z0\.s, z4\.h, z5\.h\[0\]
+** ret
+*/
+TEST_DUAL_Z (dot_lane_0_u32_untied, svuint32_t, svuint16_t,
+ z0 = svdot_lane_u32_u16 (z1, z4, z5, 0),
+ z0 = svdot_lane (z1, z4, z5, 0))
+
+/*
+** dot_lane_1_u32:
+** udot z0\.s, z4\.h, z5\.h\[1\]
+** ret
+*/
+TEST_DUAL_Z (dot_lane_1_u32, svuint32_t, svuint16_t,
+ z0 = svdot_lane_u32_u16 (z0, z4, z5, 1),
+ z0 = svdot_lane (z0, z4, z5, 1))
+
+/*
+** dot_lane_2_u32:
+** udot z0\.s, z4\.h, z5\.h\[2\]
+** ret
+*/
+TEST_DUAL_Z (dot_lane_2_u32, svuint32_t, svuint16_t,
+ z0 = svdot_lane_u32_u16 (z0, z4, z5, 2),
+ z0 = svdot_lane (z0, z4, z5, 2))
+
+/*
+** dot_lane_3_u32:
+** udot z0\.s, z4\.h, z5\.h\[3\]
+** ret
+*/
+TEST_DUAL_Z (dot_lane_3_u32, svuint32_t, svuint16_t,
+ z0 = svdot_lane_u32_u16 (z0, z4, z5, 3),
+ z0 = svdot_lane (z0, z4, z5, 3))
+
+/*
+** dot_lane_z8_u32:
+** str d8, \[sp, -16\]!
+** mov (z[0-7])\.d, z8\.d
+** udot z0\.s, z1\.h, \1\.h\[1\]
+** ldr d8, \[sp\], 16
+** ret
+*/
+TEST_DUAL_LANE_REG (dot_lane_z8_u32, svuint32_t, svuint16_t, z8,
+ z0 = svdot_lane_u32_u16 (z0, z1, z8, 1),
+ z0 = svdot_lane (z0, z1, z8, 1))
+
+/*
+** dot_lane_z16_u32:
+** mov (z[0-7])\.d, z16\.d
+** udot z0\.s, z1\.h, \1\.h\[1\]
+** ret
+*/
+TEST_DUAL_LANE_REG (dot_lane_z16_u32, svuint32_t, svuint16_t, z16,
+ z0 = svdot_lane_u32_u16 (z0, z1, z16, 1),
+ z0 = svdot_lane (z0, z1, z16, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_bf16_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_bf16_vg1x2.c
new file mode 100644
index 0000000..a452c48
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_bf16_vg1x2.c
@@ -0,0 +1,102 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** bfdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_0_z0_z4_0, svbfloat16x2_t, svbfloat16_t,
+ svdot_lane_za32_bf16_vg1x2 (0, z0, z4, 0),
+ svdot_lane_za32_vg1x2 (0, z0, z4, 0))
+
+/*
+** dot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** bfdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0_z0_z7_1, svbfloat16x2_t, svbfloat16_t,
+ svdot_lane_za32_bf16_vg1x2 (w0, z0, z7, 1),
+ svdot_lane_za32_vg1x2 (w0, z0, z7, 1))
+
+/*
+** dot_lane_w8_z28_z4_2:
+** bfdot za\.s\[w8, 0, vgx2\], {z28\.h - z29\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z4_2, svbfloat16x2_t, svbfloat16_t,
+ svdot_lane_za32_bf16_vg1x2 (w8, z28, z4, 2),
+ svdot_lane_za32_vg1x2 (w8, z28, z4, 2))
+
+/*
+** dot_lane_w8p7_z0_z4_3:
+** bfdot za\.s\[w8, 7, vgx2\], {z0\.h - z1\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p7_z0_z4_3, svbfloat16x2_t, svbfloat16_t,
+ svdot_lane_za32_bf16_vg1x2 (w8 + 7, z0, z4, 3),
+ svdot_lane_za32_vg1x2 (w8 + 7, z0, z4, 3))
+
+/*
+** dot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** bfdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p8_z0_z4_0, svbfloat16x2_t, svbfloat16_t,
+ svdot_lane_za32_bf16_vg1x2 (w8 + 8, z0, z4, 0),
+ svdot_lane_za32_vg1x2 (w8 + 8, z0, z4, 0))
+
+/*
+** dot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** bfdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0m1_z0_z4_1, svbfloat16x2_t, svbfloat16_t,
+ svdot_lane_za32_bf16_vg1x2 (w0 - 1, z0, z4, 1),
+ svdot_lane_za32_vg1x2 (w0 - 1, z0, z4, 1))
+
+/*
+** dot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** bfdot za\.s\[w8, 0, vgx2\], {z4\.h - z5\.h}, z15\.h\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (dot_lane_w8_z4_z15_2, svbfloat16x2_t, svbfloat16_t,
+ svdot_lane_za32_bf16_vg1x2 (w8, z4, z15, 2),
+ svdot_lane_za32_vg1x2 (w8, z4, z15, 2))
+
+/*
+** dot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** bfdot za\.s\[w8, 0, vgx2\], {z28\.h - z29\.h}, \1\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z16_3, svbfloat16x2_t, svbfloat16_t,
+ svdot_lane_za32_bf16_vg1x2 (w8, z28, z16, 3),
+ svdot_lane_za32_vg1x2 (w8, z28, z16, 3))
+
+/*
+** dot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** bfdot za\.s\[w8, 0, vgx2\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z17_z7_0, svbfloat16x2_t, svbfloat16_t,
+ svdot_lane_za32_bf16_vg1x2 (w8, z17, z7, 0),
+ svdot_lane_za32_vg1x2 (w8, z17, z7, 0))
+
+/*
+** dot_lane_w8_z22_z4_1:
+** bfdot za\.s\[w8, 0, vgx2\], {z22\.h - z23\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z22_z4_1, svbfloat16x2_t, svbfloat16_t,
+ svdot_lane_za32_bf16_vg1x2 (w8, z22, z4, 1),
+ svdot_lane_za32_vg1x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_bf16_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_bf16_vg1x4.c
new file mode 100644
index 0000000..a6632a2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_bf16_vg1x4.c
@@ -0,0 +1,108 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** bfdot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_0_z0_z4_0, svbfloat16x4_t, svbfloat16_t,
+ svdot_lane_za32_bf16_vg1x4 (0, z0, z4, 0),
+ svdot_lane_za32_vg1x4 (0, z0, z4, 0))
+
+/*
+** dot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** bfdot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0_z0_z7_1, svbfloat16x4_t, svbfloat16_t,
+ svdot_lane_za32_bf16_vg1x4 (w0, z0, z7, 1),
+ svdot_lane_za32_vg1x4 (w0, z0, z7, 1))
+
+/*
+** dot_lane_w8_z28_z4_2:
+** bfdot za\.s\[w8, 0, vgx4\], {z28\.h - z31\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z4_2, svbfloat16x4_t, svbfloat16_t,
+ svdot_lane_za32_bf16_vg1x4 (w8, z28, z4, 2),
+ svdot_lane_za32_vg1x4 (w8, z28, z4, 2))
+
+/*
+** dot_lane_w8p7_z0_z4_3:
+** bfdot za\.s\[w8, 7, vgx4\], {z0\.h - z3\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p7_z0_z4_3, svbfloat16x4_t, svbfloat16_t,
+ svdot_lane_za32_bf16_vg1x4 (w8 + 7, z0, z4, 3),
+ svdot_lane_za32_vg1x4 (w8 + 7, z0, z4, 3))
+
+/*
+** dot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** bfdot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p8_z0_z4_0, svbfloat16x4_t, svbfloat16_t,
+ svdot_lane_za32_bf16_vg1x4 (w8 + 8, z0, z4, 0),
+ svdot_lane_za32_vg1x4 (w8 + 8, z0, z4, 0))
+
+/*
+** dot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** bfdot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0m1_z0_z4_1, svbfloat16x4_t, svbfloat16_t,
+ svdot_lane_za32_bf16_vg1x4 (w0 - 1, z0, z4, 1),
+ svdot_lane_za32_vg1x4 (w0 - 1, z0, z4, 1))
+
+/*
+** dot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** bfdot za\.s\[w8, 0, vgx4\], {z4\.h - z7\.h}, z15\.h\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (dot_lane_w8_z4_z15_2, svbfloat16x4_t, svbfloat16_t,
+ svdot_lane_za32_bf16_vg1x4 (w8, z4, z15, 2),
+ svdot_lane_za32_vg1x4 (w8, z4, z15, 2))
+
+/*
+** dot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** bfdot za\.s\[w8, 0, vgx4\], {z28\.h - z31\.h}, \1\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z16_3, svbfloat16x4_t, svbfloat16_t,
+ svdot_lane_za32_bf16_vg1x4 (w8, z28, z16, 3),
+ svdot_lane_za32_vg1x4 (w8, z28, z16, 3))
+
+/*
+** dot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** bfdot za\.s\[w8, 0, vgx4\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z17_z7_0, svbfloat16x4_t, svbfloat16_t,
+ svdot_lane_za32_bf16_vg1x4 (w8, z17, z7, 0),
+ svdot_lane_za32_vg1x4 (w8, z17, z7, 0))
+
+/*
+** dot_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** bfdot za\.s\[w8, 0, vgx4\], [^\n]+, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z22_z4_1, svbfloat16x4_t, svbfloat16_t,
+ svdot_lane_za32_bf16_vg1x4 (w8, z22, z4, 1),
+ svdot_lane_za32_vg1x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_f16_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_f16_vg1x2.c
new file mode 100644
index 0000000..5f27d5f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_f16_vg1x2.c
@@ -0,0 +1,102 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** fdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_0_z0_z4_0, svfloat16x2_t, svfloat16_t,
+ svdot_lane_za32_f16_vg1x2 (0, z0, z4, 0),
+ svdot_lane_za32_vg1x2 (0, z0, z4, 0))
+
+/*
+** dot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** fdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0_z0_z7_1, svfloat16x2_t, svfloat16_t,
+ svdot_lane_za32_f16_vg1x2 (w0, z0, z7, 1),
+ svdot_lane_za32_vg1x2 (w0, z0, z7, 1))
+
+/*
+** dot_lane_w8_z28_z4_2:
+** fdot za\.s\[w8, 0, vgx2\], {z28\.h - z29\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z4_2, svfloat16x2_t, svfloat16_t,
+ svdot_lane_za32_f16_vg1x2 (w8, z28, z4, 2),
+ svdot_lane_za32_vg1x2 (w8, z28, z4, 2))
+
+/*
+** dot_lane_w8p7_z0_z4_3:
+** fdot za\.s\[w8, 7, vgx2\], {z0\.h - z1\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p7_z0_z4_3, svfloat16x2_t, svfloat16_t,
+ svdot_lane_za32_f16_vg1x2 (w8 + 7, z0, z4, 3),
+ svdot_lane_za32_vg1x2 (w8 + 7, z0, z4, 3))
+
+/*
+** dot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** fdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p8_z0_z4_0, svfloat16x2_t, svfloat16_t,
+ svdot_lane_za32_f16_vg1x2 (w8 + 8, z0, z4, 0),
+ svdot_lane_za32_vg1x2 (w8 + 8, z0, z4, 0))
+
+/*
+** dot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** fdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0m1_z0_z4_1, svfloat16x2_t, svfloat16_t,
+ svdot_lane_za32_f16_vg1x2 (w0 - 1, z0, z4, 1),
+ svdot_lane_za32_vg1x2 (w0 - 1, z0, z4, 1))
+
+/*
+** dot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** fdot za\.s\[w8, 0, vgx2\], {z4\.h - z5\.h}, z15\.h\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (dot_lane_w8_z4_z15_2, svfloat16x2_t, svfloat16_t,
+ svdot_lane_za32_f16_vg1x2 (w8, z4, z15, 2),
+ svdot_lane_za32_vg1x2 (w8, z4, z15, 2))
+
+/*
+** dot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** fdot za\.s\[w8, 0, vgx2\], {z28\.h - z29\.h}, \1\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z16_3, svfloat16x2_t, svfloat16_t,
+ svdot_lane_za32_f16_vg1x2 (w8, z28, z16, 3),
+ svdot_lane_za32_vg1x2 (w8, z28, z16, 3))
+
+/*
+** dot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** fdot za\.s\[w8, 0, vgx2\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z17_z7_0, svfloat16x2_t, svfloat16_t,
+ svdot_lane_za32_f16_vg1x2 (w8, z17, z7, 0),
+ svdot_lane_za32_vg1x2 (w8, z17, z7, 0))
+
+/*
+** dot_lane_w8_z22_z4_1:
+** fdot za\.s\[w8, 0, vgx2\], {z22\.h - z23\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z22_z4_1, svfloat16x2_t, svfloat16_t,
+ svdot_lane_za32_f16_vg1x2 (w8, z22, z4, 1),
+ svdot_lane_za32_vg1x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_f16_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_f16_vg1x4.c
new file mode 100644
index 0000000..fd9eb56
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_f16_vg1x4.c
@@ -0,0 +1,108 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** fdot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_0_z0_z4_0, svfloat16x4_t, svfloat16_t,
+ svdot_lane_za32_f16_vg1x4 (0, z0, z4, 0),
+ svdot_lane_za32_vg1x4 (0, z0, z4, 0))
+
+/*
+** dot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** fdot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0_z0_z7_1, svfloat16x4_t, svfloat16_t,
+ svdot_lane_za32_f16_vg1x4 (w0, z0, z7, 1),
+ svdot_lane_za32_vg1x4 (w0, z0, z7, 1))
+
+/*
+** dot_lane_w8_z28_z4_2:
+** fdot za\.s\[w8, 0, vgx4\], {z28\.h - z31\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z4_2, svfloat16x4_t, svfloat16_t,
+ svdot_lane_za32_f16_vg1x4 (w8, z28, z4, 2),
+ svdot_lane_za32_vg1x4 (w8, z28, z4, 2))
+
+/*
+** dot_lane_w8p7_z0_z4_3:
+** fdot za\.s\[w8, 7, vgx4\], {z0\.h - z3\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p7_z0_z4_3, svfloat16x4_t, svfloat16_t,
+ svdot_lane_za32_f16_vg1x4 (w8 + 7, z0, z4, 3),
+ svdot_lane_za32_vg1x4 (w8 + 7, z0, z4, 3))
+
+/*
+** dot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** fdot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p8_z0_z4_0, svfloat16x4_t, svfloat16_t,
+ svdot_lane_za32_f16_vg1x4 (w8 + 8, z0, z4, 0),
+ svdot_lane_za32_vg1x4 (w8 + 8, z0, z4, 0))
+
+/*
+** dot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** fdot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0m1_z0_z4_1, svfloat16x4_t, svfloat16_t,
+ svdot_lane_za32_f16_vg1x4 (w0 - 1, z0, z4, 1),
+ svdot_lane_za32_vg1x4 (w0 - 1, z0, z4, 1))
+
+/*
+** dot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** fdot za\.s\[w8, 0, vgx4\], {z4\.h - z7\.h}, z15\.h\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (dot_lane_w8_z4_z15_2, svfloat16x4_t, svfloat16_t,
+ svdot_lane_za32_f16_vg1x4 (w8, z4, z15, 2),
+ svdot_lane_za32_vg1x4 (w8, z4, z15, 2))
+
+/*
+** dot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** fdot za\.s\[w8, 0, vgx4\], {z28\.h - z31\.h}, \1\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z16_3, svfloat16x4_t, svfloat16_t,
+ svdot_lane_za32_f16_vg1x4 (w8, z28, z16, 3),
+ svdot_lane_za32_vg1x4 (w8, z28, z16, 3))
+
+/*
+** dot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fdot za\.s\[w8, 0, vgx4\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z17_z7_0, svfloat16x4_t, svfloat16_t,
+ svdot_lane_za32_f16_vg1x4 (w8, z17, z7, 0),
+ svdot_lane_za32_vg1x4 (w8, z17, z7, 0))
+
+/*
+** dot_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fdot za\.s\[w8, 0, vgx4\], [^\n]+, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z22_z4_1, svfloat16x4_t, svfloat16_t,
+ svdot_lane_za32_f16_vg1x4 (w8, z22, z4, 1),
+ svdot_lane_za32_vg1x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_s16_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_s16_vg1x2.c
new file mode 100644
index 0000000..f7b0861
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_s16_vg1x2.c
@@ -0,0 +1,102 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** sdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_0_z0_z4_0, svint16x2_t, svint16_t,
+ svdot_lane_za32_s16_vg1x2 (0, z0, z4, 0),
+ svdot_lane_za32_vg1x2 (0, z0, z4, 0))
+
+/*
+** dot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** sdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0_z0_z7_1, svint16x2_t, svint16_t,
+ svdot_lane_za32_s16_vg1x2 (w0, z0, z7, 1),
+ svdot_lane_za32_vg1x2 (w0, z0, z7, 1))
+
+/*
+** dot_lane_w8_z28_z4_2:
+** sdot za\.s\[w8, 0, vgx2\], {z28\.h - z29\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z4_2, svint16x2_t, svint16_t,
+ svdot_lane_za32_s16_vg1x2 (w8, z28, z4, 2),
+ svdot_lane_za32_vg1x2 (w8, z28, z4, 2))
+
+/*
+** dot_lane_w8p7_z0_z4_3:
+** sdot za\.s\[w8, 7, vgx2\], {z0\.h - z1\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p7_z0_z4_3, svint16x2_t, svint16_t,
+ svdot_lane_za32_s16_vg1x2 (w8 + 7, z0, z4, 3),
+ svdot_lane_za32_vg1x2 (w8 + 7, z0, z4, 3))
+
+/*
+** dot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** sdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p8_z0_z4_0, svint16x2_t, svint16_t,
+ svdot_lane_za32_s16_vg1x2 (w8 + 8, z0, z4, 0),
+ svdot_lane_za32_vg1x2 (w8 + 8, z0, z4, 0))
+
+/*
+** dot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** sdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0m1_z0_z4_1, svint16x2_t, svint16_t,
+ svdot_lane_za32_s16_vg1x2 (w0 - 1, z0, z4, 1),
+ svdot_lane_za32_vg1x2 (w0 - 1, z0, z4, 1))
+
+/*
+** dot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** sdot za\.s\[w8, 0, vgx2\], {z4\.h - z5\.h}, z15\.h\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (dot_lane_w8_z4_z15_2, svint16x2_t, svint16_t,
+ svdot_lane_za32_s16_vg1x2 (w8, z4, z15, 2),
+ svdot_lane_za32_vg1x2 (w8, z4, z15, 2))
+
+/*
+** dot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** sdot za\.s\[w8, 0, vgx2\], {z28\.h - z29\.h}, \1\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z16_3, svint16x2_t, svint16_t,
+ svdot_lane_za32_s16_vg1x2 (w8, z28, z16, 3),
+ svdot_lane_za32_vg1x2 (w8, z28, z16, 3))
+
+/*
+** dot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** sdot za\.s\[w8, 0, vgx2\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z17_z7_0, svint16x2_t, svint16_t,
+ svdot_lane_za32_s16_vg1x2 (w8, z17, z7, 0),
+ svdot_lane_za32_vg1x2 (w8, z17, z7, 0))
+
+/*
+** dot_lane_w8_z22_z4_1:
+** sdot za\.s\[w8, 0, vgx2\], {z22\.h - z23\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z22_z4_1, svint16x2_t, svint16_t,
+ svdot_lane_za32_s16_vg1x2 (w8, z22, z4, 1),
+ svdot_lane_za32_vg1x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_s16_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_s16_vg1x4.c
new file mode 100644
index 0000000..240b633
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_s16_vg1x4.c
@@ -0,0 +1,108 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** sdot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_0_z0_z4_0, svint16x4_t, svint16_t,
+ svdot_lane_za32_s16_vg1x4 (0, z0, z4, 0),
+ svdot_lane_za32_vg1x4 (0, z0, z4, 0))
+
+/*
+** dot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** sdot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0_z0_z7_1, svint16x4_t, svint16_t,
+ svdot_lane_za32_s16_vg1x4 (w0, z0, z7, 1),
+ svdot_lane_za32_vg1x4 (w0, z0, z7, 1))
+
+/*
+** dot_lane_w8_z28_z4_2:
+** sdot za\.s\[w8, 0, vgx4\], {z28\.h - z31\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z4_2, svint16x4_t, svint16_t,
+ svdot_lane_za32_s16_vg1x4 (w8, z28, z4, 2),
+ svdot_lane_za32_vg1x4 (w8, z28, z4, 2))
+
+/*
+** dot_lane_w8p7_z0_z4_3:
+** sdot za\.s\[w8, 7, vgx4\], {z0\.h - z3\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p7_z0_z4_3, svint16x4_t, svint16_t,
+ svdot_lane_za32_s16_vg1x4 (w8 + 7, z0, z4, 3),
+ svdot_lane_za32_vg1x4 (w8 + 7, z0, z4, 3))
+
+/*
+** dot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** sdot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p8_z0_z4_0, svint16x4_t, svint16_t,
+ svdot_lane_za32_s16_vg1x4 (w8 + 8, z0, z4, 0),
+ svdot_lane_za32_vg1x4 (w8 + 8, z0, z4, 0))
+
+/*
+** dot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** sdot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0m1_z0_z4_1, svint16x4_t, svint16_t,
+ svdot_lane_za32_s16_vg1x4 (w0 - 1, z0, z4, 1),
+ svdot_lane_za32_vg1x4 (w0 - 1, z0, z4, 1))
+
+/*
+** dot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** sdot za\.s\[w8, 0, vgx4\], {z4\.h - z7\.h}, z15\.h\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (dot_lane_w8_z4_z15_2, svint16x4_t, svint16_t,
+ svdot_lane_za32_s16_vg1x4 (w8, z4, z15, 2),
+ svdot_lane_za32_vg1x4 (w8, z4, z15, 2))
+
+/*
+** dot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** sdot za\.s\[w8, 0, vgx4\], {z28\.h - z31\.h}, \1\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z16_3, svint16x4_t, svint16_t,
+ svdot_lane_za32_s16_vg1x4 (w8, z28, z16, 3),
+ svdot_lane_za32_vg1x4 (w8, z28, z16, 3))
+
+/*
+** dot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sdot za\.s\[w8, 0, vgx4\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z17_z7_0, svint16x4_t, svint16_t,
+ svdot_lane_za32_s16_vg1x4 (w8, z17, z7, 0),
+ svdot_lane_za32_vg1x4 (w8, z17, z7, 0))
+
+/*
+** dot_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sdot za\.s\[w8, 0, vgx4\], [^\n]+, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z22_z4_1, svint16x4_t, svint16_t,
+ svdot_lane_za32_s16_vg1x4 (w8, z22, z4, 1),
+ svdot_lane_za32_vg1x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_s8_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_s8_vg1x2.c
new file mode 100644
index 0000000..623756c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_s8_vg1x2.c
@@ -0,0 +1,102 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** sdot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_0_z0_z4_0, svint8x2_t, svint8_t,
+ svdot_lane_za32_s8_vg1x2 (0, z0, z4, 0),
+ svdot_lane_za32_vg1x2 (0, z0, z4, 0))
+
+/*
+** dot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** sdot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, z7\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0_z0_z7_1, svint8x2_t, svint8_t,
+ svdot_lane_za32_s8_vg1x2 (w0, z0, z7, 1),
+ svdot_lane_za32_vg1x2 (w0, z0, z7, 1))
+
+/*
+** dot_lane_w8_z28_z4_2:
+** sdot za\.s\[w8, 0, vgx2\], {z28\.b - z29\.b}, z4\.b\[2\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z4_2, svint8x2_t, svint8_t,
+ svdot_lane_za32_s8_vg1x2 (w8, z28, z4, 2),
+ svdot_lane_za32_vg1x2 (w8, z28, z4, 2))
+
+/*
+** dot_lane_w8p7_z0_z4_3:
+** sdot za\.s\[w8, 7, vgx2\], {z0\.b - z1\.b}, z4\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p7_z0_z4_3, svint8x2_t, svint8_t,
+ svdot_lane_za32_s8_vg1x2 (w8 + 7, z0, z4, 3),
+ svdot_lane_za32_vg1x2 (w8 + 7, z0, z4, 3))
+
+/*
+** dot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** sdot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p8_z0_z4_0, svint8x2_t, svint8_t,
+ svdot_lane_za32_s8_vg1x2 (w8 + 8, z0, z4, 0),
+ svdot_lane_za32_vg1x2 (w8 + 8, z0, z4, 0))
+
+/*
+** dot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** sdot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0m1_z0_z4_1, svint8x2_t, svint8_t,
+ svdot_lane_za32_s8_vg1x2 (w0 - 1, z0, z4, 1),
+ svdot_lane_za32_vg1x2 (w0 - 1, z0, z4, 1))
+
+/*
+** dot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** sdot za\.s\[w8, 0, vgx2\], {z4\.b - z5\.b}, z15\.b\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (dot_lane_w8_z4_z15_2, svint8x2_t, svint8_t,
+ svdot_lane_za32_s8_vg1x2 (w8, z4, z15, 2),
+ svdot_lane_za32_vg1x2 (w8, z4, z15, 2))
+
+/*
+** dot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** sdot za\.s\[w8, 0, vgx2\], {z28\.b - z29\.b}, \1\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z16_3, svint8x2_t, svint8_t,
+ svdot_lane_za32_s8_vg1x2 (w8, z28, z16, 3),
+ svdot_lane_za32_vg1x2 (w8, z28, z16, 3))
+
+/*
+** dot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** sdot za\.s\[w8, 0, vgx2\], [^\n]+, z7\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z17_z7_0, svint8x2_t, svint8_t,
+ svdot_lane_za32_s8_vg1x2 (w8, z17, z7, 0),
+ svdot_lane_za32_vg1x2 (w8, z17, z7, 0))
+
+/*
+** dot_lane_w8_z22_z4_1:
+** sdot za\.s\[w8, 0, vgx2\], {z22\.b - z23\.b}, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z22_z4_1, svint8x2_t, svint8_t,
+ svdot_lane_za32_s8_vg1x2 (w8, z22, z4, 1),
+ svdot_lane_za32_vg1x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_s8_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_s8_vg1x4.c
new file mode 100644
index 0000000..b775026
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_s8_vg1x4.c
@@ -0,0 +1,108 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** sdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_0_z0_z4_0, svint8x4_t, svint8_t,
+ svdot_lane_za32_s8_vg1x4 (0, z0, z4, 0),
+ svdot_lane_za32_vg1x4 (0, z0, z4, 0))
+
+/*
+** dot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** sdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z7\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0_z0_z7_1, svint8x4_t, svint8_t,
+ svdot_lane_za32_s8_vg1x4 (w0, z0, z7, 1),
+ svdot_lane_za32_vg1x4 (w0, z0, z7, 1))
+
+/*
+** dot_lane_w8_z28_z4_2:
+** sdot za\.s\[w8, 0, vgx4\], {z28\.b - z31\.b}, z4\.b\[2\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z4_2, svint8x4_t, svint8_t,
+ svdot_lane_za32_s8_vg1x4 (w8, z28, z4, 2),
+ svdot_lane_za32_vg1x4 (w8, z28, z4, 2))
+
+/*
+** dot_lane_w8p7_z0_z4_3:
+** sdot za\.s\[w8, 7, vgx4\], {z0\.b - z3\.b}, z4\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p7_z0_z4_3, svint8x4_t, svint8_t,
+ svdot_lane_za32_s8_vg1x4 (w8 + 7, z0, z4, 3),
+ svdot_lane_za32_vg1x4 (w8 + 7, z0, z4, 3))
+
+/*
+** dot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** sdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p8_z0_z4_0, svint8x4_t, svint8_t,
+ svdot_lane_za32_s8_vg1x4 (w8 + 8, z0, z4, 0),
+ svdot_lane_za32_vg1x4 (w8 + 8, z0, z4, 0))
+
+/*
+** dot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** sdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0m1_z0_z4_1, svint8x4_t, svint8_t,
+ svdot_lane_za32_s8_vg1x4 (w0 - 1, z0, z4, 1),
+ svdot_lane_za32_vg1x4 (w0 - 1, z0, z4, 1))
+
+/*
+** dot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** sdot za\.s\[w8, 0, vgx4\], {z4\.b - z7\.b}, z15\.b\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (dot_lane_w8_z4_z15_2, svint8x4_t, svint8_t,
+ svdot_lane_za32_s8_vg1x4 (w8, z4, z15, 2),
+ svdot_lane_za32_vg1x4 (w8, z4, z15, 2))
+
+/*
+** dot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** sdot za\.s\[w8, 0, vgx4\], {z28\.b - z31\.b}, \1\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z16_3, svint8x4_t, svint8_t,
+ svdot_lane_za32_s8_vg1x4 (w8, z28, z16, 3),
+ svdot_lane_za32_vg1x4 (w8, z28, z16, 3))
+
+/*
+** dot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sdot za\.s\[w8, 0, vgx4\], [^\n]+, z7\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z17_z7_0, svint8x4_t, svint8_t,
+ svdot_lane_za32_s8_vg1x4 (w8, z17, z7, 0),
+ svdot_lane_za32_vg1x4 (w8, z17, z7, 0))
+
+/*
+** dot_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sdot za\.s\[w8, 0, vgx4\], [^\n]+, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z22_z4_1, svint8x4_t, svint8_t,
+ svdot_lane_za32_s8_vg1x4 (w8, z22, z4, 1),
+ svdot_lane_za32_vg1x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_u16_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_u16_vg1x2.c
new file mode 100644
index 0000000..b0e9550
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_u16_vg1x2.c
@@ -0,0 +1,102 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** udot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_0_z0_z4_0, svuint16x2_t, svuint16_t,
+ svdot_lane_za32_u16_vg1x2 (0, z0, z4, 0),
+ svdot_lane_za32_vg1x2 (0, z0, z4, 0))
+
+/*
+** dot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** udot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0_z0_z7_1, svuint16x2_t, svuint16_t,
+ svdot_lane_za32_u16_vg1x2 (w0, z0, z7, 1),
+ svdot_lane_za32_vg1x2 (w0, z0, z7, 1))
+
+/*
+** dot_lane_w8_z28_z4_2:
+** udot za\.s\[w8, 0, vgx2\], {z28\.h - z29\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z4_2, svuint16x2_t, svuint16_t,
+ svdot_lane_za32_u16_vg1x2 (w8, z28, z4, 2),
+ svdot_lane_za32_vg1x2 (w8, z28, z4, 2))
+
+/*
+** dot_lane_w8p7_z0_z4_3:
+** udot za\.s\[w8, 7, vgx2\], {z0\.h - z1\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p7_z0_z4_3, svuint16x2_t, svuint16_t,
+ svdot_lane_za32_u16_vg1x2 (w8 + 7, z0, z4, 3),
+ svdot_lane_za32_vg1x2 (w8 + 7, z0, z4, 3))
+
+/*
+** dot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** udot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p8_z0_z4_0, svuint16x2_t, svuint16_t,
+ svdot_lane_za32_u16_vg1x2 (w8 + 8, z0, z4, 0),
+ svdot_lane_za32_vg1x2 (w8 + 8, z0, z4, 0))
+
+/*
+** dot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** udot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0m1_z0_z4_1, svuint16x2_t, svuint16_t,
+ svdot_lane_za32_u16_vg1x2 (w0 - 1, z0, z4, 1),
+ svdot_lane_za32_vg1x2 (w0 - 1, z0, z4, 1))
+
+/*
+** dot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** udot za\.s\[w8, 0, vgx2\], {z4\.h - z5\.h}, z15\.h\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (dot_lane_w8_z4_z15_2, svuint16x2_t, svuint16_t,
+ svdot_lane_za32_u16_vg1x2 (w8, z4, z15, 2),
+ svdot_lane_za32_vg1x2 (w8, z4, z15, 2))
+
+/*
+** dot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** udot za\.s\[w8, 0, vgx2\], {z28\.h - z29\.h}, \1\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z16_3, svuint16x2_t, svuint16_t,
+ svdot_lane_za32_u16_vg1x2 (w8, z28, z16, 3),
+ svdot_lane_za32_vg1x2 (w8, z28, z16, 3))
+
+/*
+** dot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** udot za\.s\[w8, 0, vgx2\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z17_z7_0, svuint16x2_t, svuint16_t,
+ svdot_lane_za32_u16_vg1x2 (w8, z17, z7, 0),
+ svdot_lane_za32_vg1x2 (w8, z17, z7, 0))
+
+/*
+** dot_lane_w8_z22_z4_1:
+** udot za\.s\[w8, 0, vgx2\], {z22\.h - z23\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z22_z4_1, svuint16x2_t, svuint16_t,
+ svdot_lane_za32_u16_vg1x2 (w8, z22, z4, 1),
+ svdot_lane_za32_vg1x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_u16_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_u16_vg1x4.c
new file mode 100644
index 0000000..87a7476
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_u16_vg1x4.c
@@ -0,0 +1,108 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** udot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_0_z0_z4_0, svuint16x4_t, svuint16_t,
+ svdot_lane_za32_u16_vg1x4 (0, z0, z4, 0),
+ svdot_lane_za32_vg1x4 (0, z0, z4, 0))
+
+/*
+** dot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** udot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0_z0_z7_1, svuint16x4_t, svuint16_t,
+ svdot_lane_za32_u16_vg1x4 (w0, z0, z7, 1),
+ svdot_lane_za32_vg1x4 (w0, z0, z7, 1))
+
+/*
+** dot_lane_w8_z28_z4_2:
+** udot za\.s\[w8, 0, vgx4\], {z28\.h - z31\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z4_2, svuint16x4_t, svuint16_t,
+ svdot_lane_za32_u16_vg1x4 (w8, z28, z4, 2),
+ svdot_lane_za32_vg1x4 (w8, z28, z4, 2))
+
+/*
+** dot_lane_w8p7_z0_z4_3:
+** udot za\.s\[w8, 7, vgx4\], {z0\.h - z3\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p7_z0_z4_3, svuint16x4_t, svuint16_t,
+ svdot_lane_za32_u16_vg1x4 (w8 + 7, z0, z4, 3),
+ svdot_lane_za32_vg1x4 (w8 + 7, z0, z4, 3))
+
+/*
+** dot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** udot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p8_z0_z4_0, svuint16x4_t, svuint16_t,
+ svdot_lane_za32_u16_vg1x4 (w8 + 8, z0, z4, 0),
+ svdot_lane_za32_vg1x4 (w8 + 8, z0, z4, 0))
+
+/*
+** dot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** udot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0m1_z0_z4_1, svuint16x4_t, svuint16_t,
+ svdot_lane_za32_u16_vg1x4 (w0 - 1, z0, z4, 1),
+ svdot_lane_za32_vg1x4 (w0 - 1, z0, z4, 1))
+
+/*
+** dot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** udot za\.s\[w8, 0, vgx4\], {z4\.h - z7\.h}, z15\.h\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (dot_lane_w8_z4_z15_2, svuint16x4_t, svuint16_t,
+ svdot_lane_za32_u16_vg1x4 (w8, z4, z15, 2),
+ svdot_lane_za32_vg1x4 (w8, z4, z15, 2))
+
+/*
+** dot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** udot za\.s\[w8, 0, vgx4\], {z28\.h - z31\.h}, \1\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z16_3, svuint16x4_t, svuint16_t,
+ svdot_lane_za32_u16_vg1x4 (w8, z28, z16, 3),
+ svdot_lane_za32_vg1x4 (w8, z28, z16, 3))
+
+/*
+** dot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** udot za\.s\[w8, 0, vgx4\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z17_z7_0, svuint16x4_t, svuint16_t,
+ svdot_lane_za32_u16_vg1x4 (w8, z17, z7, 0),
+ svdot_lane_za32_vg1x4 (w8, z17, z7, 0))
+
+/*
+** dot_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** udot za\.s\[w8, 0, vgx4\], [^\n]+, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z22_z4_1, svuint16x4_t, svuint16_t,
+ svdot_lane_za32_u16_vg1x4 (w8, z22, z4, 1),
+ svdot_lane_za32_vg1x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_u8_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_u8_vg1x2.c
new file mode 100644
index 0000000..c3374b4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_u8_vg1x2.c
@@ -0,0 +1,102 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** udot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_0_z0_z4_0, svuint8x2_t, svuint8_t,
+ svdot_lane_za32_u8_vg1x2 (0, z0, z4, 0),
+ svdot_lane_za32_vg1x2 (0, z0, z4, 0))
+
+/*
+** dot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** udot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, z7\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0_z0_z7_1, svuint8x2_t, svuint8_t,
+ svdot_lane_za32_u8_vg1x2 (w0, z0, z7, 1),
+ svdot_lane_za32_vg1x2 (w0, z0, z7, 1))
+
+/*
+** dot_lane_w8_z28_z4_2:
+** udot za\.s\[w8, 0, vgx2\], {z28\.b - z29\.b}, z4\.b\[2\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z4_2, svuint8x2_t, svuint8_t,
+ svdot_lane_za32_u8_vg1x2 (w8, z28, z4, 2),
+ svdot_lane_za32_vg1x2 (w8, z28, z4, 2))
+
+/*
+** dot_lane_w8p7_z0_z4_3:
+** udot za\.s\[w8, 7, vgx2\], {z0\.b - z1\.b}, z4\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p7_z0_z4_3, svuint8x2_t, svuint8_t,
+ svdot_lane_za32_u8_vg1x2 (w8 + 7, z0, z4, 3),
+ svdot_lane_za32_vg1x2 (w8 + 7, z0, z4, 3))
+
+/*
+** dot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** udot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p8_z0_z4_0, svuint8x2_t, svuint8_t,
+ svdot_lane_za32_u8_vg1x2 (w8 + 8, z0, z4, 0),
+ svdot_lane_za32_vg1x2 (w8 + 8, z0, z4, 0))
+
+/*
+** dot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** udot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0m1_z0_z4_1, svuint8x2_t, svuint8_t,
+ svdot_lane_za32_u8_vg1x2 (w0 - 1, z0, z4, 1),
+ svdot_lane_za32_vg1x2 (w0 - 1, z0, z4, 1))
+
+/*
+** dot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** udot za\.s\[w8, 0, vgx2\], {z4\.b - z5\.b}, z15\.b\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (dot_lane_w8_z4_z15_2, svuint8x2_t, svuint8_t,
+ svdot_lane_za32_u8_vg1x2 (w8, z4, z15, 2),
+ svdot_lane_za32_vg1x2 (w8, z4, z15, 2))
+
+/*
+** dot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** udot za\.s\[w8, 0, vgx2\], {z28\.b - z29\.b}, \1\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z16_3, svuint8x2_t, svuint8_t,
+ svdot_lane_za32_u8_vg1x2 (w8, z28, z16, 3),
+ svdot_lane_za32_vg1x2 (w8, z28, z16, 3))
+
+/*
+** dot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** udot za\.s\[w8, 0, vgx2\], [^\n]+, z7\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z17_z7_0, svuint8x2_t, svuint8_t,
+ svdot_lane_za32_u8_vg1x2 (w8, z17, z7, 0),
+ svdot_lane_za32_vg1x2 (w8, z17, z7, 0))
+
+/*
+** dot_lane_w8_z22_z4_1:
+** udot za\.s\[w8, 0, vgx2\], {z22\.b - z23\.b}, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z22_z4_1, svuint8x2_t, svuint8_t,
+ svdot_lane_za32_u8_vg1x2 (w8, z22, z4, 1),
+ svdot_lane_za32_vg1x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_u8_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_u8_vg1x4.c
new file mode 100644
index 0000000..0d51813
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za32_u8_vg1x4.c
@@ -0,0 +1,108 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** udot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_0_z0_z4_0, svuint8x4_t, svuint8_t,
+ svdot_lane_za32_u8_vg1x4 (0, z0, z4, 0),
+ svdot_lane_za32_vg1x4 (0, z0, z4, 0))
+
+/*
+** dot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** udot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z7\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0_z0_z7_1, svuint8x4_t, svuint8_t,
+ svdot_lane_za32_u8_vg1x4 (w0, z0, z7, 1),
+ svdot_lane_za32_vg1x4 (w0, z0, z7, 1))
+
+/*
+** dot_lane_w8_z28_z4_2:
+** udot za\.s\[w8, 0, vgx4\], {z28\.b - z31\.b}, z4\.b\[2\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z4_2, svuint8x4_t, svuint8_t,
+ svdot_lane_za32_u8_vg1x4 (w8, z28, z4, 2),
+ svdot_lane_za32_vg1x4 (w8, z28, z4, 2))
+
+/*
+** dot_lane_w8p7_z0_z4_3:
+** udot za\.s\[w8, 7, vgx4\], {z0\.b - z3\.b}, z4\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p7_z0_z4_3, svuint8x4_t, svuint8_t,
+ svdot_lane_za32_u8_vg1x4 (w8 + 7, z0, z4, 3),
+ svdot_lane_za32_vg1x4 (w8 + 7, z0, z4, 3))
+
+/*
+** dot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** udot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p8_z0_z4_0, svuint8x4_t, svuint8_t,
+ svdot_lane_za32_u8_vg1x4 (w8 + 8, z0, z4, 0),
+ svdot_lane_za32_vg1x4 (w8 + 8, z0, z4, 0))
+
+/*
+** dot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** udot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0m1_z0_z4_1, svuint8x4_t, svuint8_t,
+ svdot_lane_za32_u8_vg1x4 (w0 - 1, z0, z4, 1),
+ svdot_lane_za32_vg1x4 (w0 - 1, z0, z4, 1))
+
+/*
+** dot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** udot za\.s\[w8, 0, vgx4\], {z4\.b - z7\.b}, z15\.b\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (dot_lane_w8_z4_z15_2, svuint8x4_t, svuint8_t,
+ svdot_lane_za32_u8_vg1x4 (w8, z4, z15, 2),
+ svdot_lane_za32_vg1x4 (w8, z4, z15, 2))
+
+/*
+** dot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** udot za\.s\[w8, 0, vgx4\], {z28\.b - z31\.b}, \1\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z16_3, svuint8x4_t, svuint8_t,
+ svdot_lane_za32_u8_vg1x4 (w8, z28, z16, 3),
+ svdot_lane_za32_vg1x4 (w8, z28, z16, 3))
+
+/*
+** dot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** udot za\.s\[w8, 0, vgx4\], [^\n]+, z7\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z17_z7_0, svuint8x4_t, svuint8_t,
+ svdot_lane_za32_u8_vg1x4 (w8, z17, z7, 0),
+ svdot_lane_za32_vg1x4 (w8, z17, z7, 0))
+
+/*
+** dot_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** udot za\.s\[w8, 0, vgx4\], [^\n]+, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z22_z4_1, svuint8x4_t, svuint8_t,
+ svdot_lane_za32_u8_vg1x4 (w8, z22, z4, 1),
+ svdot_lane_za32_vg1x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za64_s16_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za64_s16_vg1x2.c
new file mode 100644
index 0000000..d11466f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za64_s16_vg1x2.c
@@ -0,0 +1,104 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** sdot za\.d\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_0_z0_z4_0, svint16x2_t, svint16_t,
+ svdot_lane_za64_s16_vg1x2 (0, z0, z4, 0),
+ svdot_lane_za64_vg1x2 (0, z0, z4, 0))
+
+/*
+** dot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** sdot za\.d\[\1, 0, vgx2\], {z0\.h - z1\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0_z0_z7_1, svint16x2_t, svint16_t,
+ svdot_lane_za64_s16_vg1x2 (w0, z0, z7, 1),
+ svdot_lane_za64_vg1x2 (w0, z0, z7, 1))
+
+/*
+** dot_lane_w8_z28_z4_0:
+** sdot za\.d\[w8, 0, vgx2\], {z28\.h - z29\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z4_0, svint16x2_t, svint16_t,
+ svdot_lane_za64_s16_vg1x2 (w8, z28, z4, 0),
+ svdot_lane_za64_vg1x2 (w8, z28, z4, 0))
+
+/*
+** dot_lane_w8p7_z0_z4_1:
+** sdot za\.d\[w8, 7, vgx2\], {z0\.h - z1\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p7_z0_z4_1, svint16x2_t, svint16_t,
+ svdot_lane_za64_s16_vg1x2 (w8 + 7, z0, z4, 1),
+ svdot_lane_za64_vg1x2 (w8 + 7, z0, z4, 1))
+
+/*
+** dot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** sdot za\.d\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p8_z0_z4_0, svint16x2_t, svint16_t,
+ svdot_lane_za64_s16_vg1x2 (w8 + 8, z0, z4, 0),
+ svdot_lane_za64_vg1x2 (w8 + 8, z0, z4, 0))
+
+/*
+** dot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** sdot za\.d\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0m1_z0_z4_1, svint16x2_t, svint16_t,
+ svdot_lane_za64_s16_vg1x2 (w0 - 1, z0, z4, 1),
+ svdot_lane_za64_vg1x2 (w0 - 1, z0, z4, 1))
+
+/*
+** dot_lane_w8_z4_z15_0:
+** str d15, \[sp, #?-16\]!
+** sdot za\.d\[w8, 0, vgx2\], {z4\.h - z5\.h}, z15\.h\[0\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (dot_lane_w8_z4_z15_0, svint16x2_t, svint16_t,
+ svdot_lane_za64_s16_vg1x2 (w8, z4, z15, 0),
+ svdot_lane_za64_vg1x2 (w8, z4, z15, 0))
+
+/*
+** dot_lane_w8_z28_z16_1:
+** mov (z[0-7]).d, z16.d
+** sdot za\.d\[w8, 0, vgx2\], {z28\.h - z29\.h}, \1\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z16_1, svint16x2_t, svint16_t,
+ svdot_lane_za64_s16_vg1x2 (w8, z28, z16, 1),
+ svdot_lane_za64_vg1x2 (w8, z28, z16, 1))
+
+/*
+** dot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** sdot za\.d\[w8, 0, vgx2\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z17_z7_0, svint16x2_t, svint16_t,
+ svdot_lane_za64_s16_vg1x2 (w8, z17, z7, 0),
+ svdot_lane_za64_vg1x2 (w8, z17, z7, 0))
+
+/*
+** dot_lane_w8_z22_z4_1:
+** sdot za\.d\[w8, 0, vgx2\], {z22\.h - z23\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z22_z4_1, svint16x2_t, svint16_t,
+ svdot_lane_za64_s16_vg1x2 (w8, z22, z4, 1),
+ svdot_lane_za64_vg1x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za64_s16_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za64_s16_vg1x4.c
new file mode 100644
index 0000000..ed48dca
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za64_s16_vg1x4.c
@@ -0,0 +1,110 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** sdot za\.d\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_0_z0_z4_0, svint16x4_t, svint16_t,
+ svdot_lane_za64_s16_vg1x4 (0, z0, z4, 0),
+ svdot_lane_za64_vg1x4 (0, z0, z4, 0))
+
+/*
+** dot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** sdot za\.d\[\1, 0, vgx4\], {z0\.h - z3\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0_z0_z7_1, svint16x4_t, svint16_t,
+ svdot_lane_za64_s16_vg1x4 (w0, z0, z7, 1),
+ svdot_lane_za64_vg1x4 (w0, z0, z7, 1))
+
+/*
+** dot_lane_w8_z28_z4_0:
+** sdot za\.d\[w8, 0, vgx4\], {z28\.h - z31\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z4_0, svint16x4_t, svint16_t,
+ svdot_lane_za64_s16_vg1x4 (w8, z28, z4, 0),
+ svdot_lane_za64_vg1x4 (w8, z28, z4, 0))
+
+/*
+** dot_lane_w8p7_z0_z4_1:
+** sdot za\.d\[w8, 7, vgx4\], {z0\.h - z3\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p7_z0_z4_1, svint16x4_t, svint16_t,
+ svdot_lane_za64_s16_vg1x4 (w8 + 7, z0, z4, 1),
+ svdot_lane_za64_vg1x4 (w8 + 7, z0, z4, 1))
+
+/*
+** dot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** sdot za\.d\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p8_z0_z4_0, svint16x4_t, svint16_t,
+ svdot_lane_za64_s16_vg1x4 (w8 + 8, z0, z4, 0),
+ svdot_lane_za64_vg1x4 (w8 + 8, z0, z4, 0))
+
+/*
+** dot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** sdot za\.d\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0m1_z0_z4_1, svint16x4_t, svint16_t,
+ svdot_lane_za64_s16_vg1x4 (w0 - 1, z0, z4, 1),
+ svdot_lane_za64_vg1x4 (w0 - 1, z0, z4, 1))
+
+/*
+** dot_lane_w8_z4_z15_0:
+** str d15, \[sp, #?-16\]!
+** sdot za\.d\[w8, 0, vgx4\], {z4\.h - z7\.h}, z15\.h\[0\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (dot_lane_w8_z4_z15_0, svint16x4_t, svint16_t,
+ svdot_lane_za64_s16_vg1x4 (w8, z4, z15, 0),
+ svdot_lane_za64_vg1x4 (w8, z4, z15, 0))
+
+/*
+** dot_lane_w8_z28_z16_1:
+** mov (z[0-7]).d, z16.d
+** sdot za\.d\[w8, 0, vgx4\], {z28\.h - z31\.h}, \1\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z16_1, svint16x4_t, svint16_t,
+ svdot_lane_za64_s16_vg1x4 (w8, z28, z16, 1),
+ svdot_lane_za64_vg1x4 (w8, z28, z16, 1))
+
+/*
+** dot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sdot za\.d\[w8, 0, vgx4\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z17_z7_0, svint16x4_t, svint16_t,
+ svdot_lane_za64_s16_vg1x4 (w8, z17, z7, 0),
+ svdot_lane_za64_vg1x4 (w8, z17, z7, 0))
+
+/*
+** dot_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sdot za\.d\[w8, 0, vgx4\], [^\n]+, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z22_z4_1, svint16x4_t, svint16_t,
+ svdot_lane_za64_s16_vg1x4 (w8, z22, z4, 1),
+ svdot_lane_za64_vg1x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za64_u16_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za64_u16_vg1x2.c
new file mode 100644
index 0000000..1dd89ea
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za64_u16_vg1x2.c
@@ -0,0 +1,104 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** udot za\.d\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_0_z0_z4_0, svuint16x2_t, svuint16_t,
+ svdot_lane_za64_u16_vg1x2 (0, z0, z4, 0),
+ svdot_lane_za64_vg1x2 (0, z0, z4, 0))
+
+/*
+** dot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** udot za\.d\[\1, 0, vgx2\], {z0\.h - z1\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0_z0_z7_1, svuint16x2_t, svuint16_t,
+ svdot_lane_za64_u16_vg1x2 (w0, z0, z7, 1),
+ svdot_lane_za64_vg1x2 (w0, z0, z7, 1))
+
+/*
+** dot_lane_w8_z28_z4_0:
+** udot za\.d\[w8, 0, vgx2\], {z28\.h - z29\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z4_0, svuint16x2_t, svuint16_t,
+ svdot_lane_za64_u16_vg1x2 (w8, z28, z4, 0),
+ svdot_lane_za64_vg1x2 (w8, z28, z4, 0))
+
+/*
+** dot_lane_w8p7_z0_z4_1:
+** udot za\.d\[w8, 7, vgx2\], {z0\.h - z1\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p7_z0_z4_1, svuint16x2_t, svuint16_t,
+ svdot_lane_za64_u16_vg1x2 (w8 + 7, z0, z4, 1),
+ svdot_lane_za64_vg1x2 (w8 + 7, z0, z4, 1))
+
+/*
+** dot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** udot za\.d\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p8_z0_z4_0, svuint16x2_t, svuint16_t,
+ svdot_lane_za64_u16_vg1x2 (w8 + 8, z0, z4, 0),
+ svdot_lane_za64_vg1x2 (w8 + 8, z0, z4, 0))
+
+/*
+** dot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** udot za\.d\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0m1_z0_z4_1, svuint16x2_t, svuint16_t,
+ svdot_lane_za64_u16_vg1x2 (w0 - 1, z0, z4, 1),
+ svdot_lane_za64_vg1x2 (w0 - 1, z0, z4, 1))
+
+/*
+** dot_lane_w8_z4_z15_0:
+** str d15, \[sp, #?-16\]!
+** udot za\.d\[w8, 0, vgx2\], {z4\.h - z5\.h}, z15\.h\[0\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (dot_lane_w8_z4_z15_0, svuint16x2_t, svuint16_t,
+ svdot_lane_za64_u16_vg1x2 (w8, z4, z15, 0),
+ svdot_lane_za64_vg1x2 (w8, z4, z15, 0))
+
+/*
+** dot_lane_w8_z28_z16_1:
+** mov (z[0-7]).d, z16.d
+** udot za\.d\[w8, 0, vgx2\], {z28\.h - z29\.h}, \1\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z16_1, svuint16x2_t, svuint16_t,
+ svdot_lane_za64_u16_vg1x2 (w8, z28, z16, 1),
+ svdot_lane_za64_vg1x2 (w8, z28, z16, 1))
+
+/*
+** dot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** udot za\.d\[w8, 0, vgx2\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z17_z7_0, svuint16x2_t, svuint16_t,
+ svdot_lane_za64_u16_vg1x2 (w8, z17, z7, 0),
+ svdot_lane_za64_vg1x2 (w8, z17, z7, 0))
+
+/*
+** dot_lane_w8_z22_z4_1:
+** udot za\.d\[w8, 0, vgx2\], {z22\.h - z23\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z22_z4_1, svuint16x2_t, svuint16_t,
+ svdot_lane_za64_u16_vg1x2 (w8, z22, z4, 1),
+ svdot_lane_za64_vg1x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za64_u16_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za64_u16_vg1x4.c
new file mode 100644
index 0000000..2ce269e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_lane_za64_u16_vg1x4.c
@@ -0,0 +1,110 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** udot za\.d\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_0_z0_z4_0, svuint16x4_t, svuint16_t,
+ svdot_lane_za64_u16_vg1x4 (0, z0, z4, 0),
+ svdot_lane_za64_vg1x4 (0, z0, z4, 0))
+
+/*
+** dot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** udot za\.d\[\1, 0, vgx4\], {z0\.h - z3\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0_z0_z7_1, svuint16x4_t, svuint16_t,
+ svdot_lane_za64_u16_vg1x4 (w0, z0, z7, 1),
+ svdot_lane_za64_vg1x4 (w0, z0, z7, 1))
+
+/*
+** dot_lane_w8_z28_z4_0:
+** udot za\.d\[w8, 0, vgx4\], {z28\.h - z31\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z4_0, svuint16x4_t, svuint16_t,
+ svdot_lane_za64_u16_vg1x4 (w8, z28, z4, 0),
+ svdot_lane_za64_vg1x4 (w8, z28, z4, 0))
+
+/*
+** dot_lane_w8p7_z0_z4_1:
+** udot za\.d\[w8, 7, vgx4\], {z0\.h - z3\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p7_z0_z4_1, svuint16x4_t, svuint16_t,
+ svdot_lane_za64_u16_vg1x4 (w8 + 7, z0, z4, 1),
+ svdot_lane_za64_vg1x4 (w8 + 7, z0, z4, 1))
+
+/*
+** dot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** udot za\.d\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8p8_z0_z4_0, svuint16x4_t, svuint16_t,
+ svdot_lane_za64_u16_vg1x4 (w8 + 8, z0, z4, 0),
+ svdot_lane_za64_vg1x4 (w8 + 8, z0, z4, 0))
+
+/*
+** dot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** udot za\.d\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w0m1_z0_z4_1, svuint16x4_t, svuint16_t,
+ svdot_lane_za64_u16_vg1x4 (w0 - 1, z0, z4, 1),
+ svdot_lane_za64_vg1x4 (w0 - 1, z0, z4, 1))
+
+/*
+** dot_lane_w8_z4_z15_0:
+** str d15, \[sp, #?-16\]!
+** udot za\.d\[w8, 0, vgx4\], {z4\.h - z7\.h}, z15\.h\[0\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (dot_lane_w8_z4_z15_0, svuint16x4_t, svuint16_t,
+ svdot_lane_za64_u16_vg1x4 (w8, z4, z15, 0),
+ svdot_lane_za64_vg1x4 (w8, z4, z15, 0))
+
+/*
+** dot_lane_w8_z28_z16_1:
+** mov (z[0-7]).d, z16.d
+** udot za\.d\[w8, 0, vgx4\], {z28\.h - z31\.h}, \1\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z28_z16_1, svuint16x4_t, svuint16_t,
+ svdot_lane_za64_u16_vg1x4 (w8, z28, z16, 1),
+ svdot_lane_za64_vg1x4 (w8, z28, z16, 1))
+
+/*
+** dot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** udot za\.d\[w8, 0, vgx4\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z17_z7_0, svuint16x4_t, svuint16_t,
+ svdot_lane_za64_u16_vg1x4 (w8, z17, z7, 0),
+ svdot_lane_za64_vg1x4 (w8, z17, z7, 0))
+
+/*
+** dot_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** udot za\.d\[w8, 0, vgx4\], [^\n]+, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (dot_lane_w8_z22_z4_1, svuint16x4_t, svuint16_t,
+ svdot_lane_za64_u16_vg1x4 (w8, z22, z4, 1),
+ svdot_lane_za64_vg1x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_s32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_s32.c
new file mode 100644
index 0000000..d1f7556
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_s32.c
@@ -0,0 +1,44 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_s32_tied1:
+** sdot z0\.s, z4\.h, z5\.h
+** ret
+*/
+TEST_DUAL_Z (dot_s32_tied1, svint32_t, svint16_t,
+ z0 = svdot_s32_s16 (z0, z4, z5),
+ z0 = svdot (z0, z4, z5))
+
+/*
+** dot_s32_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z4
+** sdot z0\.s, \1\.h, z1\.h
+** ret
+*/
+TEST_DUAL_Z_REV (dot_s32_tied2, svint32_t, svint16_t,
+ z0_res = svdot_s32_s16 (z4, z0, z1),
+ z0_res = svdot (z4, z0, z1))
+
+/*
+** dot_s32_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z4
+** sdot z0\.s, z1\.h, \1\.h
+** ret
+*/
+TEST_DUAL_Z_REV (dot_s32_tied3, svint32_t, svint16_t,
+ z0_res = svdot_s32_s16 (z4, z1, z0),
+ z0_res = svdot (z4, z1, z0))
+
+/*
+** dot_s32_untied:
+** movprfx z0, z1
+** sdot z0\.s, z4\.h, z5\.h
+** ret
+*/
+TEST_DUAL_Z (dot_s32_untied, svint32_t, svint16_t,
+ z0 = svdot_s32_s16 (z1, z4, z5),
+ z0 = svdot (z1, z4, z5))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_u32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_u32.c
new file mode 100644
index 0000000..ce2052b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_u32.c
@@ -0,0 +1,44 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_u32_tied1:
+** udot z0\.s, z4\.h, z5\.h
+** ret
+*/
+TEST_DUAL_Z (dot_u32_tied1, svuint32_t, svuint16_t,
+ z0 = svdot_u32_u16 (z0, z4, z5),
+ z0 = svdot (z0, z4, z5))
+
+/*
+** dot_u32_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z4
+** udot z0\.s, \1\.h, z1\.h
+** ret
+*/
+TEST_DUAL_Z_REV (dot_u32_tied2, svuint32_t, svuint16_t,
+ z0_res = svdot_u32_u16 (z4, z0, z1),
+ z0_res = svdot (z4, z0, z1))
+
+/*
+** dot_u32_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z4
+** udot z0\.s, z1\.h, \1\.h
+** ret
+*/
+TEST_DUAL_Z_REV (dot_u32_tied3, svuint32_t, svuint16_t,
+ z0_res = svdot_u32_u16 (z4, z1, z0),
+ z0_res = svdot (z4, z1, z0))
+
+/*
+** dot_u32_untied:
+** movprfx z0, z1
+** udot z0\.s, z4\.h, z5\.h
+** ret
+*/
+TEST_DUAL_Z (dot_u32_untied, svuint32_t, svuint16_t,
+ z0 = svdot_u32_u16 (z1, z4, z5),
+ z0 = svdot (z1, z4, z5))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_bf16_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_bf16_vg1x2.c
new file mode 100644
index 0000000..0665f88
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_bf16_vg1x2.c
@@ -0,0 +1,243 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** bfdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_0_z0_z0, svbfloat16x2_t,
+ svdot_za32_bf16_vg1x2 (0, z0, z0),
+ svdot_za32_vg1x2 (0, z0, z0))
+
+/*
+** dot_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** bfdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w0_z0_z0, svbfloat16x2_t,
+ svdot_za32_bf16_vg1x2 (w0, z0, z0),
+ svdot_za32_vg1x2 (w0, z0, z0))
+
+/*
+** dot_w8_z0_z4:
+** bfdot za\.s\[w8, 0, vgx2\], {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z4, svbfloat16x2_t,
+ svdot_za32_bf16_vg1x2 (w8, z0, z4),
+ svdot_za32_vg1x2 (w8, z0, z4))
+
+/*
+** dot_w8_z4_z18:
+** bfdot za\.s\[w8, 0, vgx2\], {z4\.h - z5\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z4_z18, svbfloat16x2_t,
+ svdot_za32_bf16_vg1x2 (w8, z4, z18),
+ svdot_za32_vg1x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** dot_w8_z0_z23:
+** ...
+** bfdot za\.s\[w8, 0, vgx2\], {z0\.h - z1\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z23, svbfloat16x2_t,
+ svdot_za32_bf16_vg1x2 (w8, z0, z23),
+ svdot_za32_vg1x2 (w8, z0, z23))
+
+/*
+** dot_w8_z23_z0:
+** ...
+** bfdot za\.s\[w8, 0, vgx2\], [^\n]+, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z23_z0, svbfloat16x2_t,
+ svdot_za32_bf16_vg1x2 (w8, z23, z0),
+ svdot_za32_vg1x2 (w8, z23, z0))
+
+/*
+** dot_w8_z18_z28:
+** bfdot za\.s\[w8, 0, vgx2\], {z18\.h - z19\.h}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z18_z28, svbfloat16x2_t,
+ svdot_za32_bf16_vg1x2 (w8, z18, z28),
+ svdot_za32_vg1x2 (w8, z18, z28))
+
+/*
+** dot_w8_z28_z4:
+** bfdot za\.s\[w8, 0, vgx2\], {z28\.h - z29\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z28_z4, svbfloat16x2_t,
+ svdot_za32_bf16_vg1x2 (w8, z28, z4),
+ svdot_za32_vg1x2 (w8, z28, z4))
+
+/*
+** dot_w8p1_z4_z0:
+** bfdot za\.s\[w8, 1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p1_z4_z0, svbfloat16x2_t,
+ svdot_za32_bf16_vg1x2 (w8 + 1, z4, z0),
+ svdot_za32_vg1x2 (w8 + 1, z4, z0))
+
+/*
+** dot_w8p2_z4_z0:
+** bfdot za\.s\[w8, 2, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p2_z4_z0, svbfloat16x2_t,
+ svdot_za32_bf16_vg1x2 (w8 + 2, z4, z0),
+ svdot_za32_vg1x2 (w8 + 2, z4, z0))
+
+/*
+** dot_w11p4_z4_z0:
+** bfdot za\.s\[w11, 4, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w11p4_z4_z0, svbfloat16x2_t,
+ svdot_za32_bf16_vg1x2 (w11 + 4, z4, z0),
+ svdot_za32_vg1x2 (w11 + 4, z4, z0))
+
+/*
+** dot_w8p7_z4_z0:
+** bfdot za\.s\[w8, 7, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p7_z4_z0, svbfloat16x2_t,
+ svdot_za32_bf16_vg1x2 (w8 + 7, z4, z0),
+ svdot_za32_vg1x2 (w8 + 7, z4, z0))
+
+/*
+** dot_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** bfdot za\.s\[\1, 0, vgx2\], {z4\.h - z5\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p8_z4_z4, svbfloat16x2_t,
+ svdot_za32_bf16_vg1x2 (w8 + 8, z4, z4),
+ svdot_za32_vg1x2 (w8 + 8, z4, z4))
+
+/*
+** dot_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** bfdot za\.s\[\1, 0, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8m1_z4_z0, svbfloat16x2_t,
+ svdot_za32_bf16_vg1x2 (w8 - 1, z4, z0),
+ svdot_za32_vg1x2 (w8 - 1, z4, z0))
+
+/*
+** dot_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** bfdot za\.s\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_0_z1_z0, svbfloat16x2_t, svbfloat16_t,
+ svdot_single_za32_bf16_vg1x2 (0, z1, z0),
+ svdot_za32_vg1x2 (0, z1, z0))
+
+/*
+** dot_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** bfdot za\.s\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0_z1_z0, svbfloat16x2_t, svbfloat16_t,
+ svdot_single_za32_bf16_vg1x2 (w0, z1, z0),
+ svdot_za32_vg1x2 (w0, z1, z0))
+
+/*
+** dot_single_w8_z1_z0:
+** bfdot za\.s\[w8, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z1_z0, svbfloat16x2_t, svbfloat16_t,
+ svdot_single_za32_bf16_vg1x2 (w8, z1, z0),
+ svdot_za32_vg1x2 (w8, z1, z0))
+
+/*
+** dot_single_w8p1_z1_z0:
+** bfdot za\.s\[w8, 1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p1_z1_z0, svbfloat16x2_t, svbfloat16_t,
+ svdot_single_za32_bf16_vg1x2 (w8 + 1, z1, z0),
+ svdot_za32_vg1x2 (w8 + 1, z1, z0))
+
+/*
+** dot_single_w8p2_z20_z0:
+** bfdot za\.s\[w8, 2, vgx2\], {z20\.h - z21\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p2_z20_z0, svbfloat16x2_t, svbfloat16_t,
+ svdot_single_za32_bf16_vg1x2 (w8 + 2, z20, z0),
+ svdot_za32_vg1x2 (w8 + 2, z20, z0))
+
+/*
+** dot_single_w11p4_z27_z0:
+** bfdot za\.s\[w11, 4, vgx2\], {z27\.h - z28\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w11p4_z27_z0, svbfloat16x2_t, svbfloat16_t,
+ svdot_single_za32_bf16_vg1x2 (w11 + 4, z27, z0),
+ svdot_za32_vg1x2 (w11 + 4, z27, z0))
+
+/*
+** dot_single_w8p7_z1_z0:
+** bfdot za\.s\[w8, 7, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p7_z1_z0, svbfloat16x2_t, svbfloat16_t,
+ svdot_single_za32_bf16_vg1x2 (w8 + 7, z1, z0),
+ svdot_za32_vg1x2 (w8 + 7, z1, z0))
+
+/*
+** dot_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** bfdot za\.s\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p8_z1_z0, svbfloat16x2_t, svbfloat16_t,
+ svdot_single_za32_bf16_vg1x2 (w8 + 8, z1, z0),
+ svdot_za32_vg1x2 (w8 + 8, z1, z0))
+
+/*
+** dot_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** bfdot za\.s\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0m1_z1_z0, svbfloat16x2_t, svbfloat16_t,
+ svdot_single_za32_bf16_vg1x2 (w0 - 1, z1, z0),
+ svdot_za32_vg1x2 (w0 - 1, z1, z0))
+
+/*
+** dot_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** bfdot za\.s\[w8, 0, vgx2\], {z0\.h - z1\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (dot_single_w8_z0_z15, svbfloat16x2_t, svbfloat16_t,
+ svdot_single_za32_bf16_vg1x2 (w8, z0, z15),
+ svdot_za32_vg1x2 (w8, z0, z15))
+
+/*
+** dot_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** bfdot za\.s\[w8, 0, vgx2\], {z20\.h - z21\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z20_z16, svbfloat16x2_t, svbfloat16_t,
+ svdot_single_za32_bf16_vg1x2 (w8, z20, z16),
+ svdot_za32_vg1x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_bf16_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_bf16_vg1x4.c
new file mode 100644
index 0000000..acdb3cd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_bf16_vg1x4.c
@@ -0,0 +1,254 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** bfdot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_0_z0_z0, svbfloat16x4_t,
+ svdot_za32_bf16_vg1x4 (0, z0, z0),
+ svdot_za32_vg1x4 (0, z0, z0))
+
+/*
+** dot_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** bfdot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w0_z0_z0, svbfloat16x4_t,
+ svdot_za32_bf16_vg1x4 (w0, z0, z0),
+ svdot_za32_vg1x4 (w0, z0, z0))
+
+/*
+** dot_w8_z0_z4:
+** bfdot za\.s\[w8, 0, vgx4\], {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z4, svbfloat16x4_t,
+ svdot_za32_bf16_vg1x4 (w8, z0, z4),
+ svdot_za32_vg1x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** dot_w8_z0_z18:
+** ...
+** bfdot za\.s\[w8, 0, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z18, svbfloat16x4_t,
+ svdot_za32_bf16_vg1x4 (w8, z0, z18),
+ svdot_za32_vg1x4 (w8, z0, z18))
+
+/*
+** dot_w8_z18_z0:
+** ...
+** bfdot za\.s\[w8, 0, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z18_z0, svbfloat16x4_t,
+ svdot_za32_bf16_vg1x4 (w8, z18, z0),
+ svdot_za32_vg1x4 (w8, z18, z0))
+
+/*
+** dot_w8_z0_z23:
+** ...
+** bfdot za\.s\[w8, 0, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z23, svbfloat16x4_t,
+ svdot_za32_bf16_vg1x4 (w8, z0, z23),
+ svdot_za32_vg1x4 (w8, z0, z23))
+
+/*
+** dot_w8_z23_z0:
+** ...
+** bfdot za\.s\[w8, 0, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z23_z0, svbfloat16x4_t,
+ svdot_za32_bf16_vg1x4 (w8, z23, z0),
+ svdot_za32_vg1x4 (w8, z23, z0))
+
+/*
+** dot_w8_z4_z28:
+** bfdot za\.s\[w8, 0, vgx4\], {z4\.h - z7\.h}, {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z4_z28, svbfloat16x4_t,
+ svdot_za32_bf16_vg1x4 (w8, z4, z28),
+ svdot_za32_vg1x4 (w8, z4, z28))
+
+/*
+** dot_w8_z28_z0:
+** bfdot za\.s\[w8, 0, vgx4\], {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z28_z0, svbfloat16x4_t,
+ svdot_za32_bf16_vg1x4 (w8, z28, z0),
+ svdot_za32_vg1x4 (w8, z28, z0))
+
+/*
+** dot_w8p1_z4_z0:
+** bfdot za\.s\[w8, 1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p1_z4_z0, svbfloat16x4_t,
+ svdot_za32_bf16_vg1x4 (w8 + 1, z4, z0),
+ svdot_za32_vg1x4 (w8 + 1, z4, z0))
+
+/*
+** dot_w8p2_z4_z0:
+** bfdot za\.s\[w8, 2, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p2_z4_z0, svbfloat16x4_t,
+ svdot_za32_bf16_vg1x4 (w8 + 2, z4, z0),
+ svdot_za32_vg1x4 (w8 + 2, z4, z0))
+
+/*
+** dot_w11p4_z4_z0:
+** bfdot za\.s\[w11, 4, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w11p4_z4_z0, svbfloat16x4_t,
+ svdot_za32_bf16_vg1x4 (w11 + 4, z4, z0),
+ svdot_za32_vg1x4 (w11 + 4, z4, z0))
+
+/*
+** dot_w8p7_z4_z0:
+** bfdot za\.s\[w8, 7, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p7_z4_z0, svbfloat16x4_t,
+ svdot_za32_bf16_vg1x4 (w8 + 7, z4, z0),
+ svdot_za32_vg1x4 (w8 + 7, z4, z0))
+
+/*
+** dot_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** bfdot za\.s\[\1, 0, vgx4\], {z4\.h - z7\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p8_z4_z4, svbfloat16x4_t,
+ svdot_za32_bf16_vg1x4 (w8 + 8, z4, z4),
+ svdot_za32_vg1x4 (w8 + 8, z4, z4))
+
+/*
+** dot_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** bfdot za\.s\[\1, 0, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8m1_z4_z0, svbfloat16x4_t,
+ svdot_za32_bf16_vg1x4 (w8 - 1, z4, z0),
+ svdot_za32_vg1x4 (w8 - 1, z4, z0))
+
+/*
+** dot_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** bfdot za\.s\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_0_z1_z0, svbfloat16x4_t, svbfloat16_t,
+ svdot_single_za32_bf16_vg1x4 (0, z1, z0),
+ svdot_za32_vg1x4 (0, z1, z0))
+
+/*
+** dot_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** bfdot za\.s\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0_z1_z0, svbfloat16x4_t, svbfloat16_t,
+ svdot_single_za32_bf16_vg1x4 (w0, z1, z0),
+ svdot_za32_vg1x4 (w0, z1, z0))
+
+/*
+** dot_single_w8_z1_z0:
+** bfdot za\.s\[w8, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z1_z0, svbfloat16x4_t, svbfloat16_t,
+ svdot_single_za32_bf16_vg1x4 (w8, z1, z0),
+ svdot_za32_vg1x4 (w8, z1, z0))
+
+/*
+** dot_single_w8p1_z1_z0:
+** bfdot za\.s\[w8, 1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p1_z1_z0, svbfloat16x4_t, svbfloat16_t,
+ svdot_single_za32_bf16_vg1x4 (w8 + 1, z1, z0),
+ svdot_za32_vg1x4 (w8 + 1, z1, z0))
+
+/*
+** dot_single_w8p4_z20_z0:
+** bfdot za\.s\[w8, 4, vgx4\], {z20\.h - z23\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p4_z20_z0, svbfloat16x4_t, svbfloat16_t,
+ svdot_single_za32_bf16_vg1x4 (w8 + 4, z20, z0),
+ svdot_za32_vg1x4 (w8 + 4, z20, z0))
+
+/*
+** dot_single_w8p6_z27_z0:
+** bfdot za\.s\[w8, 6, vgx4\], {z27\.h - z30\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p6_z27_z0, svbfloat16x4_t, svbfloat16_t,
+ svdot_single_za32_bf16_vg1x4 (w8 + 6, z27, z0),
+ svdot_za32_vg1x4 (w8 + 6, z27, z0))
+
+/*
+** dot_single_w8p7_z1_z0:
+** bfdot za\.s\[w8, 7, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p7_z1_z0, svbfloat16x4_t, svbfloat16_t,
+ svdot_single_za32_bf16_vg1x4 (w8 + 7, z1, z0),
+ svdot_za32_vg1x4 (w8 + 7, z1, z0))
+
+/*
+** dot_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** bfdot za\.s\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p8_z1_z0, svbfloat16x4_t, svbfloat16_t,
+ svdot_single_za32_bf16_vg1x4 (w8 + 8, z1, z0),
+ svdot_za32_vg1x4 (w8 + 8, z1, z0))
+
+/*
+** dot_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** bfdot za\.s\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0m1_z1_z0, svbfloat16x4_t, svbfloat16_t,
+ svdot_single_za32_bf16_vg1x4 (w0 - 1, z1, z0),
+ svdot_za32_vg1x4 (w0 - 1, z1, z0))
+
+/*
+** dot_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** bfdot za\.s\[w8, 0, vgx4\], {z0\.h - z3\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (dot_single_w8_z0_z15, svbfloat16x4_t, svbfloat16_t,
+ svdot_single_za32_bf16_vg1x4 (w8, z0, z15),
+ svdot_za32_vg1x4 (w8, z0, z15))
+
+/*
+** dot_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** bfdot za\.s\[w8, 0, vgx4\], {z20\.h - z23\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z20_z16, svbfloat16x4_t, svbfloat16_t,
+ svdot_single_za32_bf16_vg1x4 (w8, z20, z16),
+ svdot_za32_vg1x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_f16_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_f16_vg1x2.c
new file mode 100644
index 0000000..8779959
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_f16_vg1x2.c
@@ -0,0 +1,243 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_0_z0_z0, svfloat16x2_t,
+ svdot_za32_f16_vg1x2 (0, z0, z0),
+ svdot_za32_vg1x2 (0, z0, z0))
+
+/*
+** dot_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** fdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w0_z0_z0, svfloat16x2_t,
+ svdot_za32_f16_vg1x2 (w0, z0, z0),
+ svdot_za32_vg1x2 (w0, z0, z0))
+
+/*
+** dot_w8_z0_z4:
+** fdot za\.s\[w8, 0, vgx2\], {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z4, svfloat16x2_t,
+ svdot_za32_f16_vg1x2 (w8, z0, z4),
+ svdot_za32_vg1x2 (w8, z0, z4))
+
+/*
+** dot_w8_z4_z18:
+** fdot za\.s\[w8, 0, vgx2\], {z4\.h - z5\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z4_z18, svfloat16x2_t,
+ svdot_za32_f16_vg1x2 (w8, z4, z18),
+ svdot_za32_vg1x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** dot_w8_z0_z23:
+** ...
+** fdot za\.s\[w8, 0, vgx2\], {z0\.h - z1\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z23, svfloat16x2_t,
+ svdot_za32_f16_vg1x2 (w8, z0, z23),
+ svdot_za32_vg1x2 (w8, z0, z23))
+
+/*
+** dot_w8_z23_z0:
+** ...
+** fdot za\.s\[w8, 0, vgx2\], [^\n]+, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z23_z0, svfloat16x2_t,
+ svdot_za32_f16_vg1x2 (w8, z23, z0),
+ svdot_za32_vg1x2 (w8, z23, z0))
+
+/*
+** dot_w8_z18_z28:
+** fdot za\.s\[w8, 0, vgx2\], {z18\.h - z19\.h}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z18_z28, svfloat16x2_t,
+ svdot_za32_f16_vg1x2 (w8, z18, z28),
+ svdot_za32_vg1x2 (w8, z18, z28))
+
+/*
+** dot_w8_z28_z4:
+** fdot za\.s\[w8, 0, vgx2\], {z28\.h - z29\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z28_z4, svfloat16x2_t,
+ svdot_za32_f16_vg1x2 (w8, z28, z4),
+ svdot_za32_vg1x2 (w8, z28, z4))
+
+/*
+** dot_w8p1_z4_z0:
+** fdot za\.s\[w8, 1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p1_z4_z0, svfloat16x2_t,
+ svdot_za32_f16_vg1x2 (w8 + 1, z4, z0),
+ svdot_za32_vg1x2 (w8 + 1, z4, z0))
+
+/*
+** dot_w8p2_z4_z0:
+** fdot za\.s\[w8, 2, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p2_z4_z0, svfloat16x2_t,
+ svdot_za32_f16_vg1x2 (w8 + 2, z4, z0),
+ svdot_za32_vg1x2 (w8 + 2, z4, z0))
+
+/*
+** dot_w11p4_z4_z0:
+** fdot za\.s\[w11, 4, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w11p4_z4_z0, svfloat16x2_t,
+ svdot_za32_f16_vg1x2 (w11 + 4, z4, z0),
+ svdot_za32_vg1x2 (w11 + 4, z4, z0))
+
+/*
+** dot_w8p7_z4_z0:
+** fdot za\.s\[w8, 7, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p7_z4_z0, svfloat16x2_t,
+ svdot_za32_f16_vg1x2 (w8 + 7, z4, z0),
+ svdot_za32_vg1x2 (w8 + 7, z4, z0))
+
+/*
+** dot_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** fdot za\.s\[\1, 0, vgx2\], {z4\.h - z5\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p8_z4_z4, svfloat16x2_t,
+ svdot_za32_f16_vg1x2 (w8 + 8, z4, z4),
+ svdot_za32_vg1x2 (w8 + 8, z4, z4))
+
+/*
+** dot_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fdot za\.s\[\1, 0, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8m1_z4_z0, svfloat16x2_t,
+ svdot_za32_f16_vg1x2 (w8 - 1, z4, z0),
+ svdot_za32_vg1x2 (w8 - 1, z4, z0))
+
+/*
+** dot_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** fdot za\.s\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_0_z1_z0, svfloat16x2_t, svfloat16_t,
+ svdot_single_za32_f16_vg1x2 (0, z1, z0),
+ svdot_za32_vg1x2 (0, z1, z0))
+
+/*
+** dot_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** fdot za\.s\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0_z1_z0, svfloat16x2_t, svfloat16_t,
+ svdot_single_za32_f16_vg1x2 (w0, z1, z0),
+ svdot_za32_vg1x2 (w0, z1, z0))
+
+/*
+** dot_single_w8_z1_z0:
+** fdot za\.s\[w8, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z1_z0, svfloat16x2_t, svfloat16_t,
+ svdot_single_za32_f16_vg1x2 (w8, z1, z0),
+ svdot_za32_vg1x2 (w8, z1, z0))
+
+/*
+** dot_single_w8p1_z1_z0:
+** fdot za\.s\[w8, 1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p1_z1_z0, svfloat16x2_t, svfloat16_t,
+ svdot_single_za32_f16_vg1x2 (w8 + 1, z1, z0),
+ svdot_za32_vg1x2 (w8 + 1, z1, z0))
+
+/*
+** dot_single_w8p2_z20_z0:
+** fdot za\.s\[w8, 2, vgx2\], {z20\.h - z21\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p2_z20_z0, svfloat16x2_t, svfloat16_t,
+ svdot_single_za32_f16_vg1x2 (w8 + 2, z20, z0),
+ svdot_za32_vg1x2 (w8 + 2, z20, z0))
+
+/*
+** dot_single_w11p4_z27_z0:
+** fdot za\.s\[w11, 4, vgx2\], {z27\.h - z28\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w11p4_z27_z0, svfloat16x2_t, svfloat16_t,
+ svdot_single_za32_f16_vg1x2 (w11 + 4, z27, z0),
+ svdot_za32_vg1x2 (w11 + 4, z27, z0))
+
+/*
+** dot_single_w8p7_z1_z0:
+** fdot za\.s\[w8, 7, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p7_z1_z0, svfloat16x2_t, svfloat16_t,
+ svdot_single_za32_f16_vg1x2 (w8 + 7, z1, z0),
+ svdot_za32_vg1x2 (w8 + 7, z1, z0))
+
+/*
+** dot_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** fdot za\.s\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p8_z1_z0, svfloat16x2_t, svfloat16_t,
+ svdot_single_za32_f16_vg1x2 (w8 + 8, z1, z0),
+ svdot_za32_vg1x2 (w8 + 8, z1, z0))
+
+/*
+** dot_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** fdot za\.s\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0m1_z1_z0, svfloat16x2_t, svfloat16_t,
+ svdot_single_za32_f16_vg1x2 (w0 - 1, z1, z0),
+ svdot_za32_vg1x2 (w0 - 1, z1, z0))
+
+/*
+** dot_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** fdot za\.s\[w8, 0, vgx2\], {z0\.h - z1\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (dot_single_w8_z0_z15, svfloat16x2_t, svfloat16_t,
+ svdot_single_za32_f16_vg1x2 (w8, z0, z15),
+ svdot_za32_vg1x2 (w8, z0, z15))
+
+/*
+** dot_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** fdot za\.s\[w8, 0, vgx2\], {z20\.h - z21\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z20_z16, svfloat16x2_t, svfloat16_t,
+ svdot_single_za32_f16_vg1x2 (w8, z20, z16),
+ svdot_za32_vg1x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_f16_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_f16_vg1x4.c
new file mode 100644
index 0000000..2608230
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_f16_vg1x4.c
@@ -0,0 +1,254 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fdot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_0_z0_z0, svfloat16x4_t,
+ svdot_za32_f16_vg1x4 (0, z0, z0),
+ svdot_za32_vg1x4 (0, z0, z0))
+
+/*
+** dot_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** fdot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w0_z0_z0, svfloat16x4_t,
+ svdot_za32_f16_vg1x4 (w0, z0, z0),
+ svdot_za32_vg1x4 (w0, z0, z0))
+
+/*
+** dot_w8_z0_z4:
+** fdot za\.s\[w8, 0, vgx4\], {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z4, svfloat16x4_t,
+ svdot_za32_f16_vg1x4 (w8, z0, z4),
+ svdot_za32_vg1x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** dot_w8_z0_z18:
+** ...
+** fdot za\.s\[w8, 0, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z18, svfloat16x4_t,
+ svdot_za32_f16_vg1x4 (w8, z0, z18),
+ svdot_za32_vg1x4 (w8, z0, z18))
+
+/*
+** dot_w8_z18_z0:
+** ...
+** fdot za\.s\[w8, 0, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z18_z0, svfloat16x4_t,
+ svdot_za32_f16_vg1x4 (w8, z18, z0),
+ svdot_za32_vg1x4 (w8, z18, z0))
+
+/*
+** dot_w8_z0_z23:
+** ...
+** fdot za\.s\[w8, 0, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z23, svfloat16x4_t,
+ svdot_za32_f16_vg1x4 (w8, z0, z23),
+ svdot_za32_vg1x4 (w8, z0, z23))
+
+/*
+** dot_w8_z23_z0:
+** ...
+** fdot za\.s\[w8, 0, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z23_z0, svfloat16x4_t,
+ svdot_za32_f16_vg1x4 (w8, z23, z0),
+ svdot_za32_vg1x4 (w8, z23, z0))
+
+/*
+** dot_w8_z4_z28:
+** fdot za\.s\[w8, 0, vgx4\], {z4\.h - z7\.h}, {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z4_z28, svfloat16x4_t,
+ svdot_za32_f16_vg1x4 (w8, z4, z28),
+ svdot_za32_vg1x4 (w8, z4, z28))
+
+/*
+** dot_w8_z28_z0:
+** fdot za\.s\[w8, 0, vgx4\], {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z28_z0, svfloat16x4_t,
+ svdot_za32_f16_vg1x4 (w8, z28, z0),
+ svdot_za32_vg1x4 (w8, z28, z0))
+
+/*
+** dot_w8p1_z4_z0:
+** fdot za\.s\[w8, 1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p1_z4_z0, svfloat16x4_t,
+ svdot_za32_f16_vg1x4 (w8 + 1, z4, z0),
+ svdot_za32_vg1x4 (w8 + 1, z4, z0))
+
+/*
+** dot_w8p2_z4_z0:
+** fdot za\.s\[w8, 2, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p2_z4_z0, svfloat16x4_t,
+ svdot_za32_f16_vg1x4 (w8 + 2, z4, z0),
+ svdot_za32_vg1x4 (w8 + 2, z4, z0))
+
+/*
+** dot_w11p4_z4_z0:
+** fdot za\.s\[w11, 4, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w11p4_z4_z0, svfloat16x4_t,
+ svdot_za32_f16_vg1x4 (w11 + 4, z4, z0),
+ svdot_za32_vg1x4 (w11 + 4, z4, z0))
+
+/*
+** dot_w8p7_z4_z0:
+** fdot za\.s\[w8, 7, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p7_z4_z0, svfloat16x4_t,
+ svdot_za32_f16_vg1x4 (w8 + 7, z4, z0),
+ svdot_za32_vg1x4 (w8 + 7, z4, z0))
+
+/*
+** dot_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** fdot za\.s\[\1, 0, vgx4\], {z4\.h - z7\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p8_z4_z4, svfloat16x4_t,
+ svdot_za32_f16_vg1x4 (w8 + 8, z4, z4),
+ svdot_za32_vg1x4 (w8 + 8, z4, z4))
+
+/*
+** dot_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fdot za\.s\[\1, 0, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8m1_z4_z0, svfloat16x4_t,
+ svdot_za32_f16_vg1x4 (w8 - 1, z4, z0),
+ svdot_za32_vg1x4 (w8 - 1, z4, z0))
+
+/*
+** dot_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** fdot za\.s\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_0_z1_z0, svfloat16x4_t, svfloat16_t,
+ svdot_single_za32_f16_vg1x4 (0, z1, z0),
+ svdot_za32_vg1x4 (0, z1, z0))
+
+/*
+** dot_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** fdot za\.s\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0_z1_z0, svfloat16x4_t, svfloat16_t,
+ svdot_single_za32_f16_vg1x4 (w0, z1, z0),
+ svdot_za32_vg1x4 (w0, z1, z0))
+
+/*
+** dot_single_w8_z1_z0:
+** fdot za\.s\[w8, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z1_z0, svfloat16x4_t, svfloat16_t,
+ svdot_single_za32_f16_vg1x4 (w8, z1, z0),
+ svdot_za32_vg1x4 (w8, z1, z0))
+
+/*
+** dot_single_w8p1_z1_z0:
+** fdot za\.s\[w8, 1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p1_z1_z0, svfloat16x4_t, svfloat16_t,
+ svdot_single_za32_f16_vg1x4 (w8 + 1, z1, z0),
+ svdot_za32_vg1x4 (w8 + 1, z1, z0))
+
+/*
+** dot_single_w8p4_z20_z0:
+** fdot za\.s\[w8, 4, vgx4\], {z20\.h - z23\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p4_z20_z0, svfloat16x4_t, svfloat16_t,
+ svdot_single_za32_f16_vg1x4 (w8 + 4, z20, z0),
+ svdot_za32_vg1x4 (w8 + 4, z20, z0))
+
+/*
+** dot_single_w8p6_z27_z0:
+** fdot za\.s\[w8, 6, vgx4\], {z27\.h - z30\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p6_z27_z0, svfloat16x4_t, svfloat16_t,
+ svdot_single_za32_f16_vg1x4 (w8 + 6, z27, z0),
+ svdot_za32_vg1x4 (w8 + 6, z27, z0))
+
+/*
+** dot_single_w8p7_z1_z0:
+** fdot za\.s\[w8, 7, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p7_z1_z0, svfloat16x4_t, svfloat16_t,
+ svdot_single_za32_f16_vg1x4 (w8 + 7, z1, z0),
+ svdot_za32_vg1x4 (w8 + 7, z1, z0))
+
+/*
+** dot_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** fdot za\.s\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p8_z1_z0, svfloat16x4_t, svfloat16_t,
+ svdot_single_za32_f16_vg1x4 (w8 + 8, z1, z0),
+ svdot_za32_vg1x4 (w8 + 8, z1, z0))
+
+/*
+** dot_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** fdot za\.s\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0m1_z1_z0, svfloat16x4_t, svfloat16_t,
+ svdot_single_za32_f16_vg1x4 (w0 - 1, z1, z0),
+ svdot_za32_vg1x4 (w0 - 1, z1, z0))
+
+/*
+** dot_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** fdot za\.s\[w8, 0, vgx4\], {z0\.h - z3\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (dot_single_w8_z0_z15, svfloat16x4_t, svfloat16_t,
+ svdot_single_za32_f16_vg1x4 (w8, z0, z15),
+ svdot_za32_vg1x4 (w8, z0, z15))
+
+/*
+** dot_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** fdot za\.s\[w8, 0, vgx4\], {z20\.h - z23\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z20_z16, svfloat16x4_t, svfloat16_t,
+ svdot_single_za32_f16_vg1x4 (w8, z20, z16),
+ svdot_za32_vg1x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_s16_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_s16_vg1x2.c
new file mode 100644
index 0000000..9b2285b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_s16_vg1x2.c
@@ -0,0 +1,243 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** sdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_0_z0_z0, svint16x2_t,
+ svdot_za32_s16_vg1x2 (0, z0, z0),
+ svdot_za32_vg1x2 (0, z0, z0))
+
+/*
+** dot_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** sdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w0_z0_z0, svint16x2_t,
+ svdot_za32_s16_vg1x2 (w0, z0, z0),
+ svdot_za32_vg1x2 (w0, z0, z0))
+
+/*
+** dot_w8_z0_z4:
+** sdot za\.s\[w8, 0, vgx2\], {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z4, svint16x2_t,
+ svdot_za32_s16_vg1x2 (w8, z0, z4),
+ svdot_za32_vg1x2 (w8, z0, z4))
+
+/*
+** dot_w8_z4_z18:
+** sdot za\.s\[w8, 0, vgx2\], {z4\.h - z5\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z4_z18, svint16x2_t,
+ svdot_za32_s16_vg1x2 (w8, z4, z18),
+ svdot_za32_vg1x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** dot_w8_z0_z23:
+** ...
+** sdot za\.s\[w8, 0, vgx2\], {z0\.h - z1\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z23, svint16x2_t,
+ svdot_za32_s16_vg1x2 (w8, z0, z23),
+ svdot_za32_vg1x2 (w8, z0, z23))
+
+/*
+** dot_w8_z23_z0:
+** ...
+** sdot za\.s\[w8, 0, vgx2\], [^\n]+, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z23_z0, svint16x2_t,
+ svdot_za32_s16_vg1x2 (w8, z23, z0),
+ svdot_za32_vg1x2 (w8, z23, z0))
+
+/*
+** dot_w8_z18_z28:
+** sdot za\.s\[w8, 0, vgx2\], {z18\.h - z19\.h}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z18_z28, svint16x2_t,
+ svdot_za32_s16_vg1x2 (w8, z18, z28),
+ svdot_za32_vg1x2 (w8, z18, z28))
+
+/*
+** dot_w8_z28_z4:
+** sdot za\.s\[w8, 0, vgx2\], {z28\.h - z29\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z28_z4, svint16x2_t,
+ svdot_za32_s16_vg1x2 (w8, z28, z4),
+ svdot_za32_vg1x2 (w8, z28, z4))
+
+/*
+** dot_w8p1_z4_z0:
+** sdot za\.s\[w8, 1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p1_z4_z0, svint16x2_t,
+ svdot_za32_s16_vg1x2 (w8 + 1, z4, z0),
+ svdot_za32_vg1x2 (w8 + 1, z4, z0))
+
+/*
+** dot_w8p2_z4_z0:
+** sdot za\.s\[w8, 2, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p2_z4_z0, svint16x2_t,
+ svdot_za32_s16_vg1x2 (w8 + 2, z4, z0),
+ svdot_za32_vg1x2 (w8 + 2, z4, z0))
+
+/*
+** dot_w11p4_z4_z0:
+** sdot za\.s\[w11, 4, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w11p4_z4_z0, svint16x2_t,
+ svdot_za32_s16_vg1x2 (w11 + 4, z4, z0),
+ svdot_za32_vg1x2 (w11 + 4, z4, z0))
+
+/*
+** dot_w8p7_z4_z0:
+** sdot za\.s\[w8, 7, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p7_z4_z0, svint16x2_t,
+ svdot_za32_s16_vg1x2 (w8 + 7, z4, z0),
+ svdot_za32_vg1x2 (w8 + 7, z4, z0))
+
+/*
+** dot_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** sdot za\.s\[\1, 0, vgx2\], {z4\.h - z5\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p8_z4_z4, svint16x2_t,
+ svdot_za32_s16_vg1x2 (w8 + 8, z4, z4),
+ svdot_za32_vg1x2 (w8 + 8, z4, z4))
+
+/*
+** dot_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** sdot za\.s\[\1, 0, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8m1_z4_z0, svint16x2_t,
+ svdot_za32_s16_vg1x2 (w8 - 1, z4, z0),
+ svdot_za32_vg1x2 (w8 - 1, z4, z0))
+
+/*
+** dot_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** sdot za\.s\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_0_z1_z0, svint16x2_t, svint16_t,
+ svdot_single_za32_s16_vg1x2 (0, z1, z0),
+ svdot_za32_vg1x2 (0, z1, z0))
+
+/*
+** dot_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** sdot za\.s\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0_z1_z0, svint16x2_t, svint16_t,
+ svdot_single_za32_s16_vg1x2 (w0, z1, z0),
+ svdot_za32_vg1x2 (w0, z1, z0))
+
+/*
+** dot_single_w8_z1_z0:
+** sdot za\.s\[w8, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z1_z0, svint16x2_t, svint16_t,
+ svdot_single_za32_s16_vg1x2 (w8, z1, z0),
+ svdot_za32_vg1x2 (w8, z1, z0))
+
+/*
+** dot_single_w8p1_z1_z0:
+** sdot za\.s\[w8, 1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p1_z1_z0, svint16x2_t, svint16_t,
+ svdot_single_za32_s16_vg1x2 (w8 + 1, z1, z0),
+ svdot_za32_vg1x2 (w8 + 1, z1, z0))
+
+/*
+** dot_single_w8p2_z20_z0:
+** sdot za\.s\[w8, 2, vgx2\], {z20\.h - z21\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p2_z20_z0, svint16x2_t, svint16_t,
+ svdot_single_za32_s16_vg1x2 (w8 + 2, z20, z0),
+ svdot_za32_vg1x2 (w8 + 2, z20, z0))
+
+/*
+** dot_single_w11p4_z27_z0:
+** sdot za\.s\[w11, 4, vgx2\], {z27\.h - z28\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w11p4_z27_z0, svint16x2_t, svint16_t,
+ svdot_single_za32_s16_vg1x2 (w11 + 4, z27, z0),
+ svdot_za32_vg1x2 (w11 + 4, z27, z0))
+
+/*
+** dot_single_w8p7_z1_z0:
+** sdot za\.s\[w8, 7, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p7_z1_z0, svint16x2_t, svint16_t,
+ svdot_single_za32_s16_vg1x2 (w8 + 7, z1, z0),
+ svdot_za32_vg1x2 (w8 + 7, z1, z0))
+
+/*
+** dot_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sdot za\.s\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p8_z1_z0, svint16x2_t, svint16_t,
+ svdot_single_za32_s16_vg1x2 (w8 + 8, z1, z0),
+ svdot_za32_vg1x2 (w8 + 8, z1, z0))
+
+/*
+** dot_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** sdot za\.s\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0m1_z1_z0, svint16x2_t, svint16_t,
+ svdot_single_za32_s16_vg1x2 (w0 - 1, z1, z0),
+ svdot_za32_vg1x2 (w0 - 1, z1, z0))
+
+/*
+** dot_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** sdot za\.s\[w8, 0, vgx2\], {z0\.h - z1\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (dot_single_w8_z0_z15, svint16x2_t, svint16_t,
+ svdot_single_za32_s16_vg1x2 (w8, z0, z15),
+ svdot_za32_vg1x2 (w8, z0, z15))
+
+/*
+** dot_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** sdot za\.s\[w8, 0, vgx2\], {z20\.h - z21\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z20_z16, svint16x2_t, svint16_t,
+ svdot_single_za32_s16_vg1x2 (w8, z20, z16),
+ svdot_za32_vg1x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_s16_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_s16_vg1x4.c
new file mode 100644
index 0000000..78c3f42
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_s16_vg1x4.c
@@ -0,0 +1,254 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** sdot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_0_z0_z0, svint16x4_t,
+ svdot_za32_s16_vg1x4 (0, z0, z0),
+ svdot_za32_vg1x4 (0, z0, z0))
+
+/*
+** dot_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** sdot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w0_z0_z0, svint16x4_t,
+ svdot_za32_s16_vg1x4 (w0, z0, z0),
+ svdot_za32_vg1x4 (w0, z0, z0))
+
+/*
+** dot_w8_z0_z4:
+** sdot za\.s\[w8, 0, vgx4\], {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z4, svint16x4_t,
+ svdot_za32_s16_vg1x4 (w8, z0, z4),
+ svdot_za32_vg1x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** dot_w8_z0_z18:
+** ...
+** sdot za\.s\[w8, 0, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z18, svint16x4_t,
+ svdot_za32_s16_vg1x4 (w8, z0, z18),
+ svdot_za32_vg1x4 (w8, z0, z18))
+
+/*
+** dot_w8_z18_z0:
+** ...
+** sdot za\.s\[w8, 0, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z18_z0, svint16x4_t,
+ svdot_za32_s16_vg1x4 (w8, z18, z0),
+ svdot_za32_vg1x4 (w8, z18, z0))
+
+/*
+** dot_w8_z0_z23:
+** ...
+** sdot za\.s\[w8, 0, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z23, svint16x4_t,
+ svdot_za32_s16_vg1x4 (w8, z0, z23),
+ svdot_za32_vg1x4 (w8, z0, z23))
+
+/*
+** dot_w8_z23_z0:
+** ...
+** sdot za\.s\[w8, 0, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z23_z0, svint16x4_t,
+ svdot_za32_s16_vg1x4 (w8, z23, z0),
+ svdot_za32_vg1x4 (w8, z23, z0))
+
+/*
+** dot_w8_z4_z28:
+** sdot za\.s\[w8, 0, vgx4\], {z4\.h - z7\.h}, {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z4_z28, svint16x4_t,
+ svdot_za32_s16_vg1x4 (w8, z4, z28),
+ svdot_za32_vg1x4 (w8, z4, z28))
+
+/*
+** dot_w8_z28_z0:
+** sdot za\.s\[w8, 0, vgx4\], {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z28_z0, svint16x4_t,
+ svdot_za32_s16_vg1x4 (w8, z28, z0),
+ svdot_za32_vg1x4 (w8, z28, z0))
+
+/*
+** dot_w8p1_z4_z0:
+** sdot za\.s\[w8, 1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p1_z4_z0, svint16x4_t,
+ svdot_za32_s16_vg1x4 (w8 + 1, z4, z0),
+ svdot_za32_vg1x4 (w8 + 1, z4, z0))
+
+/*
+** dot_w8p2_z4_z0:
+** sdot za\.s\[w8, 2, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p2_z4_z0, svint16x4_t,
+ svdot_za32_s16_vg1x4 (w8 + 2, z4, z0),
+ svdot_za32_vg1x4 (w8 + 2, z4, z0))
+
+/*
+** dot_w11p4_z4_z0:
+** sdot za\.s\[w11, 4, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w11p4_z4_z0, svint16x4_t,
+ svdot_za32_s16_vg1x4 (w11 + 4, z4, z0),
+ svdot_za32_vg1x4 (w11 + 4, z4, z0))
+
+/*
+** dot_w8p7_z4_z0:
+** sdot za\.s\[w8, 7, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p7_z4_z0, svint16x4_t,
+ svdot_za32_s16_vg1x4 (w8 + 7, z4, z0),
+ svdot_za32_vg1x4 (w8 + 7, z4, z0))
+
+/*
+** dot_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** sdot za\.s\[\1, 0, vgx4\], {z4\.h - z7\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p8_z4_z4, svint16x4_t,
+ svdot_za32_s16_vg1x4 (w8 + 8, z4, z4),
+ svdot_za32_vg1x4 (w8 + 8, z4, z4))
+
+/*
+** dot_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** sdot za\.s\[\1, 0, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8m1_z4_z0, svint16x4_t,
+ svdot_za32_s16_vg1x4 (w8 - 1, z4, z0),
+ svdot_za32_vg1x4 (w8 - 1, z4, z0))
+
+/*
+** dot_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** sdot za\.s\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_0_z1_z0, svint16x4_t, svint16_t,
+ svdot_single_za32_s16_vg1x4 (0, z1, z0),
+ svdot_za32_vg1x4 (0, z1, z0))
+
+/*
+** dot_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** sdot za\.s\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0_z1_z0, svint16x4_t, svint16_t,
+ svdot_single_za32_s16_vg1x4 (w0, z1, z0),
+ svdot_za32_vg1x4 (w0, z1, z0))
+
+/*
+** dot_single_w8_z1_z0:
+** sdot za\.s\[w8, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z1_z0, svint16x4_t, svint16_t,
+ svdot_single_za32_s16_vg1x4 (w8, z1, z0),
+ svdot_za32_vg1x4 (w8, z1, z0))
+
+/*
+** dot_single_w8p1_z1_z0:
+** sdot za\.s\[w8, 1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p1_z1_z0, svint16x4_t, svint16_t,
+ svdot_single_za32_s16_vg1x4 (w8 + 1, z1, z0),
+ svdot_za32_vg1x4 (w8 + 1, z1, z0))
+
+/*
+** dot_single_w8p4_z20_z0:
+** sdot za\.s\[w8, 4, vgx4\], {z20\.h - z23\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p4_z20_z0, svint16x4_t, svint16_t,
+ svdot_single_za32_s16_vg1x4 (w8 + 4, z20, z0),
+ svdot_za32_vg1x4 (w8 + 4, z20, z0))
+
+/*
+** dot_single_w8p6_z27_z0:
+** sdot za\.s\[w8, 6, vgx4\], {z27\.h - z30\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p6_z27_z0, svint16x4_t, svint16_t,
+ svdot_single_za32_s16_vg1x4 (w8 + 6, z27, z0),
+ svdot_za32_vg1x4 (w8 + 6, z27, z0))
+
+/*
+** dot_single_w8p7_z1_z0:
+** sdot za\.s\[w8, 7, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p7_z1_z0, svint16x4_t, svint16_t,
+ svdot_single_za32_s16_vg1x4 (w8 + 7, z1, z0),
+ svdot_za32_vg1x4 (w8 + 7, z1, z0))
+
+/*
+** dot_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sdot za\.s\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p8_z1_z0, svint16x4_t, svint16_t,
+ svdot_single_za32_s16_vg1x4 (w8 + 8, z1, z0),
+ svdot_za32_vg1x4 (w8 + 8, z1, z0))
+
+/*
+** dot_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** sdot za\.s\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0m1_z1_z0, svint16x4_t, svint16_t,
+ svdot_single_za32_s16_vg1x4 (w0 - 1, z1, z0),
+ svdot_za32_vg1x4 (w0 - 1, z1, z0))
+
+/*
+** dot_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** sdot za\.s\[w8, 0, vgx4\], {z0\.h - z3\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (dot_single_w8_z0_z15, svint16x4_t, svint16_t,
+ svdot_single_za32_s16_vg1x4 (w8, z0, z15),
+ svdot_za32_vg1x4 (w8, z0, z15))
+
+/*
+** dot_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** sdot za\.s\[w8, 0, vgx4\], {z20\.h - z23\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z20_z16, svint16x4_t, svint16_t,
+ svdot_single_za32_s16_vg1x4 (w8, z20, z16),
+ svdot_za32_vg1x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_s8_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_s8_vg1x2.c
new file mode 100644
index 0000000..3570efc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_s8_vg1x2.c
@@ -0,0 +1,243 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** sdot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_0_z0_z0, svint8x2_t,
+ svdot_za32_s8_vg1x2 (0, z0, z0),
+ svdot_za32_vg1x2 (0, z0, z0))
+
+/*
+** dot_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** sdot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w0_z0_z0, svint8x2_t,
+ svdot_za32_s8_vg1x2 (w0, z0, z0),
+ svdot_za32_vg1x2 (w0, z0, z0))
+
+/*
+** dot_w8_z0_z4:
+** sdot za\.s\[w8, 0, vgx2\], {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z4, svint8x2_t,
+ svdot_za32_s8_vg1x2 (w8, z0, z4),
+ svdot_za32_vg1x2 (w8, z0, z4))
+
+/*
+** dot_w8_z4_z18:
+** sdot za\.s\[w8, 0, vgx2\], {z4\.b - z5\.b}, {z18\.b - z19\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z4_z18, svint8x2_t,
+ svdot_za32_s8_vg1x2 (w8, z4, z18),
+ svdot_za32_vg1x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** dot_w8_z0_z23:
+** ...
+** sdot za\.s\[w8, 0, vgx2\], {z0\.b - z1\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z23, svint8x2_t,
+ svdot_za32_s8_vg1x2 (w8, z0, z23),
+ svdot_za32_vg1x2 (w8, z0, z23))
+
+/*
+** dot_w8_z23_z0:
+** ...
+** sdot za\.s\[w8, 0, vgx2\], [^\n]+, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z23_z0, svint8x2_t,
+ svdot_za32_s8_vg1x2 (w8, z23, z0),
+ svdot_za32_vg1x2 (w8, z23, z0))
+
+/*
+** dot_w8_z18_z28:
+** sdot za\.s\[w8, 0, vgx2\], {z18\.b - z19\.b}, {z28\.b - z29\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z18_z28, svint8x2_t,
+ svdot_za32_s8_vg1x2 (w8, z18, z28),
+ svdot_za32_vg1x2 (w8, z18, z28))
+
+/*
+** dot_w8_z28_z4:
+** sdot za\.s\[w8, 0, vgx2\], {z28\.b - z29\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z28_z4, svint8x2_t,
+ svdot_za32_s8_vg1x2 (w8, z28, z4),
+ svdot_za32_vg1x2 (w8, z28, z4))
+
+/*
+** dot_w8p1_z4_z0:
+** sdot za\.s\[w8, 1, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p1_z4_z0, svint8x2_t,
+ svdot_za32_s8_vg1x2 (w8 + 1, z4, z0),
+ svdot_za32_vg1x2 (w8 + 1, z4, z0))
+
+/*
+** dot_w8p2_z4_z0:
+** sdot za\.s\[w8, 2, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p2_z4_z0, svint8x2_t,
+ svdot_za32_s8_vg1x2 (w8 + 2, z4, z0),
+ svdot_za32_vg1x2 (w8 + 2, z4, z0))
+
+/*
+** dot_w11p4_z4_z0:
+** sdot za\.s\[w11, 4, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w11p4_z4_z0, svint8x2_t,
+ svdot_za32_s8_vg1x2 (w11 + 4, z4, z0),
+ svdot_za32_vg1x2 (w11 + 4, z4, z0))
+
+/*
+** dot_w8p7_z4_z0:
+** sdot za\.s\[w8, 7, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p7_z4_z0, svint8x2_t,
+ svdot_za32_s8_vg1x2 (w8 + 7, z4, z0),
+ svdot_za32_vg1x2 (w8 + 7, z4, z0))
+
+/*
+** dot_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** sdot za\.s\[\1, 0, vgx2\], {z4\.b - z5\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p8_z4_z4, svint8x2_t,
+ svdot_za32_s8_vg1x2 (w8 + 8, z4, z4),
+ svdot_za32_vg1x2 (w8 + 8, z4, z4))
+
+/*
+** dot_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** sdot za\.s\[\1, 0, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8m1_z4_z0, svint8x2_t,
+ svdot_za32_s8_vg1x2 (w8 - 1, z4, z0),
+ svdot_za32_vg1x2 (w8 - 1, z4, z0))
+
+/*
+** dot_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** sdot za\.s\[\1, 0, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_0_z1_z0, svint8x2_t, svint8_t,
+ svdot_single_za32_s8_vg1x2 (0, z1, z0),
+ svdot_za32_vg1x2 (0, z1, z0))
+
+/*
+** dot_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** sdot za\.s\[\1, 0, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0_z1_z0, svint8x2_t, svint8_t,
+ svdot_single_za32_s8_vg1x2 (w0, z1, z0),
+ svdot_za32_vg1x2 (w0, z1, z0))
+
+/*
+** dot_single_w8_z1_z0:
+** sdot za\.s\[w8, 0, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z1_z0, svint8x2_t, svint8_t,
+ svdot_single_za32_s8_vg1x2 (w8, z1, z0),
+ svdot_za32_vg1x2 (w8, z1, z0))
+
+/*
+** dot_single_w8p1_z1_z0:
+** sdot za\.s\[w8, 1, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p1_z1_z0, svint8x2_t, svint8_t,
+ svdot_single_za32_s8_vg1x2 (w8 + 1, z1, z0),
+ svdot_za32_vg1x2 (w8 + 1, z1, z0))
+
+/*
+** dot_single_w8p2_z20_z0:
+** sdot za\.s\[w8, 2, vgx2\], {z20\.b - z21\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p2_z20_z0, svint8x2_t, svint8_t,
+ svdot_single_za32_s8_vg1x2 (w8 + 2, z20, z0),
+ svdot_za32_vg1x2 (w8 + 2, z20, z0))
+
+/*
+** dot_single_w11p4_z27_z0:
+** sdot za\.s\[w11, 4, vgx2\], {z27\.b - z28\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w11p4_z27_z0, svint8x2_t, svint8_t,
+ svdot_single_za32_s8_vg1x2 (w11 + 4, z27, z0),
+ svdot_za32_vg1x2 (w11 + 4, z27, z0))
+
+/*
+** dot_single_w8p7_z1_z0:
+** sdot za\.s\[w8, 7, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p7_z1_z0, svint8x2_t, svint8_t,
+ svdot_single_za32_s8_vg1x2 (w8 + 7, z1, z0),
+ svdot_za32_vg1x2 (w8 + 7, z1, z0))
+
+/*
+** dot_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sdot za\.s\[\1, 0, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p8_z1_z0, svint8x2_t, svint8_t,
+ svdot_single_za32_s8_vg1x2 (w8 + 8, z1, z0),
+ svdot_za32_vg1x2 (w8 + 8, z1, z0))
+
+/*
+** dot_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** sdot za\.s\[\1, 0, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0m1_z1_z0, svint8x2_t, svint8_t,
+ svdot_single_za32_s8_vg1x2 (w0 - 1, z1, z0),
+ svdot_za32_vg1x2 (w0 - 1, z1, z0))
+
+/*
+** dot_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** sdot za\.s\[w8, 0, vgx2\], {z0\.b - z1\.b}, z15\.b
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (dot_single_w8_z0_z15, svint8x2_t, svint8_t,
+ svdot_single_za32_s8_vg1x2 (w8, z0, z15),
+ svdot_za32_vg1x2 (w8, z0, z15))
+
+/*
+** dot_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** sdot za\.s\[w8, 0, vgx2\], {z20\.b - z21\.b}, \1\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z20_z16, svint8x2_t, svint8_t,
+ svdot_single_za32_s8_vg1x2 (w8, z20, z16),
+ svdot_za32_vg1x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_s8_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_s8_vg1x4.c
new file mode 100644
index 0000000..d738910
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_s8_vg1x4.c
@@ -0,0 +1,254 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** sdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_0_z0_z0, svint8x4_t,
+ svdot_za32_s8_vg1x4 (0, z0, z0),
+ svdot_za32_vg1x4 (0, z0, z0))
+
+/*
+** dot_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** sdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w0_z0_z0, svint8x4_t,
+ svdot_za32_s8_vg1x4 (w0, z0, z0),
+ svdot_za32_vg1x4 (w0, z0, z0))
+
+/*
+** dot_w8_z0_z4:
+** sdot za\.s\[w8, 0, vgx4\], {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z4, svint8x4_t,
+ svdot_za32_s8_vg1x4 (w8, z0, z4),
+ svdot_za32_vg1x4 (w8, z0, z4))
+
+/* Leave it to the assembler to check correctness for misaligned registers.  */
+
+/*
+** dot_w8_z0_z18:
+** ...
+** sdot za\.s\[w8, 0, vgx4\], {z0\.b - z3\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z18, svint8x4_t,
+ svdot_za32_s8_vg1x4 (w8, z0, z18),
+ svdot_za32_vg1x4 (w8, z0, z18))
+
+/*
+** dot_w8_z18_z0:
+** ...
+** sdot za\.s\[w8, 0, vgx4\], [^\n]+, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z18_z0, svint8x4_t,
+ svdot_za32_s8_vg1x4 (w8, z18, z0),
+ svdot_za32_vg1x4 (w8, z18, z0))
+
+/*
+** dot_w8_z0_z23:
+** ...
+** sdot za\.s\[w8, 0, vgx4\], {z0\.b - z3\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z23, svint8x4_t,
+ svdot_za32_s8_vg1x4 (w8, z0, z23),
+ svdot_za32_vg1x4 (w8, z0, z23))
+
+/*
+** dot_w8_z23_z0:
+** ...
+** sdot za\.s\[w8, 0, vgx4\], [^\n]+, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z23_z0, svint8x4_t,
+ svdot_za32_s8_vg1x4 (w8, z23, z0),
+ svdot_za32_vg1x4 (w8, z23, z0))
+
+/*
+** dot_w8_z4_z28:
+** sdot za\.s\[w8, 0, vgx4\], {z4\.b - z7\.b}, {z28\.b - z31\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z4_z28, svint8x4_t,
+ svdot_za32_s8_vg1x4 (w8, z4, z28),
+ svdot_za32_vg1x4 (w8, z4, z28))
+
+/*
+** dot_w8_z28_z0:
+** sdot za\.s\[w8, 0, vgx4\], {z28\.b - z31\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z28_z0, svint8x4_t,
+ svdot_za32_s8_vg1x4 (w8, z28, z0),
+ svdot_za32_vg1x4 (w8, z28, z0))
+
+/*
+** dot_w8p1_z4_z0:
+** sdot za\.s\[w8, 1, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p1_z4_z0, svint8x4_t,
+ svdot_za32_s8_vg1x4 (w8 + 1, z4, z0),
+ svdot_za32_vg1x4 (w8 + 1, z4, z0))
+
+/*
+** dot_w8p2_z4_z0:
+** sdot za\.s\[w8, 2, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p2_z4_z0, svint8x4_t,
+ svdot_za32_s8_vg1x4 (w8 + 2, z4, z0),
+ svdot_za32_vg1x4 (w8 + 2, z4, z0))
+
+/*
+** dot_w11p4_z4_z0:
+** sdot za\.s\[w11, 4, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w11p4_z4_z0, svint8x4_t,
+ svdot_za32_s8_vg1x4 (w11 + 4, z4, z0),
+ svdot_za32_vg1x4 (w11 + 4, z4, z0))
+
+/*
+** dot_w8p7_z4_z0:
+** sdot za\.s\[w8, 7, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p7_z4_z0, svint8x4_t,
+ svdot_za32_s8_vg1x4 (w8 + 7, z4, z0),
+ svdot_za32_vg1x4 (w8 + 7, z4, z0))
+
+/*
+** dot_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** sdot za\.s\[\1, 0, vgx4\], {z4\.b - z7\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p8_z4_z4, svint8x4_t,
+ svdot_za32_s8_vg1x4 (w8 + 8, z4, z4),
+ svdot_za32_vg1x4 (w8 + 8, z4, z4))
+
+/*
+** dot_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** sdot za\.s\[\1, 0, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8m1_z4_z0, svint8x4_t,
+ svdot_za32_s8_vg1x4 (w8 - 1, z4, z0),
+ svdot_za32_vg1x4 (w8 - 1, z4, z0))
+
+/*
+** dot_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** sdot za\.s\[\1, 0, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_0_z1_z0, svint8x4_t, svint8_t,
+ svdot_single_za32_s8_vg1x4 (0, z1, z0),
+ svdot_za32_vg1x4 (0, z1, z0))
+
+/*
+** dot_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** sdot za\.s\[\1, 0, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0_z1_z0, svint8x4_t, svint8_t,
+ svdot_single_za32_s8_vg1x4 (w0, z1, z0),
+ svdot_za32_vg1x4 (w0, z1, z0))
+
+/*
+** dot_single_w8_z1_z0:
+** sdot za\.s\[w8, 0, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z1_z0, svint8x4_t, svint8_t,
+ svdot_single_za32_s8_vg1x4 (w8, z1, z0),
+ svdot_za32_vg1x4 (w8, z1, z0))
+
+/*
+** dot_single_w8p1_z1_z0:
+** sdot za\.s\[w8, 1, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p1_z1_z0, svint8x4_t, svint8_t,
+ svdot_single_za32_s8_vg1x4 (w8 + 1, z1, z0),
+ svdot_za32_vg1x4 (w8 + 1, z1, z0))
+
+/*
+** dot_single_w8p4_z20_z0:
+** sdot za\.s\[w8, 4, vgx4\], {z20\.b - z23\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p4_z20_z0, svint8x4_t, svint8_t,
+ svdot_single_za32_s8_vg1x4 (w8 + 4, z20, z0),
+ svdot_za32_vg1x4 (w8 + 4, z20, z0))
+
+/*
+** dot_single_w8p6_z27_z0:
+** sdot za\.s\[w8, 6, vgx4\], {z27\.b - z30\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p6_z27_z0, svint8x4_t, svint8_t,
+ svdot_single_za32_s8_vg1x4 (w8 + 6, z27, z0),
+ svdot_za32_vg1x4 (w8 + 6, z27, z0))
+
+/*
+** dot_single_w8p7_z1_z0:
+** sdot za\.s\[w8, 7, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p7_z1_z0, svint8x4_t, svint8_t,
+ svdot_single_za32_s8_vg1x4 (w8 + 7, z1, z0),
+ svdot_za32_vg1x4 (w8 + 7, z1, z0))
+
+/*
+** dot_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sdot za\.s\[\1, 0, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p8_z1_z0, svint8x4_t, svint8_t,
+ svdot_single_za32_s8_vg1x4 (w8 + 8, z1, z0),
+ svdot_za32_vg1x4 (w8 + 8, z1, z0))
+
+/*
+** dot_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** sdot za\.s\[\1, 0, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0m1_z1_z0, svint8x4_t, svint8_t,
+ svdot_single_za32_s8_vg1x4 (w0 - 1, z1, z0),
+ svdot_za32_vg1x4 (w0 - 1, z1, z0))
+
+/*
+** dot_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** sdot za\.s\[w8, 0, vgx4\], {z0\.b - z3\.b}, z15\.b
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (dot_single_w8_z0_z15, svint8x4_t, svint8_t,
+ svdot_single_za32_s8_vg1x4 (w8, z0, z15),
+ svdot_za32_vg1x4 (w8, z0, z15))
+
+/*
+** dot_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** sdot za\.s\[w8, 0, vgx4\], {z20\.b - z23\.b}, \1\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z20_z16, svint8x4_t, svint8_t,
+ svdot_single_za32_s8_vg1x4 (w8, z20, z16),
+ svdot_za32_vg1x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_u16_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_u16_vg1x2.c
new file mode 100644
index 0000000..c78fe5b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_u16_vg1x2.c
@@ -0,0 +1,243 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** udot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_0_z0_z0, svuint16x2_t,
+ svdot_za32_u16_vg1x2 (0, z0, z0),
+ svdot_za32_vg1x2 (0, z0, z0))
+
+/*
+** dot_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** udot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w0_z0_z0, svuint16x2_t,
+ svdot_za32_u16_vg1x2 (w0, z0, z0),
+ svdot_za32_vg1x2 (w0, z0, z0))
+
+/*
+** dot_w8_z0_z4:
+** udot za\.s\[w8, 0, vgx2\], {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z4, svuint16x2_t,
+ svdot_za32_u16_vg1x2 (w8, z0, z4),
+ svdot_za32_vg1x2 (w8, z0, z4))
+
+/*
+** dot_w8_z4_z18:
+** udot za\.s\[w8, 0, vgx2\], {z4\.h - z5\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z4_z18, svuint16x2_t,
+ svdot_za32_u16_vg1x2 (w8, z4, z18),
+ svdot_za32_vg1x2 (w8, z4, z18))
+
+/* Leave it to the assembler to check correctness for misaligned registers.  */
+
+/*
+** dot_w8_z0_z23:
+** ...
+** udot za\.s\[w8, 0, vgx2\], {z0\.h - z1\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z23, svuint16x2_t,
+ svdot_za32_u16_vg1x2 (w8, z0, z23),
+ svdot_za32_vg1x2 (w8, z0, z23))
+
+/*
+** dot_w8_z23_z0:
+** ...
+** udot za\.s\[w8, 0, vgx2\], [^\n]+, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z23_z0, svuint16x2_t,
+ svdot_za32_u16_vg1x2 (w8, z23, z0),
+ svdot_za32_vg1x2 (w8, z23, z0))
+
+/*
+** dot_w8_z18_z28:
+** udot za\.s\[w8, 0, vgx2\], {z18\.h - z19\.h}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z18_z28, svuint16x2_t,
+ svdot_za32_u16_vg1x2 (w8, z18, z28),
+ svdot_za32_vg1x2 (w8, z18, z28))
+
+/*
+** dot_w8_z28_z4:
+** udot za\.s\[w8, 0, vgx2\], {z28\.h - z29\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z28_z4, svuint16x2_t,
+ svdot_za32_u16_vg1x2 (w8, z28, z4),
+ svdot_za32_vg1x2 (w8, z28, z4))
+
+/*
+** dot_w8p1_z4_z0:
+** udot za\.s\[w8, 1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p1_z4_z0, svuint16x2_t,
+ svdot_za32_u16_vg1x2 (w8 + 1, z4, z0),
+ svdot_za32_vg1x2 (w8 + 1, z4, z0))
+
+/*
+** dot_w8p2_z4_z0:
+** udot za\.s\[w8, 2, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p2_z4_z0, svuint16x2_t,
+ svdot_za32_u16_vg1x2 (w8 + 2, z4, z0),
+ svdot_za32_vg1x2 (w8 + 2, z4, z0))
+
+/*
+** dot_w11p4_z4_z0:
+** udot za\.s\[w11, 4, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w11p4_z4_z0, svuint16x2_t,
+ svdot_za32_u16_vg1x2 (w11 + 4, z4, z0),
+ svdot_za32_vg1x2 (w11 + 4, z4, z0))
+
+/*
+** dot_w8p7_z4_z0:
+** udot za\.s\[w8, 7, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p7_z4_z0, svuint16x2_t,
+ svdot_za32_u16_vg1x2 (w8 + 7, z4, z0),
+ svdot_za32_vg1x2 (w8 + 7, z4, z0))
+
+/*
+** dot_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** udot za\.s\[\1, 0, vgx2\], {z4\.h - z5\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p8_z4_z4, svuint16x2_t,
+ svdot_za32_u16_vg1x2 (w8 + 8, z4, z4),
+ svdot_za32_vg1x2 (w8 + 8, z4, z4))
+
+/*
+** dot_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** udot za\.s\[\1, 0, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8m1_z4_z0, svuint16x2_t,
+ svdot_za32_u16_vg1x2 (w8 - 1, z4, z0),
+ svdot_za32_vg1x2 (w8 - 1, z4, z0))
+
+/*
+** dot_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** udot za\.s\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_0_z1_z0, svuint16x2_t, svuint16_t,
+ svdot_single_za32_u16_vg1x2 (0, z1, z0),
+ svdot_za32_vg1x2 (0, z1, z0))
+
+/*
+** dot_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** udot za\.s\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0_z1_z0, svuint16x2_t, svuint16_t,
+ svdot_single_za32_u16_vg1x2 (w0, z1, z0),
+ svdot_za32_vg1x2 (w0, z1, z0))
+
+/*
+** dot_single_w8_z1_z0:
+** udot za\.s\[w8, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z1_z0, svuint16x2_t, svuint16_t,
+ svdot_single_za32_u16_vg1x2 (w8, z1, z0),
+ svdot_za32_vg1x2 (w8, z1, z0))
+
+/*
+** dot_single_w8p1_z1_z0:
+** udot za\.s\[w8, 1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p1_z1_z0, svuint16x2_t, svuint16_t,
+ svdot_single_za32_u16_vg1x2 (w8 + 1, z1, z0),
+ svdot_za32_vg1x2 (w8 + 1, z1, z0))
+
+/*
+** dot_single_w8p2_z20_z0:
+** udot za\.s\[w8, 2, vgx2\], {z20\.h - z21\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p2_z20_z0, svuint16x2_t, svuint16_t,
+ svdot_single_za32_u16_vg1x2 (w8 + 2, z20, z0),
+ svdot_za32_vg1x2 (w8 + 2, z20, z0))
+
+/*
+** dot_single_w11p4_z27_z0:
+** udot za\.s\[w11, 4, vgx2\], {z27\.h - z28\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w11p4_z27_z0, svuint16x2_t, svuint16_t,
+ svdot_single_za32_u16_vg1x2 (w11 + 4, z27, z0),
+ svdot_za32_vg1x2 (w11 + 4, z27, z0))
+
+/*
+** dot_single_w8p7_z1_z0:
+** udot za\.s\[w8, 7, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p7_z1_z0, svuint16x2_t, svuint16_t,
+ svdot_single_za32_u16_vg1x2 (w8 + 7, z1, z0),
+ svdot_za32_vg1x2 (w8 + 7, z1, z0))
+
+/*
+** dot_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** udot za\.s\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p8_z1_z0, svuint16x2_t, svuint16_t,
+ svdot_single_za32_u16_vg1x2 (w8 + 8, z1, z0),
+ svdot_za32_vg1x2 (w8 + 8, z1, z0))
+
+/*
+** dot_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** udot za\.s\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0m1_z1_z0, svuint16x2_t, svuint16_t,
+ svdot_single_za32_u16_vg1x2 (w0 - 1, z1, z0),
+ svdot_za32_vg1x2 (w0 - 1, z1, z0))
+
+/*
+** dot_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** udot za\.s\[w8, 0, vgx2\], {z0\.h - z1\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (dot_single_w8_z0_z15, svuint16x2_t, svuint16_t,
+ svdot_single_za32_u16_vg1x2 (w8, z0, z15),
+ svdot_za32_vg1x2 (w8, z0, z15))
+
+/*
+** dot_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** udot za\.s\[w8, 0, vgx2\], {z20\.h - z21\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z20_z16, svuint16x2_t, svuint16_t,
+ svdot_single_za32_u16_vg1x2 (w8, z20, z16),
+ svdot_za32_vg1x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_u16_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_u16_vg1x4.c
new file mode 100644
index 0000000..276a7d3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_u16_vg1x4.c
@@ -0,0 +1,254 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** udot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_0_z0_z0, svuint16x4_t,
+ svdot_za32_u16_vg1x4 (0, z0, z0),
+ svdot_za32_vg1x4 (0, z0, z0))
+
+/*
+** dot_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** udot za\.s\[\1, 0, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w0_z0_z0, svuint16x4_t,
+ svdot_za32_u16_vg1x4 (w0, z0, z0),
+ svdot_za32_vg1x4 (w0, z0, z0))
+
+/*
+** dot_w8_z0_z4:
+** udot za\.s\[w8, 0, vgx4\], {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z4, svuint16x4_t,
+ svdot_za32_u16_vg1x4 (w8, z0, z4),
+ svdot_za32_vg1x4 (w8, z0, z4))
+
+/* Leave it to the assembler to check correctness for misaligned registers.  */
+
+/*
+** dot_w8_z0_z18:
+** ...
+** udot za\.s\[w8, 0, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z18, svuint16x4_t,
+ svdot_za32_u16_vg1x4 (w8, z0, z18),
+ svdot_za32_vg1x4 (w8, z0, z18))
+
+/*
+** dot_w8_z18_z0:
+** ...
+** udot za\.s\[w8, 0, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z18_z0, svuint16x4_t,
+ svdot_za32_u16_vg1x4 (w8, z18, z0),
+ svdot_za32_vg1x4 (w8, z18, z0))
+
+/*
+** dot_w8_z0_z23:
+** ...
+** udot za\.s\[w8, 0, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z23, svuint16x4_t,
+ svdot_za32_u16_vg1x4 (w8, z0, z23),
+ svdot_za32_vg1x4 (w8, z0, z23))
+
+/*
+** dot_w8_z23_z0:
+** ...
+** udot za\.s\[w8, 0, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z23_z0, svuint16x4_t,
+ svdot_za32_u16_vg1x4 (w8, z23, z0),
+ svdot_za32_vg1x4 (w8, z23, z0))
+
+/*
+** dot_w8_z4_z28:
+** udot za\.s\[w8, 0, vgx4\], {z4\.h - z7\.h}, {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z4_z28, svuint16x4_t,
+ svdot_za32_u16_vg1x4 (w8, z4, z28),
+ svdot_za32_vg1x4 (w8, z4, z28))
+
+/*
+** dot_w8_z28_z0:
+** udot za\.s\[w8, 0, vgx4\], {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z28_z0, svuint16x4_t,
+ svdot_za32_u16_vg1x4 (w8, z28, z0),
+ svdot_za32_vg1x4 (w8, z28, z0))
+
+/*
+** dot_w8p1_z4_z0:
+** udot za\.s\[w8, 1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p1_z4_z0, svuint16x4_t,
+ svdot_za32_u16_vg1x4 (w8 + 1, z4, z0),
+ svdot_za32_vg1x4 (w8 + 1, z4, z0))
+
+/*
+** dot_w8p2_z4_z0:
+** udot za\.s\[w8, 2, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p2_z4_z0, svuint16x4_t,
+ svdot_za32_u16_vg1x4 (w8 + 2, z4, z0),
+ svdot_za32_vg1x4 (w8 + 2, z4, z0))
+
+/*
+** dot_w11p4_z4_z0:
+** udot za\.s\[w11, 4, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w11p4_z4_z0, svuint16x4_t,
+ svdot_za32_u16_vg1x4 (w11 + 4, z4, z0),
+ svdot_za32_vg1x4 (w11 + 4, z4, z0))
+
+/*
+** dot_w8p7_z4_z0:
+** udot za\.s\[w8, 7, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p7_z4_z0, svuint16x4_t,
+ svdot_za32_u16_vg1x4 (w8 + 7, z4, z0),
+ svdot_za32_vg1x4 (w8 + 7, z4, z0))
+
+/*
+** dot_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** udot za\.s\[\1, 0, vgx4\], {z4\.h - z7\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p8_z4_z4, svuint16x4_t,
+ svdot_za32_u16_vg1x4 (w8 + 8, z4, z4),
+ svdot_za32_vg1x4 (w8 + 8, z4, z4))
+
+/*
+** dot_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** udot za\.s\[\1, 0, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8m1_z4_z0, svuint16x4_t,
+ svdot_za32_u16_vg1x4 (w8 - 1, z4, z0),
+ svdot_za32_vg1x4 (w8 - 1, z4, z0))
+
+/*
+** dot_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** udot za\.s\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_0_z1_z0, svuint16x4_t, svuint16_t,
+ svdot_single_za32_u16_vg1x4 (0, z1, z0),
+ svdot_za32_vg1x4 (0, z1, z0))
+
+/*
+** dot_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** udot za\.s\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0_z1_z0, svuint16x4_t, svuint16_t,
+ svdot_single_za32_u16_vg1x4 (w0, z1, z0),
+ svdot_za32_vg1x4 (w0, z1, z0))
+
+/*
+** dot_single_w8_z1_z0:
+** udot za\.s\[w8, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z1_z0, svuint16x4_t, svuint16_t,
+ svdot_single_za32_u16_vg1x4 (w8, z1, z0),
+ svdot_za32_vg1x4 (w8, z1, z0))
+
+/*
+** dot_single_w8p1_z1_z0:
+** udot za\.s\[w8, 1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p1_z1_z0, svuint16x4_t, svuint16_t,
+ svdot_single_za32_u16_vg1x4 (w8 + 1, z1, z0),
+ svdot_za32_vg1x4 (w8 + 1, z1, z0))
+
+/*
+** dot_single_w8p4_z20_z0:
+** udot za\.s\[w8, 4, vgx4\], {z20\.h - z23\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p4_z20_z0, svuint16x4_t, svuint16_t,
+ svdot_single_za32_u16_vg1x4 (w8 + 4, z20, z0),
+ svdot_za32_vg1x4 (w8 + 4, z20, z0))
+
+/*
+** dot_single_w8p6_z27_z0:
+** udot za\.s\[w8, 6, vgx4\], {z27\.h - z30\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p6_z27_z0, svuint16x4_t, svuint16_t,
+ svdot_single_za32_u16_vg1x4 (w8 + 6, z27, z0),
+ svdot_za32_vg1x4 (w8 + 6, z27, z0))
+
+/*
+** dot_single_w8p7_z1_z0:
+** udot za\.s\[w8, 7, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p7_z1_z0, svuint16x4_t, svuint16_t,
+ svdot_single_za32_u16_vg1x4 (w8 + 7, z1, z0),
+ svdot_za32_vg1x4 (w8 + 7, z1, z0))
+
+/*
+** dot_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** udot za\.s\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p8_z1_z0, svuint16x4_t, svuint16_t,
+ svdot_single_za32_u16_vg1x4 (w8 + 8, z1, z0),
+ svdot_za32_vg1x4 (w8 + 8, z1, z0))
+
+/*
+** dot_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** udot za\.s\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0m1_z1_z0, svuint16x4_t, svuint16_t,
+ svdot_single_za32_u16_vg1x4 (w0 - 1, z1, z0),
+ svdot_za32_vg1x4 (w0 - 1, z1, z0))
+
+/*
+** dot_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** udot za\.s\[w8, 0, vgx4\], {z0\.h - z3\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (dot_single_w8_z0_z15, svuint16x4_t, svuint16_t,
+ svdot_single_za32_u16_vg1x4 (w8, z0, z15),
+ svdot_za32_vg1x4 (w8, z0, z15))
+
+/*
+** dot_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** udot za\.s\[w8, 0, vgx4\], {z20\.h - z23\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z20_z16, svuint16x4_t, svuint16_t,
+ svdot_single_za32_u16_vg1x4 (w8, z20, z16),
+ svdot_za32_vg1x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_u8_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_u8_vg1x2.c
new file mode 100644
index 0000000..7f01cc1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_u8_vg1x2.c
@@ -0,0 +1,243 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** udot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_0_z0_z0, svuint8x2_t,
+ svdot_za32_u8_vg1x2 (0, z0, z0),
+ svdot_za32_vg1x2 (0, z0, z0))
+
+/*
+** dot_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** udot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w0_z0_z0, svuint8x2_t,
+ svdot_za32_u8_vg1x2 (w0, z0, z0),
+ svdot_za32_vg1x2 (w0, z0, z0))
+
+/*
+** dot_w8_z0_z4:
+** udot za\.s\[w8, 0, vgx2\], {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z4, svuint8x2_t,
+ svdot_za32_u8_vg1x2 (w8, z0, z4),
+ svdot_za32_vg1x2 (w8, z0, z4))
+
+/*
+** dot_w8_z4_z18:
+** udot za\.s\[w8, 0, vgx2\], {z4\.b - z5\.b}, {z18\.b - z19\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z4_z18, svuint8x2_t,
+ svdot_za32_u8_vg1x2 (w8, z4, z18),
+ svdot_za32_vg1x2 (w8, z4, z18))
+
+/* Leave it to the assembler to check correctness for misaligned registers.  */
+
+/*
+** dot_w8_z0_z23:
+** ...
+** udot za\.s\[w8, 0, vgx2\], {z0\.b - z1\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z23, svuint8x2_t,
+ svdot_za32_u8_vg1x2 (w8, z0, z23),
+ svdot_za32_vg1x2 (w8, z0, z23))
+
+/*
+** dot_w8_z23_z0:
+** ...
+** udot za\.s\[w8, 0, vgx2\], [^\n]+, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z23_z0, svuint8x2_t,
+ svdot_za32_u8_vg1x2 (w8, z23, z0),
+ svdot_za32_vg1x2 (w8, z23, z0))
+
+/*
+** dot_w8_z18_z28:
+** udot za\.s\[w8, 0, vgx2\], {z18\.b - z19\.b}, {z28\.b - z29\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z18_z28, svuint8x2_t,
+ svdot_za32_u8_vg1x2 (w8, z18, z28),
+ svdot_za32_vg1x2 (w8, z18, z28))
+
+/*
+** dot_w8_z28_z4:
+** udot za\.s\[w8, 0, vgx2\], {z28\.b - z29\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z28_z4, svuint8x2_t,
+ svdot_za32_u8_vg1x2 (w8, z28, z4),
+ svdot_za32_vg1x2 (w8, z28, z4))
+
+/*
+** dot_w8p1_z4_z0:
+** udot za\.s\[w8, 1, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p1_z4_z0, svuint8x2_t,
+ svdot_za32_u8_vg1x2 (w8 + 1, z4, z0),
+ svdot_za32_vg1x2 (w8 + 1, z4, z0))
+
+/*
+** dot_w8p2_z4_z0:
+** udot za\.s\[w8, 2, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p2_z4_z0, svuint8x2_t,
+ svdot_za32_u8_vg1x2 (w8 + 2, z4, z0),
+ svdot_za32_vg1x2 (w8 + 2, z4, z0))
+
+/*
+** dot_w11p4_z4_z0:
+** udot za\.s\[w11, 4, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w11p4_z4_z0, svuint8x2_t,
+ svdot_za32_u8_vg1x2 (w11 + 4, z4, z0),
+ svdot_za32_vg1x2 (w11 + 4, z4, z0))
+
+/*
+** dot_w8p7_z4_z0:
+** udot za\.s\[w8, 7, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p7_z4_z0, svuint8x2_t,
+ svdot_za32_u8_vg1x2 (w8 + 7, z4, z0),
+ svdot_za32_vg1x2 (w8 + 7, z4, z0))
+
+/*
+** dot_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** udot za\.s\[\1, 0, vgx2\], {z4\.b - z5\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p8_z4_z4, svuint8x2_t,
+ svdot_za32_u8_vg1x2 (w8 + 8, z4, z4),
+ svdot_za32_vg1x2 (w8 + 8, z4, z4))
+
+/*
+** dot_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** udot za\.s\[\1, 0, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8m1_z4_z0, svuint8x2_t,
+ svdot_za32_u8_vg1x2 (w8 - 1, z4, z0),
+ svdot_za32_vg1x2 (w8 - 1, z4, z0))
+
+/*
+** dot_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** udot za\.s\[\1, 0, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_0_z1_z0, svuint8x2_t, svuint8_t,
+ svdot_single_za32_u8_vg1x2 (0, z1, z0),
+ svdot_za32_vg1x2 (0, z1, z0))
+
+/*
+** dot_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** udot za\.s\[\1, 0, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0_z1_z0, svuint8x2_t, svuint8_t,
+ svdot_single_za32_u8_vg1x2 (w0, z1, z0),
+ svdot_za32_vg1x2 (w0, z1, z0))
+
+/*
+** dot_single_w8_z1_z0:
+** udot za\.s\[w8, 0, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z1_z0, svuint8x2_t, svuint8_t,
+ svdot_single_za32_u8_vg1x2 (w8, z1, z0),
+ svdot_za32_vg1x2 (w8, z1, z0))
+
+/*
+** dot_single_w8p1_z1_z0:
+** udot za\.s\[w8, 1, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p1_z1_z0, svuint8x2_t, svuint8_t,
+ svdot_single_za32_u8_vg1x2 (w8 + 1, z1, z0),
+ svdot_za32_vg1x2 (w8 + 1, z1, z0))
+
+/*
+** dot_single_w8p2_z20_z0:
+** udot za\.s\[w8, 2, vgx2\], {z20\.b - z21\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p2_z20_z0, svuint8x2_t, svuint8_t,
+ svdot_single_za32_u8_vg1x2 (w8 + 2, z20, z0),
+ svdot_za32_vg1x2 (w8 + 2, z20, z0))
+
+/*
+** dot_single_w11p4_z27_z0:
+** udot za\.s\[w11, 4, vgx2\], {z27\.b - z28\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w11p4_z27_z0, svuint8x2_t, svuint8_t,
+ svdot_single_za32_u8_vg1x2 (w11 + 4, z27, z0),
+ svdot_za32_vg1x2 (w11 + 4, z27, z0))
+
+/*
+** dot_single_w8p7_z1_z0:
+** udot za\.s\[w8, 7, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p7_z1_z0, svuint8x2_t, svuint8_t,
+ svdot_single_za32_u8_vg1x2 (w8 + 7, z1, z0),
+ svdot_za32_vg1x2 (w8 + 7, z1, z0))
+
+/*
+** dot_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** udot za\.s\[\1, 0, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p8_z1_z0, svuint8x2_t, svuint8_t,
+ svdot_single_za32_u8_vg1x2 (w8 + 8, z1, z0),
+ svdot_za32_vg1x2 (w8 + 8, z1, z0))
+
+/*
+** dot_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** udot za\.s\[\1, 0, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0m1_z1_z0, svuint8x2_t, svuint8_t,
+ svdot_single_za32_u8_vg1x2 (w0 - 1, z1, z0),
+ svdot_za32_vg1x2 (w0 - 1, z1, z0))
+
+/*
+** dot_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** udot za\.s\[w8, 0, vgx2\], {z0\.b - z1\.b}, z15\.b
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (dot_single_w8_z0_z15, svuint8x2_t, svuint8_t,
+ svdot_single_za32_u8_vg1x2 (w8, z0, z15),
+ svdot_za32_vg1x2 (w8, z0, z15))
+
+/*
+** dot_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** udot za\.s\[w8, 0, vgx2\], {z20\.b - z21\.b}, \1\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z20_z16, svuint8x2_t, svuint8_t,
+ svdot_single_za32_u8_vg1x2 (w8, z20, z16),
+ svdot_za32_vg1x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_u8_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_u8_vg1x4.c
new file mode 100644
index 0000000..6e56db4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za32_u8_vg1x4.c
@@ -0,0 +1,254 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** udot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_0_z0_z0, svuint8x4_t,
+ svdot_za32_u8_vg1x4 (0, z0, z0),
+ svdot_za32_vg1x4 (0, z0, z0))
+
+/*
+** dot_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** udot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w0_z0_z0, svuint8x4_t,
+ svdot_za32_u8_vg1x4 (w0, z0, z0),
+ svdot_za32_vg1x4 (w0, z0, z0))
+
+/*
+** dot_w8_z0_z4:
+** udot za\.s\[w8, 0, vgx4\], {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z4, svuint8x4_t,
+ svdot_za32_u8_vg1x4 (w8, z0, z4),
+ svdot_za32_vg1x4 (w8, z0, z4))
+
+/* Leave it to the assembler to check correctness for misaligned registers.  */
+
+/*
+** dot_w8_z0_z18:
+** ...
+** udot za\.s\[w8, 0, vgx4\], {z0\.b - z3\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z18, svuint8x4_t,
+ svdot_za32_u8_vg1x4 (w8, z0, z18),
+ svdot_za32_vg1x4 (w8, z0, z18))
+
+/*
+** dot_w8_z18_z0:
+** ...
+** udot za\.s\[w8, 0, vgx4\], [^\n]+, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z18_z0, svuint8x4_t,
+ svdot_za32_u8_vg1x4 (w8, z18, z0),
+ svdot_za32_vg1x4 (w8, z18, z0))
+
+/*
+** dot_w8_z0_z23:
+** ...
+** udot za\.s\[w8, 0, vgx4\], {z0\.b - z3\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z23, svuint8x4_t,
+ svdot_za32_u8_vg1x4 (w8, z0, z23),
+ svdot_za32_vg1x4 (w8, z0, z23))
+
+/*
+** dot_w8_z23_z0:
+** ...
+** udot za\.s\[w8, 0, vgx4\], [^\n]+, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z23_z0, svuint8x4_t,
+ svdot_za32_u8_vg1x4 (w8, z23, z0),
+ svdot_za32_vg1x4 (w8, z23, z0))
+
+/*
+** dot_w8_z4_z28:
+** udot za\.s\[w8, 0, vgx4\], {z4\.b - z7\.b}, {z28\.b - z31\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z4_z28, svuint8x4_t,
+ svdot_za32_u8_vg1x4 (w8, z4, z28),
+ svdot_za32_vg1x4 (w8, z4, z28))
+
+/*
+** dot_w8_z28_z0:
+** udot za\.s\[w8, 0, vgx4\], {z28\.b - z31\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z28_z0, svuint8x4_t,
+ svdot_za32_u8_vg1x4 (w8, z28, z0),
+ svdot_za32_vg1x4 (w8, z28, z0))
+
+/*
+** dot_w8p1_z4_z0:
+** udot za\.s\[w8, 1, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p1_z4_z0, svuint8x4_t,
+ svdot_za32_u8_vg1x4 (w8 + 1, z4, z0),
+ svdot_za32_vg1x4 (w8 + 1, z4, z0))
+
+/*
+** dot_w8p2_z4_z0:
+** udot za\.s\[w8, 2, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p2_z4_z0, svuint8x4_t,
+ svdot_za32_u8_vg1x4 (w8 + 2, z4, z0),
+ svdot_za32_vg1x4 (w8 + 2, z4, z0))
+
+/*
+** dot_w11p4_z4_z0:
+** udot za\.s\[w11, 4, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w11p4_z4_z0, svuint8x4_t,
+ svdot_za32_u8_vg1x4 (w11 + 4, z4, z0),
+ svdot_za32_vg1x4 (w11 + 4, z4, z0))
+
+/*
+** dot_w8p7_z4_z0:
+** udot za\.s\[w8, 7, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p7_z4_z0, svuint8x4_t,
+ svdot_za32_u8_vg1x4 (w8 + 7, z4, z0),
+ svdot_za32_vg1x4 (w8 + 7, z4, z0))
+
+/*
+** dot_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** udot za\.s\[\1, 0, vgx4\], {z4\.b - z7\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p8_z4_z4, svuint8x4_t,
+ svdot_za32_u8_vg1x4 (w8 + 8, z4, z4),
+ svdot_za32_vg1x4 (w8 + 8, z4, z4))
+
+/*
+** dot_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** udot za\.s\[\1, 0, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8m1_z4_z0, svuint8x4_t,
+ svdot_za32_u8_vg1x4 (w8 - 1, z4, z0),
+ svdot_za32_vg1x4 (w8 - 1, z4, z0))
+
+/*
+** dot_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** udot za\.s\[\1, 0, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_0_z1_z0, svuint8x4_t, svuint8_t,
+ svdot_single_za32_u8_vg1x4 (0, z1, z0),
+ svdot_za32_vg1x4 (0, z1, z0))
+
+/*
+** dot_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** udot za\.s\[\1, 0, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0_z1_z0, svuint8x4_t, svuint8_t,
+ svdot_single_za32_u8_vg1x4 (w0, z1, z0),
+ svdot_za32_vg1x4 (w0, z1, z0))
+
+/*
+** dot_single_w8_z1_z0:
+** udot za\.s\[w8, 0, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z1_z0, svuint8x4_t, svuint8_t,
+ svdot_single_za32_u8_vg1x4 (w8, z1, z0),
+ svdot_za32_vg1x4 (w8, z1, z0))
+
+/*
+** dot_single_w8p1_z1_z0:
+** udot za\.s\[w8, 1, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p1_z1_z0, svuint8x4_t, svuint8_t,
+ svdot_single_za32_u8_vg1x4 (w8 + 1, z1, z0),
+ svdot_za32_vg1x4 (w8 + 1, z1, z0))
+
+/*
+** dot_single_w8p4_z20_z0:
+** udot za\.s\[w8, 4, vgx4\], {z20\.b - z23\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p4_z20_z0, svuint8x4_t, svuint8_t,
+ svdot_single_za32_u8_vg1x4 (w8 + 4, z20, z0),
+ svdot_za32_vg1x4 (w8 + 4, z20, z0))
+
+/*
+** dot_single_w8p6_z27_z0:
+** udot za\.s\[w8, 6, vgx4\], {z27\.b - z30\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p6_z27_z0, svuint8x4_t, svuint8_t,
+ svdot_single_za32_u8_vg1x4 (w8 + 6, z27, z0),
+ svdot_za32_vg1x4 (w8 + 6, z27, z0))
+
+/*
+** dot_single_w8p7_z1_z0:
+** udot za\.s\[w8, 7, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p7_z1_z0, svuint8x4_t, svuint8_t,
+ svdot_single_za32_u8_vg1x4 (w8 + 7, z1, z0),
+ svdot_za32_vg1x4 (w8 + 7, z1, z0))
+
+/*
+** dot_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** udot za\.s\[\1, 0, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p8_z1_z0, svuint8x4_t, svuint8_t,
+ svdot_single_za32_u8_vg1x4 (w8 + 8, z1, z0),
+ svdot_za32_vg1x4 (w8 + 8, z1, z0))
+
+/*
+** dot_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** udot za\.s\[\1, 0, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0m1_z1_z0, svuint8x4_t, svuint8_t,
+ svdot_single_za32_u8_vg1x4 (w0 - 1, z1, z0),
+ svdot_za32_vg1x4 (w0 - 1, z1, z0))
+
+/*
+** dot_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** udot za\.s\[w8, 0, vgx4\], {z0\.b - z3\.b}, z15\.b
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (dot_single_w8_z0_z15, svuint8x4_t, svuint8_t,
+ svdot_single_za32_u8_vg1x4 (w8, z0, z15),
+ svdot_za32_vg1x4 (w8, z0, z15))
+
+/*
+** dot_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** udot za\.s\[w8, 0, vgx4\], {z20\.b - z23\.b}, \1\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z20_z16, svuint8x4_t, svuint8_t,
+ svdot_single_za32_u8_vg1x4 (w8, z20, z16),
+ svdot_za32_vg1x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za64_s16_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za64_s16_vg1x2.c
new file mode 100644
index 0000000..2bbf632
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za64_s16_vg1x2.c
@@ -0,0 +1,245 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** sdot za\.d\[\1, 0, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_0_z0_z0, svint16x2_t,
+ svdot_za64_s16_vg1x2 (0, z0, z0),
+ svdot_za64_vg1x2 (0, z0, z0))
+
+/*
+** dot_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** sdot za\.d\[\1, 0, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w0_z0_z0, svint16x2_t,
+ svdot_za64_s16_vg1x2 (w0, z0, z0),
+ svdot_za64_vg1x2 (w0, z0, z0))
+
+/*
+** dot_w8_z0_z4:
+** sdot za\.d\[w8, 0, vgx2\], {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z4, svint16x2_t,
+ svdot_za64_s16_vg1x2 (w8, z0, z4),
+ svdot_za64_vg1x2 (w8, z0, z4))
+
+/*
+** dot_w8_z4_z18:
+** sdot za\.d\[w8, 0, vgx2\], {z4\.h - z5\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z4_z18, svint16x2_t,
+ svdot_za64_s16_vg1x2 (w8, z4, z18),
+ svdot_za64_vg1x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** dot_w8_z0_z23:
+** ...
+** sdot za\.d\[w8, 0, vgx2\], {z0\.h - z1\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z23, svint16x2_t,
+ svdot_za64_s16_vg1x2 (w8, z0, z23),
+ svdot_za64_vg1x2 (w8, z0, z23))
+
+/*
+** dot_w8_z23_z0:
+** ...
+** sdot za\.d\[w8, 0, vgx2\], [^\n]+, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z23_z0, svint16x2_t,
+ svdot_za64_s16_vg1x2 (w8, z23, z0),
+ svdot_za64_vg1x2 (w8, z23, z0))
+
+/*
+** dot_w8_z18_z28:
+** sdot za\.d\[w8, 0, vgx2\], {z18\.h - z19\.h}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z18_z28, svint16x2_t,
+ svdot_za64_s16_vg1x2 (w8, z18, z28),
+ svdot_za64_vg1x2 (w8, z18, z28))
+
+/*
+** dot_w8_z28_z4:
+** sdot za\.d\[w8, 0, vgx2\], {z28\.h - z29\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z28_z4, svint16x2_t,
+ svdot_za64_s16_vg1x2 (w8, z28, z4),
+ svdot_za64_vg1x2 (w8, z28, z4))
+
+/*
+** dot_w8p1_z4_z0:
+** sdot za\.d\[w8, 1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p1_z4_z0, svint16x2_t,
+ svdot_za64_s16_vg1x2 (w8 + 1, z4, z0),
+ svdot_za64_vg1x2 (w8 + 1, z4, z0))
+
+/*
+** dot_w8p2_z4_z0:
+** sdot za\.d\[w8, 2, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p2_z4_z0, svint16x2_t,
+ svdot_za64_s16_vg1x2 (w8 + 2, z4, z0),
+ svdot_za64_vg1x2 (w8 + 2, z4, z0))
+
+/*
+** dot_w11p4_z4_z0:
+** sdot za\.d\[w11, 4, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w11p4_z4_z0, svint16x2_t,
+ svdot_za64_s16_vg1x2 (w11 + 4, z4, z0),
+ svdot_za64_vg1x2 (w11 + 4, z4, z0))
+
+/*
+** dot_w8p7_z4_z0:
+** sdot za\.d\[w8, 7, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p7_z4_z0, svint16x2_t,
+ svdot_za64_s16_vg1x2 (w8 + 7, z4, z0),
+ svdot_za64_vg1x2 (w8 + 7, z4, z0))
+
+/*
+** dot_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** sdot za\.d\[\1, 0, vgx2\], {z4\.h - z5\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p8_z4_z4, svint16x2_t,
+ svdot_za64_s16_vg1x2 (w8 + 8, z4, z4),
+ svdot_za64_vg1x2 (w8 + 8, z4, z4))
+
+/*
+** dot_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** sdot za\.d\[\1, 0, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8m1_z4_z0, svint16x2_t,
+ svdot_za64_s16_vg1x2 (w8 - 1, z4, z0),
+ svdot_za64_vg1x2 (w8 - 1, z4, z0))
+
+/*
+** dot_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** sdot za\.d\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_0_z1_z0, svint16x2_t, svint16_t,
+ svdot_single_za64_s16_vg1x2 (0, z1, z0),
+ svdot_za64_vg1x2 (0, z1, z0))
+
+/*
+** dot_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** sdot za\.d\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0_z1_z0, svint16x2_t, svint16_t,
+ svdot_single_za64_s16_vg1x2 (w0, z1, z0),
+ svdot_za64_vg1x2 (w0, z1, z0))
+
+/*
+** dot_single_w8_z1_z0:
+** sdot za\.d\[w8, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z1_z0, svint16x2_t, svint16_t,
+ svdot_single_za64_s16_vg1x2 (w8, z1, z0),
+ svdot_za64_vg1x2 (w8, z1, z0))
+
+/*
+** dot_single_w8p1_z1_z0:
+** sdot za\.d\[w8, 1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p1_z1_z0, svint16x2_t, svint16_t,
+ svdot_single_za64_s16_vg1x2 (w8 + 1, z1, z0),
+ svdot_za64_vg1x2 (w8 + 1, z1, z0))
+
+/*
+** dot_single_w8p2_z20_z0:
+** sdot za\.d\[w8, 2, vgx2\], {z20\.h - z21\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p2_z20_z0, svint16x2_t, svint16_t,
+ svdot_single_za64_s16_vg1x2 (w8 + 2, z20, z0),
+ svdot_za64_vg1x2 (w8 + 2, z20, z0))
+
+/*
+** dot_single_w11p4_z27_z0:
+** sdot za\.d\[w11, 4, vgx2\], {z27\.h - z28\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w11p4_z27_z0, svint16x2_t, svint16_t,
+ svdot_single_za64_s16_vg1x2 (w11 + 4, z27, z0),
+ svdot_za64_vg1x2 (w11 + 4, z27, z0))
+
+/*
+** dot_single_w8p7_z1_z0:
+** sdot za\.d\[w8, 7, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p7_z1_z0, svint16x2_t, svint16_t,
+ svdot_single_za64_s16_vg1x2 (w8 + 7, z1, z0),
+ svdot_za64_vg1x2 (w8 + 7, z1, z0))
+
+/*
+** dot_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sdot za\.d\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p8_z1_z0, svint16x2_t, svint16_t,
+ svdot_single_za64_s16_vg1x2 (w8 + 8, z1, z0),
+ svdot_za64_vg1x2 (w8 + 8, z1, z0))
+
+/*
+** dot_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** sdot za\.d\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0m1_z1_z0, svint16x2_t, svint16_t,
+ svdot_single_za64_s16_vg1x2 (w0 - 1, z1, z0),
+ svdot_za64_vg1x2 (w0 - 1, z1, z0))
+
+/*
+** dot_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** sdot za\.d\[w8, 0, vgx2\], {z0\.h - z1\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (dot_single_w8_z0_z15, svint16x2_t, svint16_t,
+ svdot_single_za64_s16_vg1x2 (w8, z0, z15),
+ svdot_za64_vg1x2 (w8, z0, z15))
+
+/*
+** dot_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** sdot za\.d\[w8, 0, vgx2\], {z20\.h - z21\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z20_z16, svint16x2_t, svint16_t,
+ svdot_single_za64_s16_vg1x2 (w8, z20, z16),
+ svdot_za64_vg1x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za64_s16_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za64_s16_vg1x4.c
new file mode 100644
index 0000000..5c962d7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za64_s16_vg1x4.c
@@ -0,0 +1,256 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** sdot za\.d\[\1, 0, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_0_z0_z0, svint16x4_t,
+ svdot_za64_s16_vg1x4 (0, z0, z0),
+ svdot_za64_vg1x4 (0, z0, z0))
+
+/*
+** dot_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** sdot za\.d\[\1, 0, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w0_z0_z0, svint16x4_t,
+ svdot_za64_s16_vg1x4 (w0, z0, z0),
+ svdot_za64_vg1x4 (w0, z0, z0))
+
+/*
+** dot_w8_z0_z4:
+** sdot za\.d\[w8, 0, vgx4\], {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z4, svint16x4_t,
+ svdot_za64_s16_vg1x4 (w8, z0, z4),
+ svdot_za64_vg1x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** dot_w8_z0_z18:
+** ...
+** sdot za\.d\[w8, 0, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z18, svint16x4_t,
+ svdot_za64_s16_vg1x4 (w8, z0, z18),
+ svdot_za64_vg1x4 (w8, z0, z18))
+
+/*
+** dot_w8_z18_z0:
+** ...
+** sdot za\.d\[w8, 0, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z18_z0, svint16x4_t,
+ svdot_za64_s16_vg1x4 (w8, z18, z0),
+ svdot_za64_vg1x4 (w8, z18, z0))
+
+/*
+** dot_w8_z0_z23:
+** ...
+** sdot za\.d\[w8, 0, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z23, svint16x4_t,
+ svdot_za64_s16_vg1x4 (w8, z0, z23),
+ svdot_za64_vg1x4 (w8, z0, z23))
+
+/*
+** dot_w8_z23_z0:
+** ...
+** sdot za\.d\[w8, 0, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z23_z0, svint16x4_t,
+ svdot_za64_s16_vg1x4 (w8, z23, z0),
+ svdot_za64_vg1x4 (w8, z23, z0))
+
+/*
+** dot_w8_z4_z28:
+** sdot za\.d\[w8, 0, vgx4\], {z4\.h - z7\.h}, {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z4_z28, svint16x4_t,
+ svdot_za64_s16_vg1x4 (w8, z4, z28),
+ svdot_za64_vg1x4 (w8, z4, z28))
+
+/*
+** dot_w8_z28_z0:
+** sdot za\.d\[w8, 0, vgx4\], {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z28_z0, svint16x4_t,
+ svdot_za64_s16_vg1x4 (w8, z28, z0),
+ svdot_za64_vg1x4 (w8, z28, z0))
+
+/*
+** dot_w8p1_z4_z0:
+** sdot za\.d\[w8, 1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p1_z4_z0, svint16x4_t,
+ svdot_za64_s16_vg1x4 (w8 + 1, z4, z0),
+ svdot_za64_vg1x4 (w8 + 1, z4, z0))
+
+/*
+** dot_w8p2_z4_z0:
+** sdot za\.d\[w8, 2, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p2_z4_z0, svint16x4_t,
+ svdot_za64_s16_vg1x4 (w8 + 2, z4, z0),
+ svdot_za64_vg1x4 (w8 + 2, z4, z0))
+
+/*
+** dot_w11p4_z4_z0:
+** sdot za\.d\[w11, 4, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w11p4_z4_z0, svint16x4_t,
+ svdot_za64_s16_vg1x4 (w11 + 4, z4, z0),
+ svdot_za64_vg1x4 (w11 + 4, z4, z0))
+
+/*
+** dot_w8p7_z4_z0:
+** sdot za\.d\[w8, 7, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p7_z4_z0, svint16x4_t,
+ svdot_za64_s16_vg1x4 (w8 + 7, z4, z0),
+ svdot_za64_vg1x4 (w8 + 7, z4, z0))
+
+/*
+** dot_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** sdot za\.d\[\1, 0, vgx4\], {z4\.h - z7\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p8_z4_z4, svint16x4_t,
+ svdot_za64_s16_vg1x4 (w8 + 8, z4, z4),
+ svdot_za64_vg1x4 (w8 + 8, z4, z4))
+
+/*
+** dot_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** sdot za\.d\[\1, 0, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8m1_z4_z0, svint16x4_t,
+ svdot_za64_s16_vg1x4 (w8 - 1, z4, z0),
+ svdot_za64_vg1x4 (w8 - 1, z4, z0))
+
+/*
+** dot_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** sdot za\.d\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_0_z1_z0, svint16x4_t, svint16_t,
+ svdot_single_za64_s16_vg1x4 (0, z1, z0),
+ svdot_za64_vg1x4 (0, z1, z0))
+
+/*
+** dot_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** sdot za\.d\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0_z1_z0, svint16x4_t, svint16_t,
+ svdot_single_za64_s16_vg1x4 (w0, z1, z0),
+ svdot_za64_vg1x4 (w0, z1, z0))
+
+/*
+** dot_single_w8_z1_z0:
+** sdot za\.d\[w8, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z1_z0, svint16x4_t, svint16_t,
+ svdot_single_za64_s16_vg1x4 (w8, z1, z0),
+ svdot_za64_vg1x4 (w8, z1, z0))
+
+/*
+** dot_single_w8p1_z1_z0:
+** sdot za\.d\[w8, 1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p1_z1_z0, svint16x4_t, svint16_t,
+ svdot_single_za64_s16_vg1x4 (w8 + 1, z1, z0),
+ svdot_za64_vg1x4 (w8 + 1, z1, z0))
+
+/*
+** dot_single_w8p4_z20_z0:
+** sdot za\.d\[w8, 4, vgx4\], {z20\.h - z23\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p4_z20_z0, svint16x4_t, svint16_t,
+ svdot_single_za64_s16_vg1x4 (w8 + 4, z20, z0),
+ svdot_za64_vg1x4 (w8 + 4, z20, z0))
+
+/*
+** dot_single_w8p6_z27_z0:
+** sdot za\.d\[w8, 6, vgx4\], {z27\.h - z30\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p6_z27_z0, svint16x4_t, svint16_t,
+ svdot_single_za64_s16_vg1x4 (w8 + 6, z27, z0),
+ svdot_za64_vg1x4 (w8 + 6, z27, z0))
+
+/*
+** dot_single_w8p7_z1_z0:
+** sdot za\.d\[w8, 7, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p7_z1_z0, svint16x4_t, svint16_t,
+ svdot_single_za64_s16_vg1x4 (w8 + 7, z1, z0),
+ svdot_za64_vg1x4 (w8 + 7, z1, z0))
+
+/*
+** dot_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sdot za\.d\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p8_z1_z0, svint16x4_t, svint16_t,
+ svdot_single_za64_s16_vg1x4 (w8 + 8, z1, z0),
+ svdot_za64_vg1x4 (w8 + 8, z1, z0))
+
+/*
+** dot_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** sdot za\.d\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0m1_z1_z0, svint16x4_t, svint16_t,
+ svdot_single_za64_s16_vg1x4 (w0 - 1, z1, z0),
+ svdot_za64_vg1x4 (w0 - 1, z1, z0))
+
+/*
+** dot_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** sdot za\.d\[w8, 0, vgx4\], {z0\.h - z3\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (dot_single_w8_z0_z15, svint16x4_t, svint16_t,
+ svdot_single_za64_s16_vg1x4 (w8, z0, z15),
+ svdot_za64_vg1x4 (w8, z0, z15))
+
+/*
+** dot_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** sdot za\.d\[w8, 0, vgx4\], {z20\.h - z23\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z20_z16, svint16x4_t, svint16_t,
+ svdot_single_za64_s16_vg1x4 (w8, z20, z16),
+ svdot_za64_vg1x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za64_u16_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za64_u16_vg1x2.c
new file mode 100644
index 0000000..503104c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za64_u16_vg1x2.c
@@ -0,0 +1,245 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** udot za\.d\[\1, 0, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_0_z0_z0, svuint16x2_t,
+ svdot_za64_u16_vg1x2 (0, z0, z0),
+ svdot_za64_vg1x2 (0, z0, z0))
+
+/*
+** dot_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** udot za\.d\[\1, 0, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w0_z0_z0, svuint16x2_t,
+ svdot_za64_u16_vg1x2 (w0, z0, z0),
+ svdot_za64_vg1x2 (w0, z0, z0))
+
+/*
+** dot_w8_z0_z4:
+** udot za\.d\[w8, 0, vgx2\], {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z4, svuint16x2_t,
+ svdot_za64_u16_vg1x2 (w8, z0, z4),
+ svdot_za64_vg1x2 (w8, z0, z4))
+
+/*
+** dot_w8_z4_z18:
+** udot za\.d\[w8, 0, vgx2\], {z4\.h - z5\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z4_z18, svuint16x2_t,
+ svdot_za64_u16_vg1x2 (w8, z4, z18),
+ svdot_za64_vg1x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** dot_w8_z0_z23:
+** ...
+** udot za\.d\[w8, 0, vgx2\], {z0\.h - z1\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z23, svuint16x2_t,
+ svdot_za64_u16_vg1x2 (w8, z0, z23),
+ svdot_za64_vg1x2 (w8, z0, z23))
+
+/*
+** dot_w8_z23_z0:
+** ...
+** udot za\.d\[w8, 0, vgx2\], [^\n]+, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z23_z0, svuint16x2_t,
+ svdot_za64_u16_vg1x2 (w8, z23, z0),
+ svdot_za64_vg1x2 (w8, z23, z0))
+
+/*
+** dot_w8_z18_z28:
+** udot za\.d\[w8, 0, vgx2\], {z18\.h - z19\.h}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z18_z28, svuint16x2_t,
+ svdot_za64_u16_vg1x2 (w8, z18, z28),
+ svdot_za64_vg1x2 (w8, z18, z28))
+
+/*
+** dot_w8_z28_z4:
+** udot za\.d\[w8, 0, vgx2\], {z28\.h - z29\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z28_z4, svuint16x2_t,
+ svdot_za64_u16_vg1x2 (w8, z28, z4),
+ svdot_za64_vg1x2 (w8, z28, z4))
+
+/*
+** dot_w8p1_z4_z0:
+** udot za\.d\[w8, 1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p1_z4_z0, svuint16x2_t,
+ svdot_za64_u16_vg1x2 (w8 + 1, z4, z0),
+ svdot_za64_vg1x2 (w8 + 1, z4, z0))
+
+/*
+** dot_w8p2_z4_z0:
+** udot za\.d\[w8, 2, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p2_z4_z0, svuint16x2_t,
+ svdot_za64_u16_vg1x2 (w8 + 2, z4, z0),
+ svdot_za64_vg1x2 (w8 + 2, z4, z0))
+
+/*
+** dot_w11p4_z4_z0:
+** udot za\.d\[w11, 4, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w11p4_z4_z0, svuint16x2_t,
+ svdot_za64_u16_vg1x2 (w11 + 4, z4, z0),
+ svdot_za64_vg1x2 (w11 + 4, z4, z0))
+
+/*
+** dot_w8p7_z4_z0:
+** udot za\.d\[w8, 7, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p7_z4_z0, svuint16x2_t,
+ svdot_za64_u16_vg1x2 (w8 + 7, z4, z0),
+ svdot_za64_vg1x2 (w8 + 7, z4, z0))
+
+/*
+** dot_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** udot za\.d\[\1, 0, vgx2\], {z4\.h - z5\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p8_z4_z4, svuint16x2_t,
+ svdot_za64_u16_vg1x2 (w8 + 8, z4, z4),
+ svdot_za64_vg1x2 (w8 + 8, z4, z4))
+
+/*
+** dot_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** udot za\.d\[\1, 0, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8m1_z4_z0, svuint16x2_t,
+ svdot_za64_u16_vg1x2 (w8 - 1, z4, z0),
+ svdot_za64_vg1x2 (w8 - 1, z4, z0))
+
+/*
+** dot_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** udot za\.d\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_0_z1_z0, svuint16x2_t, svuint16_t,
+ svdot_single_za64_u16_vg1x2 (0, z1, z0),
+ svdot_za64_vg1x2 (0, z1, z0))
+
+/*
+** dot_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** udot za\.d\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0_z1_z0, svuint16x2_t, svuint16_t,
+ svdot_single_za64_u16_vg1x2 (w0, z1, z0),
+ svdot_za64_vg1x2 (w0, z1, z0))
+
+/*
+** dot_single_w8_z1_z0:
+** udot za\.d\[w8, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z1_z0, svuint16x2_t, svuint16_t,
+ svdot_single_za64_u16_vg1x2 (w8, z1, z0),
+ svdot_za64_vg1x2 (w8, z1, z0))
+
+/*
+** dot_single_w8p1_z1_z0:
+** udot za\.d\[w8, 1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p1_z1_z0, svuint16x2_t, svuint16_t,
+ svdot_single_za64_u16_vg1x2 (w8 + 1, z1, z0),
+ svdot_za64_vg1x2 (w8 + 1, z1, z0))
+
+/*
+** dot_single_w8p2_z20_z0:
+** udot za\.d\[w8, 2, vgx2\], {z20\.h - z21\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p2_z20_z0, svuint16x2_t, svuint16_t,
+ svdot_single_za64_u16_vg1x2 (w8 + 2, z20, z0),
+ svdot_za64_vg1x2 (w8 + 2, z20, z0))
+
+/*
+** dot_single_w11p4_z27_z0:
+** udot za\.d\[w11, 4, vgx2\], {z27\.h - z28\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w11p4_z27_z0, svuint16x2_t, svuint16_t,
+ svdot_single_za64_u16_vg1x2 (w11 + 4, z27, z0),
+ svdot_za64_vg1x2 (w11 + 4, z27, z0))
+
+/*
+** dot_single_w8p7_z1_z0:
+** udot za\.d\[w8, 7, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p7_z1_z0, svuint16x2_t, svuint16_t,
+ svdot_single_za64_u16_vg1x2 (w8 + 7, z1, z0),
+ svdot_za64_vg1x2 (w8 + 7, z1, z0))
+
+/*
+** dot_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** udot za\.d\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p8_z1_z0, svuint16x2_t, svuint16_t,
+ svdot_single_za64_u16_vg1x2 (w8 + 8, z1, z0),
+ svdot_za64_vg1x2 (w8 + 8, z1, z0))
+
+/*
+** dot_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** udot za\.d\[\1, 0, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0m1_z1_z0, svuint16x2_t, svuint16_t,
+ svdot_single_za64_u16_vg1x2 (w0 - 1, z1, z0),
+ svdot_za64_vg1x2 (w0 - 1, z1, z0))
+
+/*
+** dot_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** udot za\.d\[w8, 0, vgx2\], {z0\.h - z1\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (dot_single_w8_z0_z15, svuint16x2_t, svuint16_t,
+ svdot_single_za64_u16_vg1x2 (w8, z0, z15),
+ svdot_za64_vg1x2 (w8, z0, z15))
+
+/*
+** dot_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** udot za\.d\[w8, 0, vgx2\], {z20\.h - z21\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z20_z16, svuint16x2_t, svuint16_t,
+ svdot_single_za64_u16_vg1x2 (w8, z20, z16),
+ svdot_za64_vg1x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za64_u16_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za64_u16_vg1x4.c
new file mode 100644
index 0000000..bfde05c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/dot_za64_u16_vg1x4.c
@@ -0,0 +1,256 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** udot za\.d\[\1, 0, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_0_z0_z0, svuint16x4_t,
+ svdot_za64_u16_vg1x4 (0, z0, z0),
+ svdot_za64_vg1x4 (0, z0, z0))
+
+/*
+** dot_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** udot za\.d\[\1, 0, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w0_z0_z0, svuint16x4_t,
+ svdot_za64_u16_vg1x4 (w0, z0, z0),
+ svdot_za64_vg1x4 (w0, z0, z0))
+
+/*
+** dot_w8_z0_z4:
+** udot za\.d\[w8, 0, vgx4\], {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z4, svuint16x4_t,
+ svdot_za64_u16_vg1x4 (w8, z0, z4),
+ svdot_za64_vg1x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** dot_w8_z0_z18:
+** ...
+** udot za\.d\[w8, 0, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z18, svuint16x4_t,
+ svdot_za64_u16_vg1x4 (w8, z0, z18),
+ svdot_za64_vg1x4 (w8, z0, z18))
+
+/*
+** dot_w8_z18_z0:
+** ...
+** udot za\.d\[w8, 0, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z18_z0, svuint16x4_t,
+ svdot_za64_u16_vg1x4 (w8, z18, z0),
+ svdot_za64_vg1x4 (w8, z18, z0))
+
+/*
+** dot_w8_z0_z23:
+** ...
+** udot za\.d\[w8, 0, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z23, svuint16x4_t,
+ svdot_za64_u16_vg1x4 (w8, z0, z23),
+ svdot_za64_vg1x4 (w8, z0, z23))
+
+/*
+** dot_w8_z23_z0:
+** ...
+** udot za\.d\[w8, 0, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z23_z0, svuint16x4_t,
+ svdot_za64_u16_vg1x4 (w8, z23, z0),
+ svdot_za64_vg1x4 (w8, z23, z0))
+
+/*
+** dot_w8_z4_z28:
+** udot za\.d\[w8, 0, vgx4\], {z4\.h - z7\.h}, {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z4_z28, svuint16x4_t,
+ svdot_za64_u16_vg1x4 (w8, z4, z28),
+ svdot_za64_vg1x4 (w8, z4, z28))
+
+/*
+** dot_w8_z28_z0:
+** udot za\.d\[w8, 0, vgx4\], {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z28_z0, svuint16x4_t,
+ svdot_za64_u16_vg1x4 (w8, z28, z0),
+ svdot_za64_vg1x4 (w8, z28, z0))
+
+/*
+** dot_w8p1_z4_z0:
+** udot za\.d\[w8, 1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p1_z4_z0, svuint16x4_t,
+ svdot_za64_u16_vg1x4 (w8 + 1, z4, z0),
+ svdot_za64_vg1x4 (w8 + 1, z4, z0))
+
+/*
+** dot_w8p2_z4_z0:
+** udot za\.d\[w8, 2, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p2_z4_z0, svuint16x4_t,
+ svdot_za64_u16_vg1x4 (w8 + 2, z4, z0),
+ svdot_za64_vg1x4 (w8 + 2, z4, z0))
+
+/*
+** dot_w11p4_z4_z0:
+** udot za\.d\[w11, 4, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w11p4_z4_z0, svuint16x4_t,
+ svdot_za64_u16_vg1x4 (w11 + 4, z4, z0),
+ svdot_za64_vg1x4 (w11 + 4, z4, z0))
+
+/*
+** dot_w8p7_z4_z0:
+** udot za\.d\[w8, 7, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p7_z4_z0, svuint16x4_t,
+ svdot_za64_u16_vg1x4 (w8 + 7, z4, z0),
+ svdot_za64_vg1x4 (w8 + 7, z4, z0))
+
+/*
+** dot_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** udot za\.d\[\1, 0, vgx4\], {z4\.h - z7\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8p8_z4_z4, svuint16x4_t,
+ svdot_za64_u16_vg1x4 (w8 + 8, z4, z4),
+ svdot_za64_vg1x4 (w8 + 8, z4, z4))
+
+/*
+** dot_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** udot za\.d\[\1, 0, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (dot_w8m1_z4_z0, svuint16x4_t,
+ svdot_za64_u16_vg1x4 (w8 - 1, z4, z0),
+ svdot_za64_vg1x4 (w8 - 1, z4, z0))
+
+/*
+** dot_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** udot za\.d\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_0_z1_z0, svuint16x4_t, svuint16_t,
+ svdot_single_za64_u16_vg1x4 (0, z1, z0),
+ svdot_za64_vg1x4 (0, z1, z0))
+
+/*
+** dot_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** udot za\.d\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0_z1_z0, svuint16x4_t, svuint16_t,
+ svdot_single_za64_u16_vg1x4 (w0, z1, z0),
+ svdot_za64_vg1x4 (w0, z1, z0))
+
+/*
+** dot_single_w8_z1_z0:
+** udot za\.d\[w8, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z1_z0, svuint16x4_t, svuint16_t,
+ svdot_single_za64_u16_vg1x4 (w8, z1, z0),
+ svdot_za64_vg1x4 (w8, z1, z0))
+
+/*
+** dot_single_w8p1_z1_z0:
+** udot za\.d\[w8, 1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p1_z1_z0, svuint16x4_t, svuint16_t,
+ svdot_single_za64_u16_vg1x4 (w8 + 1, z1, z0),
+ svdot_za64_vg1x4 (w8 + 1, z1, z0))
+
+/*
+** dot_single_w8p4_z20_z0:
+** udot za\.d\[w8, 4, vgx4\], {z20\.h - z23\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p4_z20_z0, svuint16x4_t, svuint16_t,
+ svdot_single_za64_u16_vg1x4 (w8 + 4, z20, z0),
+ svdot_za64_vg1x4 (w8 + 4, z20, z0))
+
+/*
+** dot_single_w8p6_z27_z0:
+** udot za\.d\[w8, 6, vgx4\], {z27\.h - z30\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p6_z27_z0, svuint16x4_t, svuint16_t,
+ svdot_single_za64_u16_vg1x4 (w8 + 6, z27, z0),
+ svdot_za64_vg1x4 (w8 + 6, z27, z0))
+
+/*
+** dot_single_w8p7_z1_z0:
+** udot za\.d\[w8, 7, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p7_z1_z0, svuint16x4_t, svuint16_t,
+ svdot_single_za64_u16_vg1x4 (w8 + 7, z1, z0),
+ svdot_za64_vg1x4 (w8 + 7, z1, z0))
+
+/*
+** dot_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** udot za\.d\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p8_z1_z0, svuint16x4_t, svuint16_t,
+ svdot_single_za64_u16_vg1x4 (w8 + 8, z1, z0),
+ svdot_za64_vg1x4 (w8 + 8, z1, z0))
+
+/*
+** dot_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** udot za\.d\[\1, 0, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0m1_z1_z0, svuint16x4_t, svuint16_t,
+ svdot_single_za64_u16_vg1x4 (w0 - 1, z1, z0),
+ svdot_za64_vg1x4 (w0 - 1, z1, z0))
+
+/*
+** dot_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** udot za\.d\[w8, 0, vgx4\], {z0\.h - z3\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (dot_single_w8_z0_z15, svuint16x4_t, svuint16_t,
+ svdot_single_za64_u16_vg1x4 (w8, z0, z15),
+ svdot_za64_vg1x4 (w8, z0, z15))
+
+/*
+** dot_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** udot za\.d\[w8, 0, vgx4\], {z20\.h - z23\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z20_z16, svuint16x4_t, svuint16_t,
+ svdot_single_za64_u16_vg1x4 (w8, z20, z16),
+ svdot_za64_vg1x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_bf16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_bf16_x2.c
new file mode 100644
index 0000000..caed35d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_bf16_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_bf16_base:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_base, svbfloat16x2_t, bfloat16_t,
+ z0 = svld1_bf16_x2 (pn8, x0),
+ z0 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_bf16_index:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_index, svbfloat16x2_t, bfloat16_t,
+ z0 = svld1_bf16_x2 (pn8, x0 + x1),
+ z0 = svld1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_bf16_1:
+** incb x0
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_1, svbfloat16x2_t, bfloat16_t,
+ z0 = svld1_bf16_x2 (pn8, x0 + svcnth ()),
+ z0 = svld1_x2 (pn8, x0 + svcnth ()))
+
+/*
+** ld1_bf16_2:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_2, svbfloat16x2_t, bfloat16_t,
+ z0 = svld1_bf16_x2 (pn8, x0 + svcnth () * 2),
+ z0 = svld1_x2 (pn8, x0 + svcnth () * 2))
+
+/*
+** ld1_bf16_14:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_14, svbfloat16x2_t, bfloat16_t,
+ z0 = svld1_bf16_x2 (pn8, x0 + svcnth () * 14),
+ z0 = svld1_x2 (pn8, x0 + svcnth () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_bf16_16:
+** incb x0, all, mul #16
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_16, svbfloat16x2_t, bfloat16_t,
+ z0 = svld1_bf16_x2 (pn8, x0 + svcnth () * 16),
+ z0 = svld1_x2 (pn8, x0 + svcnth () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_bf16_m1:
+** decb x0
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_m1, svbfloat16x2_t, bfloat16_t,
+ z0 = svld1_bf16_x2 (pn8, x0 - svcnth ()),
+ z0 = svld1_x2 (pn8, x0 - svcnth ()))
+
+/*
+** ld1_bf16_m2:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_m2, svbfloat16x2_t, bfloat16_t,
+ z0 = svld1_bf16_x2 (pn8, x0 - svcnth () * 2),
+ z0 = svld1_x2 (pn8, x0 - svcnth () * 2))
+
+/*
+** ld1_bf16_m16:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_m16, svbfloat16x2_t, bfloat16_t,
+ z0 = svld1_bf16_x2 (pn8, x0 - svcnth () * 16),
+ z0 = svld1_x2 (pn8, x0 - svcnth () * 16))
+
+/*
+** ld1_bf16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_m18, svbfloat16x2_t, bfloat16_t,
+ z0 = svld1_bf16_x2 (pn8, x0 - svcnth () * 18),
+ z0 = svld1_x2 (pn8, x0 - svcnth () * 18))
+
+/*
+** ld1_bf16_z17:
+** ld1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_z17, svbfloat16x2_t, bfloat16_t,
+ z17 = svld1_bf16_x2 (pn8, x0),
+ z17 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_bf16_z22:
+** ld1h {z22\.h(?: - |, )z23\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_z22, svbfloat16x2_t, bfloat16_t,
+ z22 = svld1_bf16_x2 (pn8, x0),
+ z22 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_bf16_z28:
+** ld1h {z28\.h(?: - |, )z29\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_z28, svbfloat16x2_t, bfloat16_t,
+ z28 = svld1_bf16_x2 (pn8, x0),
+ z28 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_bf16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1h {z0\.h(?: - |, )z1\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_pn0, svbfloat16x2_t, bfloat16_t,
+ z0 = svld1_bf16_x2 (pn0, x0),
+ z0 = svld1_x2 (pn0, x0))
+
+/*
+** ld1_bf16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1h {z0\.h(?: - |, )z1\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_pn7, svbfloat16x2_t, bfloat16_t,
+ z0 = svld1_bf16_x2 (pn7, x0),
+ z0 = svld1_x2 (pn7, x0))
+
+/*
+** ld1_bf16_pn15:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_pn15, svbfloat16x2_t, bfloat16_t,
+ z0 = svld1_bf16_x2 (pn15, x0),
+ z0 = svld1_x2 (pn15, x0))
+
+/*
+** ld1_vnum_bf16_0:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_0, svbfloat16x2_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x2 (pn8, x0, 0),
+ z0 = svld1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_bf16_1:
+** incb x0
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_1, svbfloat16x2_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x2 (pn8, x0, 1),
+ z0 = svld1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ld1_vnum_bf16_2:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_2, svbfloat16x2_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x2 (pn8, x0, 2),
+ z0 = svld1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ld1_vnum_bf16_14:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_14, svbfloat16x2_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x2 (pn8, x0, 14),
+ z0 = svld1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_bf16_16:
+** incb x0, all, mul #16
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_16, svbfloat16x2_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x2 (pn8, x0, 16),
+ z0 = svld1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_bf16_m1:
+** decb x0
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_m1, svbfloat16x2_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x2 (pn8, x0, -1),
+ z0 = svld1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ld1_vnum_bf16_m2:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_m2, svbfloat16x2_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x2 (pn8, x0, -2),
+ z0 = svld1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ld1_vnum_bf16_m16:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_m16, svbfloat16x2_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x2 (pn8, x0, -16),
+ z0 = svld1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ld1_vnum_bf16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_m18, svbfloat16x2_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x2 (pn8, x0, -18),
+ z0 = svld1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ld1_vnum_bf16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_x1, svbfloat16x2_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x2 (pn8, x0, x1),
+ z0 = svld1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_bf16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_bf16_x4.c
new file mode 100644
index 0000000..e6a0b56
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_bf16_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_bf16_base:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_base, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_bf16_x4 (pn8, x0),
+ z0 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_bf16_index:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_index, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_bf16_x4 (pn8, x0 + x1),
+ z0 = svld1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_bf16_1:
+** incb x0
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_1, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_bf16_x4 (pn8, x0 + svcnth ()),
+ z0 = svld1_x4 (pn8, x0 + svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_bf16_2:
+** incb x0, all, mul #2
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_2, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_bf16_x4 (pn8, x0 + svcnth () * 2),
+ z0 = svld1_x4 (pn8, x0 + svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_bf16_3:
+** incb x0, all, mul #3
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_3, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_bf16_x4 (pn8, x0 + svcnth () * 3),
+ z0 = svld1_x4 (pn8, x0 + svcnth () * 3))
+
+/*
+** ld1_bf16_4:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_4, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_bf16_x4 (pn8, x0 + svcnth () * 4),
+ z0 = svld1_x4 (pn8, x0 + svcnth () * 4))
+
+/*
+** ld1_bf16_28:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_28, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_bf16_x4 (pn8, x0 + svcnth () * 28),
+ z0 = svld1_x4 (pn8, x0 + svcnth () * 28))
+
+/*
+** ld1_bf16_32:
+** [^{]*
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_32, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_bf16_x4 (pn8, x0 + svcnth () * 32),
+ z0 = svld1_x4 (pn8, x0 + svcnth () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_bf16_m1:
+** decb x0
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_m1, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_bf16_x4 (pn8, x0 - svcnth ()),
+ z0 = svld1_x4 (pn8, x0 - svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_bf16_m2:
+** decb x0, all, mul #2
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_m2, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_bf16_x4 (pn8, x0 - svcnth () * 2),
+ z0 = svld1_x4 (pn8, x0 - svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_bf16_m3:
+** decb x0, all, mul #3
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_m3, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_bf16_x4 (pn8, x0 - svcnth () * 3),
+ z0 = svld1_x4 (pn8, x0 - svcnth () * 3))
+
+/*
+** ld1_bf16_m4:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_m4, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_bf16_x4 (pn8, x0 - svcnth () * 4),
+ z0 = svld1_x4 (pn8, x0 - svcnth () * 4))
+
+/*
+** ld1_bf16_m32:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_m32, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_bf16_x4 (pn8, x0 - svcnth () * 32),
+ z0 = svld1_x4 (pn8, x0 - svcnth () * 32))
+
+/*
+** ld1_bf16_m36:
+** [^{]*
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_m36, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_bf16_x4 (pn8, x0 - svcnth () * 36),
+ z0 = svld1_x4 (pn8, x0 - svcnth () * 36))
+
+/*
+** ld1_bf16_z17:
+** ld1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_z17, svbfloat16x4_t, bfloat16_t,
+ z17 = svld1_bf16_x4 (pn8, x0),
+ z17 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_bf16_z22:
+** ld1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_z22, svbfloat16x4_t, bfloat16_t,
+ z22 = svld1_bf16_x4 (pn8, x0),
+ z22 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_bf16_z28:
+** ld1h {z28\.h(?: - |, )z31\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_z28, svbfloat16x4_t, bfloat16_t,
+ z28 = svld1_bf16_x4 (pn8, x0),
+ z28 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_bf16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1h {z0\.h(?: - |, )z3\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_pn0, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_bf16_x4 (pn0, x0),
+ z0 = svld1_x4 (pn0, x0))
+
+/*
+** ld1_bf16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1h {z0\.h(?: - |, )z3\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_pn7, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_bf16_x4 (pn7, x0),
+ z0 = svld1_x4 (pn7, x0))
+
+/*
+** ld1_bf16_pn15:
+** ld1h {z0\.h(?: - |, )z3\.h}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_bf16_pn15, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_bf16_x4 (pn15, x0),
+ z0 = svld1_x4 (pn15, x0))
+
+/*
+** ld1_vnum_bf16_0:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_0, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x4 (pn8, x0, 0),
+ z0 = svld1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_bf16_1:
+** incb x0
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_1, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x4 (pn8, x0, 1),
+ z0 = svld1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_bf16_2:
+** incb x0, all, mul #2
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_2, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x4 (pn8, x0, 2),
+ z0 = svld1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_bf16_3:
+** incb x0, all, mul #3
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_3, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x4 (pn8, x0, 3),
+ z0 = svld1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ld1_vnum_bf16_4:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_4, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x4 (pn8, x0, 4),
+ z0 = svld1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ld1_vnum_bf16_28:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_28, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x4 (pn8, x0, 28),
+ z0 = svld1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ld1_vnum_bf16_32:
+** [^{]*
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_32, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x4 (pn8, x0, 32),
+ z0 = svld1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_bf16_m1:
+** decb x0
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_m1, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x4 (pn8, x0, -1),
+ z0 = svld1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_bf16_m2:
+** decb x0, all, mul #2
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_m2, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x4 (pn8, x0, -2),
+ z0 = svld1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_bf16_m3:
+** decb x0, all, mul #3
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_m3, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x4 (pn8, x0, -3),
+ z0 = svld1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ld1_vnum_bf16_m4:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_m4, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x4 (pn8, x0, -4),
+ z0 = svld1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ld1_vnum_bf16_m32:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_m32, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x4 (pn8, x0, -32),
+ z0 = svld1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ld1_vnum_bf16_m36:
+** [^{]*
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_m36, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x4 (pn8, x0, -36),
+ z0 = svld1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ld1_vnum_bf16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1h {z0\.h - z3\.h}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_bf16_x1, svbfloat16x4_t, bfloat16_t,
+ z0 = svld1_vnum_bf16_x4 (pn8, x0, x1),
+ z0 = svld1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f16_x2.c
new file mode 100644
index 0000000..204ed55
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f16_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_f16_base:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_base, svfloat16x2_t, float16_t,
+ z0 = svld1_f16_x2 (pn8, x0),
+ z0 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_f16_index:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_index, svfloat16x2_t, float16_t,
+ z0 = svld1_f16_x2 (pn8, x0 + x1),
+ z0 = svld1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f16_1:
+** incb x0
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_1, svfloat16x2_t, float16_t,
+ z0 = svld1_f16_x2 (pn8, x0 + svcnth ()),
+ z0 = svld1_x2 (pn8, x0 + svcnth ()))
+
+/*
+** ld1_f16_2:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_2, svfloat16x2_t, float16_t,
+ z0 = svld1_f16_x2 (pn8, x0 + svcnth () * 2),
+ z0 = svld1_x2 (pn8, x0 + svcnth () * 2))
+
+/*
+** ld1_f16_14:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_14, svfloat16x2_t, float16_t,
+ z0 = svld1_f16_x2 (pn8, x0 + svcnth () * 14),
+ z0 = svld1_x2 (pn8, x0 + svcnth () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f16_16:
+** incb x0, all, mul #16
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_16, svfloat16x2_t, float16_t,
+ z0 = svld1_f16_x2 (pn8, x0 + svcnth () * 16),
+ z0 = svld1_x2 (pn8, x0 + svcnth () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f16_m1:
+** decb x0
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_m1, svfloat16x2_t, float16_t,
+ z0 = svld1_f16_x2 (pn8, x0 - svcnth ()),
+ z0 = svld1_x2 (pn8, x0 - svcnth ()))
+
+/*
+** ld1_f16_m2:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_m2, svfloat16x2_t, float16_t,
+ z0 = svld1_f16_x2 (pn8, x0 - svcnth () * 2),
+ z0 = svld1_x2 (pn8, x0 - svcnth () * 2))
+
+/*
+** ld1_f16_m16:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_m16, svfloat16x2_t, float16_t,
+ z0 = svld1_f16_x2 (pn8, x0 - svcnth () * 16),
+ z0 = svld1_x2 (pn8, x0 - svcnth () * 16))
+
+/*
+** ld1_f16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_m18, svfloat16x2_t, float16_t,
+ z0 = svld1_f16_x2 (pn8, x0 - svcnth () * 18),
+ z0 = svld1_x2 (pn8, x0 - svcnth () * 18))
+
+/*
+** ld1_f16_z17:
+** ld1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_z17, svfloat16x2_t, float16_t,
+ z17 = svld1_f16_x2 (pn8, x0),
+ z17 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_f16_z22:
+** ld1h {z22\.h(?: - |, )z23\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_z22, svfloat16x2_t, float16_t,
+ z22 = svld1_f16_x2 (pn8, x0),
+ z22 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_f16_z28:
+** ld1h {z28\.h(?: - |, )z29\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_z28, svfloat16x2_t, float16_t,
+ z28 = svld1_f16_x2 (pn8, x0),
+ z28 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_f16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1h {z0\.h(?: - |, )z1\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_pn0, svfloat16x2_t, float16_t,
+ z0 = svld1_f16_x2 (pn0, x0),
+ z0 = svld1_x2 (pn0, x0))
+
+/*
+** ld1_f16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1h {z0\.h(?: - |, )z1\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_pn7, svfloat16x2_t, float16_t,
+ z0 = svld1_f16_x2 (pn7, x0),
+ z0 = svld1_x2 (pn7, x0))
+
+/*
+** ld1_f16_pn15:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_pn15, svfloat16x2_t, float16_t,
+ z0 = svld1_f16_x2 (pn15, x0),
+ z0 = svld1_x2 (pn15, x0))
+
+/*
+** ld1_vnum_f16_0:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_0, svfloat16x2_t, float16_t,
+ z0 = svld1_vnum_f16_x2 (pn8, x0, 0),
+ z0 = svld1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f16_1:
+** incb x0
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_1, svfloat16x2_t, float16_t,
+ z0 = svld1_vnum_f16_x2 (pn8, x0, 1),
+ z0 = svld1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ld1_vnum_f16_2:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_2, svfloat16x2_t, float16_t,
+ z0 = svld1_vnum_f16_x2 (pn8, x0, 2),
+ z0 = svld1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ld1_vnum_f16_14:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_14, svfloat16x2_t, float16_t,
+ z0 = svld1_vnum_f16_x2 (pn8, x0, 14),
+ z0 = svld1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f16_16:
+** incb x0, all, mul #16
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_16, svfloat16x2_t, float16_t,
+ z0 = svld1_vnum_f16_x2 (pn8, x0, 16),
+ z0 = svld1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f16_m1:
+** decb x0
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_m1, svfloat16x2_t, float16_t,
+ z0 = svld1_vnum_f16_x2 (pn8, x0, -1),
+ z0 = svld1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ld1_vnum_f16_m2:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_m2, svfloat16x2_t, float16_t,
+ z0 = svld1_vnum_f16_x2 (pn8, x0, -2),
+ z0 = svld1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ld1_vnum_f16_m16:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_m16, svfloat16x2_t, float16_t,
+ z0 = svld1_vnum_f16_x2 (pn8, x0, -16),
+ z0 = svld1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ld1_vnum_f16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_m18, svfloat16x2_t, float16_t,
+ z0 = svld1_vnum_f16_x2 (pn8, x0, -18),
+ z0 = svld1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ld1_vnum_f16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_x1, svfloat16x2_t, float16_t,
+ z0 = svld1_vnum_f16_x2 (pn8, x0, x1),
+ z0 = svld1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f16_x4.c
new file mode 100644
index 0000000..203f7c3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f16_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_f16_base:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_base, svfloat16x4_t, float16_t,
+ z0 = svld1_f16_x4 (pn8, x0),
+ z0 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_f16_index:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_index, svfloat16x4_t, float16_t,
+ z0 = svld1_f16_x4 (pn8, x0 + x1),
+ z0 = svld1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f16_1:
+** incb x0
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_1, svfloat16x4_t, float16_t,
+ z0 = svld1_f16_x4 (pn8, x0 + svcnth ()),
+ z0 = svld1_x4 (pn8, x0 + svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f16_2:
+** incb x0, all, mul #2
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_2, svfloat16x4_t, float16_t,
+ z0 = svld1_f16_x4 (pn8, x0 + svcnth () * 2),
+ z0 = svld1_x4 (pn8, x0 + svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f16_3:
+** incb x0, all, mul #3
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_3, svfloat16x4_t, float16_t,
+ z0 = svld1_f16_x4 (pn8, x0 + svcnth () * 3),
+ z0 = svld1_x4 (pn8, x0 + svcnth () * 3))
+
+/*
+** ld1_f16_4:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_4, svfloat16x4_t, float16_t,
+ z0 = svld1_f16_x4 (pn8, x0 + svcnth () * 4),
+ z0 = svld1_x4 (pn8, x0 + svcnth () * 4))
+
+/*
+** ld1_f16_28:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_28, svfloat16x4_t, float16_t,
+ z0 = svld1_f16_x4 (pn8, x0 + svcnth () * 28),
+ z0 = svld1_x4 (pn8, x0 + svcnth () * 28))
+
+/*
+** ld1_f16_32:
+** [^{]*
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_32, svfloat16x4_t, float16_t,
+ z0 = svld1_f16_x4 (pn8, x0 + svcnth () * 32),
+ z0 = svld1_x4 (pn8, x0 + svcnth () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f16_m1:
+** decb x0
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_m1, svfloat16x4_t, float16_t,
+ z0 = svld1_f16_x4 (pn8, x0 - svcnth ()),
+ z0 = svld1_x4 (pn8, x0 - svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f16_m2:
+** decb x0, all, mul #2
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_m2, svfloat16x4_t, float16_t,
+ z0 = svld1_f16_x4 (pn8, x0 - svcnth () * 2),
+ z0 = svld1_x4 (pn8, x0 - svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f16_m3:
+** decb x0, all, mul #3
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_m3, svfloat16x4_t, float16_t,
+ z0 = svld1_f16_x4 (pn8, x0 - svcnth () * 3),
+ z0 = svld1_x4 (pn8, x0 - svcnth () * 3))
+
+/*
+** ld1_f16_m4:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_m4, svfloat16x4_t, float16_t,
+ z0 = svld1_f16_x4 (pn8, x0 - svcnth () * 4),
+ z0 = svld1_x4 (pn8, x0 - svcnth () * 4))
+
+/*
+** ld1_f16_m32:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_m32, svfloat16x4_t, float16_t,
+ z0 = svld1_f16_x4 (pn8, x0 - svcnth () * 32),
+ z0 = svld1_x4 (pn8, x0 - svcnth () * 32))
+
+/*
+** ld1_f16_m36:
+** [^{]*
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_m36, svfloat16x4_t, float16_t,
+ z0 = svld1_f16_x4 (pn8, x0 - svcnth () * 36),
+ z0 = svld1_x4 (pn8, x0 - svcnth () * 36))
+
+/*
+** ld1_f16_z17:
+** ld1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_z17, svfloat16x4_t, float16_t,
+ z17 = svld1_f16_x4 (pn8, x0),
+ z17 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_f16_z22:
+** ld1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_z22, svfloat16x4_t, float16_t,
+ z22 = svld1_f16_x4 (pn8, x0),
+ z22 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_f16_z28:
+** ld1h {z28\.h(?: - |, )z31\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_z28, svfloat16x4_t, float16_t,
+ z28 = svld1_f16_x4 (pn8, x0),
+ z28 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_f16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1h {z0\.h(?: - |, )z3\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_pn0, svfloat16x4_t, float16_t,
+ z0 = svld1_f16_x4 (pn0, x0),
+ z0 = svld1_x4 (pn0, x0))
+
+/*
+** ld1_f16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1h {z0\.h(?: - |, )z3\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_pn7, svfloat16x4_t, float16_t,
+ z0 = svld1_f16_x4 (pn7, x0),
+ z0 = svld1_x4 (pn7, x0))
+
+/*
+** ld1_f16_pn15:
+** ld1h {z0\.h(?: - |, )z3\.h}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f16_pn15, svfloat16x4_t, float16_t,
+ z0 = svld1_f16_x4 (pn15, x0),
+ z0 = svld1_x4 (pn15, x0))
+
+/*
+** ld1_vnum_f16_0:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_0, svfloat16x4_t, float16_t,
+ z0 = svld1_vnum_f16_x4 (pn8, x0, 0),
+ z0 = svld1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f16_1:
+** incb x0
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_1, svfloat16x4_t, float16_t,
+ z0 = svld1_vnum_f16_x4 (pn8, x0, 1),
+ z0 = svld1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f16_2:
+** incb x0, all, mul #2
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_2, svfloat16x4_t, float16_t,
+ z0 = svld1_vnum_f16_x4 (pn8, x0, 2),
+ z0 = svld1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f16_3:
+** incb x0, all, mul #3
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_3, svfloat16x4_t, float16_t,
+ z0 = svld1_vnum_f16_x4 (pn8, x0, 3),
+ z0 = svld1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ld1_vnum_f16_4:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_4, svfloat16x4_t, float16_t,
+ z0 = svld1_vnum_f16_x4 (pn8, x0, 4),
+ z0 = svld1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ld1_vnum_f16_28:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_28, svfloat16x4_t, float16_t,
+ z0 = svld1_vnum_f16_x4 (pn8, x0, 28),
+ z0 = svld1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ld1_vnum_f16_32:
+** [^{]*
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_32, svfloat16x4_t, float16_t,
+ z0 = svld1_vnum_f16_x4 (pn8, x0, 32),
+ z0 = svld1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f16_m1:
+** decb x0
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_m1, svfloat16x4_t, float16_t,
+ z0 = svld1_vnum_f16_x4 (pn8, x0, -1),
+ z0 = svld1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f16_m2:
+** decb x0, all, mul #2
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_m2, svfloat16x4_t, float16_t,
+ z0 = svld1_vnum_f16_x4 (pn8, x0, -2),
+ z0 = svld1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f16_m3:
+** decb x0, all, mul #3
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_m3, svfloat16x4_t, float16_t,
+ z0 = svld1_vnum_f16_x4 (pn8, x0, -3),
+ z0 = svld1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ld1_vnum_f16_m4:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_m4, svfloat16x4_t, float16_t,
+ z0 = svld1_vnum_f16_x4 (pn8, x0, -4),
+ z0 = svld1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ld1_vnum_f16_m32:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_m32, svfloat16x4_t, float16_t,
+ z0 = svld1_vnum_f16_x4 (pn8, x0, -32),
+ z0 = svld1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ld1_vnum_f16_m36:
+** [^{]*
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_m36, svfloat16x4_t, float16_t,
+ z0 = svld1_vnum_f16_x4 (pn8, x0, -36),
+ z0 = svld1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ld1_vnum_f16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1h {z0\.h - z3\.h}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f16_x1, svfloat16x4_t, float16_t,
+ z0 = svld1_vnum_f16_x4 (pn8, x0, x1),
+ z0 = svld1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f32_x2.c
new file mode 100644
index 0000000..f98f385
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f32_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_f32_base:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_base, svfloat32x2_t, float32_t,
+ z0 = svld1_f32_x2 (pn8, x0),
+ z0 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_f32_index:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_index, svfloat32x2_t, float32_t,
+ z0 = svld1_f32_x2 (pn8, x0 + x1),
+ z0 = svld1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f32_1:
+** incb x0
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_1, svfloat32x2_t, float32_t,
+ z0 = svld1_f32_x2 (pn8, x0 + svcntw ()),
+ z0 = svld1_x2 (pn8, x0 + svcntw ()))
+
+/*
+** ld1_f32_2:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_2, svfloat32x2_t, float32_t,
+ z0 = svld1_f32_x2 (pn8, x0 + svcntw () * 2),
+ z0 = svld1_x2 (pn8, x0 + svcntw () * 2))
+
+/*
+** ld1_f32_14:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_14, svfloat32x2_t, float32_t,
+ z0 = svld1_f32_x2 (pn8, x0 + svcntw () * 14),
+ z0 = svld1_x2 (pn8, x0 + svcntw () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f32_16:
+** incb x0, all, mul #16
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_16, svfloat32x2_t, float32_t,
+ z0 = svld1_f32_x2 (pn8, x0 + svcntw () * 16),
+ z0 = svld1_x2 (pn8, x0 + svcntw () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f32_m1:
+** decb x0
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_m1, svfloat32x2_t, float32_t,
+ z0 = svld1_f32_x2 (pn8, x0 - svcntw ()),
+ z0 = svld1_x2 (pn8, x0 - svcntw ()))
+
+/*
+** ld1_f32_m2:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_m2, svfloat32x2_t, float32_t,
+ z0 = svld1_f32_x2 (pn8, x0 - svcntw () * 2),
+ z0 = svld1_x2 (pn8, x0 - svcntw () * 2))
+
+/*
+** ld1_f32_m16:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_m16, svfloat32x2_t, float32_t,
+ z0 = svld1_f32_x2 (pn8, x0 - svcntw () * 16),
+ z0 = svld1_x2 (pn8, x0 - svcntw () * 16))
+
+/*
+** ld1_f32_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_m18, svfloat32x2_t, float32_t,
+ z0 = svld1_f32_x2 (pn8, x0 - svcntw () * 18),
+ z0 = svld1_x2 (pn8, x0 - svcntw () * 18))
+
+/*
+** ld1_f32_z17:
+** ld1w {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_z17, svfloat32x2_t, float32_t,
+ z17 = svld1_f32_x2 (pn8, x0),
+ z17 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_f32_z22:
+** ld1w {z22\.s(?: - |, )z23\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_z22, svfloat32x2_t, float32_t,
+ z22 = svld1_f32_x2 (pn8, x0),
+ z22 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_f32_z28:
+** ld1w {z28\.s(?: - |, )z29\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_z28, svfloat32x2_t, float32_t,
+ z28 = svld1_f32_x2 (pn8, x0),
+ z28 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_f32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1w {z0\.s(?: - |, )z1\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_pn0, svfloat32x2_t, float32_t,
+ z0 = svld1_f32_x2 (pn0, x0),
+ z0 = svld1_x2 (pn0, x0))
+
+/*
+** ld1_f32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1w {z0\.s(?: - |, )z1\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_pn7, svfloat32x2_t, float32_t,
+ z0 = svld1_f32_x2 (pn7, x0),
+ z0 = svld1_x2 (pn7, x0))
+
+/*
+** ld1_f32_pn15:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_pn15, svfloat32x2_t, float32_t,
+ z0 = svld1_f32_x2 (pn15, x0),
+ z0 = svld1_x2 (pn15, x0))
+
+/*
+** ld1_vnum_f32_0:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_0, svfloat32x2_t, float32_t,
+ z0 = svld1_vnum_f32_x2 (pn8, x0, 0),
+ z0 = svld1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f32_1:
+** incb x0
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_1, svfloat32x2_t, float32_t,
+ z0 = svld1_vnum_f32_x2 (pn8, x0, 1),
+ z0 = svld1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ld1_vnum_f32_2:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_2, svfloat32x2_t, float32_t,
+ z0 = svld1_vnum_f32_x2 (pn8, x0, 2),
+ z0 = svld1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ld1_vnum_f32_14:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_14, svfloat32x2_t, float32_t,
+ z0 = svld1_vnum_f32_x2 (pn8, x0, 14),
+ z0 = svld1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f32_16:
+** incb x0, all, mul #16
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_16, svfloat32x2_t, float32_t,
+ z0 = svld1_vnum_f32_x2 (pn8, x0, 16),
+ z0 = svld1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f32_m1:
+** decb x0
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_m1, svfloat32x2_t, float32_t,
+ z0 = svld1_vnum_f32_x2 (pn8, x0, -1),
+ z0 = svld1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ld1_vnum_f32_m2:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_m2, svfloat32x2_t, float32_t,
+ z0 = svld1_vnum_f32_x2 (pn8, x0, -2),
+ z0 = svld1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ld1_vnum_f32_m16:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_m16, svfloat32x2_t, float32_t,
+ z0 = svld1_vnum_f32_x2 (pn8, x0, -16),
+ z0 = svld1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ld1_vnum_f32_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_m18, svfloat32x2_t, float32_t,
+ z0 = svld1_vnum_f32_x2 (pn8, x0, -18),
+ z0 = svld1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ld1_vnum_f32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_x1, svfloat32x2_t, float32_t,
+ z0 = svld1_vnum_f32_x2 (pn8, x0, x1),
+ z0 = svld1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f32_x4.c
new file mode 100644
index 0000000..e6dcb9f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f32_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_f32_base:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_base, svfloat32x4_t, float32_t,
+ z0 = svld1_f32_x4 (pn8, x0),
+ z0 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_f32_index:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_index, svfloat32x4_t, float32_t,
+ z0 = svld1_f32_x4 (pn8, x0 + x1),
+ z0 = svld1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f32_1:
+** incb x0
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_1, svfloat32x4_t, float32_t,
+ z0 = svld1_f32_x4 (pn8, x0 + svcntw ()),
+ z0 = svld1_x4 (pn8, x0 + svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f32_2:
+** incb x0, all, mul #2
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_2, svfloat32x4_t, float32_t,
+ z0 = svld1_f32_x4 (pn8, x0 + svcntw () * 2),
+ z0 = svld1_x4 (pn8, x0 + svcntw () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f32_3:
+** incb x0, all, mul #3
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_3, svfloat32x4_t, float32_t,
+ z0 = svld1_f32_x4 (pn8, x0 + svcntw () * 3),
+ z0 = svld1_x4 (pn8, x0 + svcntw () * 3))
+
+/*
+** ld1_f32_4:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_4, svfloat32x4_t, float32_t,
+ z0 = svld1_f32_x4 (pn8, x0 + svcntw () * 4),
+ z0 = svld1_x4 (pn8, x0 + svcntw () * 4))
+
+/*
+** ld1_f32_28:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_28, svfloat32x4_t, float32_t,
+ z0 = svld1_f32_x4 (pn8, x0 + svcntw () * 28),
+ z0 = svld1_x4 (pn8, x0 + svcntw () * 28))
+
+/*
+** ld1_f32_32:
+** [^{]*
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_32, svfloat32x4_t, float32_t,
+ z0 = svld1_f32_x4 (pn8, x0 + svcntw () * 32),
+ z0 = svld1_x4 (pn8, x0 + svcntw () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f32_m1:
+** decb x0
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_m1, svfloat32x4_t, float32_t,
+ z0 = svld1_f32_x4 (pn8, x0 - svcntw ()),
+ z0 = svld1_x4 (pn8, x0 - svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f32_m2:
+** decb x0, all, mul #2
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_m2, svfloat32x4_t, float32_t,
+ z0 = svld1_f32_x4 (pn8, x0 - svcntw () * 2),
+ z0 = svld1_x4 (pn8, x0 - svcntw () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f32_m3:
+** decb x0, all, mul #3
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_m3, svfloat32x4_t, float32_t,
+ z0 = svld1_f32_x4 (pn8, x0 - svcntw () * 3),
+ z0 = svld1_x4 (pn8, x0 - svcntw () * 3))
+
+/*
+** ld1_f32_m4:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_m4, svfloat32x4_t, float32_t,
+ z0 = svld1_f32_x4 (pn8, x0 - svcntw () * 4),
+ z0 = svld1_x4 (pn8, x0 - svcntw () * 4))
+
+/*
+** ld1_f32_m32:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_m32, svfloat32x4_t, float32_t,
+ z0 = svld1_f32_x4 (pn8, x0 - svcntw () * 32),
+ z0 = svld1_x4 (pn8, x0 - svcntw () * 32))
+
+/*
+** ld1_f32_m36:
+** [^{]*
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_m36, svfloat32x4_t, float32_t,
+ z0 = svld1_f32_x4 (pn8, x0 - svcntw () * 36),
+ z0 = svld1_x4 (pn8, x0 - svcntw () * 36))
+
+/*
+** ld1_f32_z17:
+** ld1w {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_z17, svfloat32x4_t, float32_t,
+ z17 = svld1_f32_x4 (pn8, x0),
+ z17 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_f32_z22:
+** ld1w {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_z22, svfloat32x4_t, float32_t,
+ z22 = svld1_f32_x4 (pn8, x0),
+ z22 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_f32_z28:
+** ld1w {z28\.s(?: - |, )z31\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_z28, svfloat32x4_t, float32_t,
+ z28 = svld1_f32_x4 (pn8, x0),
+ z28 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_f32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1w {z0\.s(?: - |, )z3\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_pn0, svfloat32x4_t, float32_t,
+ z0 = svld1_f32_x4 (pn0, x0),
+ z0 = svld1_x4 (pn0, x0))
+
+/*
+** ld1_f32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1w {z0\.s(?: - |, )z3\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_pn7, svfloat32x4_t, float32_t,
+ z0 = svld1_f32_x4 (pn7, x0),
+ z0 = svld1_x4 (pn7, x0))
+
+/*
+** ld1_f32_pn15:
+** ld1w {z0\.s(?: - |, )z3\.s}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f32_pn15, svfloat32x4_t, float32_t,
+ z0 = svld1_f32_x4 (pn15, x0),
+ z0 = svld1_x4 (pn15, x0))
+
+/*
+** ld1_vnum_f32_0:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_0, svfloat32x4_t, float32_t,
+ z0 = svld1_vnum_f32_x4 (pn8, x0, 0),
+ z0 = svld1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f32_1:
+** incb x0
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_1, svfloat32x4_t, float32_t,
+ z0 = svld1_vnum_f32_x4 (pn8, x0, 1),
+ z0 = svld1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f32_2:
+** incb x0, all, mul #2
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_2, svfloat32x4_t, float32_t,
+ z0 = svld1_vnum_f32_x4 (pn8, x0, 2),
+ z0 = svld1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f32_3:
+** incb x0, all, mul #3
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_3, svfloat32x4_t, float32_t,
+ z0 = svld1_vnum_f32_x4 (pn8, x0, 3),
+ z0 = svld1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ld1_vnum_f32_4:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_4, svfloat32x4_t, float32_t,
+ z0 = svld1_vnum_f32_x4 (pn8, x0, 4),
+ z0 = svld1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ld1_vnum_f32_28:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_28, svfloat32x4_t, float32_t,
+ z0 = svld1_vnum_f32_x4 (pn8, x0, 28),
+ z0 = svld1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ld1_vnum_f32_32:
+** [^{]*
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_32, svfloat32x4_t, float32_t,
+ z0 = svld1_vnum_f32_x4 (pn8, x0, 32),
+ z0 = svld1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f32_m1:
+** decb x0
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_m1, svfloat32x4_t, float32_t,
+ z0 = svld1_vnum_f32_x4 (pn8, x0, -1),
+ z0 = svld1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f32_m2:
+** decb x0, all, mul #2
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_m2, svfloat32x4_t, float32_t,
+ z0 = svld1_vnum_f32_x4 (pn8, x0, -2),
+ z0 = svld1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f32_m3:
+** decb x0, all, mul #3
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_m3, svfloat32x4_t, float32_t,
+ z0 = svld1_vnum_f32_x4 (pn8, x0, -3),
+ z0 = svld1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ld1_vnum_f32_m4:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_m4, svfloat32x4_t, float32_t,
+ z0 = svld1_vnum_f32_x4 (pn8, x0, -4),
+ z0 = svld1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ld1_vnum_f32_m32:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_m32, svfloat32x4_t, float32_t,
+ z0 = svld1_vnum_f32_x4 (pn8, x0, -32),
+ z0 = svld1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ld1_vnum_f32_m36:
+** [^{]*
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_m36, svfloat32x4_t, float32_t,
+ z0 = svld1_vnum_f32_x4 (pn8, x0, -36),
+ z0 = svld1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ld1_vnum_f32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1w {z0\.s - z3\.s}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f32_x1, svfloat32x4_t, float32_t,
+ z0 = svld1_vnum_f32_x4 (pn8, x0, x1),
+ z0 = svld1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f64_x2.c
new file mode 100644
index 0000000..80e6bed
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f64_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_f64_base:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_base, svfloat64x2_t, float64_t,
+ z0 = svld1_f64_x2 (pn8, x0),
+ z0 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_f64_index:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_index, svfloat64x2_t, float64_t,
+ z0 = svld1_f64_x2 (pn8, x0 + x1),
+ z0 = svld1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f64_1:
+** incb x0
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_1, svfloat64x2_t, float64_t,
+ z0 = svld1_f64_x2 (pn8, x0 + svcntd ()),
+ z0 = svld1_x2 (pn8, x0 + svcntd ()))
+
+/*
+** ld1_f64_2:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_2, svfloat64x2_t, float64_t,
+ z0 = svld1_f64_x2 (pn8, x0 + svcntd () * 2),
+ z0 = svld1_x2 (pn8, x0 + svcntd () * 2))
+
+/*
+** ld1_f64_14:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_14, svfloat64x2_t, float64_t,
+ z0 = svld1_f64_x2 (pn8, x0 + svcntd () * 14),
+ z0 = svld1_x2 (pn8, x0 + svcntd () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f64_16:
+** incb x0, all, mul #16
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_16, svfloat64x2_t, float64_t,
+ z0 = svld1_f64_x2 (pn8, x0 + svcntd () * 16),
+ z0 = svld1_x2 (pn8, x0 + svcntd () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f64_m1:
+** decb x0
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_m1, svfloat64x2_t, float64_t,
+ z0 = svld1_f64_x2 (pn8, x0 - svcntd ()),
+ z0 = svld1_x2 (pn8, x0 - svcntd ()))
+
+/*
+** ld1_f64_m2:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_m2, svfloat64x2_t, float64_t,
+ z0 = svld1_f64_x2 (pn8, x0 - svcntd () * 2),
+ z0 = svld1_x2 (pn8, x0 - svcntd () * 2))
+
+/*
+** ld1_f64_m16:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_m16, svfloat64x2_t, float64_t,
+ z0 = svld1_f64_x2 (pn8, x0 - svcntd () * 16),
+ z0 = svld1_x2 (pn8, x0 - svcntd () * 16))
+
+/*
+** ld1_f64_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_m18, svfloat64x2_t, float64_t,
+ z0 = svld1_f64_x2 (pn8, x0 - svcntd () * 18),
+ z0 = svld1_x2 (pn8, x0 - svcntd () * 18))
+
+/*
+** ld1_f64_z17:
+** ld1d {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_z17, svfloat64x2_t, float64_t,
+ z17 = svld1_f64_x2 (pn8, x0),
+ z17 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_f64_z22:
+** ld1d {z22\.d(?: - |, )z23\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_z22, svfloat64x2_t, float64_t,
+ z22 = svld1_f64_x2 (pn8, x0),
+ z22 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_f64_z28:
+** ld1d {z28\.d(?: - |, )z29\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_z28, svfloat64x2_t, float64_t,
+ z28 = svld1_f64_x2 (pn8, x0),
+ z28 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_f64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1d {z0\.d(?: - |, )z1\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_pn0, svfloat64x2_t, float64_t,
+ z0 = svld1_f64_x2 (pn0, x0),
+ z0 = svld1_x2 (pn0, x0))
+
+/*
+** ld1_f64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1d {z0\.d(?: - |, )z1\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_pn7, svfloat64x2_t, float64_t,
+ z0 = svld1_f64_x2 (pn7, x0),
+ z0 = svld1_x2 (pn7, x0))
+
+/*
+** ld1_f64_pn15:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_pn15, svfloat64x2_t, float64_t,
+ z0 = svld1_f64_x2 (pn15, x0),
+ z0 = svld1_x2 (pn15, x0))
+
+/*
+** ld1_vnum_f64_0:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_0, svfloat64x2_t, float64_t,
+ z0 = svld1_vnum_f64_x2 (pn8, x0, 0),
+ z0 = svld1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f64_1:
+** incb x0
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_1, svfloat64x2_t, float64_t,
+ z0 = svld1_vnum_f64_x2 (pn8, x0, 1),
+ z0 = svld1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ld1_vnum_f64_2:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_2, svfloat64x2_t, float64_t,
+ z0 = svld1_vnum_f64_x2 (pn8, x0, 2),
+ z0 = svld1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ld1_vnum_f64_14:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_14, svfloat64x2_t, float64_t,
+ z0 = svld1_vnum_f64_x2 (pn8, x0, 14),
+ z0 = svld1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f64_16:
+** incb x0, all, mul #16
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_16, svfloat64x2_t, float64_t,
+ z0 = svld1_vnum_f64_x2 (pn8, x0, 16),
+ z0 = svld1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f64_m1:
+** decb x0
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_m1, svfloat64x2_t, float64_t,
+ z0 = svld1_vnum_f64_x2 (pn8, x0, -1),
+ z0 = svld1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ld1_vnum_f64_m2:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_m2, svfloat64x2_t, float64_t,
+ z0 = svld1_vnum_f64_x2 (pn8, x0, -2),
+ z0 = svld1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ld1_vnum_f64_m16:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_m16, svfloat64x2_t, float64_t,
+ z0 = svld1_vnum_f64_x2 (pn8, x0, -16),
+ z0 = svld1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ld1_vnum_f64_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_m18, svfloat64x2_t, float64_t,
+ z0 = svld1_vnum_f64_x2 (pn8, x0, -18),
+ z0 = svld1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ld1_vnum_f64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_x1, svfloat64x2_t, float64_t,
+ z0 = svld1_vnum_f64_x2 (pn8, x0, x1),
+ z0 = svld1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f64_x4.c
new file mode 100644
index 0000000..5153d64
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_f64_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_f64_base:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_base, svfloat64x4_t, float64_t,
+ z0 = svld1_f64_x4 (pn8, x0),
+ z0 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_f64_index:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_index, svfloat64x4_t, float64_t,
+ z0 = svld1_f64_x4 (pn8, x0 + x1),
+ z0 = svld1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f64_1:
+** incb x0
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_1, svfloat64x4_t, float64_t,
+ z0 = svld1_f64_x4 (pn8, x0 + svcntd ()),
+ z0 = svld1_x4 (pn8, x0 + svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f64_2:
+** incb x0, all, mul #2
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_2, svfloat64x4_t, float64_t,
+ z0 = svld1_f64_x4 (pn8, x0 + svcntd () * 2),
+ z0 = svld1_x4 (pn8, x0 + svcntd () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f64_3:
+** incb x0, all, mul #3
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_3, svfloat64x4_t, float64_t,
+ z0 = svld1_f64_x4 (pn8, x0 + svcntd () * 3),
+ z0 = svld1_x4 (pn8, x0 + svcntd () * 3))
+
+/*
+** ld1_f64_4:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_4, svfloat64x4_t, float64_t,
+ z0 = svld1_f64_x4 (pn8, x0 + svcntd () * 4),
+ z0 = svld1_x4 (pn8, x0 + svcntd () * 4))
+
+/*
+** ld1_f64_28:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_28, svfloat64x4_t, float64_t,
+ z0 = svld1_f64_x4 (pn8, x0 + svcntd () * 28),
+ z0 = svld1_x4 (pn8, x0 + svcntd () * 28))
+
+/*
+** ld1_f64_32:
+** [^{]*
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_32, svfloat64x4_t, float64_t,
+ z0 = svld1_f64_x4 (pn8, x0 + svcntd () * 32),
+ z0 = svld1_x4 (pn8, x0 + svcntd () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f64_m1:
+** decb x0
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_m1, svfloat64x4_t, float64_t,
+ z0 = svld1_f64_x4 (pn8, x0 - svcntd ()),
+ z0 = svld1_x4 (pn8, x0 - svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f64_m2:
+** decb x0, all, mul #2
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_m2, svfloat64x4_t, float64_t,
+ z0 = svld1_f64_x4 (pn8, x0 - svcntd () * 2),
+ z0 = svld1_x4 (pn8, x0 - svcntd () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_f64_m3:
+** decb x0, all, mul #3
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_m3, svfloat64x4_t, float64_t,
+ z0 = svld1_f64_x4 (pn8, x0 - svcntd () * 3),
+ z0 = svld1_x4 (pn8, x0 - svcntd () * 3))
+
+/*
+** ld1_f64_m4:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_m4, svfloat64x4_t, float64_t,
+ z0 = svld1_f64_x4 (pn8, x0 - svcntd () * 4),
+ z0 = svld1_x4 (pn8, x0 - svcntd () * 4))
+
+/*
+** ld1_f64_m32:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_m32, svfloat64x4_t, float64_t,
+ z0 = svld1_f64_x4 (pn8, x0 - svcntd () * 32),
+ z0 = svld1_x4 (pn8, x0 - svcntd () * 32))
+
+/*
+** ld1_f64_m36:
+** [^{]*
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_m36, svfloat64x4_t, float64_t,
+ z0 = svld1_f64_x4 (pn8, x0 - svcntd () * 36),
+ z0 = svld1_x4 (pn8, x0 - svcntd () * 36))
+
+/*
+** ld1_f64_z17:
+** ld1d {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_z17, svfloat64x4_t, float64_t,
+ z17 = svld1_f64_x4 (pn8, x0),
+ z17 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_f64_z22:
+** ld1d {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_z22, svfloat64x4_t, float64_t,
+ z22 = svld1_f64_x4 (pn8, x0),
+ z22 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_f64_z28:
+** ld1d {z28\.d(?: - |, )z31\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_z28, svfloat64x4_t, float64_t,
+ z28 = svld1_f64_x4 (pn8, x0),
+ z28 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_f64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1d {z0\.d(?: - |, )z3\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_pn0, svfloat64x4_t, float64_t,
+ z0 = svld1_f64_x4 (pn0, x0),
+ z0 = svld1_x4 (pn0, x0))
+
+/*
+** ld1_f64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1d {z0\.d(?: - |, )z3\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_pn7, svfloat64x4_t, float64_t,
+ z0 = svld1_f64_x4 (pn7, x0),
+ z0 = svld1_x4 (pn7, x0))
+
+/*
+** ld1_f64_pn15:
+** ld1d {z0\.d(?: - |, )z3\.d}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_f64_pn15, svfloat64x4_t, float64_t,
+ z0 = svld1_f64_x4 (pn15, x0),
+ z0 = svld1_x4 (pn15, x0))
+
+/*
+** ld1_vnum_f64_0:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_0, svfloat64x4_t, float64_t,
+ z0 = svld1_vnum_f64_x4 (pn8, x0, 0),
+ z0 = svld1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f64_1:
+** incb x0
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_1, svfloat64x4_t, float64_t,
+ z0 = svld1_vnum_f64_x4 (pn8, x0, 1),
+ z0 = svld1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f64_2:
+** incb x0, all, mul #2
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_2, svfloat64x4_t, float64_t,
+ z0 = svld1_vnum_f64_x4 (pn8, x0, 2),
+ z0 = svld1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f64_3:
+** incb x0, all, mul #3
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_3, svfloat64x4_t, float64_t,
+ z0 = svld1_vnum_f64_x4 (pn8, x0, 3),
+ z0 = svld1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ld1_vnum_f64_4:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_4, svfloat64x4_t, float64_t,
+ z0 = svld1_vnum_f64_x4 (pn8, x0, 4),
+ z0 = svld1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ld1_vnum_f64_28:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_28, svfloat64x4_t, float64_t,
+ z0 = svld1_vnum_f64_x4 (pn8, x0, 28),
+ z0 = svld1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ld1_vnum_f64_32:
+** [^{]*
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_32, svfloat64x4_t, float64_t,
+ z0 = svld1_vnum_f64_x4 (pn8, x0, 32),
+ z0 = svld1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f64_m1:
+** decb x0
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_m1, svfloat64x4_t, float64_t,
+ z0 = svld1_vnum_f64_x4 (pn8, x0, -1),
+ z0 = svld1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f64_m2:
+** decb x0, all, mul #2
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_m2, svfloat64x4_t, float64_t,
+ z0 = svld1_vnum_f64_x4 (pn8, x0, -2),
+ z0 = svld1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_f64_m3:
+** decb x0, all, mul #3
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_m3, svfloat64x4_t, float64_t,
+ z0 = svld1_vnum_f64_x4 (pn8, x0, -3),
+ z0 = svld1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ld1_vnum_f64_m4:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_m4, svfloat64x4_t, float64_t,
+ z0 = svld1_vnum_f64_x4 (pn8, x0, -4),
+ z0 = svld1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ld1_vnum_f64_m32:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_m32, svfloat64x4_t, float64_t,
+ z0 = svld1_vnum_f64_x4 (pn8, x0, -32),
+ z0 = svld1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ld1_vnum_f64_m36:
+** [^{]*
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_m36, svfloat64x4_t, float64_t,
+ z0 = svld1_vnum_f64_x4 (pn8, x0, -36),
+ z0 = svld1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ld1_vnum_f64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1d {z0\.d - z3\.d}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_f64_x1, svfloat64x4_t, float64_t,
+ z0 = svld1_vnum_f64_x4 (pn8, x0, x1),
+ z0 = svld1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s16_x2.c
new file mode 100644
index 0000000..5121bf3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s16_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_s16_base:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_base, svint16x2_t, int16_t,
+ z0 = svld1_s16_x2 (pn8, x0),
+ z0 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_s16_index:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_index, svint16x2_t, int16_t,
+ z0 = svld1_s16_x2 (pn8, x0 + x1),
+ z0 = svld1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s16_1:
+** incb x0
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_1, svint16x2_t, int16_t,
+ z0 = svld1_s16_x2 (pn8, x0 + svcnth ()),
+ z0 = svld1_x2 (pn8, x0 + svcnth ()))
+
+/*
+** ld1_s16_2:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_2, svint16x2_t, int16_t,
+ z0 = svld1_s16_x2 (pn8, x0 + svcnth () * 2),
+ z0 = svld1_x2 (pn8, x0 + svcnth () * 2))
+
+/*
+** ld1_s16_14:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_14, svint16x2_t, int16_t,
+ z0 = svld1_s16_x2 (pn8, x0 + svcnth () * 14),
+ z0 = svld1_x2 (pn8, x0 + svcnth () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s16_16:
+** incb x0, all, mul #16
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_16, svint16x2_t, int16_t,
+ z0 = svld1_s16_x2 (pn8, x0 + svcnth () * 16),
+ z0 = svld1_x2 (pn8, x0 + svcnth () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s16_m1:
+** decb x0
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_m1, svint16x2_t, int16_t,
+ z0 = svld1_s16_x2 (pn8, x0 - svcnth ()),
+ z0 = svld1_x2 (pn8, x0 - svcnth ()))
+
+/*
+** ld1_s16_m2:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_m2, svint16x2_t, int16_t,
+ z0 = svld1_s16_x2 (pn8, x0 - svcnth () * 2),
+ z0 = svld1_x2 (pn8, x0 - svcnth () * 2))
+
+/*
+** ld1_s16_m16:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_m16, svint16x2_t, int16_t,
+ z0 = svld1_s16_x2 (pn8, x0 - svcnth () * 16),
+ z0 = svld1_x2 (pn8, x0 - svcnth () * 16))
+
+/*
+** ld1_s16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_m18, svint16x2_t, int16_t,
+ z0 = svld1_s16_x2 (pn8, x0 - svcnth () * 18),
+ z0 = svld1_x2 (pn8, x0 - svcnth () * 18))
+
+/*
+** ld1_s16_z17:
+** ld1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_z17, svint16x2_t, int16_t,
+ z17 = svld1_s16_x2 (pn8, x0),
+ z17 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_s16_z22:
+** ld1h {z22\.h(?: - |, )z23\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_z22, svint16x2_t, int16_t,
+ z22 = svld1_s16_x2 (pn8, x0),
+ z22 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_s16_z28:
+** ld1h {z28\.h(?: - |, )z29\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_z28, svint16x2_t, int16_t,
+ z28 = svld1_s16_x2 (pn8, x0),
+ z28 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_s16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1h {z0\.h(?: - |, )z1\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_pn0, svint16x2_t, int16_t,
+ z0 = svld1_s16_x2 (pn0, x0),
+ z0 = svld1_x2 (pn0, x0))
+
+/*
+** ld1_s16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1h {z0\.h(?: - |, )z1\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_pn7, svint16x2_t, int16_t,
+ z0 = svld1_s16_x2 (pn7, x0),
+ z0 = svld1_x2 (pn7, x0))
+
+/*
+** ld1_s16_pn15:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_pn15, svint16x2_t, int16_t,
+ z0 = svld1_s16_x2 (pn15, x0),
+ z0 = svld1_x2 (pn15, x0))
+
+/*
+** ld1_vnum_s16_0:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_0, svint16x2_t, int16_t,
+ z0 = svld1_vnum_s16_x2 (pn8, x0, 0),
+ z0 = svld1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s16_1:
+** incb x0
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_1, svint16x2_t, int16_t,
+ z0 = svld1_vnum_s16_x2 (pn8, x0, 1),
+ z0 = svld1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ld1_vnum_s16_2:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_2, svint16x2_t, int16_t,
+ z0 = svld1_vnum_s16_x2 (pn8, x0, 2),
+ z0 = svld1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ld1_vnum_s16_14:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_14, svint16x2_t, int16_t,
+ z0 = svld1_vnum_s16_x2 (pn8, x0, 14),
+ z0 = svld1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s16_16:
+** incb x0, all, mul #16
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_16, svint16x2_t, int16_t,
+ z0 = svld1_vnum_s16_x2 (pn8, x0, 16),
+ z0 = svld1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s16_m1:
+** decb x0
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_m1, svint16x2_t, int16_t,
+ z0 = svld1_vnum_s16_x2 (pn8, x0, -1),
+ z0 = svld1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ld1_vnum_s16_m2:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_m2, svint16x2_t, int16_t,
+ z0 = svld1_vnum_s16_x2 (pn8, x0, -2),
+ z0 = svld1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ld1_vnum_s16_m16:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_m16, svint16x2_t, int16_t,
+ z0 = svld1_vnum_s16_x2 (pn8, x0, -16),
+ z0 = svld1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ld1_vnum_s16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_m18, svint16x2_t, int16_t,
+ z0 = svld1_vnum_s16_x2 (pn8, x0, -18),
+ z0 = svld1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ld1_vnum_s16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_x1, svint16x2_t, int16_t,
+ z0 = svld1_vnum_s16_x2 (pn8, x0, x1),
+ z0 = svld1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s16_x4.c
new file mode 100644
index 0000000..16e1fce
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s16_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_s16_base:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_base, svint16x4_t, int16_t,
+ z0 = svld1_s16_x4 (pn8, x0),
+ z0 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_s16_index:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_index, svint16x4_t, int16_t,
+ z0 = svld1_s16_x4 (pn8, x0 + x1),
+ z0 = svld1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s16_1:
+** incb x0
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_1, svint16x4_t, int16_t,
+ z0 = svld1_s16_x4 (pn8, x0 + svcnth ()),
+ z0 = svld1_x4 (pn8, x0 + svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s16_2:
+** incb x0, all, mul #2
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_2, svint16x4_t, int16_t,
+ z0 = svld1_s16_x4 (pn8, x0 + svcnth () * 2),
+ z0 = svld1_x4 (pn8, x0 + svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s16_3:
+** incb x0, all, mul #3
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_3, svint16x4_t, int16_t,
+ z0 = svld1_s16_x4 (pn8, x0 + svcnth () * 3),
+ z0 = svld1_x4 (pn8, x0 + svcnth () * 3))
+
+/*
+** ld1_s16_4:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_4, svint16x4_t, int16_t,
+ z0 = svld1_s16_x4 (pn8, x0 + svcnth () * 4),
+ z0 = svld1_x4 (pn8, x0 + svcnth () * 4))
+
+/*
+** ld1_s16_28:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_28, svint16x4_t, int16_t,
+ z0 = svld1_s16_x4 (pn8, x0 + svcnth () * 28),
+ z0 = svld1_x4 (pn8, x0 + svcnth () * 28))
+
+/*
+** ld1_s16_32:
+** [^{]*
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_32, svint16x4_t, int16_t,
+ z0 = svld1_s16_x4 (pn8, x0 + svcnth () * 32),
+ z0 = svld1_x4 (pn8, x0 + svcnth () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s16_m1:
+** decb x0
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_m1, svint16x4_t, int16_t,
+ z0 = svld1_s16_x4 (pn8, x0 - svcnth ()),
+ z0 = svld1_x4 (pn8, x0 - svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s16_m2:
+** decb x0, all, mul #2
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_m2, svint16x4_t, int16_t,
+ z0 = svld1_s16_x4 (pn8, x0 - svcnth () * 2),
+ z0 = svld1_x4 (pn8, x0 - svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s16_m3:
+** decb x0, all, mul #3
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_m3, svint16x4_t, int16_t,
+ z0 = svld1_s16_x4 (pn8, x0 - svcnth () * 3),
+ z0 = svld1_x4 (pn8, x0 - svcnth () * 3))
+
+/*
+** ld1_s16_m4:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_m4, svint16x4_t, int16_t,
+ z0 = svld1_s16_x4 (pn8, x0 - svcnth () * 4),
+ z0 = svld1_x4 (pn8, x0 - svcnth () * 4))
+
+/*
+** ld1_s16_m32:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_m32, svint16x4_t, int16_t,
+ z0 = svld1_s16_x4 (pn8, x0 - svcnth () * 32),
+ z0 = svld1_x4 (pn8, x0 - svcnth () * 32))
+
+/*
+** ld1_s16_m36:
+** [^{]*
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_m36, svint16x4_t, int16_t,
+ z0 = svld1_s16_x4 (pn8, x0 - svcnth () * 36),
+ z0 = svld1_x4 (pn8, x0 - svcnth () * 36))
+
+/*
+** ld1_s16_z17:
+** ld1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_z17, svint16x4_t, int16_t,
+ z17 = svld1_s16_x4 (pn8, x0),
+ z17 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_s16_z22:
+** ld1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_z22, svint16x4_t, int16_t,
+ z22 = svld1_s16_x4 (pn8, x0),
+ z22 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_s16_z28:
+** ld1h {z28\.h(?: - |, )z31\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_z28, svint16x4_t, int16_t,
+ z28 = svld1_s16_x4 (pn8, x0),
+ z28 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_s16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1h {z0\.h(?: - |, )z3\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_pn0, svint16x4_t, int16_t,
+ z0 = svld1_s16_x4 (pn0, x0),
+ z0 = svld1_x4 (pn0, x0))
+
+/*
+** ld1_s16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1h {z0\.h(?: - |, )z3\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_pn7, svint16x4_t, int16_t,
+ z0 = svld1_s16_x4 (pn7, x0),
+ z0 = svld1_x4 (pn7, x0))
+
+/*
+** ld1_s16_pn15:
+** ld1h {z0\.h(?: - |, )z3\.h}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s16_pn15, svint16x4_t, int16_t,
+ z0 = svld1_s16_x4 (pn15, x0),
+ z0 = svld1_x4 (pn15, x0))
+
+/*
+** ld1_vnum_s16_0:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_0, svint16x4_t, int16_t,
+ z0 = svld1_vnum_s16_x4 (pn8, x0, 0),
+ z0 = svld1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s16_1:
+** incb x0
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_1, svint16x4_t, int16_t,
+ z0 = svld1_vnum_s16_x4 (pn8, x0, 1),
+ z0 = svld1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s16_2:
+** incb x0, all, mul #2
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_2, svint16x4_t, int16_t,
+ z0 = svld1_vnum_s16_x4 (pn8, x0, 2),
+ z0 = svld1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s16_3:
+** incb x0, all, mul #3
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_3, svint16x4_t, int16_t,
+ z0 = svld1_vnum_s16_x4 (pn8, x0, 3),
+ z0 = svld1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ld1_vnum_s16_4:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_4, svint16x4_t, int16_t,
+ z0 = svld1_vnum_s16_x4 (pn8, x0, 4),
+ z0 = svld1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ld1_vnum_s16_28:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_28, svint16x4_t, int16_t,
+ z0 = svld1_vnum_s16_x4 (pn8, x0, 28),
+ z0 = svld1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ld1_vnum_s16_32:
+** [^{]*
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_32, svint16x4_t, int16_t,
+ z0 = svld1_vnum_s16_x4 (pn8, x0, 32),
+ z0 = svld1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s16_m1:
+** decb x0
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_m1, svint16x4_t, int16_t,
+ z0 = svld1_vnum_s16_x4 (pn8, x0, -1),
+ z0 = svld1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s16_m2:
+** decb x0, all, mul #2
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_m2, svint16x4_t, int16_t,
+ z0 = svld1_vnum_s16_x4 (pn8, x0, -2),
+ z0 = svld1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s16_m3:
+** decb x0, all, mul #3
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_m3, svint16x4_t, int16_t,
+ z0 = svld1_vnum_s16_x4 (pn8, x0, -3),
+ z0 = svld1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ld1_vnum_s16_m4:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_m4, svint16x4_t, int16_t,
+ z0 = svld1_vnum_s16_x4 (pn8, x0, -4),
+ z0 = svld1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ld1_vnum_s16_m32:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_m32, svint16x4_t, int16_t,
+ z0 = svld1_vnum_s16_x4 (pn8, x0, -32),
+ z0 = svld1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ld1_vnum_s16_m36:
+** [^{]*
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_m36, svint16x4_t, int16_t,
+ z0 = svld1_vnum_s16_x4 (pn8, x0, -36),
+ z0 = svld1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ld1_vnum_s16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1h {z0\.h - z3\.h}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s16_x1, svint16x4_t, int16_t,
+ z0 = svld1_vnum_s16_x4 (pn8, x0, x1),
+ z0 = svld1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s32_x2.c
new file mode 100644
index 0000000..884a0ca
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s32_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_s32_base:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_base, svint32x2_t, int32_t,
+ z0 = svld1_s32_x2 (pn8, x0),
+ z0 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_s32_index:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_index, svint32x2_t, int32_t,
+ z0 = svld1_s32_x2 (pn8, x0 + x1),
+ z0 = svld1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s32_1:
+** incb x0
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_1, svint32x2_t, int32_t,
+ z0 = svld1_s32_x2 (pn8, x0 + svcntw ()),
+ z0 = svld1_x2 (pn8, x0 + svcntw ()))
+
+/*
+** ld1_s32_2:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_2, svint32x2_t, int32_t,
+ z0 = svld1_s32_x2 (pn8, x0 + svcntw () * 2),
+ z0 = svld1_x2 (pn8, x0 + svcntw () * 2))
+
+/*
+** ld1_s32_14:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_14, svint32x2_t, int32_t,
+ z0 = svld1_s32_x2 (pn8, x0 + svcntw () * 14),
+ z0 = svld1_x2 (pn8, x0 + svcntw () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s32_16:
+** incb x0, all, mul #16
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_16, svint32x2_t, int32_t,
+ z0 = svld1_s32_x2 (pn8, x0 + svcntw () * 16),
+ z0 = svld1_x2 (pn8, x0 + svcntw () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s32_m1:
+** decb x0
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_m1, svint32x2_t, int32_t,
+ z0 = svld1_s32_x2 (pn8, x0 - svcntw ()),
+ z0 = svld1_x2 (pn8, x0 - svcntw ()))
+
+/*
+** ld1_s32_m2:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_m2, svint32x2_t, int32_t,
+ z0 = svld1_s32_x2 (pn8, x0 - svcntw () * 2),
+ z0 = svld1_x2 (pn8, x0 - svcntw () * 2))
+
+/*
+** ld1_s32_m16:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_m16, svint32x2_t, int32_t,
+ z0 = svld1_s32_x2 (pn8, x0 - svcntw () * 16),
+ z0 = svld1_x2 (pn8, x0 - svcntw () * 16))
+
+/*
+** ld1_s32_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_m18, svint32x2_t, int32_t,
+ z0 = svld1_s32_x2 (pn8, x0 - svcntw () * 18),
+ z0 = svld1_x2 (pn8, x0 - svcntw () * 18))
+
+/*
+** ld1_s32_z17:
+** ld1w {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_z17, svint32x2_t, int32_t,
+ z17 = svld1_s32_x2 (pn8, x0),
+ z17 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_s32_z22:
+** ld1w {z22\.s(?: - |, )z23\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_z22, svint32x2_t, int32_t,
+ z22 = svld1_s32_x2 (pn8, x0),
+ z22 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_s32_z28:
+** ld1w {z28\.s(?: - |, )z29\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_z28, svint32x2_t, int32_t,
+ z28 = svld1_s32_x2 (pn8, x0),
+ z28 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_s32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1w {z0\.s(?: - |, )z1\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_pn0, svint32x2_t, int32_t,
+ z0 = svld1_s32_x2 (pn0, x0),
+ z0 = svld1_x2 (pn0, x0))
+
+/*
+** ld1_s32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1w {z0\.s(?: - |, )z1\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_pn7, svint32x2_t, int32_t,
+ z0 = svld1_s32_x2 (pn7, x0),
+ z0 = svld1_x2 (pn7, x0))
+
+/*
+** ld1_s32_pn15:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_pn15, svint32x2_t, int32_t,
+ z0 = svld1_s32_x2 (pn15, x0),
+ z0 = svld1_x2 (pn15, x0))
+
+/*
+** ld1_vnum_s32_0:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_0, svint32x2_t, int32_t,
+ z0 = svld1_vnum_s32_x2 (pn8, x0, 0),
+ z0 = svld1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s32_1:
+** incb x0
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_1, svint32x2_t, int32_t,
+ z0 = svld1_vnum_s32_x2 (pn8, x0, 1),
+ z0 = svld1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ld1_vnum_s32_2:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_2, svint32x2_t, int32_t,
+ z0 = svld1_vnum_s32_x2 (pn8, x0, 2),
+ z0 = svld1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ld1_vnum_s32_14:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_14, svint32x2_t, int32_t,
+ z0 = svld1_vnum_s32_x2 (pn8, x0, 14),
+ z0 = svld1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s32_16:
+** incb x0, all, mul #16
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_16, svint32x2_t, int32_t,
+ z0 = svld1_vnum_s32_x2 (pn8, x0, 16),
+ z0 = svld1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s32_m1:
+** decb x0
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_m1, svint32x2_t, int32_t,
+ z0 = svld1_vnum_s32_x2 (pn8, x0, -1),
+ z0 = svld1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ld1_vnum_s32_m2:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_m2, svint32x2_t, int32_t,
+ z0 = svld1_vnum_s32_x2 (pn8, x0, -2),
+ z0 = svld1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ld1_vnum_s32_m16:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_m16, svint32x2_t, int32_t,
+ z0 = svld1_vnum_s32_x2 (pn8, x0, -16),
+ z0 = svld1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ld1_vnum_s32_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_m18, svint32x2_t, int32_t,
+ z0 = svld1_vnum_s32_x2 (pn8, x0, -18),
+ z0 = svld1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ld1_vnum_s32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_x1, svint32x2_t, int32_t,
+ z0 = svld1_vnum_s32_x2 (pn8, x0, x1),
+ z0 = svld1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s32_x4.c
new file mode 100644
index 0000000..372edc3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s32_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_s32_base:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_base, svint32x4_t, int32_t,
+ z0 = svld1_s32_x4 (pn8, x0),
+ z0 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_s32_index:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_index, svint32x4_t, int32_t,
+ z0 = svld1_s32_x4 (pn8, x0 + x1),
+ z0 = svld1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s32_1:
+** incb x0
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_1, svint32x4_t, int32_t,
+ z0 = svld1_s32_x4 (pn8, x0 + svcntw ()),
+ z0 = svld1_x4 (pn8, x0 + svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s32_2:
+** incb x0, all, mul #2
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_2, svint32x4_t, int32_t,
+ z0 = svld1_s32_x4 (pn8, x0 + svcntw () * 2),
+ z0 = svld1_x4 (pn8, x0 + svcntw () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s32_3:
+** incb x0, all, mul #3
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_3, svint32x4_t, int32_t,
+ z0 = svld1_s32_x4 (pn8, x0 + svcntw () * 3),
+ z0 = svld1_x4 (pn8, x0 + svcntw () * 3))
+
+/*
+** ld1_s32_4:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_4, svint32x4_t, int32_t,
+ z0 = svld1_s32_x4 (pn8, x0 + svcntw () * 4),
+ z0 = svld1_x4 (pn8, x0 + svcntw () * 4))
+
+/*
+** ld1_s32_28:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_28, svint32x4_t, int32_t,
+ z0 = svld1_s32_x4 (pn8, x0 + svcntw () * 28),
+ z0 = svld1_x4 (pn8, x0 + svcntw () * 28))
+
+/*
+** ld1_s32_32:
+** [^{]*
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_32, svint32x4_t, int32_t,
+ z0 = svld1_s32_x4 (pn8, x0 + svcntw () * 32),
+ z0 = svld1_x4 (pn8, x0 + svcntw () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s32_m1:
+** decb x0
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_m1, svint32x4_t, int32_t,
+ z0 = svld1_s32_x4 (pn8, x0 - svcntw ()),
+ z0 = svld1_x4 (pn8, x0 - svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s32_m2:
+** decb x0, all, mul #2
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_m2, svint32x4_t, int32_t,
+ z0 = svld1_s32_x4 (pn8, x0 - svcntw () * 2),
+ z0 = svld1_x4 (pn8, x0 - svcntw () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s32_m3:
+** decb x0, all, mul #3
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_m3, svint32x4_t, int32_t,
+ z0 = svld1_s32_x4 (pn8, x0 - svcntw () * 3),
+ z0 = svld1_x4 (pn8, x0 - svcntw () * 3))
+
+/*
+** ld1_s32_m4:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_m4, svint32x4_t, int32_t,
+ z0 = svld1_s32_x4 (pn8, x0 - svcntw () * 4),
+ z0 = svld1_x4 (pn8, x0 - svcntw () * 4))
+
+/*
+** ld1_s32_m32:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_m32, svint32x4_t, int32_t,
+ z0 = svld1_s32_x4 (pn8, x0 - svcntw () * 32),
+ z0 = svld1_x4 (pn8, x0 - svcntw () * 32))
+
+/*
+** ld1_s32_m36:
+** [^{]*
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_m36, svint32x4_t, int32_t,
+ z0 = svld1_s32_x4 (pn8, x0 - svcntw () * 36),
+ z0 = svld1_x4 (pn8, x0 - svcntw () * 36))
+
+/*
+** ld1_s32_z17:
+** ld1w {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_z17, svint32x4_t, int32_t,
+ z17 = svld1_s32_x4 (pn8, x0),
+ z17 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_s32_z22:
+** ld1w {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_z22, svint32x4_t, int32_t,
+ z22 = svld1_s32_x4 (pn8, x0),
+ z22 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_s32_z28:
+** ld1w {z28\.s(?: - |, )z31\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_z28, svint32x4_t, int32_t,
+ z28 = svld1_s32_x4 (pn8, x0),
+ z28 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_s32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1w {z0\.s(?: - |, )z3\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_pn0, svint32x4_t, int32_t,
+ z0 = svld1_s32_x4 (pn0, x0),
+ z0 = svld1_x4 (pn0, x0))
+
+/*
+** ld1_s32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1w {z0\.s(?: - |, )z3\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_pn7, svint32x4_t, int32_t,
+ z0 = svld1_s32_x4 (pn7, x0),
+ z0 = svld1_x4 (pn7, x0))
+
+/*
+** ld1_s32_pn15:
+** ld1w {z0\.s(?: - |, )z3\.s}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s32_pn15, svint32x4_t, int32_t,
+ z0 = svld1_s32_x4 (pn15, x0),
+ z0 = svld1_x4 (pn15, x0))
+
+/*
+** ld1_vnum_s32_0:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_0, svint32x4_t, int32_t,
+ z0 = svld1_vnum_s32_x4 (pn8, x0, 0),
+ z0 = svld1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s32_1:
+** incb x0
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_1, svint32x4_t, int32_t,
+ z0 = svld1_vnum_s32_x4 (pn8, x0, 1),
+ z0 = svld1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s32_2:
+** incb x0, all, mul #2
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_2, svint32x4_t, int32_t,
+ z0 = svld1_vnum_s32_x4 (pn8, x0, 2),
+ z0 = svld1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s32_3:
+** incb x0, all, mul #3
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_3, svint32x4_t, int32_t,
+ z0 = svld1_vnum_s32_x4 (pn8, x0, 3),
+ z0 = svld1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ld1_vnum_s32_4:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_4, svint32x4_t, int32_t,
+ z0 = svld1_vnum_s32_x4 (pn8, x0, 4),
+ z0 = svld1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ld1_vnum_s32_28:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_28, svint32x4_t, int32_t,
+ z0 = svld1_vnum_s32_x4 (pn8, x0, 28),
+ z0 = svld1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ld1_vnum_s32_32:
+** [^{]*
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_32, svint32x4_t, int32_t,
+ z0 = svld1_vnum_s32_x4 (pn8, x0, 32),
+ z0 = svld1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s32_m1:
+** decb x0
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_m1, svint32x4_t, int32_t,
+ z0 = svld1_vnum_s32_x4 (pn8, x0, -1),
+ z0 = svld1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s32_m2:
+** decb x0, all, mul #2
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_m2, svint32x4_t, int32_t,
+ z0 = svld1_vnum_s32_x4 (pn8, x0, -2),
+ z0 = svld1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s32_m3:
+** decb x0, all, mul #3
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_m3, svint32x4_t, int32_t,
+ z0 = svld1_vnum_s32_x4 (pn8, x0, -3),
+ z0 = svld1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ld1_vnum_s32_m4:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_m4, svint32x4_t, int32_t,
+ z0 = svld1_vnum_s32_x4 (pn8, x0, -4),
+ z0 = svld1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ld1_vnum_s32_m32:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_m32, svint32x4_t, int32_t,
+ z0 = svld1_vnum_s32_x4 (pn8, x0, -32),
+ z0 = svld1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ld1_vnum_s32_m36:
+** [^{]*
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_m36, svint32x4_t, int32_t,
+ z0 = svld1_vnum_s32_x4 (pn8, x0, -36),
+ z0 = svld1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ld1_vnum_s32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1w {z0\.s - z3\.s}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s32_x1, svint32x4_t, int32_t,
+ z0 = svld1_vnum_s32_x4 (pn8, x0, x1),
+ z0 = svld1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s64_x2.c
new file mode 100644
index 0000000..70fe5b3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s64_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_s64_base:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_base, svint64x2_t, int64_t,
+ z0 = svld1_s64_x2 (pn8, x0),
+ z0 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_s64_index:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_index, svint64x2_t, int64_t,
+ z0 = svld1_s64_x2 (pn8, x0 + x1),
+ z0 = svld1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s64_1:
+** incb x0
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_1, svint64x2_t, int64_t,
+ z0 = svld1_s64_x2 (pn8, x0 + svcntd ()),
+ z0 = svld1_x2 (pn8, x0 + svcntd ()))
+
+/*
+** ld1_s64_2:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_2, svint64x2_t, int64_t,
+ z0 = svld1_s64_x2 (pn8, x0 + svcntd () * 2),
+ z0 = svld1_x2 (pn8, x0 + svcntd () * 2))
+
+/*
+** ld1_s64_14:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_14, svint64x2_t, int64_t,
+ z0 = svld1_s64_x2 (pn8, x0 + svcntd () * 14),
+ z0 = svld1_x2 (pn8, x0 + svcntd () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s64_16:
+** incb x0, all, mul #16
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_16, svint64x2_t, int64_t,
+ z0 = svld1_s64_x2 (pn8, x0 + svcntd () * 16),
+ z0 = svld1_x2 (pn8, x0 + svcntd () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s64_m1:
+** decb x0
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_m1, svint64x2_t, int64_t,
+ z0 = svld1_s64_x2 (pn8, x0 - svcntd ()),
+ z0 = svld1_x2 (pn8, x0 - svcntd ()))
+
+/*
+** ld1_s64_m2:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_m2, svint64x2_t, int64_t,
+ z0 = svld1_s64_x2 (pn8, x0 - svcntd () * 2),
+ z0 = svld1_x2 (pn8, x0 - svcntd () * 2))
+
+/*
+** ld1_s64_m16:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_m16, svint64x2_t, int64_t,
+ z0 = svld1_s64_x2 (pn8, x0 - svcntd () * 16),
+ z0 = svld1_x2 (pn8, x0 - svcntd () * 16))
+
+/*
+** ld1_s64_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_m18, svint64x2_t, int64_t,
+ z0 = svld1_s64_x2 (pn8, x0 - svcntd () * 18),
+ z0 = svld1_x2 (pn8, x0 - svcntd () * 18))
+
+/*
+** ld1_s64_z17:
+** ld1d {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_z17, svint64x2_t, int64_t,
+ z17 = svld1_s64_x2 (pn8, x0),
+ z17 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_s64_z22:
+** ld1d {z22\.d(?: - |, )z23\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_z22, svint64x2_t, int64_t,
+ z22 = svld1_s64_x2 (pn8, x0),
+ z22 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_s64_z28:
+** ld1d {z28\.d(?: - |, )z29\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_z28, svint64x2_t, int64_t,
+ z28 = svld1_s64_x2 (pn8, x0),
+ z28 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_s64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1d {z0\.d(?: - |, )z1\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_pn0, svint64x2_t, int64_t,
+ z0 = svld1_s64_x2 (pn0, x0),
+ z0 = svld1_x2 (pn0, x0))
+
+/*
+** ld1_s64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1d {z0\.d(?: - |, )z1\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_pn7, svint64x2_t, int64_t,
+ z0 = svld1_s64_x2 (pn7, x0),
+ z0 = svld1_x2 (pn7, x0))
+
+/*
+** ld1_s64_pn15:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_pn15, svint64x2_t, int64_t,
+ z0 = svld1_s64_x2 (pn15, x0),
+ z0 = svld1_x2 (pn15, x0))
+
+/*
+** ld1_vnum_s64_0:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_0, svint64x2_t, int64_t,
+ z0 = svld1_vnum_s64_x2 (pn8, x0, 0),
+ z0 = svld1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s64_1:
+** incb x0
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_1, svint64x2_t, int64_t,
+ z0 = svld1_vnum_s64_x2 (pn8, x0, 1),
+ z0 = svld1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ld1_vnum_s64_2:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_2, svint64x2_t, int64_t,
+ z0 = svld1_vnum_s64_x2 (pn8, x0, 2),
+ z0 = svld1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ld1_vnum_s64_14:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_14, svint64x2_t, int64_t,
+ z0 = svld1_vnum_s64_x2 (pn8, x0, 14),
+ z0 = svld1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s64_16:
+** incb x0, all, mul #16
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_16, svint64x2_t, int64_t,
+ z0 = svld1_vnum_s64_x2 (pn8, x0, 16),
+ z0 = svld1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s64_m1:
+** decb x0
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_m1, svint64x2_t, int64_t,
+ z0 = svld1_vnum_s64_x2 (pn8, x0, -1),
+ z0 = svld1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ld1_vnum_s64_m2:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_m2, svint64x2_t, int64_t,
+ z0 = svld1_vnum_s64_x2 (pn8, x0, -2),
+ z0 = svld1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ld1_vnum_s64_m16:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_m16, svint64x2_t, int64_t,
+ z0 = svld1_vnum_s64_x2 (pn8, x0, -16),
+ z0 = svld1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ld1_vnum_s64_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_m18, svint64x2_t, int64_t,
+ z0 = svld1_vnum_s64_x2 (pn8, x0, -18),
+ z0 = svld1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ld1_vnum_s64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_x1, svint64x2_t, int64_t,
+ z0 = svld1_vnum_s64_x2 (pn8, x0, x1),
+ z0 = svld1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s64_x4.c
new file mode 100644
index 0000000..4f6f8b2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s64_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_s64_base:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_base, svint64x4_t, int64_t,
+ z0 = svld1_s64_x4 (pn8, x0),
+ z0 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_s64_index:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_index, svint64x4_t, int64_t,
+ z0 = svld1_s64_x4 (pn8, x0 + x1),
+ z0 = svld1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s64_1:
+** incb x0
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_1, svint64x4_t, int64_t,
+ z0 = svld1_s64_x4 (pn8, x0 + svcntd ()),
+ z0 = svld1_x4 (pn8, x0 + svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s64_2:
+** incb x0, all, mul #2
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_2, svint64x4_t, int64_t,
+ z0 = svld1_s64_x4 (pn8, x0 + svcntd () * 2),
+ z0 = svld1_x4 (pn8, x0 + svcntd () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s64_3:
+** incb x0, all, mul #3
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_3, svint64x4_t, int64_t,
+ z0 = svld1_s64_x4 (pn8, x0 + svcntd () * 3),
+ z0 = svld1_x4 (pn8, x0 + svcntd () * 3))
+
+/*
+** ld1_s64_4:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_4, svint64x4_t, int64_t,
+ z0 = svld1_s64_x4 (pn8, x0 + svcntd () * 4),
+ z0 = svld1_x4 (pn8, x0 + svcntd () * 4))
+
+/*
+** ld1_s64_28:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_28, svint64x4_t, int64_t,
+ z0 = svld1_s64_x4 (pn8, x0 + svcntd () * 28),
+ z0 = svld1_x4 (pn8, x0 + svcntd () * 28))
+
+/*
+** ld1_s64_32:
+** [^{]*
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_32, svint64x4_t, int64_t,
+ z0 = svld1_s64_x4 (pn8, x0 + svcntd () * 32),
+ z0 = svld1_x4 (pn8, x0 + svcntd () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s64_m1:
+** decb x0
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_m1, svint64x4_t, int64_t,
+ z0 = svld1_s64_x4 (pn8, x0 - svcntd ()),
+ z0 = svld1_x4 (pn8, x0 - svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s64_m2:
+** decb x0, all, mul #2
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_m2, svint64x4_t, int64_t,
+ z0 = svld1_s64_x4 (pn8, x0 - svcntd () * 2),
+ z0 = svld1_x4 (pn8, x0 - svcntd () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s64_m3:
+** decb x0, all, mul #3
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_m3, svint64x4_t, int64_t,
+ z0 = svld1_s64_x4 (pn8, x0 - svcntd () * 3),
+ z0 = svld1_x4 (pn8, x0 - svcntd () * 3))
+
+/*
+** ld1_s64_m4:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_m4, svint64x4_t, int64_t,
+ z0 = svld1_s64_x4 (pn8, x0 - svcntd () * 4),
+ z0 = svld1_x4 (pn8, x0 - svcntd () * 4))
+
+/*
+** ld1_s64_m32:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_m32, svint64x4_t, int64_t,
+ z0 = svld1_s64_x4 (pn8, x0 - svcntd () * 32),
+ z0 = svld1_x4 (pn8, x0 - svcntd () * 32))
+
+/*
+** ld1_s64_m36:
+** [^{]*
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_m36, svint64x4_t, int64_t,
+ z0 = svld1_s64_x4 (pn8, x0 - svcntd () * 36),
+ z0 = svld1_x4 (pn8, x0 - svcntd () * 36))
+
+/*
+** ld1_s64_z17:
+** ld1d {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_z17, svint64x4_t, int64_t,
+ z17 = svld1_s64_x4 (pn8, x0),
+ z17 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_s64_z22:
+** ld1d {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_z22, svint64x4_t, int64_t,
+ z22 = svld1_s64_x4 (pn8, x0),
+ z22 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_s64_z28:
+** ld1d {z28\.d(?: - |, )z31\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_z28, svint64x4_t, int64_t,
+ z28 = svld1_s64_x4 (pn8, x0),
+ z28 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_s64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1d {z0\.d(?: - |, )z3\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_pn0, svint64x4_t, int64_t,
+ z0 = svld1_s64_x4 (pn0, x0),
+ z0 = svld1_x4 (pn0, x0))
+
+/*
+** ld1_s64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1d {z0\.d(?: - |, )z3\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_pn7, svint64x4_t, int64_t,
+ z0 = svld1_s64_x4 (pn7, x0),
+ z0 = svld1_x4 (pn7, x0))
+
+/*
+** ld1_s64_pn15:
+** ld1d {z0\.d(?: - |, )z3\.d}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s64_pn15, svint64x4_t, int64_t,
+ z0 = svld1_s64_x4 (pn15, x0),
+ z0 = svld1_x4 (pn15, x0))
+
+/*
+** ld1_vnum_s64_0:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_0, svint64x4_t, int64_t,
+ z0 = svld1_vnum_s64_x4 (pn8, x0, 0),
+ z0 = svld1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s64_1:
+** incb x0
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_1, svint64x4_t, int64_t,
+ z0 = svld1_vnum_s64_x4 (pn8, x0, 1),
+ z0 = svld1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s64_2:
+** incb x0, all, mul #2
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_2, svint64x4_t, int64_t,
+ z0 = svld1_vnum_s64_x4 (pn8, x0, 2),
+ z0 = svld1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s64_3:
+** incb x0, all, mul #3
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_3, svint64x4_t, int64_t,
+ z0 = svld1_vnum_s64_x4 (pn8, x0, 3),
+ z0 = svld1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ld1_vnum_s64_4:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_4, svint64x4_t, int64_t,
+ z0 = svld1_vnum_s64_x4 (pn8, x0, 4),
+ z0 = svld1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ld1_vnum_s64_28:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_28, svint64x4_t, int64_t,
+ z0 = svld1_vnum_s64_x4 (pn8, x0, 28),
+ z0 = svld1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ld1_vnum_s64_32:
+** [^{]*
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_32, svint64x4_t, int64_t,
+ z0 = svld1_vnum_s64_x4 (pn8, x0, 32),
+ z0 = svld1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s64_m1:
+** decb x0
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_m1, svint64x4_t, int64_t,
+ z0 = svld1_vnum_s64_x4 (pn8, x0, -1),
+ z0 = svld1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s64_m2:
+** decb x0, all, mul #2
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_m2, svint64x4_t, int64_t,
+ z0 = svld1_vnum_s64_x4 (pn8, x0, -2),
+ z0 = svld1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s64_m3:
+** decb x0, all, mul #3
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_m3, svint64x4_t, int64_t,
+ z0 = svld1_vnum_s64_x4 (pn8, x0, -3),
+ z0 = svld1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ld1_vnum_s64_m4:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_m4, svint64x4_t, int64_t,
+ z0 = svld1_vnum_s64_x4 (pn8, x0, -4),
+ z0 = svld1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ld1_vnum_s64_m32:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_m32, svint64x4_t, int64_t,
+ z0 = svld1_vnum_s64_x4 (pn8, x0, -32),
+ z0 = svld1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ld1_vnum_s64_m36:
+** [^{]*
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_m36, svint64x4_t, int64_t,
+ z0 = svld1_vnum_s64_x4 (pn8, x0, -36),
+ z0 = svld1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ld1_vnum_s64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1d {z0\.d - z3\.d}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s64_x1, svint64x4_t, int64_t,
+ z0 = svld1_vnum_s64_x4 (pn8, x0, x1),
+ z0 = svld1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s8_x2.c
new file mode 100644
index 0000000..b18daa4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s8_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_s8_base:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_base, svint8x2_t, int8_t,
+ z0 = svld1_s8_x2 (pn8, x0),
+ z0 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_s8_index:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, x1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_index, svint8x2_t, int8_t,
+ z0 = svld1_s8_x2 (pn8, x0 + x1),
+ z0 = svld1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s8_1:
+** incb x0
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_1, svint8x2_t, int8_t,
+ z0 = svld1_s8_x2 (pn8, x0 + svcntb ()),
+ z0 = svld1_x2 (pn8, x0 + svcntb ()))
+
+/*
+** ld1_s8_2:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_2, svint8x2_t, int8_t,
+ z0 = svld1_s8_x2 (pn8, x0 + svcntb () * 2),
+ z0 = svld1_x2 (pn8, x0 + svcntb () * 2))
+
+/*
+** ld1_s8_14:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_14, svint8x2_t, int8_t,
+ z0 = svld1_s8_x2 (pn8, x0 + svcntb () * 14),
+ z0 = svld1_x2 (pn8, x0 + svcntb () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s8_16:
+** incb x0, all, mul #16
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_16, svint8x2_t, int8_t,
+ z0 = svld1_s8_x2 (pn8, x0 + svcntb () * 16),
+ z0 = svld1_x2 (pn8, x0 + svcntb () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s8_m1:
+** decb x0
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_m1, svint8x2_t, int8_t,
+ z0 = svld1_s8_x2 (pn8, x0 - svcntb ()),
+ z0 = svld1_x2 (pn8, x0 - svcntb ()))
+
+/*
+** ld1_s8_m2:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_m2, svint8x2_t, int8_t,
+ z0 = svld1_s8_x2 (pn8, x0 - svcntb () * 2),
+ z0 = svld1_x2 (pn8, x0 - svcntb () * 2))
+
+/*
+** ld1_s8_m16:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_m16, svint8x2_t, int8_t,
+ z0 = svld1_s8_x2 (pn8, x0 - svcntb () * 16),
+ z0 = svld1_x2 (pn8, x0 - svcntb () * 16))
+
+/*
+** ld1_s8_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_m18, svint8x2_t, int8_t,
+ z0 = svld1_s8_x2 (pn8, x0 - svcntb () * 18),
+ z0 = svld1_x2 (pn8, x0 - svcntb () * 18))
+
+/*
+** ld1_s8_z17:
+** ld1b {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_z17, svint8x2_t, int8_t,
+ z17 = svld1_s8_x2 (pn8, x0),
+ z17 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_s8_z22:
+** ld1b {z22\.b(?: - |, )z23\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_z22, svint8x2_t, int8_t,
+ z22 = svld1_s8_x2 (pn8, x0),
+ z22 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_s8_z28:
+** ld1b {z28\.b(?: - |, )z29\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_z28, svint8x2_t, int8_t,
+ z28 = svld1_s8_x2 (pn8, x0),
+ z28 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_s8_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1b {z0\.b(?: - |, )z1\.b}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_pn0, svint8x2_t, int8_t,
+ z0 = svld1_s8_x2 (pn0, x0),
+ z0 = svld1_x2 (pn0, x0))
+
+/*
+** ld1_s8_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1b {z0\.b(?: - |, )z1\.b}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_pn7, svint8x2_t, int8_t,
+ z0 = svld1_s8_x2 (pn7, x0),
+ z0 = svld1_x2 (pn7, x0))
+
+/*
+** ld1_s8_pn15:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_pn15, svint8x2_t, int8_t,
+ z0 = svld1_s8_x2 (pn15, x0),
+ z0 = svld1_x2 (pn15, x0))
+
+/*
+** ld1_vnum_s8_0:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_0, svint8x2_t, int8_t,
+ z0 = svld1_vnum_s8_x2 (pn8, x0, 0),
+ z0 = svld1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s8_1:
+** incb x0
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_1, svint8x2_t, int8_t,
+ z0 = svld1_vnum_s8_x2 (pn8, x0, 1),
+ z0 = svld1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ld1_vnum_s8_2:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_2, svint8x2_t, int8_t,
+ z0 = svld1_vnum_s8_x2 (pn8, x0, 2),
+ z0 = svld1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ld1_vnum_s8_14:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_14, svint8x2_t, int8_t,
+ z0 = svld1_vnum_s8_x2 (pn8, x0, 14),
+ z0 = svld1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s8_16:
+** incb x0, all, mul #16
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_16, svint8x2_t, int8_t,
+ z0 = svld1_vnum_s8_x2 (pn8, x0, 16),
+ z0 = svld1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s8_m1:
+** decb x0
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_m1, svint8x2_t, int8_t,
+ z0 = svld1_vnum_s8_x2 (pn8, x0, -1),
+ z0 = svld1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ld1_vnum_s8_m2:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_m2, svint8x2_t, int8_t,
+ z0 = svld1_vnum_s8_x2 (pn8, x0, -2),
+ z0 = svld1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ld1_vnum_s8_m16:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_m16, svint8x2_t, int8_t,
+ z0 = svld1_vnum_s8_x2 (pn8, x0, -16),
+ z0 = svld1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ld1_vnum_s8_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_m18, svint8x2_t, int8_t,
+ z0 = svld1_vnum_s8_x2 (pn8, x0, -18),
+ z0 = svld1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ld1_vnum_s8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_x1, svint8x2_t, int8_t,
+ z0 = svld1_vnum_s8_x2 (pn8, x0, x1),
+ z0 = svld1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s8_x4.c
new file mode 100644
index 0000000..cbcc27a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_s8_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_s8_base:
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_base, svint8x4_t, int8_t,
+ z0 = svld1_s8_x4 (pn8, x0),
+ z0 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_s8_index:
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, x1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_index, svint8x4_t, int8_t,
+ z0 = svld1_s8_x4 (pn8, x0 + x1),
+ z0 = svld1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s8_1:
+** incb x0
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_1, svint8x4_t, int8_t,
+ z0 = svld1_s8_x4 (pn8, x0 + svcntb ()),
+ z0 = svld1_x4 (pn8, x0 + svcntb ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s8_2:
+** incb x0, all, mul #2
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_2, svint8x4_t, int8_t,
+ z0 = svld1_s8_x4 (pn8, x0 + svcntb () * 2),
+ z0 = svld1_x4 (pn8, x0 + svcntb () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s8_3:
+** incb x0, all, mul #3
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_3, svint8x4_t, int8_t,
+ z0 = svld1_s8_x4 (pn8, x0 + svcntb () * 3),
+ z0 = svld1_x4 (pn8, x0 + svcntb () * 3))
+
+/*
+** ld1_s8_4:
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_4, svint8x4_t, int8_t,
+ z0 = svld1_s8_x4 (pn8, x0 + svcntb () * 4),
+ z0 = svld1_x4 (pn8, x0 + svcntb () * 4))
+
+/*
+** ld1_s8_28:
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_28, svint8x4_t, int8_t,
+ z0 = svld1_s8_x4 (pn8, x0 + svcntb () * 28),
+ z0 = svld1_x4 (pn8, x0 + svcntb () * 28))
+
+/*
+** ld1_s8_32:
+** [^{]*
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_32, svint8x4_t, int8_t,
+ z0 = svld1_s8_x4 (pn8, x0 + svcntb () * 32),
+ z0 = svld1_x4 (pn8, x0 + svcntb () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s8_m1:
+** decb x0
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_m1, svint8x4_t, int8_t,
+ z0 = svld1_s8_x4 (pn8, x0 - svcntb ()),
+ z0 = svld1_x4 (pn8, x0 - svcntb ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s8_m2:
+** decb x0, all, mul #2
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_m2, svint8x4_t, int8_t,
+ z0 = svld1_s8_x4 (pn8, x0 - svcntb () * 2),
+ z0 = svld1_x4 (pn8, x0 - svcntb () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_s8_m3:
+** decb x0, all, mul #3
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_m3, svint8x4_t, int8_t,
+ z0 = svld1_s8_x4 (pn8, x0 - svcntb () * 3),
+ z0 = svld1_x4 (pn8, x0 - svcntb () * 3))
+
+/*
+** ld1_s8_m4:
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_m4, svint8x4_t, int8_t,
+ z0 = svld1_s8_x4 (pn8, x0 - svcntb () * 4),
+ z0 = svld1_x4 (pn8, x0 - svcntb () * 4))
+
+/*
+** ld1_s8_m32:
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_m32, svint8x4_t, int8_t,
+ z0 = svld1_s8_x4 (pn8, x0 - svcntb () * 32),
+ z0 = svld1_x4 (pn8, x0 - svcntb () * 32))
+
+/*
+** ld1_s8_m36:
+** [^{]*
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_m36, svint8x4_t, int8_t,
+ z0 = svld1_s8_x4 (pn8, x0 - svcntb () * 36),
+ z0 = svld1_x4 (pn8, x0 - svcntb () * 36))
+
+/*
+** ld1_s8_z17:
+** ld1b {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_z17, svint8x4_t, int8_t,
+ z17 = svld1_s8_x4 (pn8, x0),
+ z17 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_s8_z22:
+** ld1b {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_z22, svint8x4_t, int8_t,
+ z22 = svld1_s8_x4 (pn8, x0),
+ z22 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_s8_z28:
+** ld1b {z28\.b(?: - |, )z31\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_z28, svint8x4_t, int8_t,
+ z28 = svld1_s8_x4 (pn8, x0),
+ z28 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_s8_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1b {z0\.b(?: - |, )z3\.b}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_pn0, svint8x4_t, int8_t,
+ z0 = svld1_s8_x4 (pn0, x0),
+ z0 = svld1_x4 (pn0, x0))
+
+/*
+** ld1_s8_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1b {z0\.b(?: - |, )z3\.b}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_pn7, svint8x4_t, int8_t,
+ z0 = svld1_s8_x4 (pn7, x0),
+ z0 = svld1_x4 (pn7, x0))
+
+/*
+** ld1_s8_pn15:
+** ld1b {z0\.b(?: - |, )z3\.b}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_s8_pn15, svint8x4_t, int8_t,
+ z0 = svld1_s8_x4 (pn15, x0),
+ z0 = svld1_x4 (pn15, x0))
+
+/*
+** ld1_vnum_s8_0:
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_0, svint8x4_t, int8_t,
+ z0 = svld1_vnum_s8_x4 (pn8, x0, 0),
+ z0 = svld1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s8_1:
+** incb x0
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_1, svint8x4_t, int8_t,
+ z0 = svld1_vnum_s8_x4 (pn8, x0, 1),
+ z0 = svld1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s8_2:
+** incb x0, all, mul #2
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_2, svint8x4_t, int8_t,
+ z0 = svld1_vnum_s8_x4 (pn8, x0, 2),
+ z0 = svld1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s8_3:
+** incb x0, all, mul #3
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_3, svint8x4_t, int8_t,
+ z0 = svld1_vnum_s8_x4 (pn8, x0, 3),
+ z0 = svld1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ld1_vnum_s8_4:
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_4, svint8x4_t, int8_t,
+ z0 = svld1_vnum_s8_x4 (pn8, x0, 4),
+ z0 = svld1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ld1_vnum_s8_28:
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_28, svint8x4_t, int8_t,
+ z0 = svld1_vnum_s8_x4 (pn8, x0, 28),
+ z0 = svld1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ld1_vnum_s8_32:
+** [^{]*
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_32, svint8x4_t, int8_t,
+ z0 = svld1_vnum_s8_x4 (pn8, x0, 32),
+ z0 = svld1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s8_m1:
+** decb x0
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_m1, svint8x4_t, int8_t,
+ z0 = svld1_vnum_s8_x4 (pn8, x0, -1),
+ z0 = svld1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s8_m2:
+** decb x0, all, mul #2
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_m2, svint8x4_t, int8_t,
+ z0 = svld1_vnum_s8_x4 (pn8, x0, -2),
+ z0 = svld1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_s8_m3:
+** decb x0, all, mul #3
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_m3, svint8x4_t, int8_t,
+ z0 = svld1_vnum_s8_x4 (pn8, x0, -3),
+ z0 = svld1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ld1_vnum_s8_m4:
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_m4, svint8x4_t, int8_t,
+ z0 = svld1_vnum_s8_x4 (pn8, x0, -4),
+ z0 = svld1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ld1_vnum_s8_m32:
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_m32, svint8x4_t, int8_t,
+ z0 = svld1_vnum_s8_x4 (pn8, x0, -32),
+ z0 = svld1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ld1_vnum_s8_m36:
+** [^{]*
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_m36, svint8x4_t, int8_t,
+ z0 = svld1_vnum_s8_x4 (pn8, x0, -36),
+ z0 = svld1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ld1_vnum_s8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1b {z0\.b - z3\.b}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_s8_x1, svint8x4_t, int8_t,
+ z0 = svld1_vnum_s8_x4 (pn8, x0, x1),
+ z0 = svld1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u16_x2.c
new file mode 100644
index 0000000..8c15a76
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u16_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_u16_base:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_base, svuint16x2_t, uint16_t,
+ z0 = svld1_u16_x2 (pn8, x0),
+ z0 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_u16_index:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_index, svuint16x2_t, uint16_t,
+ z0 = svld1_u16_x2 (pn8, x0 + x1),
+ z0 = svld1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u16_1:
+** incb x0
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_1, svuint16x2_t, uint16_t,
+ z0 = svld1_u16_x2 (pn8, x0 + svcnth ()),
+ z0 = svld1_x2 (pn8, x0 + svcnth ()))
+
+/*
+** ld1_u16_2:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_2, svuint16x2_t, uint16_t,
+ z0 = svld1_u16_x2 (pn8, x0 + svcnth () * 2),
+ z0 = svld1_x2 (pn8, x0 + svcnth () * 2))
+
+/*
+** ld1_u16_14:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_14, svuint16x2_t, uint16_t,
+ z0 = svld1_u16_x2 (pn8, x0 + svcnth () * 14),
+ z0 = svld1_x2 (pn8, x0 + svcnth () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u16_16:
+** incb x0, all, mul #16
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_16, svuint16x2_t, uint16_t,
+ z0 = svld1_u16_x2 (pn8, x0 + svcnth () * 16),
+ z0 = svld1_x2 (pn8, x0 + svcnth () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u16_m1:
+** decb x0
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_m1, svuint16x2_t, uint16_t,
+ z0 = svld1_u16_x2 (pn8, x0 - svcnth ()),
+ z0 = svld1_x2 (pn8, x0 - svcnth ()))
+
+/*
+** ld1_u16_m2:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_m2, svuint16x2_t, uint16_t,
+ z0 = svld1_u16_x2 (pn8, x0 - svcnth () * 2),
+ z0 = svld1_x2 (pn8, x0 - svcnth () * 2))
+
+/*
+** ld1_u16_m16:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_m16, svuint16x2_t, uint16_t,
+ z0 = svld1_u16_x2 (pn8, x0 - svcnth () * 16),
+ z0 = svld1_x2 (pn8, x0 - svcnth () * 16))
+
+/*
+** ld1_u16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_m18, svuint16x2_t, uint16_t,
+ z0 = svld1_u16_x2 (pn8, x0 - svcnth () * 18),
+ z0 = svld1_x2 (pn8, x0 - svcnth () * 18))
+
+/*
+** ld1_u16_z17:
+** ld1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_z17, svuint16x2_t, uint16_t,
+ z17 = svld1_u16_x2 (pn8, x0),
+ z17 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_u16_z22:
+** ld1h {z22\.h(?: - |, )z23\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_z22, svuint16x2_t, uint16_t,
+ z22 = svld1_u16_x2 (pn8, x0),
+ z22 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_u16_z28:
+** ld1h {z28\.h(?: - |, )z29\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_z28, svuint16x2_t, uint16_t,
+ z28 = svld1_u16_x2 (pn8, x0),
+ z28 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_u16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1h {z0\.h(?: - |, )z1\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_pn0, svuint16x2_t, uint16_t,
+ z0 = svld1_u16_x2 (pn0, x0),
+ z0 = svld1_x2 (pn0, x0))
+
+/*
+** ld1_u16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1h {z0\.h(?: - |, )z1\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_pn7, svuint16x2_t, uint16_t,
+ z0 = svld1_u16_x2 (pn7, x0),
+ z0 = svld1_x2 (pn7, x0))
+
+/*
+** ld1_u16_pn15:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_pn15, svuint16x2_t, uint16_t,
+ z0 = svld1_u16_x2 (pn15, x0),
+ z0 = svld1_x2 (pn15, x0))
+
+/*
+** ld1_vnum_u16_0:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_0, svuint16x2_t, uint16_t,
+ z0 = svld1_vnum_u16_x2 (pn8, x0, 0),
+ z0 = svld1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u16_1:
+** incb x0
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_1, svuint16x2_t, uint16_t,
+ z0 = svld1_vnum_u16_x2 (pn8, x0, 1),
+ z0 = svld1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ld1_vnum_u16_2:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_2, svuint16x2_t, uint16_t,
+ z0 = svld1_vnum_u16_x2 (pn8, x0, 2),
+ z0 = svld1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ld1_vnum_u16_14:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_14, svuint16x2_t, uint16_t,
+ z0 = svld1_vnum_u16_x2 (pn8, x0, 14),
+ z0 = svld1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u16_16:
+** incb x0, all, mul #16
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_16, svuint16x2_t, uint16_t,
+ z0 = svld1_vnum_u16_x2 (pn8, x0, 16),
+ z0 = svld1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u16_m1:
+** decb x0
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_m1, svuint16x2_t, uint16_t,
+ z0 = svld1_vnum_u16_x2 (pn8, x0, -1),
+ z0 = svld1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ld1_vnum_u16_m2:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_m2, svuint16x2_t, uint16_t,
+ z0 = svld1_vnum_u16_x2 (pn8, x0, -2),
+ z0 = svld1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ld1_vnum_u16_m16:
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_m16, svuint16x2_t, uint16_t,
+ z0 = svld1_vnum_u16_x2 (pn8, x0, -16),
+ z0 = svld1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ld1_vnum_u16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_m18, svuint16x2_t, uint16_t,
+ z0 = svld1_vnum_u16_x2 (pn8, x0, -18),
+ z0 = svld1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ld1_vnum_u16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_x1, svuint16x2_t, uint16_t,
+ z0 = svld1_vnum_u16_x2 (pn8, x0, x1),
+ z0 = svld1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u16_x4.c
new file mode 100644
index 0000000..41ed814
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u16_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_u16_base:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_base, svuint16x4_t, uint16_t,
+ z0 = svld1_u16_x4 (pn8, x0),
+ z0 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_u16_index:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_index, svuint16x4_t, uint16_t,
+ z0 = svld1_u16_x4 (pn8, x0 + x1),
+ z0 = svld1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u16_1:
+** incb x0
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_1, svuint16x4_t, uint16_t,
+ z0 = svld1_u16_x4 (pn8, x0 + svcnth ()),
+ z0 = svld1_x4 (pn8, x0 + svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u16_2:
+** incb x0, all, mul #2
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_2, svuint16x4_t, uint16_t,
+ z0 = svld1_u16_x4 (pn8, x0 + svcnth () * 2),
+ z0 = svld1_x4 (pn8, x0 + svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u16_3:
+** incb x0, all, mul #3
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_3, svuint16x4_t, uint16_t,
+ z0 = svld1_u16_x4 (pn8, x0 + svcnth () * 3),
+ z0 = svld1_x4 (pn8, x0 + svcnth () * 3))
+
+/*
+** ld1_u16_4:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_4, svuint16x4_t, uint16_t,
+ z0 = svld1_u16_x4 (pn8, x0 + svcnth () * 4),
+ z0 = svld1_x4 (pn8, x0 + svcnth () * 4))
+
+/*
+** ld1_u16_28:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_28, svuint16x4_t, uint16_t,
+ z0 = svld1_u16_x4 (pn8, x0 + svcnth () * 28),
+ z0 = svld1_x4 (pn8, x0 + svcnth () * 28))
+
+/*
+** ld1_u16_32:
+** [^{]*
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_32, svuint16x4_t, uint16_t,
+ z0 = svld1_u16_x4 (pn8, x0 + svcnth () * 32),
+ z0 = svld1_x4 (pn8, x0 + svcnth () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u16_m1:
+** decb x0
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_m1, svuint16x4_t, uint16_t,
+ z0 = svld1_u16_x4 (pn8, x0 - svcnth ()),
+ z0 = svld1_x4 (pn8, x0 - svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u16_m2:
+** decb x0, all, mul #2
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_m2, svuint16x4_t, uint16_t,
+ z0 = svld1_u16_x4 (pn8, x0 - svcnth () * 2),
+ z0 = svld1_x4 (pn8, x0 - svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u16_m3:
+** decb x0, all, mul #3
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_m3, svuint16x4_t, uint16_t,
+ z0 = svld1_u16_x4 (pn8, x0 - svcnth () * 3),
+ z0 = svld1_x4 (pn8, x0 - svcnth () * 3))
+
+/*
+** ld1_u16_m4:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_m4, svuint16x4_t, uint16_t,
+ z0 = svld1_u16_x4 (pn8, x0 - svcnth () * 4),
+ z0 = svld1_x4 (pn8, x0 - svcnth () * 4))
+
+/*
+** ld1_u16_m32:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_m32, svuint16x4_t, uint16_t,
+ z0 = svld1_u16_x4 (pn8, x0 - svcnth () * 32),
+ z0 = svld1_x4 (pn8, x0 - svcnth () * 32))
+
+/*
+** ld1_u16_m36:
+** [^{]*
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_m36, svuint16x4_t, uint16_t,
+ z0 = svld1_u16_x4 (pn8, x0 - svcnth () * 36),
+ z0 = svld1_x4 (pn8, x0 - svcnth () * 36))
+
+/*
+** ld1_u16_z17:
+** ld1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_z17, svuint16x4_t, uint16_t,
+ z17 = svld1_u16_x4 (pn8, x0),
+ z17 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_u16_z22:
+** ld1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_z22, svuint16x4_t, uint16_t,
+ z22 = svld1_u16_x4 (pn8, x0),
+ z22 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_u16_z28:
+** ld1h {z28\.h(?: - |, )z31\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_z28, svuint16x4_t, uint16_t,
+ z28 = svld1_u16_x4 (pn8, x0),
+ z28 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_u16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1h {z0\.h(?: - |, )z3\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_pn0, svuint16x4_t, uint16_t,
+ z0 = svld1_u16_x4 (pn0, x0),
+ z0 = svld1_x4 (pn0, x0))
+
+/*
+** ld1_u16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1h {z0\.h(?: - |, )z3\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_pn7, svuint16x4_t, uint16_t,
+ z0 = svld1_u16_x4 (pn7, x0),
+ z0 = svld1_x4 (pn7, x0))
+
+/*
+** ld1_u16_pn15:
+** ld1h {z0\.h(?: - |, )z3\.h}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u16_pn15, svuint16x4_t, uint16_t,
+ z0 = svld1_u16_x4 (pn15, x0),
+ z0 = svld1_x4 (pn15, x0))
+
+/*
+** ld1_vnum_u16_0:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_0, svuint16x4_t, uint16_t,
+ z0 = svld1_vnum_u16_x4 (pn8, x0, 0),
+ z0 = svld1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u16_1:
+** incb x0
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_1, svuint16x4_t, uint16_t,
+ z0 = svld1_vnum_u16_x4 (pn8, x0, 1),
+ z0 = svld1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u16_2:
+** incb x0, all, mul #2
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_2, svuint16x4_t, uint16_t,
+ z0 = svld1_vnum_u16_x4 (pn8, x0, 2),
+ z0 = svld1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u16_3:
+** incb x0, all, mul #3
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_3, svuint16x4_t, uint16_t,
+ z0 = svld1_vnum_u16_x4 (pn8, x0, 3),
+ z0 = svld1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ld1_vnum_u16_4:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_4, svuint16x4_t, uint16_t,
+ z0 = svld1_vnum_u16_x4 (pn8, x0, 4),
+ z0 = svld1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ld1_vnum_u16_28:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_28, svuint16x4_t, uint16_t,
+ z0 = svld1_vnum_u16_x4 (pn8, x0, 28),
+ z0 = svld1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ld1_vnum_u16_32:
+** [^{]*
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_32, svuint16x4_t, uint16_t,
+ z0 = svld1_vnum_u16_x4 (pn8, x0, 32),
+ z0 = svld1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u16_m1:
+** decb x0
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_m1, svuint16x4_t, uint16_t,
+ z0 = svld1_vnum_u16_x4 (pn8, x0, -1),
+ z0 = svld1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u16_m2:
+** decb x0, all, mul #2
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_m2, svuint16x4_t, uint16_t,
+ z0 = svld1_vnum_u16_x4 (pn8, x0, -2),
+ z0 = svld1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u16_m3:
+** decb x0, all, mul #3
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_m3, svuint16x4_t, uint16_t,
+ z0 = svld1_vnum_u16_x4 (pn8, x0, -3),
+ z0 = svld1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ld1_vnum_u16_m4:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_m4, svuint16x4_t, uint16_t,
+ z0 = svld1_vnum_u16_x4 (pn8, x0, -4),
+ z0 = svld1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ld1_vnum_u16_m32:
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_m32, svuint16x4_t, uint16_t,
+ z0 = svld1_vnum_u16_x4 (pn8, x0, -32),
+ z0 = svld1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ld1_vnum_u16_m36:
+** [^{]*
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_m36, svuint16x4_t, uint16_t,
+ z0 = svld1_vnum_u16_x4 (pn8, x0, -36),
+ z0 = svld1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ld1_vnum_u16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1h {z0\.h - z3\.h}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1h {z0\.h - z3\.h}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u16_x1, svuint16x4_t, uint16_t,
+ z0 = svld1_vnum_u16_x4 (pn8, x0, x1),
+ z0 = svld1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u32_x2.c
new file mode 100644
index 0000000..8262d8c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u32_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_u32_base:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_base, svuint32x2_t, uint32_t,
+ z0 = svld1_u32_x2 (pn8, x0),
+ z0 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_u32_index:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_index, svuint32x2_t, uint32_t,
+ z0 = svld1_u32_x2 (pn8, x0 + x1),
+ z0 = svld1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u32_1:
+** incb x0
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_1, svuint32x2_t, uint32_t,
+ z0 = svld1_u32_x2 (pn8, x0 + svcntw ()),
+ z0 = svld1_x2 (pn8, x0 + svcntw ()))
+
+/*
+** ld1_u32_2:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_2, svuint32x2_t, uint32_t,
+ z0 = svld1_u32_x2 (pn8, x0 + svcntw () * 2),
+ z0 = svld1_x2 (pn8, x0 + svcntw () * 2))
+
+/*
+** ld1_u32_14:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_14, svuint32x2_t, uint32_t,
+ z0 = svld1_u32_x2 (pn8, x0 + svcntw () * 14),
+ z0 = svld1_x2 (pn8, x0 + svcntw () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u32_16:
+** incb x0, all, mul #16
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_16, svuint32x2_t, uint32_t,
+ z0 = svld1_u32_x2 (pn8, x0 + svcntw () * 16),
+ z0 = svld1_x2 (pn8, x0 + svcntw () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u32_m1:
+** decb x0
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_m1, svuint32x2_t, uint32_t,
+ z0 = svld1_u32_x2 (pn8, x0 - svcntw ()),
+ z0 = svld1_x2 (pn8, x0 - svcntw ()))
+
+/*
+** ld1_u32_m2:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_m2, svuint32x2_t, uint32_t,
+ z0 = svld1_u32_x2 (pn8, x0 - svcntw () * 2),
+ z0 = svld1_x2 (pn8, x0 - svcntw () * 2))
+
+/*
+** ld1_u32_m16:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_m16, svuint32x2_t, uint32_t,
+ z0 = svld1_u32_x2 (pn8, x0 - svcntw () * 16),
+ z0 = svld1_x2 (pn8, x0 - svcntw () * 16))
+
+/*
+** ld1_u32_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_m18, svuint32x2_t, uint32_t,
+ z0 = svld1_u32_x2 (pn8, x0 - svcntw () * 18),
+ z0 = svld1_x2 (pn8, x0 - svcntw () * 18))
+
+/*
+** ld1_u32_z17:
+** ld1w {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_z17, svuint32x2_t, uint32_t,
+ z17 = svld1_u32_x2 (pn8, x0),
+ z17 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_u32_z22:
+** ld1w {z22\.s(?: - |, )z23\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_z22, svuint32x2_t, uint32_t,
+ z22 = svld1_u32_x2 (pn8, x0),
+ z22 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_u32_z28:
+** ld1w {z28\.s(?: - |, )z29\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_z28, svuint32x2_t, uint32_t,
+ z28 = svld1_u32_x2 (pn8, x0),
+ z28 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_u32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1w {z0\.s(?: - |, )z1\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_pn0, svuint32x2_t, uint32_t,
+ z0 = svld1_u32_x2 (pn0, x0),
+ z0 = svld1_x2 (pn0, x0))
+
+/*
+** ld1_u32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1w {z0\.s(?: - |, )z1\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_pn7, svuint32x2_t, uint32_t,
+ z0 = svld1_u32_x2 (pn7, x0),
+ z0 = svld1_x2 (pn7, x0))
+
+/*
+** ld1_u32_pn15:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_pn15, svuint32x2_t, uint32_t,
+ z0 = svld1_u32_x2 (pn15, x0),
+ z0 = svld1_x2 (pn15, x0))
+
+/*
+** ld1_vnum_u32_0:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_0, svuint32x2_t, uint32_t,
+ z0 = svld1_vnum_u32_x2 (pn8, x0, 0),
+ z0 = svld1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u32_1:
+** incb x0
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_1, svuint32x2_t, uint32_t,
+ z0 = svld1_vnum_u32_x2 (pn8, x0, 1),
+ z0 = svld1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ld1_vnum_u32_2:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_2, svuint32x2_t, uint32_t,
+ z0 = svld1_vnum_u32_x2 (pn8, x0, 2),
+ z0 = svld1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ld1_vnum_u32_14:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_14, svuint32x2_t, uint32_t,
+ z0 = svld1_vnum_u32_x2 (pn8, x0, 14),
+ z0 = svld1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u32_16:
+** incb x0, all, mul #16
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_16, svuint32x2_t, uint32_t,
+ z0 = svld1_vnum_u32_x2 (pn8, x0, 16),
+ z0 = svld1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u32_m1:
+** decb x0
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_m1, svuint32x2_t, uint32_t,
+ z0 = svld1_vnum_u32_x2 (pn8, x0, -1),
+ z0 = svld1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ld1_vnum_u32_m2:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_m2, svuint32x2_t, uint32_t,
+ z0 = svld1_vnum_u32_x2 (pn8, x0, -2),
+ z0 = svld1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ld1_vnum_u32_m16:
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_m16, svuint32x2_t, uint32_t,
+ z0 = svld1_vnum_u32_x2 (pn8, x0, -16),
+ z0 = svld1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ld1_vnum_u32_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_m18, svuint32x2_t, uint32_t,
+ z0 = svld1_vnum_u32_x2 (pn8, x0, -18),
+ z0 = svld1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ld1_vnum_u32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_x1, svuint32x2_t, uint32_t,
+ z0 = svld1_vnum_u32_x2 (pn8, x0, x1),
+ z0 = svld1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u32_x4.c
new file mode 100644
index 0000000..0c2a9fa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u32_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_u32_base:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_base, svuint32x4_t, uint32_t,
+ z0 = svld1_u32_x4 (pn8, x0),
+ z0 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_u32_index:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_index, svuint32x4_t, uint32_t,
+ z0 = svld1_u32_x4 (pn8, x0 + x1),
+ z0 = svld1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u32_1:
+** incb x0
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_1, svuint32x4_t, uint32_t,
+ z0 = svld1_u32_x4 (pn8, x0 + svcntw ()),
+ z0 = svld1_x4 (pn8, x0 + svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u32_2:
+** incb x0, all, mul #2
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_2, svuint32x4_t, uint32_t,
+ z0 = svld1_u32_x4 (pn8, x0 + svcntw () * 2),
+ z0 = svld1_x4 (pn8, x0 + svcntw () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u32_3:
+** incb x0, all, mul #3
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_3, svuint32x4_t, uint32_t,
+ z0 = svld1_u32_x4 (pn8, x0 + svcntw () * 3),
+ z0 = svld1_x4 (pn8, x0 + svcntw () * 3))
+
+/*
+** ld1_u32_4:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_4, svuint32x4_t, uint32_t,
+ z0 = svld1_u32_x4 (pn8, x0 + svcntw () * 4),
+ z0 = svld1_x4 (pn8, x0 + svcntw () * 4))
+
+/*
+** ld1_u32_28:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_28, svuint32x4_t, uint32_t,
+ z0 = svld1_u32_x4 (pn8, x0 + svcntw () * 28),
+ z0 = svld1_x4 (pn8, x0 + svcntw () * 28))
+
+/*
+** ld1_u32_32:
+** [^{]*
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_32, svuint32x4_t, uint32_t,
+ z0 = svld1_u32_x4 (pn8, x0 + svcntw () * 32),
+ z0 = svld1_x4 (pn8, x0 + svcntw () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u32_m1:
+** decb x0
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_m1, svuint32x4_t, uint32_t,
+ z0 = svld1_u32_x4 (pn8, x0 - svcntw ()),
+ z0 = svld1_x4 (pn8, x0 - svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u32_m2:
+** decb x0, all, mul #2
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_m2, svuint32x4_t, uint32_t,
+ z0 = svld1_u32_x4 (pn8, x0 - svcntw () * 2),
+ z0 = svld1_x4 (pn8, x0 - svcntw () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u32_m3:
+** decb x0, all, mul #3
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_m3, svuint32x4_t, uint32_t,
+ z0 = svld1_u32_x4 (pn8, x0 - svcntw () * 3),
+ z0 = svld1_x4 (pn8, x0 - svcntw () * 3))
+
+/*
+** ld1_u32_m4:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_m4, svuint32x4_t, uint32_t,
+ z0 = svld1_u32_x4 (pn8, x0 - svcntw () * 4),
+ z0 = svld1_x4 (pn8, x0 - svcntw () * 4))
+
+/*
+** ld1_u32_m32:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_m32, svuint32x4_t, uint32_t,
+ z0 = svld1_u32_x4 (pn8, x0 - svcntw () * 32),
+ z0 = svld1_x4 (pn8, x0 - svcntw () * 32))
+
+/*
+** ld1_u32_m36:
+** [^{]*
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_m36, svuint32x4_t, uint32_t,
+ z0 = svld1_u32_x4 (pn8, x0 - svcntw () * 36),
+ z0 = svld1_x4 (pn8, x0 - svcntw () * 36))
+
+/*
+** ld1_u32_z17:
+** ld1w {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_z17, svuint32x4_t, uint32_t,
+ z17 = svld1_u32_x4 (pn8, x0),
+ z17 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_u32_z22:
+** ld1w {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_z22, svuint32x4_t, uint32_t,
+ z22 = svld1_u32_x4 (pn8, x0),
+ z22 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_u32_z28:
+** ld1w {z28\.s(?: - |, )z31\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_z28, svuint32x4_t, uint32_t,
+ z28 = svld1_u32_x4 (pn8, x0),
+ z28 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_u32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1w {z0\.s(?: - |, )z3\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_pn0, svuint32x4_t, uint32_t,
+ z0 = svld1_u32_x4 (pn0, x0),
+ z0 = svld1_x4 (pn0, x0))
+
+/*
+** ld1_u32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1w {z0\.s(?: - |, )z3\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_pn7, svuint32x4_t, uint32_t,
+ z0 = svld1_u32_x4 (pn7, x0),
+ z0 = svld1_x4 (pn7, x0))
+
+/*
+** ld1_u32_pn15:
+** ld1w {z0\.s(?: - |, )z3\.s}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u32_pn15, svuint32x4_t, uint32_t,
+ z0 = svld1_u32_x4 (pn15, x0),
+ z0 = svld1_x4 (pn15, x0))
+
+/*
+** ld1_vnum_u32_0:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_0, svuint32x4_t, uint32_t,
+ z0 = svld1_vnum_u32_x4 (pn8, x0, 0),
+ z0 = svld1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u32_1:
+** incb x0
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_1, svuint32x4_t, uint32_t,
+ z0 = svld1_vnum_u32_x4 (pn8, x0, 1),
+ z0 = svld1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u32_2:
+** incb x0, all, mul #2
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_2, svuint32x4_t, uint32_t,
+ z0 = svld1_vnum_u32_x4 (pn8, x0, 2),
+ z0 = svld1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u32_3:
+** incb x0, all, mul #3
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_3, svuint32x4_t, uint32_t,
+ z0 = svld1_vnum_u32_x4 (pn8, x0, 3),
+ z0 = svld1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ld1_vnum_u32_4:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_4, svuint32x4_t, uint32_t,
+ z0 = svld1_vnum_u32_x4 (pn8, x0, 4),
+ z0 = svld1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ld1_vnum_u32_28:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_28, svuint32x4_t, uint32_t,
+ z0 = svld1_vnum_u32_x4 (pn8, x0, 28),
+ z0 = svld1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ld1_vnum_u32_32:
+** [^{]*
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_32, svuint32x4_t, uint32_t,
+ z0 = svld1_vnum_u32_x4 (pn8, x0, 32),
+ z0 = svld1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u32_m1:
+** decb x0
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_m1, svuint32x4_t, uint32_t,
+ z0 = svld1_vnum_u32_x4 (pn8, x0, -1),
+ z0 = svld1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u32_m2:
+** decb x0, all, mul #2
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_m2, svuint32x4_t, uint32_t,
+ z0 = svld1_vnum_u32_x4 (pn8, x0, -2),
+ z0 = svld1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u32_m3:
+** decb x0, all, mul #3
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_m3, svuint32x4_t, uint32_t,
+ z0 = svld1_vnum_u32_x4 (pn8, x0, -3),
+ z0 = svld1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ld1_vnum_u32_m4:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_m4, svuint32x4_t, uint32_t,
+ z0 = svld1_vnum_u32_x4 (pn8, x0, -4),
+ z0 = svld1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ld1_vnum_u32_m32:
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_m32, svuint32x4_t, uint32_t,
+ z0 = svld1_vnum_u32_x4 (pn8, x0, -32),
+ z0 = svld1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ld1_vnum_u32_m36:
+** [^{]*
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_m36, svuint32x4_t, uint32_t,
+ z0 = svld1_vnum_u32_x4 (pn8, x0, -36),
+ z0 = svld1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ld1_vnum_u32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1w {z0\.s - z3\.s}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1w {z0\.s - z3\.s}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u32_x1, svuint32x4_t, uint32_t,
+ z0 = svld1_vnum_u32_x4 (pn8, x0, x1),
+ z0 = svld1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u64_x2.c
new file mode 100644
index 0000000..7cd45ef
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u64_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_u64_base:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_base, svuint64x2_t, uint64_t,
+ z0 = svld1_u64_x2 (pn8, x0),
+ z0 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_u64_index:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_index, svuint64x2_t, uint64_t,
+ z0 = svld1_u64_x2 (pn8, x0 + x1),
+ z0 = svld1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u64_1:
+** incb x0
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_1, svuint64x2_t, uint64_t,
+ z0 = svld1_u64_x2 (pn8, x0 + svcntd ()),
+ z0 = svld1_x2 (pn8, x0 + svcntd ()))
+
+/*
+** ld1_u64_2:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_2, svuint64x2_t, uint64_t,
+ z0 = svld1_u64_x2 (pn8, x0 + svcntd () * 2),
+ z0 = svld1_x2 (pn8, x0 + svcntd () * 2))
+
+/*
+** ld1_u64_14:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_14, svuint64x2_t, uint64_t,
+ z0 = svld1_u64_x2 (pn8, x0 + svcntd () * 14),
+ z0 = svld1_x2 (pn8, x0 + svcntd () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u64_16:
+** incb x0, all, mul #16
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_16, svuint64x2_t, uint64_t,
+ z0 = svld1_u64_x2 (pn8, x0 + svcntd () * 16),
+ z0 = svld1_x2 (pn8, x0 + svcntd () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u64_m1:
+** decb x0
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_m1, svuint64x2_t, uint64_t,
+ z0 = svld1_u64_x2 (pn8, x0 - svcntd ()),
+ z0 = svld1_x2 (pn8, x0 - svcntd ()))
+
+/*
+** ld1_u64_m2:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_m2, svuint64x2_t, uint64_t,
+ z0 = svld1_u64_x2 (pn8, x0 - svcntd () * 2),
+ z0 = svld1_x2 (pn8, x0 - svcntd () * 2))
+
+/*
+** ld1_u64_m16:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_m16, svuint64x2_t, uint64_t,
+ z0 = svld1_u64_x2 (pn8, x0 - svcntd () * 16),
+ z0 = svld1_x2 (pn8, x0 - svcntd () * 16))
+
+/*
+** ld1_u64_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_m18, svuint64x2_t, uint64_t,
+ z0 = svld1_u64_x2 (pn8, x0 - svcntd () * 18),
+ z0 = svld1_x2 (pn8, x0 - svcntd () * 18))
+
+/*
+** ld1_u64_z17:
+** ld1d {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_z17, svuint64x2_t, uint64_t,
+ z17 = svld1_u64_x2 (pn8, x0),
+ z17 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_u64_z22:
+** ld1d {z22\.d(?: - |, )z23\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_z22, svuint64x2_t, uint64_t,
+ z22 = svld1_u64_x2 (pn8, x0),
+ z22 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_u64_z28:
+** ld1d {z28\.d(?: - |, )z29\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_z28, svuint64x2_t, uint64_t,
+ z28 = svld1_u64_x2 (pn8, x0),
+ z28 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_u64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1d {z0\.d(?: - |, )z1\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_pn0, svuint64x2_t, uint64_t,
+ z0 = svld1_u64_x2 (pn0, x0),
+ z0 = svld1_x2 (pn0, x0))
+
+/*
+** ld1_u64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1d {z0\.d(?: - |, )z1\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_pn7, svuint64x2_t, uint64_t,
+ z0 = svld1_u64_x2 (pn7, x0),
+ z0 = svld1_x2 (pn7, x0))
+
+/*
+** ld1_u64_pn15:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_pn15, svuint64x2_t, uint64_t,
+ z0 = svld1_u64_x2 (pn15, x0),
+ z0 = svld1_x2 (pn15, x0))
+
+/*
+** ld1_vnum_u64_0:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_0, svuint64x2_t, uint64_t,
+ z0 = svld1_vnum_u64_x2 (pn8, x0, 0),
+ z0 = svld1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u64_1:
+** incb x0
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_1, svuint64x2_t, uint64_t,
+ z0 = svld1_vnum_u64_x2 (pn8, x0, 1),
+ z0 = svld1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ld1_vnum_u64_2:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_2, svuint64x2_t, uint64_t,
+ z0 = svld1_vnum_u64_x2 (pn8, x0, 2),
+ z0 = svld1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ld1_vnum_u64_14:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_14, svuint64x2_t, uint64_t,
+ z0 = svld1_vnum_u64_x2 (pn8, x0, 14),
+ z0 = svld1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u64_16:
+** incb x0, all, mul #16
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_16, svuint64x2_t, uint64_t,
+ z0 = svld1_vnum_u64_x2 (pn8, x0, 16),
+ z0 = svld1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u64_m1:
+** decb x0
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_m1, svuint64x2_t, uint64_t,
+ z0 = svld1_vnum_u64_x2 (pn8, x0, -1),
+ z0 = svld1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ld1_vnum_u64_m2:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_m2, svuint64x2_t, uint64_t,
+ z0 = svld1_vnum_u64_x2 (pn8, x0, -2),
+ z0 = svld1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ld1_vnum_u64_m16:
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_m16, svuint64x2_t, uint64_t,
+ z0 = svld1_vnum_u64_x2 (pn8, x0, -16),
+ z0 = svld1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ld1_vnum_u64_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_m18, svuint64x2_t, uint64_t,
+ z0 = svld1_vnum_u64_x2 (pn8, x0, -18),
+ z0 = svld1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ld1_vnum_u64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_x1, svuint64x2_t, uint64_t,
+ z0 = svld1_vnum_u64_x2 (pn8, x0, x1),
+ z0 = svld1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u64_x4.c
new file mode 100644
index 0000000..37f58df
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u64_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_u64_base:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_base, svuint64x4_t, uint64_t,
+ z0 = svld1_u64_x4 (pn8, x0),
+ z0 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_u64_index:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_index, svuint64x4_t, uint64_t,
+ z0 = svld1_u64_x4 (pn8, x0 + x1),
+ z0 = svld1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u64_1:
+** incb x0
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_1, svuint64x4_t, uint64_t,
+ z0 = svld1_u64_x4 (pn8, x0 + svcntd ()),
+ z0 = svld1_x4 (pn8, x0 + svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u64_2:
+** incb x0, all, mul #2
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_2, svuint64x4_t, uint64_t,
+ z0 = svld1_u64_x4 (pn8, x0 + svcntd () * 2),
+ z0 = svld1_x4 (pn8, x0 + svcntd () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u64_3:
+** incb x0, all, mul #3
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_3, svuint64x4_t, uint64_t,
+ z0 = svld1_u64_x4 (pn8, x0 + svcntd () * 3),
+ z0 = svld1_x4 (pn8, x0 + svcntd () * 3))
+
+/*
+** ld1_u64_4:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_4, svuint64x4_t, uint64_t,
+ z0 = svld1_u64_x4 (pn8, x0 + svcntd () * 4),
+ z0 = svld1_x4 (pn8, x0 + svcntd () * 4))
+
+/*
+** ld1_u64_28:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_28, svuint64x4_t, uint64_t,
+ z0 = svld1_u64_x4 (pn8, x0 + svcntd () * 28),
+ z0 = svld1_x4 (pn8, x0 + svcntd () * 28))
+
+/*
+** ld1_u64_32:
+** [^{]*
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_32, svuint64x4_t, uint64_t,
+ z0 = svld1_u64_x4 (pn8, x0 + svcntd () * 32),
+ z0 = svld1_x4 (pn8, x0 + svcntd () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u64_m1:
+** decb x0
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_m1, svuint64x4_t, uint64_t,
+ z0 = svld1_u64_x4 (pn8, x0 - svcntd ()),
+ z0 = svld1_x4 (pn8, x0 - svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u64_m2:
+** decb x0, all, mul #2
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_m2, svuint64x4_t, uint64_t,
+ z0 = svld1_u64_x4 (pn8, x0 - svcntd () * 2),
+ z0 = svld1_x4 (pn8, x0 - svcntd () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u64_m3:
+** decb x0, all, mul #3
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_m3, svuint64x4_t, uint64_t,
+ z0 = svld1_u64_x4 (pn8, x0 - svcntd () * 3),
+ z0 = svld1_x4 (pn8, x0 - svcntd () * 3))
+
+/*
+** ld1_u64_m4:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_m4, svuint64x4_t, uint64_t,
+ z0 = svld1_u64_x4 (pn8, x0 - svcntd () * 4),
+ z0 = svld1_x4 (pn8, x0 - svcntd () * 4))
+
+/*
+** ld1_u64_m32:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_m32, svuint64x4_t, uint64_t,
+ z0 = svld1_u64_x4 (pn8, x0 - svcntd () * 32),
+ z0 = svld1_x4 (pn8, x0 - svcntd () * 32))
+
+/*
+** ld1_u64_m36:
+** [^{]*
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_m36, svuint64x4_t, uint64_t,
+ z0 = svld1_u64_x4 (pn8, x0 - svcntd () * 36),
+ z0 = svld1_x4 (pn8, x0 - svcntd () * 36))
+
+/*
+** ld1_u64_z17:
+** ld1d {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_z17, svuint64x4_t, uint64_t,
+ z17 = svld1_u64_x4 (pn8, x0),
+ z17 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_u64_z22:
+** ld1d {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_z22, svuint64x4_t, uint64_t,
+ z22 = svld1_u64_x4 (pn8, x0),
+ z22 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_u64_z28:
+** ld1d {z28\.d(?: - |, )z31\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_z28, svuint64x4_t, uint64_t,
+ z28 = svld1_u64_x4 (pn8, x0),
+ z28 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_u64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1d {z0\.d(?: - |, )z3\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_pn0, svuint64x4_t, uint64_t,
+ z0 = svld1_u64_x4 (pn0, x0),
+ z0 = svld1_x4 (pn0, x0))
+
+/*
+** ld1_u64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1d {z0\.d(?: - |, )z3\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_pn7, svuint64x4_t, uint64_t,
+ z0 = svld1_u64_x4 (pn7, x0),
+ z0 = svld1_x4 (pn7, x0))
+
+/*
+** ld1_u64_pn15:
+** ld1d {z0\.d(?: - |, )z3\.d}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u64_pn15, svuint64x4_t, uint64_t,
+ z0 = svld1_u64_x4 (pn15, x0),
+ z0 = svld1_x4 (pn15, x0))
+
+/*
+** ld1_vnum_u64_0:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_0, svuint64x4_t, uint64_t,
+ z0 = svld1_vnum_u64_x4 (pn8, x0, 0),
+ z0 = svld1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u64_1:
+** incb x0
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_1, svuint64x4_t, uint64_t,
+ z0 = svld1_vnum_u64_x4 (pn8, x0, 1),
+ z0 = svld1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u64_2:
+** incb x0, all, mul #2
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_2, svuint64x4_t, uint64_t,
+ z0 = svld1_vnum_u64_x4 (pn8, x0, 2),
+ z0 = svld1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u64_3:
+** incb x0, all, mul #3
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_3, svuint64x4_t, uint64_t,
+ z0 = svld1_vnum_u64_x4 (pn8, x0, 3),
+ z0 = svld1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ld1_vnum_u64_4:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_4, svuint64x4_t, uint64_t,
+ z0 = svld1_vnum_u64_x4 (pn8, x0, 4),
+ z0 = svld1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ld1_vnum_u64_28:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_28, svuint64x4_t, uint64_t,
+ z0 = svld1_vnum_u64_x4 (pn8, x0, 28),
+ z0 = svld1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ld1_vnum_u64_32:
+** [^{]*
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_32, svuint64x4_t, uint64_t,
+ z0 = svld1_vnum_u64_x4 (pn8, x0, 32),
+ z0 = svld1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u64_m1:
+** decb x0
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_m1, svuint64x4_t, uint64_t,
+ z0 = svld1_vnum_u64_x4 (pn8, x0, -1),
+ z0 = svld1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u64_m2:
+** decb x0, all, mul #2
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_m2, svuint64x4_t, uint64_t,
+ z0 = svld1_vnum_u64_x4 (pn8, x0, -2),
+ z0 = svld1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u64_m3:
+** decb x0, all, mul #3
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_m3, svuint64x4_t, uint64_t,
+ z0 = svld1_vnum_u64_x4 (pn8, x0, -3),
+ z0 = svld1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ld1_vnum_u64_m4:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_m4, svuint64x4_t, uint64_t,
+ z0 = svld1_vnum_u64_x4 (pn8, x0, -4),
+ z0 = svld1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ld1_vnum_u64_m32:
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_m32, svuint64x4_t, uint64_t,
+ z0 = svld1_vnum_u64_x4 (pn8, x0, -32),
+ z0 = svld1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ld1_vnum_u64_m36:
+** [^{]*
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_m36, svuint64x4_t, uint64_t,
+ z0 = svld1_vnum_u64_x4 (pn8, x0, -36),
+ z0 = svld1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ld1_vnum_u64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1d {z0\.d - z3\.d}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1d {z0\.d - z3\.d}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u64_x1, svuint64x4_t, uint64_t,
+ z0 = svld1_vnum_u64_x4 (pn8, x0, x1),
+ z0 = svld1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u8_x2.c
new file mode 100644
index 0000000..83e6b35
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u8_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_u8_base:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_base, svuint8x2_t, uint8_t,
+ z0 = svld1_u8_x2 (pn8, x0),
+ z0 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_u8_index:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, x1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_index, svuint8x2_t, uint8_t,
+ z0 = svld1_u8_x2 (pn8, x0 + x1),
+ z0 = svld1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u8_1:
+** incb x0
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_1, svuint8x2_t, uint8_t,
+ z0 = svld1_u8_x2 (pn8, x0 + svcntb ()),
+ z0 = svld1_x2 (pn8, x0 + svcntb ()))
+
+/*
+** ld1_u8_2:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_2, svuint8x2_t, uint8_t,
+ z0 = svld1_u8_x2 (pn8, x0 + svcntb () * 2),
+ z0 = svld1_x2 (pn8, x0 + svcntb () * 2))
+
+/*
+** ld1_u8_14:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_14, svuint8x2_t, uint8_t,
+ z0 = svld1_u8_x2 (pn8, x0 + svcntb () * 14),
+ z0 = svld1_x2 (pn8, x0 + svcntb () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u8_16:
+** incb x0, all, mul #16
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_16, svuint8x2_t, uint8_t,
+ z0 = svld1_u8_x2 (pn8, x0 + svcntb () * 16),
+ z0 = svld1_x2 (pn8, x0 + svcntb () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u8_m1:
+** decb x0
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_m1, svuint8x2_t, uint8_t,
+ z0 = svld1_u8_x2 (pn8, x0 - svcntb ()),
+ z0 = svld1_x2 (pn8, x0 - svcntb ()))
+
+/*
+** ld1_u8_m2:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_m2, svuint8x2_t, uint8_t,
+ z0 = svld1_u8_x2 (pn8, x0 - svcntb () * 2),
+ z0 = svld1_x2 (pn8, x0 - svcntb () * 2))
+
+/*
+** ld1_u8_m16:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_m16, svuint8x2_t, uint8_t,
+ z0 = svld1_u8_x2 (pn8, x0 - svcntb () * 16),
+ z0 = svld1_x2 (pn8, x0 - svcntb () * 16))
+
+/*
+** ld1_u8_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_m18, svuint8x2_t, uint8_t,
+ z0 = svld1_u8_x2 (pn8, x0 - svcntb () * 18),
+ z0 = svld1_x2 (pn8, x0 - svcntb () * 18))
+
+/*
+** ld1_u8_z17:
+** ld1b {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_z17, svuint8x2_t, uint8_t,
+ z17 = svld1_u8_x2 (pn8, x0),
+ z17 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_u8_z22:
+** ld1b {z22\.b(?: - |, )z23\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_z22, svuint8x2_t, uint8_t,
+ z22 = svld1_u8_x2 (pn8, x0),
+ z22 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_u8_z28:
+** ld1b {z28\.b(?: - |, )z29\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_z28, svuint8x2_t, uint8_t,
+ z28 = svld1_u8_x2 (pn8, x0),
+ z28 = svld1_x2 (pn8, x0))
+
+/*
+** ld1_u8_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1b {z0\.b(?: - |, )z1\.b}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_pn0, svuint8x2_t, uint8_t,
+ z0 = svld1_u8_x2 (pn0, x0),
+ z0 = svld1_x2 (pn0, x0))
+
+/*
+** ld1_u8_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1b {z0\.b(?: - |, )z1\.b}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_pn7, svuint8x2_t, uint8_t,
+ z0 = svld1_u8_x2 (pn7, x0),
+ z0 = svld1_x2 (pn7, x0))
+
+/*
+** ld1_u8_pn15:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_pn15, svuint8x2_t, uint8_t,
+ z0 = svld1_u8_x2 (pn15, x0),
+ z0 = svld1_x2 (pn15, x0))
+
+/*
+** ld1_vnum_u8_0:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_0, svuint8x2_t, uint8_t,
+ z0 = svld1_vnum_u8_x2 (pn8, x0, 0),
+ z0 = svld1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u8_1:
+** incb x0
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_1, svuint8x2_t, uint8_t,
+ z0 = svld1_vnum_u8_x2 (pn8, x0, 1),
+ z0 = svld1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ld1_vnum_u8_2:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_2, svuint8x2_t, uint8_t,
+ z0 = svld1_vnum_u8_x2 (pn8, x0, 2),
+ z0 = svld1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ld1_vnum_u8_14:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_14, svuint8x2_t, uint8_t,
+ z0 = svld1_vnum_u8_x2 (pn8, x0, 14),
+ z0 = svld1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u8_16:
+** incb x0, all, mul #16
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_16, svuint8x2_t, uint8_t,
+ z0 = svld1_vnum_u8_x2 (pn8, x0, 16),
+ z0 = svld1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u8_m1:
+** decb x0
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_m1, svuint8x2_t, uint8_t,
+ z0 = svld1_vnum_u8_x2 (pn8, x0, -1),
+ z0 = svld1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ld1_vnum_u8_m2:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_m2, svuint8x2_t, uint8_t,
+ z0 = svld1_vnum_u8_x2 (pn8, x0, -2),
+ z0 = svld1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ld1_vnum_u8_m16:
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_m16, svuint8x2_t, uint8_t,
+ z0 = svld1_vnum_u8_x2 (pn8, x0, -16),
+ z0 = svld1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ld1_vnum_u8_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_m18, svuint8x2_t, uint8_t,
+ z0 = svld1_vnum_u8_x2 (pn8, x0, -18),
+ z0 = svld1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ld1_vnum_u8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_x1, svuint8x2_t, uint8_t,
+ z0 = svld1_vnum_u8_x2 (pn8, x0, x1),
+ z0 = svld1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u8_x4.c
new file mode 100644
index 0000000..e6361a4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ld1_u8_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ld1_u8_base:
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_base, svuint8x4_t, uint8_t,
+ z0 = svld1_u8_x4 (pn8, x0),
+ z0 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_u8_index:
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, x1\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_index, svuint8x4_t, uint8_t,
+ z0 = svld1_u8_x4 (pn8, x0 + x1),
+ z0 = svld1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u8_1:
+** incb x0
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_1, svuint8x4_t, uint8_t,
+ z0 = svld1_u8_x4 (pn8, x0 + svcntb ()),
+ z0 = svld1_x4 (pn8, x0 + svcntb ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u8_2:
+** incb x0, all, mul #2
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_2, svuint8x4_t, uint8_t,
+ z0 = svld1_u8_x4 (pn8, x0 + svcntb () * 2),
+ z0 = svld1_x4 (pn8, x0 + svcntb () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u8_3:
+** incb x0, all, mul #3
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_3, svuint8x4_t, uint8_t,
+ z0 = svld1_u8_x4 (pn8, x0 + svcntb () * 3),
+ z0 = svld1_x4 (pn8, x0 + svcntb () * 3))
+
+/*
+** ld1_u8_4:
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_4, svuint8x4_t, uint8_t,
+ z0 = svld1_u8_x4 (pn8, x0 + svcntb () * 4),
+ z0 = svld1_x4 (pn8, x0 + svcntb () * 4))
+
+/*
+** ld1_u8_28:
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_28, svuint8x4_t, uint8_t,
+ z0 = svld1_u8_x4 (pn8, x0 + svcntb () * 28),
+ z0 = svld1_x4 (pn8, x0 + svcntb () * 28))
+
+/*
+** ld1_u8_32:
+** [^{]*
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_32, svuint8x4_t, uint8_t,
+ z0 = svld1_u8_x4 (pn8, x0 + svcntb () * 32),
+ z0 = svld1_x4 (pn8, x0 + svcntb () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u8_m1:
+** decb x0
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_m1, svuint8x4_t, uint8_t,
+ z0 = svld1_u8_x4 (pn8, x0 - svcntb ()),
+ z0 = svld1_x4 (pn8, x0 - svcntb ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u8_m2:
+** decb x0, all, mul #2
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_m2, svuint8x4_t, uint8_t,
+ z0 = svld1_u8_x4 (pn8, x0 - svcntb () * 2),
+ z0 = svld1_x4 (pn8, x0 - svcntb () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_u8_m3:
+** decb x0, all, mul #3
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_m3, svuint8x4_t, uint8_t,
+ z0 = svld1_u8_x4 (pn8, x0 - svcntb () * 3),
+ z0 = svld1_x4 (pn8, x0 - svcntb () * 3))
+
+/*
+** ld1_u8_m4:
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_m4, svuint8x4_t, uint8_t,
+ z0 = svld1_u8_x4 (pn8, x0 - svcntb () * 4),
+ z0 = svld1_x4 (pn8, x0 - svcntb () * 4))
+
+/*
+** ld1_u8_m32:
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_m32, svuint8x4_t, uint8_t,
+ z0 = svld1_u8_x4 (pn8, x0 - svcntb () * 32),
+ z0 = svld1_x4 (pn8, x0 - svcntb () * 32))
+
+/*
+** ld1_u8_m36:
+** [^{]*
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_m36, svuint8x4_t, uint8_t,
+ z0 = svld1_u8_x4 (pn8, x0 - svcntb () * 36),
+ z0 = svld1_x4 (pn8, x0 - svcntb () * 36))
+
+/*
+** ld1_u8_z17:
+** ld1b {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_z17, svuint8x4_t, uint8_t,
+ z17 = svld1_u8_x4 (pn8, x0),
+ z17 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_u8_z22:
+** ld1b {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_z22, svuint8x4_t, uint8_t,
+ z22 = svld1_u8_x4 (pn8, x0),
+ z22 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_u8_z28:
+** ld1b {z28\.b(?: - |, )z31\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_z28, svuint8x4_t, uint8_t,
+ z28 = svld1_u8_x4 (pn8, x0),
+ z28 = svld1_x4 (pn8, x0))
+
+/*
+** ld1_u8_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ld1b {z0\.b(?: - |, )z3\.b}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_pn0, svuint8x4_t, uint8_t,
+ z0 = svld1_u8_x4 (pn0, x0),
+ z0 = svld1_x4 (pn0, x0))
+
+/*
+** ld1_u8_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ld1b {z0\.b(?: - |, )z3\.b}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_pn7, svuint8x4_t, uint8_t,
+ z0 = svld1_u8_x4 (pn7, x0),
+ z0 = svld1_x4 (pn7, x0))
+
+/*
+** ld1_u8_pn15:
+** ld1b {z0\.b(?: - |, )z3\.b}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_u8_pn15, svuint8x4_t, uint8_t,
+ z0 = svld1_u8_x4 (pn15, x0),
+ z0 = svld1_x4 (pn15, x0))
+
+/*
+** ld1_vnum_u8_0:
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_0, svuint8x4_t, uint8_t,
+ z0 = svld1_vnum_u8_x4 (pn8, x0, 0),
+ z0 = svld1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u8_1:
+** incb x0
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_1, svuint8x4_t, uint8_t,
+ z0 = svld1_vnum_u8_x4 (pn8, x0, 1),
+ z0 = svld1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u8_2:
+** incb x0, all, mul #2
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_2, svuint8x4_t, uint8_t,
+ z0 = svld1_vnum_u8_x4 (pn8, x0, 2),
+ z0 = svld1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u8_3:
+** incb x0, all, mul #3
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_3, svuint8x4_t, uint8_t,
+ z0 = svld1_vnum_u8_x4 (pn8, x0, 3),
+ z0 = svld1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ld1_vnum_u8_4:
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_4, svuint8x4_t, uint8_t,
+ z0 = svld1_vnum_u8_x4 (pn8, x0, 4),
+ z0 = svld1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ld1_vnum_u8_28:
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_28, svuint8x4_t, uint8_t,
+ z0 = svld1_vnum_u8_x4 (pn8, x0, 28),
+ z0 = svld1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ld1_vnum_u8_32:
+** [^{]*
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_32, svuint8x4_t, uint8_t,
+ z0 = svld1_vnum_u8_x4 (pn8, x0, 32),
+ z0 = svld1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u8_m1:
+** decb x0
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_m1, svuint8x4_t, uint8_t,
+ z0 = svld1_vnum_u8_x4 (pn8, x0, -1),
+ z0 = svld1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u8_m2:
+** decb x0, all, mul #2
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_m2, svuint8x4_t, uint8_t,
+ z0 = svld1_vnum_u8_x4 (pn8, x0, -2),
+ z0 = svld1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1_vnum_u8_m3:
+** decb x0, all, mul #3
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_m3, svuint8x4_t, uint8_t,
+ z0 = svld1_vnum_u8_x4 (pn8, x0, -3),
+ z0 = svld1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ld1_vnum_u8_m4:
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_m4, svuint8x4_t, uint8_t,
+ z0 = svld1_vnum_u8_x4 (pn8, x0, -4),
+ z0 = svld1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ld1_vnum_u8_m32:
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_m32, svuint8x4_t, uint8_t,
+ z0 = svld1_vnum_u8_x4 (pn8, x0, -32),
+ z0 = svld1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ld1_vnum_u8_m36:
+** [^{]*
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_m36, svuint8x4_t, uint8_t,
+ z0 = svld1_vnum_u8_x4 (pn8, x0, -36),
+ z0 = svld1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ld1_vnum_u8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld1b {z0\.b - z3\.b}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld1b {z0\.b - z3\.b}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ld1_vnum_u8_x1, svuint8x4_t, uint8_t,
+ z0 = svld1_vnum_u8_x4 (pn8, x0, x1),
+ z0 = svld1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_bf16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_bf16_x2.c
new file mode 100644
index 0000000..d196144
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_bf16_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_bf16_base:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_base, svbfloat16x2_t, bfloat16_t,
+ z0 = svldnt1_bf16_x2 (pn8, x0),
+ z0 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_bf16_index:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_index, svbfloat16x2_t, bfloat16_t,
+ z0 = svldnt1_bf16_x2 (pn8, x0 + x1),
+ z0 = svldnt1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_bf16_1:
+** incb x0
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_1, svbfloat16x2_t, bfloat16_t,
+ z0 = svldnt1_bf16_x2 (pn8, x0 + svcnth ()),
+ z0 = svldnt1_x2 (pn8, x0 + svcnth ()))
+
+/*
+** ldnt1_bf16_2:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_2, svbfloat16x2_t, bfloat16_t,
+ z0 = svldnt1_bf16_x2 (pn8, x0 + svcnth () * 2),
+ z0 = svldnt1_x2 (pn8, x0 + svcnth () * 2))
+
+/*
+** ldnt1_bf16_14:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_14, svbfloat16x2_t, bfloat16_t,
+ z0 = svldnt1_bf16_x2 (pn8, x0 + svcnth () * 14),
+ z0 = svldnt1_x2 (pn8, x0 + svcnth () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_bf16_16:
+** incb x0, all, mul #16
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_16, svbfloat16x2_t, bfloat16_t,
+ z0 = svldnt1_bf16_x2 (pn8, x0 + svcnth () * 16),
+ z0 = svldnt1_x2 (pn8, x0 + svcnth () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_bf16_m1:
+** decb x0
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_m1, svbfloat16x2_t, bfloat16_t,
+ z0 = svldnt1_bf16_x2 (pn8, x0 - svcnth ()),
+ z0 = svldnt1_x2 (pn8, x0 - svcnth ()))
+
+/*
+** ldnt1_bf16_m2:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_m2, svbfloat16x2_t, bfloat16_t,
+ z0 = svldnt1_bf16_x2 (pn8, x0 - svcnth () * 2),
+ z0 = svldnt1_x2 (pn8, x0 - svcnth () * 2))
+
+/*
+** ldnt1_bf16_m16:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_m16, svbfloat16x2_t, bfloat16_t,
+ z0 = svldnt1_bf16_x2 (pn8, x0 - svcnth () * 16),
+ z0 = svldnt1_x2 (pn8, x0 - svcnth () * 16))
+
+/*
+** ldnt1_bf16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_m18, svbfloat16x2_t, bfloat16_t,
+ z0 = svldnt1_bf16_x2 (pn8, x0 - svcnth () * 18),
+ z0 = svldnt1_x2 (pn8, x0 - svcnth () * 18))
+
+/*
+** ldnt1_bf16_z17:
+** ldnt1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_z17, svbfloat16x2_t, bfloat16_t,
+ z17 = svldnt1_bf16_x2 (pn8, x0),
+ z17 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_bf16_z22:
+** ldnt1h {z22\.h(?: - |, )z23\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_z22, svbfloat16x2_t, bfloat16_t,
+ z22 = svldnt1_bf16_x2 (pn8, x0),
+ z22 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_bf16_z28:
+** ldnt1h {z28\.h(?: - |, )z29\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_z28, svbfloat16x2_t, bfloat16_t,
+ z28 = svldnt1_bf16_x2 (pn8, x0),
+ z28 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_bf16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_pn0, svbfloat16x2_t, bfloat16_t,
+ z0 = svldnt1_bf16_x2 (pn0, x0),
+ z0 = svldnt1_x2 (pn0, x0))
+
+/*
+** ldnt1_bf16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_pn7, svbfloat16x2_t, bfloat16_t,
+ z0 = svldnt1_bf16_x2 (pn7, x0),
+ z0 = svldnt1_x2 (pn7, x0))
+
+/*
+** ldnt1_bf16_pn15:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_pn15, svbfloat16x2_t, bfloat16_t,
+ z0 = svldnt1_bf16_x2 (pn15, x0),
+ z0 = svldnt1_x2 (pn15, x0))
+
+/*
+** ldnt1_vnum_bf16_0:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_0, svbfloat16x2_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x2 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_bf16_1:
+** incb x0
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_1, svbfloat16x2_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x2 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ldnt1_vnum_bf16_2:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_2, svbfloat16x2_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x2 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ldnt1_vnum_bf16_14:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_14, svbfloat16x2_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x2 (pn8, x0, 14),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_bf16_16:
+** incb x0, all, mul #16
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_16, svbfloat16x2_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x2 (pn8, x0, 16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_bf16_m1:
+** decb x0
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_m1, svbfloat16x2_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x2 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ldnt1_vnum_bf16_m2:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_m2, svbfloat16x2_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x2 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ldnt1_vnum_bf16_m16:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_m16, svbfloat16x2_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x2 (pn8, x0, -16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ldnt1_vnum_bf16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_m18, svbfloat16x2_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x2 (pn8, x0, -18),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ldnt1_vnum_bf16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_x1, svbfloat16x2_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x2 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_bf16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_bf16_x4.c
new file mode 100644
index 0000000..0b9903f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_bf16_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_bf16_base:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_base, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_bf16_x4 (pn8, x0),
+ z0 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_bf16_index:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_index, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_bf16_x4 (pn8, x0 + x1),
+ z0 = svldnt1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_bf16_1:
+** incb x0
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_1, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_bf16_x4 (pn8, x0 + svcnth ()),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_bf16_2:
+** incb x0, all, mul #2
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_2, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_bf16_x4 (pn8, x0 + svcnth () * 2),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_bf16_3:
+** incb x0, all, mul #3
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_3, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_bf16_x4 (pn8, x0 + svcnth () * 3),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth () * 3))
+
+/*
+** ldnt1_bf16_4:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_4, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_bf16_x4 (pn8, x0 + svcnth () * 4),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth () * 4))
+
+/*
+** ldnt1_bf16_28:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_28, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_bf16_x4 (pn8, x0 + svcnth () * 28),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth () * 28))
+
+/*
+** ldnt1_bf16_32:
+** [^{]*
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_32, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_bf16_x4 (pn8, x0 + svcnth () * 32),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_bf16_m1:
+** decb x0
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_m1, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_bf16_x4 (pn8, x0 - svcnth ()),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_bf16_m2:
+** decb x0, all, mul #2
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_m2, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_bf16_x4 (pn8, x0 - svcnth () * 2),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_bf16_m3:
+** decb x0, all, mul #3
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_m3, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_bf16_x4 (pn8, x0 - svcnth () * 3),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth () * 3))
+
+/*
+** ldnt1_bf16_m4:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+ TEST_LOAD_COUNT (ldnt1_bf16_m4, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_bf16_x4 (pn8, x0 - svcnth () * 4),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth () * 4))
+
+/*
+** ldnt1_bf16_m32:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_m32, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_bf16_x4 (pn8, x0 - svcnth () * 32),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth () * 32))
+
+/*
+** ldnt1_bf16_m36:
+** [^{]*
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_m36, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_bf16_x4 (pn8, x0 - svcnth () * 36),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth () * 36))
+
+/*
+** ldnt1_bf16_z17:
+** ldnt1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_z17, svbfloat16x4_t, bfloat16_t,
+ z17 = svldnt1_bf16_x4 (pn8, x0),
+ z17 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_bf16_z22:
+** ldnt1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_z22, svbfloat16x4_t, bfloat16_t,
+ z22 = svldnt1_bf16_x4 (pn8, x0),
+ z22 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_bf16_z28:
+** ldnt1h {z28\.h(?: - |, )z31\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_z28, svbfloat16x4_t, bfloat16_t,
+ z28 = svldnt1_bf16_x4 (pn8, x0),
+ z28 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_bf16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1h {z0\.h(?: - |, )z3\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_pn0, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_bf16_x4 (pn0, x0),
+ z0 = svldnt1_x4 (pn0, x0))
+
+/*
+** ldnt1_bf16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1h {z0\.h(?: - |, )z3\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_pn7, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_bf16_x4 (pn7, x0),
+ z0 = svldnt1_x4 (pn7, x0))
+
+/*
+** ldnt1_bf16_pn15:
+** ldnt1h {z0\.h(?: - |, )z3\.h}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_bf16_pn15, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_bf16_x4 (pn15, x0),
+ z0 = svldnt1_x4 (pn15, x0))
+
+/*
+** ldnt1_vnum_bf16_0:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_0, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x4 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_bf16_1:
+** incb x0
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_1, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x4 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_bf16_2:
+** incb x0, all, mul #2
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_2, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x4 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_bf16_3:
+** incb x0, all, mul #3
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_3, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x4 (pn8, x0, 3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ldnt1_vnum_bf16_4:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_4, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x4 (pn8, x0, 4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ldnt1_vnum_bf16_28:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_28, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x4 (pn8, x0, 28),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ldnt1_vnum_bf16_32:
+** [^{]*
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_32, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x4 (pn8, x0, 32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_bf16_m1:
+** decb x0
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_m1, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x4 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_bf16_m2:
+** decb x0, all, mul #2
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_m2, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x4 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_bf16_m3:
+** decb x0, all, mul #3
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_m3, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x4 (pn8, x0, -3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ldnt1_vnum_bf16_m4:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_m4, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x4 (pn8, x0, -4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ldnt1_vnum_bf16_m32:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_m32, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x4 (pn8, x0, -32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ldnt1_vnum_bf16_m36:
+** [^{]*
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_m36, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x4 (pn8, x0, -36),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ldnt1_vnum_bf16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_bf16_x1, svbfloat16x4_t, bfloat16_t,
+ z0 = svldnt1_vnum_bf16_x4 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f16_x2.c
new file mode 100644
index 0000000..6ae6114
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f16_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_f16_base:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_base, svfloat16x2_t, float16_t,
+ z0 = svldnt1_f16_x2 (pn8, x0),
+ z0 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_f16_index:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_index, svfloat16x2_t, float16_t,
+ z0 = svldnt1_f16_x2 (pn8, x0 + x1),
+ z0 = svldnt1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f16_1:
+** incb x0
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_1, svfloat16x2_t, float16_t,
+ z0 = svldnt1_f16_x2 (pn8, x0 + svcnth ()),
+ z0 = svldnt1_x2 (pn8, x0 + svcnth ()))
+
+/*
+** ldnt1_f16_2:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_2, svfloat16x2_t, float16_t,
+ z0 = svldnt1_f16_x2 (pn8, x0 + svcnth () * 2),
+ z0 = svldnt1_x2 (pn8, x0 + svcnth () * 2))
+
+/*
+** ldnt1_f16_14:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_14, svfloat16x2_t, float16_t,
+ z0 = svldnt1_f16_x2 (pn8, x0 + svcnth () * 14),
+ z0 = svldnt1_x2 (pn8, x0 + svcnth () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f16_16:
+** incb x0, all, mul #16
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_16, svfloat16x2_t, float16_t,
+ z0 = svldnt1_f16_x2 (pn8, x0 + svcnth () * 16),
+ z0 = svldnt1_x2 (pn8, x0 + svcnth () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f16_m1:
+** decb x0
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_m1, svfloat16x2_t, float16_t,
+ z0 = svldnt1_f16_x2 (pn8, x0 - svcnth ()),
+ z0 = svldnt1_x2 (pn8, x0 - svcnth ()))
+
+/*
+** ldnt1_f16_m2:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_m2, svfloat16x2_t, float16_t,
+ z0 = svldnt1_f16_x2 (pn8, x0 - svcnth () * 2),
+ z0 = svldnt1_x2 (pn8, x0 - svcnth () * 2))
+
+/*
+** ldnt1_f16_m16:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_m16, svfloat16x2_t, float16_t,
+ z0 = svldnt1_f16_x2 (pn8, x0 - svcnth () * 16),
+ z0 = svldnt1_x2 (pn8, x0 - svcnth () * 16))
+
+/*
+** ldnt1_f16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_m18, svfloat16x2_t, float16_t,
+ z0 = svldnt1_f16_x2 (pn8, x0 - svcnth () * 18),
+ z0 = svldnt1_x2 (pn8, x0 - svcnth () * 18))
+
+/*
+** ldnt1_f16_z17:
+** ldnt1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_z17, svfloat16x2_t, float16_t,
+ z17 = svldnt1_f16_x2 (pn8, x0),
+ z17 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_f16_z22:
+** ldnt1h {z22\.h(?: - |, )z23\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_z22, svfloat16x2_t, float16_t,
+ z22 = svldnt1_f16_x2 (pn8, x0),
+ z22 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_f16_z28:
+** ldnt1h {z28\.h(?: - |, )z29\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_z28, svfloat16x2_t, float16_t,
+ z28 = svldnt1_f16_x2 (pn8, x0),
+ z28 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_f16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_pn0, svfloat16x2_t, float16_t,
+ z0 = svldnt1_f16_x2 (pn0, x0),
+ z0 = svldnt1_x2 (pn0, x0))
+
+/*
+** ldnt1_f16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_pn7, svfloat16x2_t, float16_t,
+ z0 = svldnt1_f16_x2 (pn7, x0),
+ z0 = svldnt1_x2 (pn7, x0))
+
+/*
+** ldnt1_f16_pn15:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_pn15, svfloat16x2_t, float16_t,
+ z0 = svldnt1_f16_x2 (pn15, x0),
+ z0 = svldnt1_x2 (pn15, x0))
+
+/*
+** ldnt1_vnum_f16_0:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_0, svfloat16x2_t, float16_t,
+ z0 = svldnt1_vnum_f16_x2 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f16_1:
+** incb x0
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_1, svfloat16x2_t, float16_t,
+ z0 = svldnt1_vnum_f16_x2 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ldnt1_vnum_f16_2:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_2, svfloat16x2_t, float16_t,
+ z0 = svldnt1_vnum_f16_x2 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ldnt1_vnum_f16_14:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_14, svfloat16x2_t, float16_t,
+ z0 = svldnt1_vnum_f16_x2 (pn8, x0, 14),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f16_16:
+** incb x0, all, mul #16
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_16, svfloat16x2_t, float16_t,
+ z0 = svldnt1_vnum_f16_x2 (pn8, x0, 16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f16_m1:
+** decb x0
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_m1, svfloat16x2_t, float16_t,
+ z0 = svldnt1_vnum_f16_x2 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ldnt1_vnum_f16_m2:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_m2, svfloat16x2_t, float16_t,
+ z0 = svldnt1_vnum_f16_x2 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ldnt1_vnum_f16_m16:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_m16, svfloat16x2_t, float16_t,
+ z0 = svldnt1_vnum_f16_x2 (pn8, x0, -16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ldnt1_vnum_f16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_m18, svfloat16x2_t, float16_t,
+ z0 = svldnt1_vnum_f16_x2 (pn8, x0, -18),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ldnt1_vnum_f16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_x1, svfloat16x2_t, float16_t,
+ z0 = svldnt1_vnum_f16_x2 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f16_x4.c
new file mode 100644
index 0000000..3dad40c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f16_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_f16_base:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_base, svfloat16x4_t, float16_t,
+ z0 = svldnt1_f16_x4 (pn8, x0),
+ z0 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_f16_index:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_index, svfloat16x4_t, float16_t,
+ z0 = svldnt1_f16_x4 (pn8, x0 + x1),
+ z0 = svldnt1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f16_1:
+** incb x0
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_1, svfloat16x4_t, float16_t,
+ z0 = svldnt1_f16_x4 (pn8, x0 + svcnth ()),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f16_2:
+** incb x0, all, mul #2
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_2, svfloat16x4_t, float16_t,
+ z0 = svldnt1_f16_x4 (pn8, x0 + svcnth () * 2),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f16_3:
+** incb x0, all, mul #3
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_3, svfloat16x4_t, float16_t,
+ z0 = svldnt1_f16_x4 (pn8, x0 + svcnth () * 3),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth () * 3))
+
+/*
+** ldnt1_f16_4:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_4, svfloat16x4_t, float16_t,
+ z0 = svldnt1_f16_x4 (pn8, x0 + svcnth () * 4),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth () * 4))
+
+/*
+** ldnt1_f16_28:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_28, svfloat16x4_t, float16_t,
+ z0 = svldnt1_f16_x4 (pn8, x0 + svcnth () * 28),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth () * 28))
+
+/*
+** ldnt1_f16_32:
+** [^{]*
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_32, svfloat16x4_t, float16_t,
+ z0 = svldnt1_f16_x4 (pn8, x0 + svcnth () * 32),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f16_m1:
+** decb x0
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_m1, svfloat16x4_t, float16_t,
+ z0 = svldnt1_f16_x4 (pn8, x0 - svcnth ()),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f16_m2:
+** decb x0, all, mul #2
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_m2, svfloat16x4_t, float16_t,
+ z0 = svldnt1_f16_x4 (pn8, x0 - svcnth () * 2),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f16_m3:
+** decb x0, all, mul #3
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_m3, svfloat16x4_t, float16_t,
+ z0 = svldnt1_f16_x4 (pn8, x0 - svcnth () * 3),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth () * 3))
+
+/*
+** ldnt1_f16_m4:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+ TEST_LOAD_COUNT (ldnt1_f16_m4, svfloat16x4_t, float16_t,
+ z0 = svldnt1_f16_x4 (pn8, x0 - svcnth () * 4),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth () * 4))
+
+/*
+** ldnt1_f16_m32:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_m32, svfloat16x4_t, float16_t,
+ z0 = svldnt1_f16_x4 (pn8, x0 - svcnth () * 32),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth () * 32))
+
+/*
+** ldnt1_f16_m36:
+** [^{]*
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_m36, svfloat16x4_t, float16_t,
+ z0 = svldnt1_f16_x4 (pn8, x0 - svcnth () * 36),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth () * 36))
+
+/*
+** ldnt1_f16_z17:
+** ldnt1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_z17, svfloat16x4_t, float16_t,
+ z17 = svldnt1_f16_x4 (pn8, x0),
+ z17 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_f16_z22:
+** ldnt1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_z22, svfloat16x4_t, float16_t,
+ z22 = svldnt1_f16_x4 (pn8, x0),
+ z22 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_f16_z28:
+** ldnt1h {z28\.h(?: - |, )z31\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_z28, svfloat16x4_t, float16_t,
+ z28 = svldnt1_f16_x4 (pn8, x0),
+ z28 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_f16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1h {z0\.h(?: - |, )z3\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_pn0, svfloat16x4_t, float16_t,
+ z0 = svldnt1_f16_x4 (pn0, x0),
+ z0 = svldnt1_x4 (pn0, x0))
+
+/*
+** ldnt1_f16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1h {z0\.h(?: - |, )z3\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_pn7, svfloat16x4_t, float16_t,
+ z0 = svldnt1_f16_x4 (pn7, x0),
+ z0 = svldnt1_x4 (pn7, x0))
+
+/*
+** ldnt1_f16_pn15:
+** ldnt1h {z0\.h(?: - |, )z3\.h}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f16_pn15, svfloat16x4_t, float16_t,
+ z0 = svldnt1_f16_x4 (pn15, x0),
+ z0 = svldnt1_x4 (pn15, x0))
+
+/*
+** ldnt1_vnum_f16_0:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_0, svfloat16x4_t, float16_t,
+ z0 = svldnt1_vnum_f16_x4 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f16_1:
+** incb x0
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_1, svfloat16x4_t, float16_t,
+ z0 = svldnt1_vnum_f16_x4 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f16_2:
+** incb x0, all, mul #2
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_2, svfloat16x4_t, float16_t,
+ z0 = svldnt1_vnum_f16_x4 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f16_3:
+** incb x0, all, mul #3
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_3, svfloat16x4_t, float16_t,
+ z0 = svldnt1_vnum_f16_x4 (pn8, x0, 3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ldnt1_vnum_f16_4:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_4, svfloat16x4_t, float16_t,
+ z0 = svldnt1_vnum_f16_x4 (pn8, x0, 4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ldnt1_vnum_f16_28:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_28, svfloat16x4_t, float16_t,
+ z0 = svldnt1_vnum_f16_x4 (pn8, x0, 28),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ldnt1_vnum_f16_32:
+** [^{]*
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_32, svfloat16x4_t, float16_t,
+ z0 = svldnt1_vnum_f16_x4 (pn8, x0, 32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f16_m1:
+** decb x0
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_m1, svfloat16x4_t, float16_t,
+ z0 = svldnt1_vnum_f16_x4 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f16_m2:
+** decb x0, all, mul #2
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_m2, svfloat16x4_t, float16_t,
+ z0 = svldnt1_vnum_f16_x4 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f16_m3:
+** decb x0, all, mul #3
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_m3, svfloat16x4_t, float16_t,
+ z0 = svldnt1_vnum_f16_x4 (pn8, x0, -3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ldnt1_vnum_f16_m4:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_m4, svfloat16x4_t, float16_t,
+ z0 = svldnt1_vnum_f16_x4 (pn8, x0, -4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ldnt1_vnum_f16_m32:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_m32, svfloat16x4_t, float16_t,
+ z0 = svldnt1_vnum_f16_x4 (pn8, x0, -32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ldnt1_vnum_f16_m36:
+** [^{]*
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_m36, svfloat16x4_t, float16_t,
+ z0 = svldnt1_vnum_f16_x4 (pn8, x0, -36),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ldnt1_vnum_f16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f16_x1, svfloat16x4_t, float16_t,
+ z0 = svldnt1_vnum_f16_x4 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f32_x2.c
new file mode 100644
index 0000000..dce1b82
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f32_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_f32_base:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_base, svfloat32x2_t, float32_t,
+ z0 = svldnt1_f32_x2 (pn8, x0),
+ z0 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_f32_index:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_index, svfloat32x2_t, float32_t,
+ z0 = svldnt1_f32_x2 (pn8, x0 + x1),
+ z0 = svldnt1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f32_1:
+** incb x0
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_1, svfloat32x2_t, float32_t,
+ z0 = svldnt1_f32_x2 (pn8, x0 + svcntw ()),
+ z0 = svldnt1_x2 (pn8, x0 + svcntw ()))
+
+/*
+** ldnt1_f32_2:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_2, svfloat32x2_t, float32_t,
+ z0 = svldnt1_f32_x2 (pn8, x0 + svcntw () * 2),
+ z0 = svldnt1_x2 (pn8, x0 + svcntw () * 2))
+
+/*
+** ldnt1_f32_14:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_14, svfloat32x2_t, float32_t,
+ z0 = svldnt1_f32_x2 (pn8, x0 + svcntw () * 14),
+ z0 = svldnt1_x2 (pn8, x0 + svcntw () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f32_16:
+** incb x0, all, mul #16
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_16, svfloat32x2_t, float32_t,
+ z0 = svldnt1_f32_x2 (pn8, x0 + svcntw () * 16),
+ z0 = svldnt1_x2 (pn8, x0 + svcntw () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f32_m1:
+** decb x0
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_m1, svfloat32x2_t, float32_t,
+ z0 = svldnt1_f32_x2 (pn8, x0 - svcntw ()),
+ z0 = svldnt1_x2 (pn8, x0 - svcntw ()))
+
+/*
+** ldnt1_f32_m2:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_m2, svfloat32x2_t, float32_t,
+ z0 = svldnt1_f32_x2 (pn8, x0 - svcntw () * 2),
+ z0 = svldnt1_x2 (pn8, x0 - svcntw () * 2))
+
+/*
+** ldnt1_f32_m16:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_m16, svfloat32x2_t, float32_t,
+ z0 = svldnt1_f32_x2 (pn8, x0 - svcntw () * 16),
+ z0 = svldnt1_x2 (pn8, x0 - svcntw () * 16))
+
+/*
+** ldnt1_f32_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_m18, svfloat32x2_t, float32_t,
+ z0 = svldnt1_f32_x2 (pn8, x0 - svcntw () * 18),
+ z0 = svldnt1_x2 (pn8, x0 - svcntw () * 18))
+
+/*
+** ldnt1_f32_z17:
+** ldnt1w {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_z17, svfloat32x2_t, float32_t,
+ z17 = svldnt1_f32_x2 (pn8, x0),
+ z17 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_f32_z22:
+** ldnt1w {z22\.s(?: - |, )z23\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_z22, svfloat32x2_t, float32_t,
+ z22 = svldnt1_f32_x2 (pn8, x0),
+ z22 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_f32_z28:
+** ldnt1w {z28\.s(?: - |, )z29\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_z28, svfloat32x2_t, float32_t,
+ z28 = svldnt1_f32_x2 (pn8, x0),
+ z28 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_f32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_pn0, svfloat32x2_t, float32_t,
+ z0 = svldnt1_f32_x2 (pn0, x0),
+ z0 = svldnt1_x2 (pn0, x0))
+
+/*
+** ldnt1_f32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_pn7, svfloat32x2_t, float32_t,
+ z0 = svldnt1_f32_x2 (pn7, x0),
+ z0 = svldnt1_x2 (pn7, x0))
+
+/*
+** ldnt1_f32_pn15:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_pn15, svfloat32x2_t, float32_t,
+ z0 = svldnt1_f32_x2 (pn15, x0),
+ z0 = svldnt1_x2 (pn15, x0))
+
+/*
+** ldnt1_vnum_f32_0:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_0, svfloat32x2_t, float32_t,
+ z0 = svldnt1_vnum_f32_x2 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f32_1:
+** incb x0
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_1, svfloat32x2_t, float32_t,
+ z0 = svldnt1_vnum_f32_x2 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ldnt1_vnum_f32_2:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_2, svfloat32x2_t, float32_t,
+ z0 = svldnt1_vnum_f32_x2 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ldnt1_vnum_f32_14:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_14, svfloat32x2_t, float32_t,
+ z0 = svldnt1_vnum_f32_x2 (pn8, x0, 14),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f32_16:
+** incb x0, all, mul #16
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_16, svfloat32x2_t, float32_t,
+ z0 = svldnt1_vnum_f32_x2 (pn8, x0, 16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f32_m1:
+** decb x0
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_m1, svfloat32x2_t, float32_t,
+ z0 = svldnt1_vnum_f32_x2 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ldnt1_vnum_f32_m2:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_m2, svfloat32x2_t, float32_t,
+ z0 = svldnt1_vnum_f32_x2 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ldnt1_vnum_f32_m16:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_m16, svfloat32x2_t, float32_t,
+ z0 = svldnt1_vnum_f32_x2 (pn8, x0, -16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ldnt1_vnum_f32_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_m18, svfloat32x2_t, float32_t,
+ z0 = svldnt1_vnum_f32_x2 (pn8, x0, -18),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ldnt1_vnum_f32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_x1, svfloat32x2_t, float32_t,
+ z0 = svldnt1_vnum_f32_x2 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f32_x4.c
new file mode 100644
index 0000000..852c7c4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f32_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_f32_base:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_base, svfloat32x4_t, float32_t,
+ z0 = svldnt1_f32_x4 (pn8, x0),
+ z0 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_f32_index:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_index, svfloat32x4_t, float32_t,
+ z0 = svldnt1_f32_x4 (pn8, x0 + x1),
+ z0 = svldnt1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f32_1:
+** incb x0
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_1, svfloat32x4_t, float32_t,
+ z0 = svldnt1_f32_x4 (pn8, x0 + svcntw ()),
+ z0 = svldnt1_x4 (pn8, x0 + svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f32_2:
+** incb x0, all, mul #2
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_2, svfloat32x4_t, float32_t,
+ z0 = svldnt1_f32_x4 (pn8, x0 + svcntw () * 2),
+ z0 = svldnt1_x4 (pn8, x0 + svcntw () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f32_3:
+** incb x0, all, mul #3
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_3, svfloat32x4_t, float32_t,
+ z0 = svldnt1_f32_x4 (pn8, x0 + svcntw () * 3),
+ z0 = svldnt1_x4 (pn8, x0 + svcntw () * 3))
+
+/*
+** ldnt1_f32_4:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_4, svfloat32x4_t, float32_t,
+ z0 = svldnt1_f32_x4 (pn8, x0 + svcntw () * 4),
+ z0 = svldnt1_x4 (pn8, x0 + svcntw () * 4))
+
+/*
+** ldnt1_f32_28:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_28, svfloat32x4_t, float32_t,
+ z0 = svldnt1_f32_x4 (pn8, x0 + svcntw () * 28),
+ z0 = svldnt1_x4 (pn8, x0 + svcntw () * 28))
+
+/*
+** ldnt1_f32_32:
+** [^{]*
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_32, svfloat32x4_t, float32_t,
+ z0 = svldnt1_f32_x4 (pn8, x0 + svcntw () * 32),
+ z0 = svldnt1_x4 (pn8, x0 + svcntw () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f32_m1:
+** decb x0
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_m1, svfloat32x4_t, float32_t,
+ z0 = svldnt1_f32_x4 (pn8, x0 - svcntw ()),
+ z0 = svldnt1_x4 (pn8, x0 - svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f32_m2:
+** decb x0, all, mul #2
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_m2, svfloat32x4_t, float32_t,
+ z0 = svldnt1_f32_x4 (pn8, x0 - svcntw () * 2),
+ z0 = svldnt1_x4 (pn8, x0 - svcntw () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f32_m3:
+** decb x0, all, mul #3
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_m3, svfloat32x4_t, float32_t,
+ z0 = svldnt1_f32_x4 (pn8, x0 - svcntw () * 3),
+ z0 = svldnt1_x4 (pn8, x0 - svcntw () * 3))
+
+/*
+** ldnt1_f32_m4:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_m4, svfloat32x4_t, float32_t,
+ z0 = svldnt1_f32_x4 (pn8, x0 - svcntw () * 4),
+ z0 = svldnt1_x4 (pn8, x0 - svcntw () * 4))
+
+/*
+** ldnt1_f32_m32:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_m32, svfloat32x4_t, float32_t,
+ z0 = svldnt1_f32_x4 (pn8, x0 - svcntw () * 32),
+ z0 = svldnt1_x4 (pn8, x0 - svcntw () * 32))
+
+/*
+** ldnt1_f32_m36:
+** [^{]*
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_m36, svfloat32x4_t, float32_t,
+ z0 = svldnt1_f32_x4 (pn8, x0 - svcntw () * 36),
+ z0 = svldnt1_x4 (pn8, x0 - svcntw () * 36))
+
+/*
+** ldnt1_f32_z17:
+** ldnt1w {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_z17, svfloat32x4_t, float32_t,
+ z17 = svldnt1_f32_x4 (pn8, x0),
+ z17 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_f32_z22:
+** ldnt1w {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_z22, svfloat32x4_t, float32_t,
+ z22 = svldnt1_f32_x4 (pn8, x0),
+ z22 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_f32_z28:
+** ldnt1w {z28\.s(?: - |, )z31\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_z28, svfloat32x4_t, float32_t,
+ z28 = svldnt1_f32_x4 (pn8, x0),
+ z28 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_f32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1w {z0\.s(?: - |, )z3\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_pn0, svfloat32x4_t, float32_t,
+ z0 = svldnt1_f32_x4 (pn0, x0),
+ z0 = svldnt1_x4 (pn0, x0))
+
+/*
+** ldnt1_f32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1w {z0\.s(?: - |, )z3\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_pn7, svfloat32x4_t, float32_t,
+ z0 = svldnt1_f32_x4 (pn7, x0),
+ z0 = svldnt1_x4 (pn7, x0))
+
+/*
+** ldnt1_f32_pn15:
+** ldnt1w {z0\.s(?: - |, )z3\.s}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f32_pn15, svfloat32x4_t, float32_t,
+ z0 = svldnt1_f32_x4 (pn15, x0),
+ z0 = svldnt1_x4 (pn15, x0))
+
+/*
+** ldnt1_vnum_f32_0:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_0, svfloat32x4_t, float32_t,
+ z0 = svldnt1_vnum_f32_x4 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f32_1:
+** incb x0
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_1, svfloat32x4_t, float32_t,
+ z0 = svldnt1_vnum_f32_x4 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f32_2:
+** incb x0, all, mul #2
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_2, svfloat32x4_t, float32_t,
+ z0 = svldnt1_vnum_f32_x4 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f32_3:
+** incb x0, all, mul #3
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_3, svfloat32x4_t, float32_t,
+ z0 = svldnt1_vnum_f32_x4 (pn8, x0, 3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ldnt1_vnum_f32_4:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_4, svfloat32x4_t, float32_t,
+ z0 = svldnt1_vnum_f32_x4 (pn8, x0, 4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ldnt1_vnum_f32_28:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_28, svfloat32x4_t, float32_t,
+ z0 = svldnt1_vnum_f32_x4 (pn8, x0, 28),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ldnt1_vnum_f32_32:
+** [^{]*
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_32, svfloat32x4_t, float32_t,
+ z0 = svldnt1_vnum_f32_x4 (pn8, x0, 32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f32_m1:
+** decb x0
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_m1, svfloat32x4_t, float32_t,
+ z0 = svldnt1_vnum_f32_x4 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f32_m2:
+** decb x0, all, mul #2
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_m2, svfloat32x4_t, float32_t,
+ z0 = svldnt1_vnum_f32_x4 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f32_m3:
+** decb x0, all, mul #3
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_m3, svfloat32x4_t, float32_t,
+ z0 = svldnt1_vnum_f32_x4 (pn8, x0, -3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ldnt1_vnum_f32_m4:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_m4, svfloat32x4_t, float32_t,
+ z0 = svldnt1_vnum_f32_x4 (pn8, x0, -4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ldnt1_vnum_f32_m32:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_m32, svfloat32x4_t, float32_t,
+ z0 = svldnt1_vnum_f32_x4 (pn8, x0, -32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ldnt1_vnum_f32_m36:
+** [^{]*
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_m36, svfloat32x4_t, float32_t,
+ z0 = svldnt1_vnum_f32_x4 (pn8, x0, -36),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ldnt1_vnum_f32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f32_x1, svfloat32x4_t, float32_t,
+ z0 = svldnt1_vnum_f32_x4 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f64_x2.c
new file mode 100644
index 0000000..77319bb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f64_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_f64_base:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_base, svfloat64x2_t, float64_t,
+ z0 = svldnt1_f64_x2 (pn8, x0),
+ z0 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_f64_index:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_index, svfloat64x2_t, float64_t,
+ z0 = svldnt1_f64_x2 (pn8, x0 + x1),
+ z0 = svldnt1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f64_1:
+** incb x0
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_1, svfloat64x2_t, float64_t,
+ z0 = svldnt1_f64_x2 (pn8, x0 + svcntd ()),
+ z0 = svldnt1_x2 (pn8, x0 + svcntd ()))
+
+/*
+** ldnt1_f64_2:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_2, svfloat64x2_t, float64_t,
+ z0 = svldnt1_f64_x2 (pn8, x0 + svcntd () * 2),
+ z0 = svldnt1_x2 (pn8, x0 + svcntd () * 2))
+
+/*
+** ldnt1_f64_14:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_14, svfloat64x2_t, float64_t,
+ z0 = svldnt1_f64_x2 (pn8, x0 + svcntd () * 14),
+ z0 = svldnt1_x2 (pn8, x0 + svcntd () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f64_16:
+** incb x0, all, mul #16
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_16, svfloat64x2_t, float64_t,
+ z0 = svldnt1_f64_x2 (pn8, x0 + svcntd () * 16),
+ z0 = svldnt1_x2 (pn8, x0 + svcntd () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f64_m1:
+** decb x0
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_m1, svfloat64x2_t, float64_t,
+ z0 = svldnt1_f64_x2 (pn8, x0 - svcntd ()),
+ z0 = svldnt1_x2 (pn8, x0 - svcntd ()))
+
+/*
+** ldnt1_f64_m2:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_m2, svfloat64x2_t, float64_t,
+ z0 = svldnt1_f64_x2 (pn8, x0 - svcntd () * 2),
+ z0 = svldnt1_x2 (pn8, x0 - svcntd () * 2))
+
+/*
+** ldnt1_f64_m16:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_m16, svfloat64x2_t, float64_t,
+ z0 = svldnt1_f64_x2 (pn8, x0 - svcntd () * 16),
+ z0 = svldnt1_x2 (pn8, x0 - svcntd () * 16))
+
+/*
+** ldnt1_f64_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_m18, svfloat64x2_t, float64_t,
+ z0 = svldnt1_f64_x2 (pn8, x0 - svcntd () * 18),
+ z0 = svldnt1_x2 (pn8, x0 - svcntd () * 18))
+
+/*
+** ldnt1_f64_z17:
+** ldnt1d {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_z17, svfloat64x2_t, float64_t,
+ z17 = svldnt1_f64_x2 (pn8, x0),
+ z17 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_f64_z22:
+** ldnt1d {z22\.d(?: - |, )z23\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_z22, svfloat64x2_t, float64_t,
+ z22 = svldnt1_f64_x2 (pn8, x0),
+ z22 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_f64_z28:
+** ldnt1d {z28\.d(?: - |, )z29\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_z28, svfloat64x2_t, float64_t,
+ z28 = svldnt1_f64_x2 (pn8, x0),
+ z28 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_f64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_pn0, svfloat64x2_t, float64_t,
+ z0 = svldnt1_f64_x2 (pn0, x0),
+ z0 = svldnt1_x2 (pn0, x0))
+
+/*
+** ldnt1_f64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_pn7, svfloat64x2_t, float64_t,
+ z0 = svldnt1_f64_x2 (pn7, x0),
+ z0 = svldnt1_x2 (pn7, x0))
+
+/*
+** ldnt1_f64_pn15:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_pn15, svfloat64x2_t, float64_t,
+ z0 = svldnt1_f64_x2 (pn15, x0),
+ z0 = svldnt1_x2 (pn15, x0))
+
+/*
+** ldnt1_vnum_f64_0:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_0, svfloat64x2_t, float64_t,
+ z0 = svldnt1_vnum_f64_x2 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f64_1:
+** incb x0
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_1, svfloat64x2_t, float64_t,
+ z0 = svldnt1_vnum_f64_x2 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ldnt1_vnum_f64_2:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_2, svfloat64x2_t, float64_t,
+ z0 = svldnt1_vnum_f64_x2 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ldnt1_vnum_f64_14:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_14, svfloat64x2_t, float64_t,
+ z0 = svldnt1_vnum_f64_x2 (pn8, x0, 14),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f64_16:
+** incb x0, all, mul #16
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_16, svfloat64x2_t, float64_t,
+ z0 = svldnt1_vnum_f64_x2 (pn8, x0, 16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f64_m1:
+** decb x0
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_m1, svfloat64x2_t, float64_t,
+ z0 = svldnt1_vnum_f64_x2 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ldnt1_vnum_f64_m2:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_m2, svfloat64x2_t, float64_t,
+ z0 = svldnt1_vnum_f64_x2 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ldnt1_vnum_f64_m16:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_m16, svfloat64x2_t, float64_t,
+ z0 = svldnt1_vnum_f64_x2 (pn8, x0, -16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ldnt1_vnum_f64_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_m18, svfloat64x2_t, float64_t,
+ z0 = svldnt1_vnum_f64_x2 (pn8, x0, -18),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ldnt1_vnum_f64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_x1, svfloat64x2_t, float64_t,
+ z0 = svldnt1_vnum_f64_x2 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f64_x4.c
new file mode 100644
index 0000000..bbaf66a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_f64_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_f64_base:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_base, svfloat64x4_t, float64_t,
+ z0 = svldnt1_f64_x4 (pn8, x0),
+ z0 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_f64_index:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_index, svfloat64x4_t, float64_t,
+ z0 = svldnt1_f64_x4 (pn8, x0 + x1),
+ z0 = svldnt1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f64_1:
+** incb x0
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_1, svfloat64x4_t, float64_t,
+ z0 = svldnt1_f64_x4 (pn8, x0 + svcntd ()),
+ z0 = svldnt1_x4 (pn8, x0 + svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f64_2:
+** incb x0, all, mul #2
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_2, svfloat64x4_t, float64_t,
+ z0 = svldnt1_f64_x4 (pn8, x0 + svcntd () * 2),
+ z0 = svldnt1_x4 (pn8, x0 + svcntd () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f64_3:
+** incb x0, all, mul #3
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_3, svfloat64x4_t, float64_t,
+ z0 = svldnt1_f64_x4 (pn8, x0 + svcntd () * 3),
+ z0 = svldnt1_x4 (pn8, x0 + svcntd () * 3))
+
+/*
+** ldnt1_f64_4:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_4, svfloat64x4_t, float64_t,
+ z0 = svldnt1_f64_x4 (pn8, x0 + svcntd () * 4),
+ z0 = svldnt1_x4 (pn8, x0 + svcntd () * 4))
+
+/*
+** ldnt1_f64_28:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_28, svfloat64x4_t, float64_t,
+ z0 = svldnt1_f64_x4 (pn8, x0 + svcntd () * 28),
+ z0 = svldnt1_x4 (pn8, x0 + svcntd () * 28))
+
+/*
+** ldnt1_f64_32:
+** [^{]*
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_32, svfloat64x4_t, float64_t,
+ z0 = svldnt1_f64_x4 (pn8, x0 + svcntd () * 32),
+ z0 = svldnt1_x4 (pn8, x0 + svcntd () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f64_m1:
+** decb x0
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_m1, svfloat64x4_t, float64_t,
+ z0 = svldnt1_f64_x4 (pn8, x0 - svcntd ()),
+ z0 = svldnt1_x4 (pn8, x0 - svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f64_m2:
+** decb x0, all, mul #2
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_m2, svfloat64x4_t, float64_t,
+ z0 = svldnt1_f64_x4 (pn8, x0 - svcntd () * 2),
+ z0 = svldnt1_x4 (pn8, x0 - svcntd () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_f64_m3:
+** decb x0, all, mul #3
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_m3, svfloat64x4_t, float64_t,
+ z0 = svldnt1_f64_x4 (pn8, x0 - svcntd () * 3),
+ z0 = svldnt1_x4 (pn8, x0 - svcntd () * 3))
+
+/*
+** ldnt1_f64_m4:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_m4, svfloat64x4_t, float64_t,
+ z0 = svldnt1_f64_x4 (pn8, x0 - svcntd () * 4),
+ z0 = svldnt1_x4 (pn8, x0 - svcntd () * 4))
+
+/*
+** ldnt1_f64_m32:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_m32, svfloat64x4_t, float64_t,
+ z0 = svldnt1_f64_x4 (pn8, x0 - svcntd () * 32),
+ z0 = svldnt1_x4 (pn8, x0 - svcntd () * 32))
+
+/*
+** ldnt1_f64_m36:
+** [^{]*
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_m36, svfloat64x4_t, float64_t,
+ z0 = svldnt1_f64_x4 (pn8, x0 - svcntd () * 36),
+ z0 = svldnt1_x4 (pn8, x0 - svcntd () * 36))
+
+/*
+** ldnt1_f64_z17:
+** ldnt1d {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_z17, svfloat64x4_t, float64_t,
+ z17 = svldnt1_f64_x4 (pn8, x0),
+ z17 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_f64_z22:
+** ldnt1d {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_z22, svfloat64x4_t, float64_t,
+ z22 = svldnt1_f64_x4 (pn8, x0),
+ z22 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_f64_z28:
+** ldnt1d {z28\.d(?: - |, )z31\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_z28, svfloat64x4_t, float64_t,
+ z28 = svldnt1_f64_x4 (pn8, x0),
+ z28 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_f64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1d {z0\.d(?: - |, )z3\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_pn0, svfloat64x4_t, float64_t,
+ z0 = svldnt1_f64_x4 (pn0, x0),
+ z0 = svldnt1_x4 (pn0, x0))
+
+/*
+** ldnt1_f64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1d {z0\.d(?: - |, )z3\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_pn7, svfloat64x4_t, float64_t,
+ z0 = svldnt1_f64_x4 (pn7, x0),
+ z0 = svldnt1_x4 (pn7, x0))
+
+/*
+** ldnt1_f64_pn15:
+** ldnt1d {z0\.d(?: - |, )z3\.d}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_f64_pn15, svfloat64x4_t, float64_t,
+ z0 = svldnt1_f64_x4 (pn15, x0),
+ z0 = svldnt1_x4 (pn15, x0))
+
+/*
+** ldnt1_vnum_f64_0:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_0, svfloat64x4_t, float64_t,
+ z0 = svldnt1_vnum_f64_x4 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f64_1:
+** incb x0
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_1, svfloat64x4_t, float64_t,
+ z0 = svldnt1_vnum_f64_x4 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f64_2:
+** incb x0, all, mul #2
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_2, svfloat64x4_t, float64_t,
+ z0 = svldnt1_vnum_f64_x4 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f64_3:
+** incb x0, all, mul #3
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_3, svfloat64x4_t, float64_t,
+ z0 = svldnt1_vnum_f64_x4 (pn8, x0, 3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ldnt1_vnum_f64_4:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_4, svfloat64x4_t, float64_t,
+ z0 = svldnt1_vnum_f64_x4 (pn8, x0, 4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ldnt1_vnum_f64_28:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_28, svfloat64x4_t, float64_t,
+ z0 = svldnt1_vnum_f64_x4 (pn8, x0, 28),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ldnt1_vnum_f64_32:
+** [^{]*
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_32, svfloat64x4_t, float64_t,
+ z0 = svldnt1_vnum_f64_x4 (pn8, x0, 32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f64_m1:
+** decb x0
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_m1, svfloat64x4_t, float64_t,
+ z0 = svldnt1_vnum_f64_x4 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f64_m2:
+** decb x0, all, mul #2
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_m2, svfloat64x4_t, float64_t,
+ z0 = svldnt1_vnum_f64_x4 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_f64_m3:
+** decb x0, all, mul #3
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_m3, svfloat64x4_t, float64_t,
+ z0 = svldnt1_vnum_f64_x4 (pn8, x0, -3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ldnt1_vnum_f64_m4:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_m4, svfloat64x4_t, float64_t,
+ z0 = svldnt1_vnum_f64_x4 (pn8, x0, -4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ldnt1_vnum_f64_m32:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_m32, svfloat64x4_t, float64_t,
+ z0 = svldnt1_vnum_f64_x4 (pn8, x0, -32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ldnt1_vnum_f64_m36:
+** [^{]*
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_m36, svfloat64x4_t, float64_t,
+ z0 = svldnt1_vnum_f64_x4 (pn8, x0, -36),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ldnt1_vnum_f64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_f64_x1, svfloat64x4_t, float64_t,
+ z0 = svldnt1_vnum_f64_x4 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s16_x2.c
new file mode 100644
index 0000000..b96df05
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s16_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_s16_base:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_base, svint16x2_t, int16_t,
+ z0 = svldnt1_s16_x2 (pn8, x0),
+ z0 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_s16_index:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_index, svint16x2_t, int16_t,
+ z0 = svldnt1_s16_x2 (pn8, x0 + x1),
+ z0 = svldnt1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s16_1:
+** incb x0
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_1, svint16x2_t, int16_t,
+ z0 = svldnt1_s16_x2 (pn8, x0 + svcnth ()),
+ z0 = svldnt1_x2 (pn8, x0 + svcnth ()))
+
+/*
+** ldnt1_s16_2:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_2, svint16x2_t, int16_t,
+ z0 = svldnt1_s16_x2 (pn8, x0 + svcnth () * 2),
+ z0 = svldnt1_x2 (pn8, x0 + svcnth () * 2))
+
+/*
+** ldnt1_s16_14:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_14, svint16x2_t, int16_t,
+ z0 = svldnt1_s16_x2 (pn8, x0 + svcnth () * 14),
+ z0 = svldnt1_x2 (pn8, x0 + svcnth () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s16_16:
+** incb x0, all, mul #16
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_16, svint16x2_t, int16_t,
+ z0 = svldnt1_s16_x2 (pn8, x0 + svcnth () * 16),
+ z0 = svldnt1_x2 (pn8, x0 + svcnth () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s16_m1:
+** decb x0
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_m1, svint16x2_t, int16_t,
+ z0 = svldnt1_s16_x2 (pn8, x0 - svcnth ()),
+ z0 = svldnt1_x2 (pn8, x0 - svcnth ()))
+
+/*
+** ldnt1_s16_m2:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_m2, svint16x2_t, int16_t,
+ z0 = svldnt1_s16_x2 (pn8, x0 - svcnth () * 2),
+ z0 = svldnt1_x2 (pn8, x0 - svcnth () * 2))
+
+/*
+** ldnt1_s16_m16:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_m16, svint16x2_t, int16_t,
+ z0 = svldnt1_s16_x2 (pn8, x0 - svcnth () * 16),
+ z0 = svldnt1_x2 (pn8, x0 - svcnth () * 16))
+
+/*
+** ldnt1_s16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_m18, svint16x2_t, int16_t,
+ z0 = svldnt1_s16_x2 (pn8, x0 - svcnth () * 18),
+ z0 = svldnt1_x2 (pn8, x0 - svcnth () * 18))
+
+/*
+** ldnt1_s16_z17:
+** ldnt1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_z17, svint16x2_t, int16_t,
+ z17 = svldnt1_s16_x2 (pn8, x0),
+ z17 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_s16_z22:
+** ldnt1h {z22\.h(?: - |, )z23\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_z22, svint16x2_t, int16_t,
+ z22 = svldnt1_s16_x2 (pn8, x0),
+ z22 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_s16_z28:
+** ldnt1h {z28\.h(?: - |, )z29\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_z28, svint16x2_t, int16_t,
+ z28 = svldnt1_s16_x2 (pn8, x0),
+ z28 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_s16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_pn0, svint16x2_t, int16_t,
+ z0 = svldnt1_s16_x2 (pn0, x0),
+ z0 = svldnt1_x2 (pn0, x0))
+
+/*
+** ldnt1_s16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_pn7, svint16x2_t, int16_t,
+ z0 = svldnt1_s16_x2 (pn7, x0),
+ z0 = svldnt1_x2 (pn7, x0))
+
+/*
+** ldnt1_s16_pn15:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_pn15, svint16x2_t, int16_t,
+ z0 = svldnt1_s16_x2 (pn15, x0),
+ z0 = svldnt1_x2 (pn15, x0))
+
+/*
+** ldnt1_vnum_s16_0:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_0, svint16x2_t, int16_t,
+ z0 = svldnt1_vnum_s16_x2 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s16_1:
+** incb x0
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_1, svint16x2_t, int16_t,
+ z0 = svldnt1_vnum_s16_x2 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ldnt1_vnum_s16_2:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_2, svint16x2_t, int16_t,
+ z0 = svldnt1_vnum_s16_x2 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ldnt1_vnum_s16_14:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_14, svint16x2_t, int16_t,
+ z0 = svldnt1_vnum_s16_x2 (pn8, x0, 14),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s16_16:
+** incb x0, all, mul #16
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_16, svint16x2_t, int16_t,
+ z0 = svldnt1_vnum_s16_x2 (pn8, x0, 16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s16_m1:
+** decb x0
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_m1, svint16x2_t, int16_t,
+ z0 = svldnt1_vnum_s16_x2 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ldnt1_vnum_s16_m2:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_m2, svint16x2_t, int16_t,
+ z0 = svldnt1_vnum_s16_x2 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ldnt1_vnum_s16_m16:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_m16, svint16x2_t, int16_t,
+ z0 = svldnt1_vnum_s16_x2 (pn8, x0, -16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ldnt1_vnum_s16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_m18, svint16x2_t, int16_t,
+ z0 = svldnt1_vnum_s16_x2 (pn8, x0, -18),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ldnt1_vnum_s16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_x1, svint16x2_t, int16_t,
+ z0 = svldnt1_vnum_s16_x2 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s16_x4.c
new file mode 100644
index 0000000..c64ab09
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s16_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_s16_base:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_base, svint16x4_t, int16_t,
+ z0 = svldnt1_s16_x4 (pn8, x0),
+ z0 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_s16_index:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_index, svint16x4_t, int16_t,
+ z0 = svldnt1_s16_x4 (pn8, x0 + x1),
+ z0 = svldnt1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s16_1:
+** incb x0
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_1, svint16x4_t, int16_t,
+ z0 = svldnt1_s16_x4 (pn8, x0 + svcnth ()),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s16_2:
+** incb x0, all, mul #2
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_2, svint16x4_t, int16_t,
+ z0 = svldnt1_s16_x4 (pn8, x0 + svcnth () * 2),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s16_3:
+** incb x0, all, mul #3
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_3, svint16x4_t, int16_t,
+ z0 = svldnt1_s16_x4 (pn8, x0 + svcnth () * 3),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth () * 3))
+
+/*
+** ldnt1_s16_4:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_4, svint16x4_t, int16_t,
+ z0 = svldnt1_s16_x4 (pn8, x0 + svcnth () * 4),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth () * 4))
+
+/*
+** ldnt1_s16_28:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_28, svint16x4_t, int16_t,
+ z0 = svldnt1_s16_x4 (pn8, x0 + svcnth () * 28),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth () * 28))
+
+/*
+** ldnt1_s16_32:
+** [^{]*
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_32, svint16x4_t, int16_t,
+ z0 = svldnt1_s16_x4 (pn8, x0 + svcnth () * 32),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s16_m1:
+** decb x0
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_m1, svint16x4_t, int16_t,
+ z0 = svldnt1_s16_x4 (pn8, x0 - svcnth ()),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s16_m2:
+** decb x0, all, mul #2
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_m2, svint16x4_t, int16_t,
+ z0 = svldnt1_s16_x4 (pn8, x0 - svcnth () * 2),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s16_m3:
+** decb x0, all, mul #3
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_m3, svint16x4_t, int16_t,
+ z0 = svldnt1_s16_x4 (pn8, x0 - svcnth () * 3),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth () * 3))
+
+/*
+** ldnt1_s16_m4:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+ TEST_LOAD_COUNT (ldnt1_s16_m4, svint16x4_t, int16_t,
+ z0 = svldnt1_s16_x4 (pn8, x0 - svcnth () * 4),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth () * 4))
+
+/*
+** ldnt1_s16_m32:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_m32, svint16x4_t, int16_t,
+ z0 = svldnt1_s16_x4 (pn8, x0 - svcnth () * 32),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth () * 32))
+
+/*
+** ldnt1_s16_m36:
+** [^{]*
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_m36, svint16x4_t, int16_t,
+ z0 = svldnt1_s16_x4 (pn8, x0 - svcnth () * 36),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth () * 36))
+
+/*
+** ldnt1_s16_z17:
+** ldnt1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_z17, svint16x4_t, int16_t,
+ z17 = svldnt1_s16_x4 (pn8, x0),
+ z17 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_s16_z22:
+** ldnt1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_z22, svint16x4_t, int16_t,
+ z22 = svldnt1_s16_x4 (pn8, x0),
+ z22 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_s16_z28:
+** ldnt1h {z28\.h(?: - |, )z31\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_z28, svint16x4_t, int16_t,
+ z28 = svldnt1_s16_x4 (pn8, x0),
+ z28 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_s16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1h {z0\.h(?: - |, )z3\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_pn0, svint16x4_t, int16_t,
+ z0 = svldnt1_s16_x4 (pn0, x0),
+ z0 = svldnt1_x4 (pn0, x0))
+
+/*
+** ldnt1_s16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1h {z0\.h(?: - |, )z3\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_pn7, svint16x4_t, int16_t,
+ z0 = svldnt1_s16_x4 (pn7, x0),
+ z0 = svldnt1_x4 (pn7, x0))
+
+/*
+** ldnt1_s16_pn15:
+** ldnt1h {z0\.h(?: - |, )z3\.h}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s16_pn15, svint16x4_t, int16_t,
+ z0 = svldnt1_s16_x4 (pn15, x0),
+ z0 = svldnt1_x4 (pn15, x0))
+
+/*
+** ldnt1_vnum_s16_0:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_0, svint16x4_t, int16_t,
+ z0 = svldnt1_vnum_s16_x4 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s16_1:
+** incb x0
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_1, svint16x4_t, int16_t,
+ z0 = svldnt1_vnum_s16_x4 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s16_2:
+** incb x0, all, mul #2
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_2, svint16x4_t, int16_t,
+ z0 = svldnt1_vnum_s16_x4 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s16_3:
+** incb x0, all, mul #3
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_3, svint16x4_t, int16_t,
+ z0 = svldnt1_vnum_s16_x4 (pn8, x0, 3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ldnt1_vnum_s16_4:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_4, svint16x4_t, int16_t,
+ z0 = svldnt1_vnum_s16_x4 (pn8, x0, 4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ldnt1_vnum_s16_28:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_28, svint16x4_t, int16_t,
+ z0 = svldnt1_vnum_s16_x4 (pn8, x0, 28),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ldnt1_vnum_s16_32:
+** [^{]*
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_32, svint16x4_t, int16_t,
+ z0 = svldnt1_vnum_s16_x4 (pn8, x0, 32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s16_m1:
+** decb x0
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_m1, svint16x4_t, int16_t,
+ z0 = svldnt1_vnum_s16_x4 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s16_m2:
+** decb x0, all, mul #2
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_m2, svint16x4_t, int16_t,
+ z0 = svldnt1_vnum_s16_x4 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s16_m3:
+** decb x0, all, mul #3
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_m3, svint16x4_t, int16_t,
+ z0 = svldnt1_vnum_s16_x4 (pn8, x0, -3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ldnt1_vnum_s16_m4:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_m4, svint16x4_t, int16_t,
+ z0 = svldnt1_vnum_s16_x4 (pn8, x0, -4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ldnt1_vnum_s16_m32:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_m32, svint16x4_t, int16_t,
+ z0 = svldnt1_vnum_s16_x4 (pn8, x0, -32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ldnt1_vnum_s16_m36:
+** [^{]*
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_m36, svint16x4_t, int16_t,
+ z0 = svldnt1_vnum_s16_x4 (pn8, x0, -36),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ldnt1_vnum_s16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s16_x1, svint16x4_t, int16_t,
+ z0 = svldnt1_vnum_s16_x4 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s32_x2.c
new file mode 100644
index 0000000..e37e544
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s32_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_s32_base:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_base, svint32x2_t, int32_t,
+ z0 = svldnt1_s32_x2 (pn8, x0),
+ z0 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_s32_index:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_index, svint32x2_t, int32_t,
+ z0 = svldnt1_s32_x2 (pn8, x0 + x1),
+ z0 = svldnt1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s32_1:
+** incb x0
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_1, svint32x2_t, int32_t,
+ z0 = svldnt1_s32_x2 (pn8, x0 + svcntw ()),
+ z0 = svldnt1_x2 (pn8, x0 + svcntw ()))
+
+/*
+** ldnt1_s32_2:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_2, svint32x2_t, int32_t,
+ z0 = svldnt1_s32_x2 (pn8, x0 + svcntw () * 2),
+ z0 = svldnt1_x2 (pn8, x0 + svcntw () * 2))
+
+/*
+** ldnt1_s32_14:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_14, svint32x2_t, int32_t,
+ z0 = svldnt1_s32_x2 (pn8, x0 + svcntw () * 14),
+ z0 = svldnt1_x2 (pn8, x0 + svcntw () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s32_16:
+** incb x0, all, mul #16
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_16, svint32x2_t, int32_t,
+ z0 = svldnt1_s32_x2 (pn8, x0 + svcntw () * 16),
+ z0 = svldnt1_x2 (pn8, x0 + svcntw () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s32_m1:
+** decb x0
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_m1, svint32x2_t, int32_t,
+ z0 = svldnt1_s32_x2 (pn8, x0 - svcntw ()),
+ z0 = svldnt1_x2 (pn8, x0 - svcntw ()))
+
+/*
+** ldnt1_s32_m2:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_m2, svint32x2_t, int32_t,
+ z0 = svldnt1_s32_x2 (pn8, x0 - svcntw () * 2),
+ z0 = svldnt1_x2 (pn8, x0 - svcntw () * 2))
+
+/*
+** ldnt1_s32_m16:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_m16, svint32x2_t, int32_t,
+ z0 = svldnt1_s32_x2 (pn8, x0 - svcntw () * 16),
+ z0 = svldnt1_x2 (pn8, x0 - svcntw () * 16))
+
+/*
+** ldnt1_s32_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_m18, svint32x2_t, int32_t,
+ z0 = svldnt1_s32_x2 (pn8, x0 - svcntw () * 18),
+ z0 = svldnt1_x2 (pn8, x0 - svcntw () * 18))
+
+/*
+** ldnt1_s32_z17:
+** ldnt1w {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_z17, svint32x2_t, int32_t,
+ z17 = svldnt1_s32_x2 (pn8, x0),
+ z17 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_s32_z22:
+** ldnt1w {z22\.s(?: - |, )z23\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_z22, svint32x2_t, int32_t,
+ z22 = svldnt1_s32_x2 (pn8, x0),
+ z22 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_s32_z28:
+** ldnt1w {z28\.s(?: - |, )z29\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_z28, svint32x2_t, int32_t,
+ z28 = svldnt1_s32_x2 (pn8, x0),
+ z28 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_s32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_pn0, svint32x2_t, int32_t,
+ z0 = svldnt1_s32_x2 (pn0, x0),
+ z0 = svldnt1_x2 (pn0, x0))
+
+/*
+** ldnt1_s32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_pn7, svint32x2_t, int32_t,
+ z0 = svldnt1_s32_x2 (pn7, x0),
+ z0 = svldnt1_x2 (pn7, x0))
+
+/*
+** ldnt1_s32_pn15:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_pn15, svint32x2_t, int32_t,
+ z0 = svldnt1_s32_x2 (pn15, x0),
+ z0 = svldnt1_x2 (pn15, x0))
+
+/*
+** ldnt1_vnum_s32_0:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_0, svint32x2_t, int32_t,
+ z0 = svldnt1_vnum_s32_x2 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s32_1:
+** incb x0
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_1, svint32x2_t, int32_t,
+ z0 = svldnt1_vnum_s32_x2 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ldnt1_vnum_s32_2:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_2, svint32x2_t, int32_t,
+ z0 = svldnt1_vnum_s32_x2 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ldnt1_vnum_s32_14:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_14, svint32x2_t, int32_t,
+ z0 = svldnt1_vnum_s32_x2 (pn8, x0, 14),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s32_16:
+** incb x0, all, mul #16
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_16, svint32x2_t, int32_t,
+ z0 = svldnt1_vnum_s32_x2 (pn8, x0, 16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s32_m1:
+** decb x0
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_m1, svint32x2_t, int32_t,
+ z0 = svldnt1_vnum_s32_x2 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ldnt1_vnum_s32_m2:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_m2, svint32x2_t, int32_t,
+ z0 = svldnt1_vnum_s32_x2 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ldnt1_vnum_s32_m16:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_m16, svint32x2_t, int32_t,
+ z0 = svldnt1_vnum_s32_x2 (pn8, x0, -16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ldnt1_vnum_s32_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_m18, svint32x2_t, int32_t,
+ z0 = svldnt1_vnum_s32_x2 (pn8, x0, -18),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ldnt1_vnum_s32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_x1, svint32x2_t, int32_t,
+ z0 = svldnt1_vnum_s32_x2 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s32_x4.c
new file mode 100644
index 0000000..b97ff93
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s32_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_s32_base:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_base, svint32x4_t, int32_t,
+ z0 = svldnt1_s32_x4 (pn8, x0),
+ z0 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_s32_index:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_index, svint32x4_t, int32_t,
+ z0 = svldnt1_s32_x4 (pn8, x0 + x1),
+ z0 = svldnt1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s32_1:
+** incb x0
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_1, svint32x4_t, int32_t,
+ z0 = svldnt1_s32_x4 (pn8, x0 + svcntw ()),
+ z0 = svldnt1_x4 (pn8, x0 + svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s32_2:
+** incb x0, all, mul #2
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_2, svint32x4_t, int32_t,
+ z0 = svldnt1_s32_x4 (pn8, x0 + svcntw () * 2),
+ z0 = svldnt1_x4 (pn8, x0 + svcntw () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s32_3:
+** incb x0, all, mul #3
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_3, svint32x4_t, int32_t,
+ z0 = svldnt1_s32_x4 (pn8, x0 + svcntw () * 3),
+ z0 = svldnt1_x4 (pn8, x0 + svcntw () * 3))
+
+/*
+** ldnt1_s32_4:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_4, svint32x4_t, int32_t,
+ z0 = svldnt1_s32_x4 (pn8, x0 + svcntw () * 4),
+ z0 = svldnt1_x4 (pn8, x0 + svcntw () * 4))
+
+/*
+** ldnt1_s32_28:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_28, svint32x4_t, int32_t,
+ z0 = svldnt1_s32_x4 (pn8, x0 + svcntw () * 28),
+ z0 = svldnt1_x4 (pn8, x0 + svcntw () * 28))
+
+/*
+** ldnt1_s32_32:
+** [^{]*
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_32, svint32x4_t, int32_t,
+ z0 = svldnt1_s32_x4 (pn8, x0 + svcntw () * 32),
+ z0 = svldnt1_x4 (pn8, x0 + svcntw () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s32_m1:
+** decb x0
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_m1, svint32x4_t, int32_t,
+ z0 = svldnt1_s32_x4 (pn8, x0 - svcntw ()),
+ z0 = svldnt1_x4 (pn8, x0 - svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s32_m2:
+** decb x0, all, mul #2
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_m2, svint32x4_t, int32_t,
+ z0 = svldnt1_s32_x4 (pn8, x0 - svcntw () * 2),
+ z0 = svldnt1_x4 (pn8, x0 - svcntw () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s32_m3:
+** decb x0, all, mul #3
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_m3, svint32x4_t, int32_t,
+ z0 = svldnt1_s32_x4 (pn8, x0 - svcntw () * 3),
+ z0 = svldnt1_x4 (pn8, x0 - svcntw () * 3))
+
+/*
+** ldnt1_s32_m4:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+ TEST_LOAD_COUNT (ldnt1_s32_m4, svint32x4_t, int32_t,
+ z0 = svldnt1_s32_x4 (pn8, x0 - svcntw () * 4),
+ z0 = svldnt1_x4 (pn8, x0 - svcntw () * 4))
+
+/*
+** ldnt1_s32_m32:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_m32, svint32x4_t, int32_t,
+ z0 = svldnt1_s32_x4 (pn8, x0 - svcntw () * 32),
+ z0 = svldnt1_x4 (pn8, x0 - svcntw () * 32))
+
+/*
+** ldnt1_s32_m36:
+** [^{]*
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_m36, svint32x4_t, int32_t,
+ z0 = svldnt1_s32_x4 (pn8, x0 - svcntw () * 36),
+ z0 = svldnt1_x4 (pn8, x0 - svcntw () * 36))
+
+/*
+** ldnt1_s32_z17:
+** ldnt1w {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_z17, svint32x4_t, int32_t,
+ z17 = svldnt1_s32_x4 (pn8, x0),
+ z17 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_s32_z22:
+** ldnt1w {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_z22, svint32x4_t, int32_t,
+ z22 = svldnt1_s32_x4 (pn8, x0),
+ z22 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_s32_z28:
+** ldnt1w {z28\.s(?: - |, )z31\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_z28, svint32x4_t, int32_t,
+ z28 = svldnt1_s32_x4 (pn8, x0),
+ z28 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_s32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1w {z0\.s(?: - |, )z3\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_pn0, svint32x4_t, int32_t,
+ z0 = svldnt1_s32_x4 (pn0, x0),
+ z0 = svldnt1_x4 (pn0, x0))
+
+/*
+** ldnt1_s32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1w {z0\.s(?: - |, )z3\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_pn7, svint32x4_t, int32_t,
+ z0 = svldnt1_s32_x4 (pn7, x0),
+ z0 = svldnt1_x4 (pn7, x0))
+
+/*
+** ldnt1_s32_pn15:
+** ldnt1w {z0\.s(?: - |, )z3\.s}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s32_pn15, svint32x4_t, int32_t,
+ z0 = svldnt1_s32_x4 (pn15, x0),
+ z0 = svldnt1_x4 (pn15, x0))
+
+/*
+** ldnt1_vnum_s32_0:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_0, svint32x4_t, int32_t,
+ z0 = svldnt1_vnum_s32_x4 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s32_1:
+** incb x0
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_1, svint32x4_t, int32_t,
+ z0 = svldnt1_vnum_s32_x4 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s32_2:
+** incb x0, all, mul #2
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_2, svint32x4_t, int32_t,
+ z0 = svldnt1_vnum_s32_x4 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s32_3:
+** incb x0, all, mul #3
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_3, svint32x4_t, int32_t,
+ z0 = svldnt1_vnum_s32_x4 (pn8, x0, 3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ldnt1_vnum_s32_4:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_4, svint32x4_t, int32_t,
+ z0 = svldnt1_vnum_s32_x4 (pn8, x0, 4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ldnt1_vnum_s32_28:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_28, svint32x4_t, int32_t,
+ z0 = svldnt1_vnum_s32_x4 (pn8, x0, 28),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ldnt1_vnum_s32_32:
+** [^{]*
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_32, svint32x4_t, int32_t,
+ z0 = svldnt1_vnum_s32_x4 (pn8, x0, 32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s32_m1:
+** decb x0
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_m1, svint32x4_t, int32_t,
+ z0 = svldnt1_vnum_s32_x4 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s32_m2:
+** decb x0, all, mul #2
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_m2, svint32x4_t, int32_t,
+ z0 = svldnt1_vnum_s32_x4 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s32_m3:
+** decb x0, all, mul #3
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_m3, svint32x4_t, int32_t,
+ z0 = svldnt1_vnum_s32_x4 (pn8, x0, -3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ldnt1_vnum_s32_m4:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_m4, svint32x4_t, int32_t,
+ z0 = svldnt1_vnum_s32_x4 (pn8, x0, -4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ldnt1_vnum_s32_m32:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_m32, svint32x4_t, int32_t,
+ z0 = svldnt1_vnum_s32_x4 (pn8, x0, -32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ldnt1_vnum_s32_m36:
+** [^{]*
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_m36, svint32x4_t, int32_t,
+ z0 = svldnt1_vnum_s32_x4 (pn8, x0, -36),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ldnt1_vnum_s32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s32_x1, svint32x4_t, int32_t,
+ z0 = svldnt1_vnum_s32_x4 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s64_x2.c
new file mode 100644
index 0000000..1e063fb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s64_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_s64_base:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_base, svint64x2_t, int64_t,
+ z0 = svldnt1_s64_x2 (pn8, x0),
+ z0 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_s64_index:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_index, svint64x2_t, int64_t,
+ z0 = svldnt1_s64_x2 (pn8, x0 + x1),
+ z0 = svldnt1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s64_1:
+** incb x0
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_1, svint64x2_t, int64_t,
+ z0 = svldnt1_s64_x2 (pn8, x0 + svcntd ()),
+ z0 = svldnt1_x2 (pn8, x0 + svcntd ()))
+
+/*
+** ldnt1_s64_2:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_2, svint64x2_t, int64_t,
+ z0 = svldnt1_s64_x2 (pn8, x0 + svcntd () * 2),
+ z0 = svldnt1_x2 (pn8, x0 + svcntd () * 2))
+
+/*
+** ldnt1_s64_14:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_14, svint64x2_t, int64_t,
+ z0 = svldnt1_s64_x2 (pn8, x0 + svcntd () * 14),
+ z0 = svldnt1_x2 (pn8, x0 + svcntd () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s64_16:
+** incb x0, all, mul #16
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_16, svint64x2_t, int64_t,
+ z0 = svldnt1_s64_x2 (pn8, x0 + svcntd () * 16),
+ z0 = svldnt1_x2 (pn8, x0 + svcntd () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s64_m1:
+** decb x0
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_m1, svint64x2_t, int64_t,
+ z0 = svldnt1_s64_x2 (pn8, x0 - svcntd ()),
+ z0 = svldnt1_x2 (pn8, x0 - svcntd ()))
+
+/*
+** ldnt1_s64_m2:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_m2, svint64x2_t, int64_t,
+ z0 = svldnt1_s64_x2 (pn8, x0 - svcntd () * 2),
+ z0 = svldnt1_x2 (pn8, x0 - svcntd () * 2))
+
+/*
+** ldnt1_s64_m16:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_m16, svint64x2_t, int64_t,
+ z0 = svldnt1_s64_x2 (pn8, x0 - svcntd () * 16),
+ z0 = svldnt1_x2 (pn8, x0 - svcntd () * 16))
+
+/*
+** ldnt1_s64_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_m18, svint64x2_t, int64_t,
+ z0 = svldnt1_s64_x2 (pn8, x0 - svcntd () * 18),
+ z0 = svldnt1_x2 (pn8, x0 - svcntd () * 18))
+
+/*
+** ldnt1_s64_z17:
+** ldnt1d {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_z17, svint64x2_t, int64_t,
+ z17 = svldnt1_s64_x2 (pn8, x0),
+ z17 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_s64_z22:
+** ldnt1d {z22\.d(?: - |, )z23\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_z22, svint64x2_t, int64_t,
+ z22 = svldnt1_s64_x2 (pn8, x0),
+ z22 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_s64_z28:
+** ldnt1d {z28\.d(?: - |, )z29\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_z28, svint64x2_t, int64_t,
+ z28 = svldnt1_s64_x2 (pn8, x0),
+ z28 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_s64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_pn0, svint64x2_t, int64_t,
+ z0 = svldnt1_s64_x2 (pn0, x0),
+ z0 = svldnt1_x2 (pn0, x0))
+
+/*
+** ldnt1_s64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_pn7, svint64x2_t, int64_t,
+ z0 = svldnt1_s64_x2 (pn7, x0),
+ z0 = svldnt1_x2 (pn7, x0))
+
+/*
+** ldnt1_s64_pn15:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_pn15, svint64x2_t, int64_t,
+ z0 = svldnt1_s64_x2 (pn15, x0),
+ z0 = svldnt1_x2 (pn15, x0))
+
+/*
+** ldnt1_vnum_s64_0:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_0, svint64x2_t, int64_t,
+ z0 = svldnt1_vnum_s64_x2 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s64_1:
+** incb x0
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_1, svint64x2_t, int64_t,
+ z0 = svldnt1_vnum_s64_x2 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ldnt1_vnum_s64_2:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_2, svint64x2_t, int64_t,
+ z0 = svldnt1_vnum_s64_x2 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ldnt1_vnum_s64_14:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_14, svint64x2_t, int64_t,
+ z0 = svldnt1_vnum_s64_x2 (pn8, x0, 14),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s64_16:
+** incb x0, all, mul #16
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_16, svint64x2_t, int64_t,
+ z0 = svldnt1_vnum_s64_x2 (pn8, x0, 16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s64_m1:
+** decb x0
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_m1, svint64x2_t, int64_t,
+ z0 = svldnt1_vnum_s64_x2 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ldnt1_vnum_s64_m2:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_m2, svint64x2_t, int64_t,
+ z0 = svldnt1_vnum_s64_x2 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ldnt1_vnum_s64_m16:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_m16, svint64x2_t, int64_t,
+ z0 = svldnt1_vnum_s64_x2 (pn8, x0, -16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ldnt1_vnum_s64_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_m18, svint64x2_t, int64_t,
+ z0 = svldnt1_vnum_s64_x2 (pn8, x0, -18),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ldnt1_vnum_s64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_x1, svint64x2_t, int64_t,
+ z0 = svldnt1_vnum_s64_x2 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s64_x4.c
new file mode 100644
index 0000000..90f9292
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s64_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_s64_base:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_base, svint64x4_t, int64_t,
+ z0 = svldnt1_s64_x4 (pn8, x0),
+ z0 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_s64_index:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_index, svint64x4_t, int64_t,
+ z0 = svldnt1_s64_x4 (pn8, x0 + x1),
+ z0 = svldnt1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s64_1:
+** incb x0
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_1, svint64x4_t, int64_t,
+ z0 = svldnt1_s64_x4 (pn8, x0 + svcntd ()),
+ z0 = svldnt1_x4 (pn8, x0 + svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s64_2:
+** incb x0, all, mul #2
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_2, svint64x4_t, int64_t,
+ z0 = svldnt1_s64_x4 (pn8, x0 + svcntd () * 2),
+ z0 = svldnt1_x4 (pn8, x0 + svcntd () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s64_3:
+** incb x0, all, mul #3
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_3, svint64x4_t, int64_t,
+ z0 = svldnt1_s64_x4 (pn8, x0 + svcntd () * 3),
+ z0 = svldnt1_x4 (pn8, x0 + svcntd () * 3))
+
+/*
+** ldnt1_s64_4:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_4, svint64x4_t, int64_t,
+ z0 = svldnt1_s64_x4 (pn8, x0 + svcntd () * 4),
+ z0 = svldnt1_x4 (pn8, x0 + svcntd () * 4))
+
+/*
+** ldnt1_s64_28:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_28, svint64x4_t, int64_t,
+ z0 = svldnt1_s64_x4 (pn8, x0 + svcntd () * 28),
+ z0 = svldnt1_x4 (pn8, x0 + svcntd () * 28))
+
+/*
+** ldnt1_s64_32:
+** [^{]*
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_32, svint64x4_t, int64_t,
+ z0 = svldnt1_s64_x4 (pn8, x0 + svcntd () * 32),
+ z0 = svldnt1_x4 (pn8, x0 + svcntd () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s64_m1:
+** decb x0
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_m1, svint64x4_t, int64_t,
+ z0 = svldnt1_s64_x4 (pn8, x0 - svcntd ()),
+ z0 = svldnt1_x4 (pn8, x0 - svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s64_m2:
+** decb x0, all, mul #2
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_m2, svint64x4_t, int64_t,
+ z0 = svldnt1_s64_x4 (pn8, x0 - svcntd () * 2),
+ z0 = svldnt1_x4 (pn8, x0 - svcntd () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s64_m3:
+** decb x0, all, mul #3
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_m3, svint64x4_t, int64_t,
+ z0 = svldnt1_s64_x4 (pn8, x0 - svcntd () * 3),
+ z0 = svldnt1_x4 (pn8, x0 - svcntd () * 3))
+
+/*
+** ldnt1_s64_m4:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_m4, svint64x4_t, int64_t,
+ z0 = svldnt1_s64_x4 (pn8, x0 - svcntd () * 4),
+ z0 = svldnt1_x4 (pn8, x0 - svcntd () * 4))
+
+/*
+** ldnt1_s64_m32:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_m32, svint64x4_t, int64_t,
+ z0 = svldnt1_s64_x4 (pn8, x0 - svcntd () * 32),
+ z0 = svldnt1_x4 (pn8, x0 - svcntd () * 32))
+
+/*
+** ldnt1_s64_m36:
+** [^{]*
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_m36, svint64x4_t, int64_t,
+ z0 = svldnt1_s64_x4 (pn8, x0 - svcntd () * 36),
+ z0 = svldnt1_x4 (pn8, x0 - svcntd () * 36))
+
+/*
+** ldnt1_s64_z17:
+** ldnt1d {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_z17, svint64x4_t, int64_t,
+ z17 = svldnt1_s64_x4 (pn8, x0),
+ z17 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_s64_z22:
+** ldnt1d {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_z22, svint64x4_t, int64_t,
+ z22 = svldnt1_s64_x4 (pn8, x0),
+ z22 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_s64_z28:
+** ldnt1d {z28\.d(?: - |, )z31\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_z28, svint64x4_t, int64_t,
+ z28 = svldnt1_s64_x4 (pn8, x0),
+ z28 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_s64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1d {z0\.d(?: - |, )z3\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_pn0, svint64x4_t, int64_t,
+ z0 = svldnt1_s64_x4 (pn0, x0),
+ z0 = svldnt1_x4 (pn0, x0))
+
+/*
+** ldnt1_s64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1d {z0\.d(?: - |, )z3\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_pn7, svint64x4_t, int64_t,
+ z0 = svldnt1_s64_x4 (pn7, x0),
+ z0 = svldnt1_x4 (pn7, x0))
+
+/*
+** ldnt1_s64_pn15:
+** ldnt1d {z0\.d(?: - |, )z3\.d}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s64_pn15, svint64x4_t, int64_t,
+ z0 = svldnt1_s64_x4 (pn15, x0),
+ z0 = svldnt1_x4 (pn15, x0))
+
+/*
+** ldnt1_vnum_s64_0:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_0, svint64x4_t, int64_t,
+ z0 = svldnt1_vnum_s64_x4 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s64_1:
+** incb x0
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_1, svint64x4_t, int64_t,
+ z0 = svldnt1_vnum_s64_x4 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s64_2:
+** incb x0, all, mul #2
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_2, svint64x4_t, int64_t,
+ z0 = svldnt1_vnum_s64_x4 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s64_3:
+** incb x0, all, mul #3
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_3, svint64x4_t, int64_t,
+ z0 = svldnt1_vnum_s64_x4 (pn8, x0, 3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ldnt1_vnum_s64_4:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_4, svint64x4_t, int64_t,
+ z0 = svldnt1_vnum_s64_x4 (pn8, x0, 4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ldnt1_vnum_s64_28:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_28, svint64x4_t, int64_t,
+ z0 = svldnt1_vnum_s64_x4 (pn8, x0, 28),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ldnt1_vnum_s64_32:
+** [^{]*
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_32, svint64x4_t, int64_t,
+ z0 = svldnt1_vnum_s64_x4 (pn8, x0, 32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s64_m1:
+** decb x0
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_m1, svint64x4_t, int64_t,
+ z0 = svldnt1_vnum_s64_x4 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s64_m2:
+** decb x0, all, mul #2
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_m2, svint64x4_t, int64_t,
+ z0 = svldnt1_vnum_s64_x4 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s64_m3:
+** decb x0, all, mul #3
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_m3, svint64x4_t, int64_t,
+ z0 = svldnt1_vnum_s64_x4 (pn8, x0, -3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ldnt1_vnum_s64_m4:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_m4, svint64x4_t, int64_t,
+ z0 = svldnt1_vnum_s64_x4 (pn8, x0, -4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ldnt1_vnum_s64_m32:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_m32, svint64x4_t, int64_t,
+ z0 = svldnt1_vnum_s64_x4 (pn8, x0, -32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ldnt1_vnum_s64_m36:
+** [^{]*
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_m36, svint64x4_t, int64_t,
+ z0 = svldnt1_vnum_s64_x4 (pn8, x0, -36),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ldnt1_vnum_s64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s64_x1, svint64x4_t, int64_t,
+ z0 = svldnt1_vnum_s64_x4 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s8_x2.c
new file mode 100644
index 0000000..a93f516
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s8_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_s8_base:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_base, svint8x2_t, int8_t,
+ z0 = svldnt1_s8_x2 (pn8, x0),
+ z0 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_s8_index:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, x1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_index, svint8x2_t, int8_t,
+ z0 = svldnt1_s8_x2 (pn8, x0 + x1),
+ z0 = svldnt1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s8_1:
+** incb x0
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_1, svint8x2_t, int8_t,
+ z0 = svldnt1_s8_x2 (pn8, x0 + svcntb ()),
+ z0 = svldnt1_x2 (pn8, x0 + svcntb ()))
+
+/*
+** ldnt1_s8_2:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_2, svint8x2_t, int8_t,
+ z0 = svldnt1_s8_x2 (pn8, x0 + svcntb () * 2),
+ z0 = svldnt1_x2 (pn8, x0 + svcntb () * 2))
+
+/*
+** ldnt1_s8_14:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_14, svint8x2_t, int8_t,
+ z0 = svldnt1_s8_x2 (pn8, x0 + svcntb () * 14),
+ z0 = svldnt1_x2 (pn8, x0 + svcntb () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s8_16:
+** incb x0, all, mul #16
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_16, svint8x2_t, int8_t,
+ z0 = svldnt1_s8_x2 (pn8, x0 + svcntb () * 16),
+ z0 = svldnt1_x2 (pn8, x0 + svcntb () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s8_m1:
+** decb x0
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_m1, svint8x2_t, int8_t,
+ z0 = svldnt1_s8_x2 (pn8, x0 - svcntb ()),
+ z0 = svldnt1_x2 (pn8, x0 - svcntb ()))
+
+/*
+** ldnt1_s8_m2:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_m2, svint8x2_t, int8_t,
+ z0 = svldnt1_s8_x2 (pn8, x0 - svcntb () * 2),
+ z0 = svldnt1_x2 (pn8, x0 - svcntb () * 2))
+
+/*
+** ldnt1_s8_m16:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_m16, svint8x2_t, int8_t,
+ z0 = svldnt1_s8_x2 (pn8, x0 - svcntb () * 16),
+ z0 = svldnt1_x2 (pn8, x0 - svcntb () * 16))
+
+/*
+** ldnt1_s8_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_m18, svint8x2_t, int8_t,
+ z0 = svldnt1_s8_x2 (pn8, x0 - svcntb () * 18),
+ z0 = svldnt1_x2 (pn8, x0 - svcntb () * 18))
+
+/*
+** ldnt1_s8_z17:
+** ldnt1b {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_z17, svint8x2_t, int8_t,
+ z17 = svldnt1_s8_x2 (pn8, x0),
+ z17 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_s8_z22:
+** ldnt1b {z22\.b(?: - |, )z23\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_z22, svint8x2_t, int8_t,
+ z22 = svldnt1_s8_x2 (pn8, x0),
+ z22 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_s8_z28:
+** ldnt1b {z28\.b(?: - |, )z29\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_z28, svint8x2_t, int8_t,
+ z28 = svldnt1_s8_x2 (pn8, x0),
+ z28 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_s8_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_pn0, svint8x2_t, int8_t,
+ z0 = svldnt1_s8_x2 (pn0, x0),
+ z0 = svldnt1_x2 (pn0, x0))
+
+/*
+** ldnt1_s8_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_pn7, svint8x2_t, int8_t,
+ z0 = svldnt1_s8_x2 (pn7, x0),
+ z0 = svldnt1_x2 (pn7, x0))
+
+/*
+** ldnt1_s8_pn15:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_pn15, svint8x2_t, int8_t,
+ z0 = svldnt1_s8_x2 (pn15, x0),
+ z0 = svldnt1_x2 (pn15, x0))
+
+/*
+** ldnt1_vnum_s8_0:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_0, svint8x2_t, int8_t,
+ z0 = svldnt1_vnum_s8_x2 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s8_1:
+** incb x0
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_1, svint8x2_t, int8_t,
+ z0 = svldnt1_vnum_s8_x2 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ldnt1_vnum_s8_2:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_2, svint8x2_t, int8_t,
+ z0 = svldnt1_vnum_s8_x2 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ldnt1_vnum_s8_14:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_14, svint8x2_t, int8_t,
+ z0 = svldnt1_vnum_s8_x2 (pn8, x0, 14),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s8_16:
+** incb x0, all, mul #16
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_16, svint8x2_t, int8_t,
+ z0 = svldnt1_vnum_s8_x2 (pn8, x0, 16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s8_m1:
+** decb x0
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_m1, svint8x2_t, int8_t,
+ z0 = svldnt1_vnum_s8_x2 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ldnt1_vnum_s8_m2:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_m2, svint8x2_t, int8_t,
+ z0 = svldnt1_vnum_s8_x2 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ldnt1_vnum_s8_m16:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_m16, svint8x2_t, int8_t,
+ z0 = svldnt1_vnum_s8_x2 (pn8, x0, -16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ldnt1_vnum_s8_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_m18, svint8x2_t, int8_t,
+ z0 = svldnt1_vnum_s8_x2 (pn8, x0, -18),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ldnt1_vnum_s8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_x1, svint8x2_t, int8_t,
+ z0 = svldnt1_vnum_s8_x2 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s8_x4.c
new file mode 100644
index 0000000..ccf7596
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_s8_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_s8_base:
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_base, svint8x4_t, int8_t,
+ z0 = svldnt1_s8_x4 (pn8, x0),
+ z0 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_s8_index:
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, x1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_index, svint8x4_t, int8_t,
+ z0 = svldnt1_s8_x4 (pn8, x0 + x1),
+ z0 = svldnt1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s8_1:
+** incb x0
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_1, svint8x4_t, int8_t,
+ z0 = svldnt1_s8_x4 (pn8, x0 + svcntb ()),
+ z0 = svldnt1_x4 (pn8, x0 + svcntb ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s8_2:
+** incb x0, all, mul #2
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_2, svint8x4_t, int8_t,
+ z0 = svldnt1_s8_x4 (pn8, x0 + svcntb () * 2),
+ z0 = svldnt1_x4 (pn8, x0 + svcntb () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s8_3:
+** incb x0, all, mul #3
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_3, svint8x4_t, int8_t,
+ z0 = svldnt1_s8_x4 (pn8, x0 + svcntb () * 3),
+ z0 = svldnt1_x4 (pn8, x0 + svcntb () * 3))
+
+/*
+** ldnt1_s8_4:
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_4, svint8x4_t, int8_t,
+ z0 = svldnt1_s8_x4 (pn8, x0 + svcntb () * 4),
+ z0 = svldnt1_x4 (pn8, x0 + svcntb () * 4))
+
+/*
+** ldnt1_s8_28:
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_28, svint8x4_t, int8_t,
+ z0 = svldnt1_s8_x4 (pn8, x0 + svcntb () * 28),
+ z0 = svldnt1_x4 (pn8, x0 + svcntb () * 28))
+
+/*
+** ldnt1_s8_32:
+** [^{]*
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_32, svint8x4_t, int8_t,
+ z0 = svldnt1_s8_x4 (pn8, x0 + svcntb () * 32),
+ z0 = svldnt1_x4 (pn8, x0 + svcntb () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s8_m1:
+** decb x0
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_m1, svint8x4_t, int8_t,
+ z0 = svldnt1_s8_x4 (pn8, x0 - svcntb ()),
+ z0 = svldnt1_x4 (pn8, x0 - svcntb ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s8_m2:
+** decb x0, all, mul #2
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_m2, svint8x4_t, int8_t,
+ z0 = svldnt1_s8_x4 (pn8, x0 - svcntb () * 2),
+ z0 = svldnt1_x4 (pn8, x0 - svcntb () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_s8_m3:
+** decb x0, all, mul #3
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_m3, svint8x4_t, int8_t,
+ z0 = svldnt1_s8_x4 (pn8, x0 - svcntb () * 3),
+ z0 = svldnt1_x4 (pn8, x0 - svcntb () * 3))
+
+/*
+** ldnt1_s8_m4:
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_m4, svint8x4_t, int8_t,
+ z0 = svldnt1_s8_x4 (pn8, x0 - svcntb () * 4),
+ z0 = svldnt1_x4 (pn8, x0 - svcntb () * 4))
+
+/*
+** ldnt1_s8_m32:
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_m32, svint8x4_t, int8_t,
+ z0 = svldnt1_s8_x4 (pn8, x0 - svcntb () * 32),
+ z0 = svldnt1_x4 (pn8, x0 - svcntb () * 32))
+
+/*
+** ldnt1_s8_m36:
+** [^{]*
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_m36, svint8x4_t, int8_t,
+ z0 = svldnt1_s8_x4 (pn8, x0 - svcntb () * 36),
+ z0 = svldnt1_x4 (pn8, x0 - svcntb () * 36))
+
+/*
+** ldnt1_s8_z17:
+** ldnt1b {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_z17, svint8x4_t, int8_t,
+ z17 = svldnt1_s8_x4 (pn8, x0),
+ z17 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_s8_z22:
+** ldnt1b {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_z22, svint8x4_t, int8_t,
+ z22 = svldnt1_s8_x4 (pn8, x0),
+ z22 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_s8_z28:
+** ldnt1b {z28\.b(?: - |, )z31\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_z28, svint8x4_t, int8_t,
+ z28 = svldnt1_s8_x4 (pn8, x0),
+ z28 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_s8_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1b {z0\.b(?: - |, )z3\.b}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_pn0, svint8x4_t, int8_t,
+ z0 = svldnt1_s8_x4 (pn0, x0),
+ z0 = svldnt1_x4 (pn0, x0))
+
+/*
+** ldnt1_s8_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1b {z0\.b(?: - |, )z3\.b}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_pn7, svint8x4_t, int8_t,
+ z0 = svldnt1_s8_x4 (pn7, x0),
+ z0 = svldnt1_x4 (pn7, x0))
+
+/*
+** ldnt1_s8_pn15:
+** ldnt1b {z0\.b(?: - |, )z3\.b}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_s8_pn15, svint8x4_t, int8_t,
+ z0 = svldnt1_s8_x4 (pn15, x0),
+ z0 = svldnt1_x4 (pn15, x0))
+
+/*
+** ldnt1_vnum_s8_0:
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_0, svint8x4_t, int8_t,
+ z0 = svldnt1_vnum_s8_x4 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s8_1:
+** incb x0
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_1, svint8x4_t, int8_t,
+ z0 = svldnt1_vnum_s8_x4 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s8_2:
+** incb x0, all, mul #2
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_2, svint8x4_t, int8_t,
+ z0 = svldnt1_vnum_s8_x4 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s8_3:
+** incb x0, all, mul #3
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_3, svint8x4_t, int8_t,
+ z0 = svldnt1_vnum_s8_x4 (pn8, x0, 3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ldnt1_vnum_s8_4:
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_4, svint8x4_t, int8_t,
+ z0 = svldnt1_vnum_s8_x4 (pn8, x0, 4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ldnt1_vnum_s8_28:
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_28, svint8x4_t, int8_t,
+ z0 = svldnt1_vnum_s8_x4 (pn8, x0, 28),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ldnt1_vnum_s8_32:
+** [^{]*
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_32, svint8x4_t, int8_t,
+ z0 = svldnt1_vnum_s8_x4 (pn8, x0, 32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s8_m1:
+** decb x0
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_m1, svint8x4_t, int8_t,
+ z0 = svldnt1_vnum_s8_x4 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s8_m2:
+** decb x0, all, mul #2
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_m2, svint8x4_t, int8_t,
+ z0 = svldnt1_vnum_s8_x4 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_s8_m3:
+** decb x0, all, mul #3
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_m3, svint8x4_t, int8_t,
+ z0 = svldnt1_vnum_s8_x4 (pn8, x0, -3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ldnt1_vnum_s8_m4:
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_m4, svint8x4_t, int8_t,
+ z0 = svldnt1_vnum_s8_x4 (pn8, x0, -4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ldnt1_vnum_s8_m32:
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_m32, svint8x4_t, int8_t,
+ z0 = svldnt1_vnum_s8_x4 (pn8, x0, -32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ldnt1_vnum_s8_m36:
+** [^{]*
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_m36, svint8x4_t, int8_t,
+ z0 = svldnt1_vnum_s8_x4 (pn8, x0, -36),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ldnt1_vnum_s8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_s8_x1, svint8x4_t, int8_t,
+ z0 = svldnt1_vnum_s8_x4 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u16_x2.c
new file mode 100644
index 0000000..66eb0ad
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u16_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_u16_base:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_base, svuint16x2_t, uint16_t,
+ z0 = svldnt1_u16_x2 (pn8, x0),
+ z0 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_u16_index:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_index, svuint16x2_t, uint16_t,
+ z0 = svldnt1_u16_x2 (pn8, x0 + x1),
+ z0 = svldnt1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u16_1:
+** incb x0
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_1, svuint16x2_t, uint16_t,
+ z0 = svldnt1_u16_x2 (pn8, x0 + svcnth ()),
+ z0 = svldnt1_x2 (pn8, x0 + svcnth ()))
+
+/*
+** ldnt1_u16_2:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_2, svuint16x2_t, uint16_t,
+ z0 = svldnt1_u16_x2 (pn8, x0 + svcnth () * 2),
+ z0 = svldnt1_x2 (pn8, x0 + svcnth () * 2))
+
+/*
+** ldnt1_u16_14:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_14, svuint16x2_t, uint16_t,
+ z0 = svldnt1_u16_x2 (pn8, x0 + svcnth () * 14),
+ z0 = svldnt1_x2 (pn8, x0 + svcnth () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u16_16:
+** incb x0, all, mul #16
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_16, svuint16x2_t, uint16_t,
+ z0 = svldnt1_u16_x2 (pn8, x0 + svcnth () * 16),
+ z0 = svldnt1_x2 (pn8, x0 + svcnth () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u16_m1:
+** decb x0
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_m1, svuint16x2_t, uint16_t,
+ z0 = svldnt1_u16_x2 (pn8, x0 - svcnth ()),
+ z0 = svldnt1_x2 (pn8, x0 - svcnth ()))
+
+/*
+** ldnt1_u16_m2:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_m2, svuint16x2_t, uint16_t,
+ z0 = svldnt1_u16_x2 (pn8, x0 - svcnth () * 2),
+ z0 = svldnt1_x2 (pn8, x0 - svcnth () * 2))
+
+/*
+** ldnt1_u16_m16:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_m16, svuint16x2_t, uint16_t,
+ z0 = svldnt1_u16_x2 (pn8, x0 - svcnth () * 16),
+ z0 = svldnt1_x2 (pn8, x0 - svcnth () * 16))
+
+/*
+** ldnt1_u16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_m18, svuint16x2_t, uint16_t,
+ z0 = svldnt1_u16_x2 (pn8, x0 - svcnth () * 18),
+ z0 = svldnt1_x2 (pn8, x0 - svcnth () * 18))
+
+/*
+** ldnt1_u16_z17:
+** ldnt1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_z17, svuint16x2_t, uint16_t,
+ z17 = svldnt1_u16_x2 (pn8, x0),
+ z17 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_u16_z22:
+** ldnt1h {z22\.h(?: - |, )z23\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_z22, svuint16x2_t, uint16_t,
+ z22 = svldnt1_u16_x2 (pn8, x0),
+ z22 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_u16_z28:
+** ldnt1h {z28\.h(?: - |, )z29\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_z28, svuint16x2_t, uint16_t,
+ z28 = svldnt1_u16_x2 (pn8, x0),
+ z28 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_u16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_pn0, svuint16x2_t, uint16_t,
+ z0 = svldnt1_u16_x2 (pn0, x0),
+ z0 = svldnt1_x2 (pn0, x0))
+
+/*
+** ldnt1_u16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_pn7, svuint16x2_t, uint16_t,
+ z0 = svldnt1_u16_x2 (pn7, x0),
+ z0 = svldnt1_x2 (pn7, x0))
+
+/*
+** ldnt1_u16_pn15:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_pn15, svuint16x2_t, uint16_t,
+ z0 = svldnt1_u16_x2 (pn15, x0),
+ z0 = svldnt1_x2 (pn15, x0))
+
+/*
+** ldnt1_vnum_u16_0:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_0, svuint16x2_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x2 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u16_1:
+** incb x0
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_1, svuint16x2_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x2 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ldnt1_vnum_u16_2:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_2, svuint16x2_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x2 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ldnt1_vnum_u16_14:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_14, svuint16x2_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x2 (pn8, x0, 14),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u16_16:
+** incb x0, all, mul #16
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_16, svuint16x2_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x2 (pn8, x0, 16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u16_m1:
+** decb x0
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_m1, svuint16x2_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x2 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ldnt1_vnum_u16_m2:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_m2, svuint16x2_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x2 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ldnt1_vnum_u16_m16:
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_m16, svuint16x2_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x2 (pn8, x0, -16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ldnt1_vnum_u16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_m18, svuint16x2_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x2 (pn8, x0, -18),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ldnt1_vnum_u16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1h {z0\.h(?: - |, )z1\.h}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_x1, svuint16x2_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x2 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u16_x4.c
new file mode 100644
index 0000000..7a53c18
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u16_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_u16_base:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_base, svuint16x4_t, uint16_t,
+ z0 = svldnt1_u16_x4 (pn8, x0),
+ z0 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_u16_index:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_index, svuint16x4_t, uint16_t,
+ z0 = svldnt1_u16_x4 (pn8, x0 + x1),
+ z0 = svldnt1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u16_1:
+** incb x0
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_1, svuint16x4_t, uint16_t,
+ z0 = svldnt1_u16_x4 (pn8, x0 + svcnth ()),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u16_2:
+** incb x0, all, mul #2
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_2, svuint16x4_t, uint16_t,
+ z0 = svldnt1_u16_x4 (pn8, x0 + svcnth () * 2),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u16_3:
+** incb x0, all, mul #3
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_3, svuint16x4_t, uint16_t,
+ z0 = svldnt1_u16_x4 (pn8, x0 + svcnth () * 3),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth () * 3))
+
+/*
+** ldnt1_u16_4:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_4, svuint16x4_t, uint16_t,
+ z0 = svldnt1_u16_x4 (pn8, x0 + svcnth () * 4),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth () * 4))
+
+/*
+** ldnt1_u16_28:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_28, svuint16x4_t, uint16_t,
+ z0 = svldnt1_u16_x4 (pn8, x0 + svcnth () * 28),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth () * 28))
+
+/*
+** ldnt1_u16_32:
+** [^{]*
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_32, svuint16x4_t, uint16_t,
+ z0 = svldnt1_u16_x4 (pn8, x0 + svcnth () * 32),
+ z0 = svldnt1_x4 (pn8, x0 + svcnth () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u16_m1:
+** decb x0
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_m1, svuint16x4_t, uint16_t,
+ z0 = svldnt1_u16_x4 (pn8, x0 - svcnth ()),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u16_m2:
+** decb x0, all, mul #2
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_m2, svuint16x4_t, uint16_t,
+ z0 = svldnt1_u16_x4 (pn8, x0 - svcnth () * 2),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u16_m3:
+** decb x0, all, mul #3
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_m3, svuint16x4_t, uint16_t,
+ z0 = svldnt1_u16_x4 (pn8, x0 - svcnth () * 3),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth () * 3))
+
+/*
+** ldnt1_u16_m4:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_m4, svuint16x4_t, uint16_t,
+ z0 = svldnt1_u16_x4 (pn8, x0 - svcnth () * 4),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth () * 4))
+
+/*
+** ldnt1_u16_m32:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_m32, svuint16x4_t, uint16_t,
+ z0 = svldnt1_u16_x4 (pn8, x0 - svcnth () * 32),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth () * 32))
+
+/*
+** ldnt1_u16_m36:
+** [^{]*
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_m36, svuint16x4_t, uint16_t,
+ z0 = svldnt1_u16_x4 (pn8, x0 - svcnth () * 36),
+ z0 = svldnt1_x4 (pn8, x0 - svcnth () * 36))
+
+/*
+** ldnt1_u16_z17:
+** ldnt1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_z17, svuint16x4_t, uint16_t,
+ z17 = svldnt1_u16_x4 (pn8, x0),
+ z17 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_u16_z22:
+** ldnt1h {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_z22, svuint16x4_t, uint16_t,
+ z22 = svldnt1_u16_x4 (pn8, x0),
+ z22 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_u16_z28:
+** ldnt1h {z28\.h(?: - |, )z31\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_z28, svuint16x4_t, uint16_t,
+ z28 = svldnt1_u16_x4 (pn8, x0),
+ z28 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_u16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1h {z0\.h(?: - |, )z3\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_pn0, svuint16x4_t, uint16_t,
+ z0 = svldnt1_u16_x4 (pn0, x0),
+ z0 = svldnt1_x4 (pn0, x0))
+
+/*
+** ldnt1_u16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1h {z0\.h(?: - |, )z3\.h}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_pn7, svuint16x4_t, uint16_t,
+ z0 = svldnt1_u16_x4 (pn7, x0),
+ z0 = svldnt1_x4 (pn7, x0))
+
+/*
+** ldnt1_u16_pn15:
+** ldnt1h {z0\.h(?: - |, )z3\.h}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u16_pn15, svuint16x4_t, uint16_t,
+ z0 = svldnt1_u16_x4 (pn15, x0),
+ z0 = svldnt1_x4 (pn15, x0))
+
+/*
+** ldnt1_vnum_u16_0:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_0, svuint16x4_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x4 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u16_1:
+** incb x0
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_1, svuint16x4_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x4 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u16_2:
+** incb x0, all, mul #2
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_2, svuint16x4_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x4 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u16_3:
+** incb x0, all, mul #3
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_3, svuint16x4_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x4 (pn8, x0, 3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ldnt1_vnum_u16_4:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_4, svuint16x4_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x4 (pn8, x0, 4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ldnt1_vnum_u16_28:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_28, svuint16x4_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x4 (pn8, x0, 28),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ldnt1_vnum_u16_32:
+** [^{]*
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_32, svuint16x4_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x4 (pn8, x0, 32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u16_m1:
+** decb x0
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_m1, svuint16x4_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x4 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u16_m2:
+** decb x0, all, mul #2
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_m2, svuint16x4_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x4 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u16_m3:
+** decb x0, all, mul #3
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_m3, svuint16x4_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x4 (pn8, x0, -3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ldnt1_vnum_u16_m4:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_m4, svuint16x4_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x4 (pn8, x0, -4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ldnt1_vnum_u16_m32:
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_m32, svuint16x4_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x4 (pn8, x0, -32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ldnt1_vnum_u16_m36:
+** [^{]*
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_m36, svuint16x4_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x4 (pn8, x0, -36),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ldnt1_vnum_u16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1h {z0\.h - z3\.h}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u16_x1, svuint16x4_t, uint16_t,
+ z0 = svldnt1_vnum_u16_x4 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u32_x2.c
new file mode 100644
index 0000000..6dd278c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u32_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_u32_base:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_base, svuint32x2_t, uint32_t,
+ z0 = svldnt1_u32_x2 (pn8, x0),
+ z0 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_u32_index:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_index, svuint32x2_t, uint32_t,
+ z0 = svldnt1_u32_x2 (pn8, x0 + x1),
+ z0 = svldnt1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u32_1:
+** incb x0
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_1, svuint32x2_t, uint32_t,
+ z0 = svldnt1_u32_x2 (pn8, x0 + svcntw ()),
+ z0 = svldnt1_x2 (pn8, x0 + svcntw ()))
+
+/*
+** ldnt1_u32_2:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_2, svuint32x2_t, uint32_t,
+ z0 = svldnt1_u32_x2 (pn8, x0 + svcntw () * 2),
+ z0 = svldnt1_x2 (pn8, x0 + svcntw () * 2))
+
+/*
+** ldnt1_u32_14:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_14, svuint32x2_t, uint32_t,
+ z0 = svldnt1_u32_x2 (pn8, x0 + svcntw () * 14),
+ z0 = svldnt1_x2 (pn8, x0 + svcntw () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u32_16:
+** incb x0, all, mul #16
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_16, svuint32x2_t, uint32_t,
+ z0 = svldnt1_u32_x2 (pn8, x0 + svcntw () * 16),
+ z0 = svldnt1_x2 (pn8, x0 + svcntw () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u32_m1:
+** decb x0
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_m1, svuint32x2_t, uint32_t,
+ z0 = svldnt1_u32_x2 (pn8, x0 - svcntw ()),
+ z0 = svldnt1_x2 (pn8, x0 - svcntw ()))
+
+/*
+** ldnt1_u32_m2:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_m2, svuint32x2_t, uint32_t,
+ z0 = svldnt1_u32_x2 (pn8, x0 - svcntw () * 2),
+ z0 = svldnt1_x2 (pn8, x0 - svcntw () * 2))
+
+/*
+** ldnt1_u32_m16:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_m16, svuint32x2_t, uint32_t,
+ z0 = svldnt1_u32_x2 (pn8, x0 - svcntw () * 16),
+ z0 = svldnt1_x2 (pn8, x0 - svcntw () * 16))
+
+/*
+** ldnt1_u32_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_m18, svuint32x2_t, uint32_t,
+ z0 = svldnt1_u32_x2 (pn8, x0 - svcntw () * 18),
+ z0 = svldnt1_x2 (pn8, x0 - svcntw () * 18))
+
+/*
+** ldnt1_u32_z17:
+** ldnt1w {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_z17, svuint32x2_t, uint32_t,
+ z17 = svldnt1_u32_x2 (pn8, x0),
+ z17 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_u32_z22:
+** ldnt1w {z22\.s(?: - |, )z23\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_z22, svuint32x2_t, uint32_t,
+ z22 = svldnt1_u32_x2 (pn8, x0),
+ z22 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_u32_z28:
+** ldnt1w {z28\.s(?: - |, )z29\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_z28, svuint32x2_t, uint32_t,
+ z28 = svldnt1_u32_x2 (pn8, x0),
+ z28 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_u32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_pn0, svuint32x2_t, uint32_t,
+ z0 = svldnt1_u32_x2 (pn0, x0),
+ z0 = svldnt1_x2 (pn0, x0))
+
+/*
+** ldnt1_u32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_pn7, svuint32x2_t, uint32_t,
+ z0 = svldnt1_u32_x2 (pn7, x0),
+ z0 = svldnt1_x2 (pn7, x0))
+
+/*
+** ldnt1_u32_pn15:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_pn15, svuint32x2_t, uint32_t,
+ z0 = svldnt1_u32_x2 (pn15, x0),
+ z0 = svldnt1_x2 (pn15, x0))
+
+/*
+** ldnt1_vnum_u32_0:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_0, svuint32x2_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x2 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u32_1:
+** incb x0
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_1, svuint32x2_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x2 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ldnt1_vnum_u32_2:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_2, svuint32x2_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x2 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ldnt1_vnum_u32_14:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_14, svuint32x2_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x2 (pn8, x0, 14),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u32_16:
+** incb x0, all, mul #16
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_16, svuint32x2_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x2 (pn8, x0, 16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u32_m1:
+** decb x0
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_m1, svuint32x2_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x2 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ldnt1_vnum_u32_m2:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_m2, svuint32x2_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x2 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ldnt1_vnum_u32_m16:
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_m16, svuint32x2_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x2 (pn8, x0, -16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ldnt1_vnum_u32_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_m18, svuint32x2_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x2 (pn8, x0, -18),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ldnt1_vnum_u32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1w {z0\.s(?: - |, )z1\.s}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_x1, svuint32x2_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x2 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u32_x4.c
new file mode 100644
index 0000000..2e32801
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u32_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_u32_base:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_base, svuint32x4_t, uint32_t,
+ z0 = svldnt1_u32_x4 (pn8, x0),
+ z0 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_u32_index:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_index, svuint32x4_t, uint32_t,
+ z0 = svldnt1_u32_x4 (pn8, x0 + x1),
+ z0 = svldnt1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u32_1:
+** incb x0
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_1, svuint32x4_t, uint32_t,
+ z0 = svldnt1_u32_x4 (pn8, x0 + svcntw ()),
+ z0 = svldnt1_x4 (pn8, x0 + svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u32_2:
+** incb x0, all, mul #2
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_2, svuint32x4_t, uint32_t,
+ z0 = svldnt1_u32_x4 (pn8, x0 + svcntw () * 2),
+ z0 = svldnt1_x4 (pn8, x0 + svcntw () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u32_3:
+** incb x0, all, mul #3
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_3, svuint32x4_t, uint32_t,
+ z0 = svldnt1_u32_x4 (pn8, x0 + svcntw () * 3),
+ z0 = svldnt1_x4 (pn8, x0 + svcntw () * 3))
+
+/*
+** ldnt1_u32_4:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_4, svuint32x4_t, uint32_t,
+ z0 = svldnt1_u32_x4 (pn8, x0 + svcntw () * 4),
+ z0 = svldnt1_x4 (pn8, x0 + svcntw () * 4))
+
+/*
+** ldnt1_u32_28:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_28, svuint32x4_t, uint32_t,
+ z0 = svldnt1_u32_x4 (pn8, x0 + svcntw () * 28),
+ z0 = svldnt1_x4 (pn8, x0 + svcntw () * 28))
+
+/*
+** ldnt1_u32_32:
+** [^{]*
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_32, svuint32x4_t, uint32_t,
+ z0 = svldnt1_u32_x4 (pn8, x0 + svcntw () * 32),
+ z0 = svldnt1_x4 (pn8, x0 + svcntw () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u32_m1:
+** decb x0
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_m1, svuint32x4_t, uint32_t,
+ z0 = svldnt1_u32_x4 (pn8, x0 - svcntw ()),
+ z0 = svldnt1_x4 (pn8, x0 - svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u32_m2:
+** decb x0, all, mul #2
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_m2, svuint32x4_t, uint32_t,
+ z0 = svldnt1_u32_x4 (pn8, x0 - svcntw () * 2),
+ z0 = svldnt1_x4 (pn8, x0 - svcntw () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u32_m3:
+** decb x0, all, mul #3
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_m3, svuint32x4_t, uint32_t,
+ z0 = svldnt1_u32_x4 (pn8, x0 - svcntw () * 3),
+ z0 = svldnt1_x4 (pn8, x0 - svcntw () * 3))
+
+/*
+** ldnt1_u32_m4:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_m4, svuint32x4_t, uint32_t,
+ z0 = svldnt1_u32_x4 (pn8, x0 - svcntw () * 4),
+ z0 = svldnt1_x4 (pn8, x0 - svcntw () * 4))
+
+/*
+** ldnt1_u32_m32:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_m32, svuint32x4_t, uint32_t,
+ z0 = svldnt1_u32_x4 (pn8, x0 - svcntw () * 32),
+ z0 = svldnt1_x4 (pn8, x0 - svcntw () * 32))
+
+/*
+** ldnt1_u32_m36:
+** [^{]*
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_m36, svuint32x4_t, uint32_t,
+ z0 = svldnt1_u32_x4 (pn8, x0 - svcntw () * 36),
+ z0 = svldnt1_x4 (pn8, x0 - svcntw () * 36))
+
+/*
+** ldnt1_u32_z17:
+** ldnt1w {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_z17, svuint32x4_t, uint32_t,
+ z17 = svldnt1_u32_x4 (pn8, x0),
+ z17 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_u32_z22:
+** ldnt1w {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_z22, svuint32x4_t, uint32_t,
+ z22 = svldnt1_u32_x4 (pn8, x0),
+ z22 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_u32_z28:
+** ldnt1w {z28\.s(?: - |, )z31\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_z28, svuint32x4_t, uint32_t,
+ z28 = svldnt1_u32_x4 (pn8, x0),
+ z28 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_u32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1w {z0\.s(?: - |, )z3\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_pn0, svuint32x4_t, uint32_t,
+ z0 = svldnt1_u32_x4 (pn0, x0),
+ z0 = svldnt1_x4 (pn0, x0))
+
+/*
+** ldnt1_u32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1w {z0\.s(?: - |, )z3\.s}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_pn7, svuint32x4_t, uint32_t,
+ z0 = svldnt1_u32_x4 (pn7, x0),
+ z0 = svldnt1_x4 (pn7, x0))
+
+/*
+** ldnt1_u32_pn15:
+** ldnt1w {z0\.s(?: - |, )z3\.s}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u32_pn15, svuint32x4_t, uint32_t,
+ z0 = svldnt1_u32_x4 (pn15, x0),
+ z0 = svldnt1_x4 (pn15, x0))
+
+/*
+** ldnt1_vnum_u32_0:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_0, svuint32x4_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x4 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u32_1:
+** incb x0
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_1, svuint32x4_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x4 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u32_2:
+** incb x0, all, mul #2
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_2, svuint32x4_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x4 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u32_3:
+** incb x0, all, mul #3
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_3, svuint32x4_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x4 (pn8, x0, 3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ldnt1_vnum_u32_4:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_4, svuint32x4_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x4 (pn8, x0, 4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ldnt1_vnum_u32_28:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_28, svuint32x4_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x4 (pn8, x0, 28),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ldnt1_vnum_u32_32:
+** [^{]*
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_32, svuint32x4_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x4 (pn8, x0, 32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u32_m1:
+** decb x0
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_m1, svuint32x4_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x4 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u32_m2:
+** decb x0, all, mul #2
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_m2, svuint32x4_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x4 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u32_m3:
+** decb x0, all, mul #3
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_m3, svuint32x4_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x4 (pn8, x0, -3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ldnt1_vnum_u32_m4:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_m4, svuint32x4_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x4 (pn8, x0, -4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ldnt1_vnum_u32_m32:
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_m32, svuint32x4_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x4 (pn8, x0, -32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ldnt1_vnum_u32_m36:
+** [^{]*
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_m36, svuint32x4_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x4 (pn8, x0, -36),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ldnt1_vnum_u32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1w {z0\.s - z3\.s}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u32_x1, svuint32x4_t, uint32_t,
+ z0 = svldnt1_vnum_u32_x4 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u64_x2.c
new file mode 100644
index 0000000..fdabbbf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u64_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_u64_base:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_base, svuint64x2_t, uint64_t,
+ z0 = svldnt1_u64_x2 (pn8, x0),
+ z0 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_u64_index:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_index, svuint64x2_t, uint64_t,
+ z0 = svldnt1_u64_x2 (pn8, x0 + x1),
+ z0 = svldnt1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u64_1:
+** incb x0
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_1, svuint64x2_t, uint64_t,
+ z0 = svldnt1_u64_x2 (pn8, x0 + svcntd ()),
+ z0 = svldnt1_x2 (pn8, x0 + svcntd ()))
+
+/*
+** ldnt1_u64_2:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_2, svuint64x2_t, uint64_t,
+ z0 = svldnt1_u64_x2 (pn8, x0 + svcntd () * 2),
+ z0 = svldnt1_x2 (pn8, x0 + svcntd () * 2))
+
+/*
+** ldnt1_u64_14:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_14, svuint64x2_t, uint64_t,
+ z0 = svldnt1_u64_x2 (pn8, x0 + svcntd () * 14),
+ z0 = svldnt1_x2 (pn8, x0 + svcntd () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u64_16:
+** incb x0, all, mul #16
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_16, svuint64x2_t, uint64_t,
+ z0 = svldnt1_u64_x2 (pn8, x0 + svcntd () * 16),
+ z0 = svldnt1_x2 (pn8, x0 + svcntd () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u64_m1:
+** decb x0
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_m1, svuint64x2_t, uint64_t,
+ z0 = svldnt1_u64_x2 (pn8, x0 - svcntd ()),
+ z0 = svldnt1_x2 (pn8, x0 - svcntd ()))
+
+/*
+** ldnt1_u64_m2:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_m2, svuint64x2_t, uint64_t,
+ z0 = svldnt1_u64_x2 (pn8, x0 - svcntd () * 2),
+ z0 = svldnt1_x2 (pn8, x0 - svcntd () * 2))
+
+/*
+** ldnt1_u64_m16:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_m16, svuint64x2_t, uint64_t,
+ z0 = svldnt1_u64_x2 (pn8, x0 - svcntd () * 16),
+ z0 = svldnt1_x2 (pn8, x0 - svcntd () * 16))
+
+/*
+** ldnt1_u64_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_m18, svuint64x2_t, uint64_t,
+ z0 = svldnt1_u64_x2 (pn8, x0 - svcntd () * 18),
+ z0 = svldnt1_x2 (pn8, x0 - svcntd () * 18))
+
+/*
+** ldnt1_u64_z17:
+** ldnt1d {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_z17, svuint64x2_t, uint64_t,
+ z17 = svldnt1_u64_x2 (pn8, x0),
+ z17 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_u64_z22:
+** ldnt1d {z22\.d(?: - |, )z23\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_z22, svuint64x2_t, uint64_t,
+ z22 = svldnt1_u64_x2 (pn8, x0),
+ z22 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_u64_z28:
+** ldnt1d {z28\.d(?: - |, )z29\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_z28, svuint64x2_t, uint64_t,
+ z28 = svldnt1_u64_x2 (pn8, x0),
+ z28 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_u64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_pn0, svuint64x2_t, uint64_t,
+ z0 = svldnt1_u64_x2 (pn0, x0),
+ z0 = svldnt1_x2 (pn0, x0))
+
+/*
+** ldnt1_u64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_pn7, svuint64x2_t, uint64_t,
+ z0 = svldnt1_u64_x2 (pn7, x0),
+ z0 = svldnt1_x2 (pn7, x0))
+
+/*
+** ldnt1_u64_pn15:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_pn15, svuint64x2_t, uint64_t,
+ z0 = svldnt1_u64_x2 (pn15, x0),
+ z0 = svldnt1_x2 (pn15, x0))
+
+/*
+** ldnt1_vnum_u64_0:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_0, svuint64x2_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x2 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u64_1:
+** incb x0
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_1, svuint64x2_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x2 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ldnt1_vnum_u64_2:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_2, svuint64x2_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x2 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ldnt1_vnum_u64_14:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_14, svuint64x2_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x2 (pn8, x0, 14),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u64_16:
+** incb x0, all, mul #16
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_16, svuint64x2_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x2 (pn8, x0, 16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u64_m1:
+** decb x0
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_m1, svuint64x2_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x2 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ldnt1_vnum_u64_m2:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_m2, svuint64x2_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x2 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ldnt1_vnum_u64_m16:
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_m16, svuint64x2_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x2 (pn8, x0, -16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ldnt1_vnum_u64_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_m18, svuint64x2_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x2 (pn8, x0, -18),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ldnt1_vnum_u64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1d {z0\.d(?: - |, )z1\.d}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_x1, svuint64x2_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x2 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u64_x4.c
new file mode 100644
index 0000000..e56fa00
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u64_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_u64_base:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_base, svuint64x4_t, uint64_t,
+ z0 = svldnt1_u64_x4 (pn8, x0),
+ z0 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_u64_index:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_index, svuint64x4_t, uint64_t,
+ z0 = svldnt1_u64_x4 (pn8, x0 + x1),
+ z0 = svldnt1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u64_1:
+** incb x0
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_1, svuint64x4_t, uint64_t,
+ z0 = svldnt1_u64_x4 (pn8, x0 + svcntd ()),
+ z0 = svldnt1_x4 (pn8, x0 + svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u64_2:
+** incb x0, all, mul #2
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_2, svuint64x4_t, uint64_t,
+ z0 = svldnt1_u64_x4 (pn8, x0 + svcntd () * 2),
+ z0 = svldnt1_x4 (pn8, x0 + svcntd () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u64_3:
+** incb x0, all, mul #3
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_3, svuint64x4_t, uint64_t,
+ z0 = svldnt1_u64_x4 (pn8, x0 + svcntd () * 3),
+ z0 = svldnt1_x4 (pn8, x0 + svcntd () * 3))
+
+/*
+** ldnt1_u64_4:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_4, svuint64x4_t, uint64_t,
+ z0 = svldnt1_u64_x4 (pn8, x0 + svcntd () * 4),
+ z0 = svldnt1_x4 (pn8, x0 + svcntd () * 4))
+
+/*
+** ldnt1_u64_28:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_28, svuint64x4_t, uint64_t,
+ z0 = svldnt1_u64_x4 (pn8, x0 + svcntd () * 28),
+ z0 = svldnt1_x4 (pn8, x0 + svcntd () * 28))
+
+/*
+** ldnt1_u64_32:
+** [^{]*
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_32, svuint64x4_t, uint64_t,
+ z0 = svldnt1_u64_x4 (pn8, x0 + svcntd () * 32),
+ z0 = svldnt1_x4 (pn8, x0 + svcntd () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u64_m1:
+** decb x0
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_m1, svuint64x4_t, uint64_t,
+ z0 = svldnt1_u64_x4 (pn8, x0 - svcntd ()),
+ z0 = svldnt1_x4 (pn8, x0 - svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u64_m2:
+** decb x0, all, mul #2
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_m2, svuint64x4_t, uint64_t,
+ z0 = svldnt1_u64_x4 (pn8, x0 - svcntd () * 2),
+ z0 = svldnt1_x4 (pn8, x0 - svcntd () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u64_m3:
+** decb x0, all, mul #3
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_m3, svuint64x4_t, uint64_t,
+ z0 = svldnt1_u64_x4 (pn8, x0 - svcntd () * 3),
+ z0 = svldnt1_x4 (pn8, x0 - svcntd () * 3))
+
+/*
+** ldnt1_u64_m4:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_m4, svuint64x4_t, uint64_t,
+ z0 = svldnt1_u64_x4 (pn8, x0 - svcntd () * 4),
+ z0 = svldnt1_x4 (pn8, x0 - svcntd () * 4))
+
+/*
+** ldnt1_u64_m32:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_m32, svuint64x4_t, uint64_t,
+ z0 = svldnt1_u64_x4 (pn8, x0 - svcntd () * 32),
+ z0 = svldnt1_x4 (pn8, x0 - svcntd () * 32))
+
+/*
+** ldnt1_u64_m36:
+** [^{]*
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_m36, svuint64x4_t, uint64_t,
+ z0 = svldnt1_u64_x4 (pn8, x0 - svcntd () * 36),
+ z0 = svldnt1_x4 (pn8, x0 - svcntd () * 36))
+
+/*
+** ldnt1_u64_z17:
+** ldnt1d {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_z17, svuint64x4_t, uint64_t,
+ z17 = svldnt1_u64_x4 (pn8, x0),
+ z17 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_u64_z22:
+** ldnt1d {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_z22, svuint64x4_t, uint64_t,
+ z22 = svldnt1_u64_x4 (pn8, x0),
+ z22 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_u64_z28:
+** ldnt1d {z28\.d(?: - |, )z31\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_z28, svuint64x4_t, uint64_t,
+ z28 = svldnt1_u64_x4 (pn8, x0),
+ z28 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_u64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1d {z0\.d(?: - |, )z3\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_pn0, svuint64x4_t, uint64_t,
+ z0 = svldnt1_u64_x4 (pn0, x0),
+ z0 = svldnt1_x4 (pn0, x0))
+
+/*
+** ldnt1_u64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1d {z0\.d(?: - |, )z3\.d}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_pn7, svuint64x4_t, uint64_t,
+ z0 = svldnt1_u64_x4 (pn7, x0),
+ z0 = svldnt1_x4 (pn7, x0))
+
+/*
+** ldnt1_u64_pn15:
+** ldnt1d {z0\.d(?: - |, )z3\.d}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u64_pn15, svuint64x4_t, uint64_t,
+ z0 = svldnt1_u64_x4 (pn15, x0),
+ z0 = svldnt1_x4 (pn15, x0))
+
+/*
+** ldnt1_vnum_u64_0:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_0, svuint64x4_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x4 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u64_1:
+** incb x0
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_1, svuint64x4_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x4 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u64_2:
+** incb x0, all, mul #2
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_2, svuint64x4_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x4 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u64_3:
+** incb x0, all, mul #3
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_3, svuint64x4_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x4 (pn8, x0, 3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ldnt1_vnum_u64_4:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_4, svuint64x4_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x4 (pn8, x0, 4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ldnt1_vnum_u64_28:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_28, svuint64x4_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x4 (pn8, x0, 28),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ldnt1_vnum_u64_32:
+** [^{]*
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_32, svuint64x4_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x4 (pn8, x0, 32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u64_m1:
+** decb x0
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_m1, svuint64x4_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x4 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u64_m2:
+** decb x0, all, mul #2
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_m2, svuint64x4_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x4 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u64_m3:
+** decb x0, all, mul #3
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_m3, svuint64x4_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x4 (pn8, x0, -3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ldnt1_vnum_u64_m4:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_m4, svuint64x4_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x4 (pn8, x0, -4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ldnt1_vnum_u64_m32:
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_m32, svuint64x4_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x4 (pn8, x0, -32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ldnt1_vnum_u64_m36:
+** [^{]*
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_m36, svuint64x4_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x4 (pn8, x0, -36),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ldnt1_vnum_u64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1d {z0\.d - z3\.d}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u64_x1, svuint64x4_t, uint64_t,
+ z0 = svldnt1_vnum_u64_x4 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u8_x2.c
new file mode 100644
index 0000000..67f3faa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u8_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_u8_base:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_base, svuint8x2_t, uint8_t,
+ z0 = svldnt1_u8_x2 (pn8, x0),
+ z0 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_u8_index:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, x1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_index, svuint8x2_t, uint8_t,
+ z0 = svldnt1_u8_x2 (pn8, x0 + x1),
+ z0 = svldnt1_x2 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u8_1:
+** incb x0
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_1, svuint8x2_t, uint8_t,
+ z0 = svldnt1_u8_x2 (pn8, x0 + svcntb ()),
+ z0 = svldnt1_x2 (pn8, x0 + svcntb ()))
+
+/*
+** ldnt1_u8_2:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_2, svuint8x2_t, uint8_t,
+ z0 = svldnt1_u8_x2 (pn8, x0 + svcntb () * 2),
+ z0 = svldnt1_x2 (pn8, x0 + svcntb () * 2))
+
+/*
+** ldnt1_u8_14:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_14, svuint8x2_t, uint8_t,
+ z0 = svldnt1_u8_x2 (pn8, x0 + svcntb () * 14),
+ z0 = svldnt1_x2 (pn8, x0 + svcntb () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u8_16:
+** incb x0, all, mul #16
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_16, svuint8x2_t, uint8_t,
+ z0 = svldnt1_u8_x2 (pn8, x0 + svcntb () * 16),
+ z0 = svldnt1_x2 (pn8, x0 + svcntb () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u8_m1:
+** decb x0
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_m1, svuint8x2_t, uint8_t,
+ z0 = svldnt1_u8_x2 (pn8, x0 - svcntb ()),
+ z0 = svldnt1_x2 (pn8, x0 - svcntb ()))
+
+/*
+** ldnt1_u8_m2:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_m2, svuint8x2_t, uint8_t,
+ z0 = svldnt1_u8_x2 (pn8, x0 - svcntb () * 2),
+ z0 = svldnt1_x2 (pn8, x0 - svcntb () * 2))
+
+/*
+** ldnt1_u8_m16:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_m16, svuint8x2_t, uint8_t,
+ z0 = svldnt1_u8_x2 (pn8, x0 - svcntb () * 16),
+ z0 = svldnt1_x2 (pn8, x0 - svcntb () * 16))
+
+/*
+** ldnt1_u8_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_m18, svuint8x2_t, uint8_t,
+ z0 = svldnt1_u8_x2 (pn8, x0 - svcntb () * 18),
+ z0 = svldnt1_x2 (pn8, x0 - svcntb () * 18))
+
+/*
+** ldnt1_u8_z17:
+** ldnt1b {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_z17, svuint8x2_t, uint8_t,
+ z17 = svldnt1_u8_x2 (pn8, x0),
+ z17 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_u8_z22:
+** ldnt1b {z22\.b(?: - |, )z23\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_z22, svuint8x2_t, uint8_t,
+ z22 = svldnt1_u8_x2 (pn8, x0),
+ z22 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_u8_z28:
+** ldnt1b {z28\.b(?: - |, )z29\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_z28, svuint8x2_t, uint8_t,
+ z28 = svldnt1_u8_x2 (pn8, x0),
+ z28 = svldnt1_x2 (pn8, x0))
+
+/*
+** ldnt1_u8_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_pn0, svuint8x2_t, uint8_t,
+ z0 = svldnt1_u8_x2 (pn0, x0),
+ z0 = svldnt1_x2 (pn0, x0))
+
+/*
+** ldnt1_u8_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_pn7, svuint8x2_t, uint8_t,
+ z0 = svldnt1_u8_x2 (pn7, x0),
+ z0 = svldnt1_x2 (pn7, x0))
+
+/*
+** ldnt1_u8_pn15:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_pn15, svuint8x2_t, uint8_t,
+ z0 = svldnt1_u8_x2 (pn15, x0),
+ z0 = svldnt1_x2 (pn15, x0))
+
+/*
+** ldnt1_vnum_u8_0:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_0, svuint8x2_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x2 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u8_1:
+** incb x0
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_1, svuint8x2_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x2 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 1))
+
+/*
+** ldnt1_vnum_u8_2:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_2, svuint8x2_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x2 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 2))
+
+/*
+** ldnt1_vnum_u8_14:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_14, svuint8x2_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x2 (pn8, x0, 14),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u8_16:
+** incb x0, all, mul #16
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_16, svuint8x2_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x2 (pn8, x0, 16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u8_m1:
+** decb x0
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_m1, svuint8x2_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x2 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -1))
+
+/*
+** ldnt1_vnum_u8_m2:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_m2, svuint8x2_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x2 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -2))
+
+/*
+** ldnt1_vnum_u8_m16:
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_m16, svuint8x2_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x2 (pn8, x0, -16),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -16))
+
+/*
+** ldnt1_vnum_u8_m18:
+** addvl (x[0-9]+), x0, #-18
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[\1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_m18, svuint8x2_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x2 (pn8, x0, -18),
+ z0 = svldnt1_vnum_x2 (pn8, x0, -18))
+
+/*
+** ldnt1_vnum_u8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1b {z0\.b(?: - |, )z1\.b}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_x1, svuint8x2_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x2 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x2 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u8_x4.c
new file mode 100644
index 0000000..827b994
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldnt1_u8_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ldnt1_u8_base:
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_base, svuint8x4_t, uint8_t,
+ z0 = svldnt1_u8_x4 (pn8, x0),
+ z0 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_u8_index:
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, x1\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_index, svuint8x4_t, uint8_t,
+ z0 = svldnt1_u8_x4 (pn8, x0 + x1),
+ z0 = svldnt1_x4 (pn8, x0 + x1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u8_1:
+** incb x0
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_1, svuint8x4_t, uint8_t,
+ z0 = svldnt1_u8_x4 (pn8, x0 + svcntb ()),
+ z0 = svldnt1_x4 (pn8, x0 + svcntb ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u8_2:
+** incb x0, all, mul #2
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_2, svuint8x4_t, uint8_t,
+ z0 = svldnt1_u8_x4 (pn8, x0 + svcntb () * 2),
+ z0 = svldnt1_x4 (pn8, x0 + svcntb () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u8_3:
+** incb x0, all, mul #3
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_3, svuint8x4_t, uint8_t,
+ z0 = svldnt1_u8_x4 (pn8, x0 + svcntb () * 3),
+ z0 = svldnt1_x4 (pn8, x0 + svcntb () * 3))
+
+/*
+** ldnt1_u8_4:
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_4, svuint8x4_t, uint8_t,
+ z0 = svldnt1_u8_x4 (pn8, x0 + svcntb () * 4),
+ z0 = svldnt1_x4 (pn8, x0 + svcntb () * 4))
+
+/*
+** ldnt1_u8_28:
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_28, svuint8x4_t, uint8_t,
+ z0 = svldnt1_u8_x4 (pn8, x0 + svcntb () * 28),
+ z0 = svldnt1_x4 (pn8, x0 + svcntb () * 28))
+
+/*
+** ldnt1_u8_32:
+** [^{]*
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_32, svuint8x4_t, uint8_t,
+ z0 = svldnt1_u8_x4 (pn8, x0 + svcntb () * 32),
+ z0 = svldnt1_x4 (pn8, x0 + svcntb () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u8_m1:
+** decb x0
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_m1, svuint8x4_t, uint8_t,
+ z0 = svldnt1_u8_x4 (pn8, x0 - svcntb ()),
+ z0 = svldnt1_x4 (pn8, x0 - svcntb ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u8_m2:
+** decb x0, all, mul #2
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_m2, svuint8x4_t, uint8_t,
+ z0 = svldnt1_u8_x4 (pn8, x0 - svcntb () * 2),
+ z0 = svldnt1_x4 (pn8, x0 - svcntb () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_u8_m3:
+** decb x0, all, mul #3
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_m3, svuint8x4_t, uint8_t,
+ z0 = svldnt1_u8_x4 (pn8, x0 - svcntb () * 3),
+ z0 = svldnt1_x4 (pn8, x0 - svcntb () * 3))
+
+/*
+** ldnt1_u8_m4:
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_m4, svuint8x4_t, uint8_t,
+ z0 = svldnt1_u8_x4 (pn8, x0 - svcntb () * 4),
+ z0 = svldnt1_x4 (pn8, x0 - svcntb () * 4))
+
+/*
+** ldnt1_u8_m32:
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_m32, svuint8x4_t, uint8_t,
+ z0 = svldnt1_u8_x4 (pn8, x0 - svcntb () * 32),
+ z0 = svldnt1_x4 (pn8, x0 - svcntb () * 32))
+
+/*
+** ldnt1_u8_m36:
+** [^{]*
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_m36, svuint8x4_t, uint8_t,
+ z0 = svldnt1_u8_x4 (pn8, x0 - svcntb () * 36),
+ z0 = svldnt1_x4 (pn8, x0 - svcntb () * 36))
+
+/*
+** ldnt1_u8_z17:
+** ldnt1b {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_z17, svuint8x4_t, uint8_t,
+ z17 = svldnt1_u8_x4 (pn8, x0),
+ z17 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_u8_z22:
+** ldnt1b {z[^\n]+}, pn8/z, \[x0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_z22, svuint8x4_t, uint8_t,
+ z22 = svldnt1_u8_x4 (pn8, x0),
+ z22 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_u8_z28:
+** ldnt1b {z28\.b(?: - |, )z31\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_z28, svuint8x4_t, uint8_t,
+ z28 = svldnt1_u8_x4 (pn8, x0),
+ z28 = svldnt1_x4 (pn8, x0))
+
+/*
+** ldnt1_u8_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** ldnt1b {z0\.b(?: - |, )z3\.b}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_pn0, svuint8x4_t, uint8_t,
+ z0 = svldnt1_u8_x4 (pn0, x0),
+ z0 = svldnt1_x4 (pn0, x0))
+
+/*
+** ldnt1_u8_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** ldnt1b {z0\.b(?: - |, )z3\.b}, pn\1/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_pn7, svuint8x4_t, uint8_t,
+ z0 = svldnt1_u8_x4 (pn7, x0),
+ z0 = svldnt1_x4 (pn7, x0))
+
+/*
+** ldnt1_u8_pn15:
+** ldnt1b {z0\.b(?: - |, )z3\.b}, pn15/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_u8_pn15, svuint8x4_t, uint8_t,
+ z0 = svldnt1_u8_x4 (pn15, x0),
+ z0 = svldnt1_x4 (pn15, x0))
+
+/*
+** ldnt1_vnum_u8_0:
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_0, svuint8x4_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x4 (pn8, x0, 0),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u8_1:
+** incb x0
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_1, svuint8x4_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x4 (pn8, x0, 1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u8_2:
+** incb x0, all, mul #2
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_2, svuint8x4_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x4 (pn8, x0, 2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u8_3:
+** incb x0, all, mul #3
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_3, svuint8x4_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x4 (pn8, x0, 3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 3))
+
+/*
+** ldnt1_vnum_u8_4:
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_4, svuint8x4_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x4 (pn8, x0, 4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 4))
+
+/*
+** ldnt1_vnum_u8_28:
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_28, svuint8x4_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x4 (pn8, x0, 28),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 28))
+
+/*
+** ldnt1_vnum_u8_32:
+** [^{]*
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_32, svuint8x4_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x4 (pn8, x0, 32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u8_m1:
+** decb x0
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_m1, svuint8x4_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x4 (pn8, x0, -1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u8_m2:
+** decb x0, all, mul #2
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_m2, svuint8x4_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x4 (pn8, x0, -2),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ldnt1_vnum_u8_m3:
+** decb x0, all, mul #3
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_m3, svuint8x4_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x4 (pn8, x0, -3),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -3))
+
+/*
+** ldnt1_vnum_u8_m4:
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_m4, svuint8x4_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x4 (pn8, x0, -4),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -4))
+
+/*
+** ldnt1_vnum_u8_m32:
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_m32, svuint8x4_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x4 (pn8, x0, -32),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -32))
+
+/*
+** ldnt1_vnum_u8_m36:
+** [^{]*
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_m36, svuint8x4_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x4 (pn8, x0, -36),
+ z0 = svldnt1_vnum_x4 (pn8, x0, -36))
+
+/*
+** ldnt1_vnum_u8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ldnt1b {z0\.b - z3\.b}, pn8/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD_COUNT (ldnt1_vnum_u8_x1, svuint8x4_t, uint8_t,
+ z0 = svldnt1_vnum_u8_x4 (pn8, x0, x1),
+ z0 = svldnt1_vnum_x4 (pn8, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldr_zt.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldr_zt.c
new file mode 100644
index 0000000..a614fbc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ldr_zt.c
@@ -0,0 +1,36 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#define STREAMING_COMPATIBLE
+#define SHARED_ZT0
+#include "test_sme2_acle.h"
+
+/*
+** ldr_zt0_x0:
+** ldr zt0, \[x0\]
+** ret
+*/
+PROTO (ldr_zt0_x0, void, (char *x0)) { svldr_zt (0, x0); }
+
+/*
+** ldr_zt0_x0p1:
+** add (x[0-9]+), x0, #?1
+** ldr zt0, \[\1\]
+** ret
+*/
+PROTO (ldr_zt0_x0p1, void, (char *x0)) { svldr_zt (0, x0 + 1); }
+
+/*
+** ldr_zt0_x0p64:
+** add (x[0-9]+), x0, #?64
+** ldr zt0, \[\1\]
+** ret
+*/
+PROTO (ldr_zt0_x0p64, void, (char *x0)) { svldr_zt (0, x0 + 64); }
+
+/*
+** ldr_zt0_x0_vl1:
+** incb x0
+** ldr zt0, \[x0\]
+** ret
+*/
+PROTO (ldr_zt0_x0_vl1, void, (char *x0)) { svldr_zt (0, x0 + svcntb()); }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_bf16.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_bf16.c
new file mode 100644
index 0000000..fd33428
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_bf16.c
@@ -0,0 +1,48 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 z1\.h, zt0, z0\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svbfloat16_t, svuint8_t, z1,
+ svluti2_lane_zt_bf16 (0, z0, 0),
+ svluti2_lane_zt_bf16 (0, z0, 0))
+
+/*
+** luti2_z18_z5_15:
+** luti2 z18\.h, zt0, z5\[15\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_15, svbfloat16_t, svuint8_t, z18,
+ svluti2_lane_zt_bf16 (0, z5, 15),
+ svluti2_lane_zt_bf16 (0, z5, 15))
+
+/*
+** luti2_z24_z7_13:
+** luti2 z24\.h, zt0, z7\[13\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_13, svbfloat16_t, svuint8_t, z24,
+ svluti2_lane_zt_bf16 (0, z7, 13),
+ svluti2_lane_zt_bf16 (0, z7, 13))
+
+/*
+** luti2_z28_z16_11:
+** luti2 z28\.h, zt0, z16\[11\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_11, svbfloat16_t, svuint8_t, z28,
+ svluti2_lane_zt_bf16 (0, z16, 11),
+ svluti2_lane_zt_bf16 (0, z16, 11))
+
+/*
+** luti2_z24_z23_1:
+** luti2 z24\.h, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_1, svbfloat16_t, svuint8_t, z24,
+ svluti2_lane_zt_bf16 (0, z23, 1),
+ svluti2_lane_zt_bf16 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_bf16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_bf16_x2.c
new file mode 100644
index 0000000..52c0bc3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_bf16_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svbfloat16x2_t, svuint8_t, z1,
+ svluti2_lane_zt_bf16_x2 (0, z0, 0),
+ svluti2_lane_zt_bf16_x2 (0, z0, 0))
+
+/*
+** luti2_z18_z5_7:
+** luti2 {z18\.h - z19\.h}, zt0, z5\[7\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_7, svbfloat16x2_t, svuint8_t, z18,
+ svluti2_lane_zt_bf16_x2 (0, z5, 7),
+ svluti2_lane_zt_bf16_x2 (0, z5, 7))
+
+/*
+** luti2_z24_z7_6:
+** luti2 {z24\.h - z25\.h}, zt0, z7\[6\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_6, svbfloat16x2_t, svuint8_t, z24,
+ svluti2_lane_zt_bf16_x2 (0, z7, 6),
+ svluti2_lane_zt_bf16_x2 (0, z7, 6))
+
+/*
+** luti2_z28_z16_3:
+** luti2 {z28\.h - z29\.h}, zt0, z16\[3\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_3, svbfloat16x2_t, svuint8_t, z28,
+ svluti2_lane_zt_bf16_x2 (0, z16, 3),
+ svluti2_lane_zt_bf16_x2 (0, z16, 3))
+
+/*
+** luti2_z24_z23_1:
+** luti2 {z24\.h - z25\.h}, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_1, svbfloat16x2_t, svuint8_t, z24,
+ svluti2_lane_zt_bf16_x2 (0, z23, 1),
+ svluti2_lane_zt_bf16_x2 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_bf16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_bf16_x4.c
new file mode 100644
index 0000000..6a88c26
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_bf16_x4.c
@@ -0,0 +1,56 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svbfloat16x4_t, svuint8_t, z1,
+ svluti2_lane_zt_bf16_x4 (0, z0, 0),
+ svluti2_lane_zt_bf16_x4 (0, z0, 0))
+
+/*
+** luti2_z18_z5_3:
+** luti2 {[^\n]+}, zt0, z5\[3\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_3, svbfloat16x4_t, svuint8_t, z18,
+ svluti2_lane_zt_bf16_x4 (0, z5, 3),
+ svluti2_lane_zt_bf16_x4 (0, z5, 3))
+
+/*
+** luti2_z24_z7_2:
+** luti2 {z24\.h - z27\.h}, zt0, z7\[2\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_2, svbfloat16x4_t, svuint8_t, z24,
+ svluti2_lane_zt_bf16_x4 (0, z7, 2),
+ svluti2_lane_zt_bf16_x4 (0, z7, 2))
+
+/*
+** luti2_z28_z16_1:
+** luti2 {z28\.h - z31\.h}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_1, svbfloat16x4_t, svuint8_t, z28,
+ svluti2_lane_zt_bf16_x4 (0, z16, 1),
+ svluti2_lane_zt_bf16_x4 (0, z16, 1))
+
+/*
+** luti2_z24_z23_0:
+** luti2 {z24\.h - z27\.h}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_0, svbfloat16x4_t, svuint8_t, z24,
+ svluti2_lane_zt_bf16_x4 (0, z23, 0),
+ svluti2_lane_zt_bf16_x4 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f16.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f16.c
new file mode 100644
index 0000000..9907e70
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f16.c
@@ -0,0 +1,48 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 z1\.h, zt0, z0\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svfloat16_t, svuint8_t, z1,
+ svluti2_lane_zt_f16 (0, z0, 0),
+ svluti2_lane_zt_f16 (0, z0, 0))
+
+/*
+** luti2_z18_z5_15:
+** luti2 z18\.h, zt0, z5\[15\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_15, svfloat16_t, svuint8_t, z18,
+ svluti2_lane_zt_f16 (0, z5, 15),
+ svluti2_lane_zt_f16 (0, z5, 15))
+
+/*
+** luti2_z24_z7_13:
+** luti2 z24\.h, zt0, z7\[13\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_13, svfloat16_t, svuint8_t, z24,
+ svluti2_lane_zt_f16 (0, z7, 13),
+ svluti2_lane_zt_f16 (0, z7, 13))
+
+/*
+** luti2_z28_z16_11:
+** luti2 z28\.h, zt0, z16\[11\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_11, svfloat16_t, svuint8_t, z28,
+ svluti2_lane_zt_f16 (0, z16, 11),
+ svluti2_lane_zt_f16 (0, z16, 11))
+
+/*
+** luti2_z24_z23_1:
+** luti2 z24\.h, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_1, svfloat16_t, svuint8_t, z24,
+ svluti2_lane_zt_f16 (0, z23, 1),
+ svluti2_lane_zt_f16 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f16_x2.c
new file mode 100644
index 0000000..1a21257
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f16_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svfloat16x2_t, svuint8_t, z1,
+ svluti2_lane_zt_f16_x2 (0, z0, 0),
+ svluti2_lane_zt_f16_x2 (0, z0, 0))
+
+/*
+** luti2_z18_z5_7:
+** luti2 {z18\.h - z19\.h}, zt0, z5\[7\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_7, svfloat16x2_t, svuint8_t, z18,
+ svluti2_lane_zt_f16_x2 (0, z5, 7),
+ svluti2_lane_zt_f16_x2 (0, z5, 7))
+
+/*
+** luti2_z24_z7_6:
+** luti2 {z24\.h - z25\.h}, zt0, z7\[6\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_6, svfloat16x2_t, svuint8_t, z24,
+ svluti2_lane_zt_f16_x2 (0, z7, 6),
+ svluti2_lane_zt_f16_x2 (0, z7, 6))
+
+/*
+** luti2_z28_z16_3:
+** luti2 {z28\.h - z29\.h}, zt0, z16\[3\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_3, svfloat16x2_t, svuint8_t, z28,
+ svluti2_lane_zt_f16_x2 (0, z16, 3),
+ svluti2_lane_zt_f16_x2 (0, z16, 3))
+
+/*
+** luti2_z24_z23_1:
+** luti2 {z24\.h - z25\.h}, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_1, svfloat16x2_t, svuint8_t, z24,
+ svluti2_lane_zt_f16_x2 (0, z23, 1),
+ svluti2_lane_zt_f16_x2 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f16_x4.c
new file mode 100644
index 0000000..dd18bcc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f16_x4.c
@@ -0,0 +1,56 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svfloat16x4_t, svuint8_t, z1,
+ svluti2_lane_zt_f16_x4 (0, z0, 0),
+ svluti2_lane_zt_f16_x4 (0, z0, 0))
+
+/*
+** luti2_z18_z5_3:
+** luti2 {[^\n]+}, zt0, z5\[3\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_3, svfloat16x4_t, svuint8_t, z18,
+ svluti2_lane_zt_f16_x4 (0, z5, 3),
+ svluti2_lane_zt_f16_x4 (0, z5, 3))
+
+/*
+** luti2_z24_z7_2:
+** luti2 {z24\.h - z27\.h}, zt0, z7\[2\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_2, svfloat16x4_t, svuint8_t, z24,
+ svluti2_lane_zt_f16_x4 (0, z7, 2),
+ svluti2_lane_zt_f16_x4 (0, z7, 2))
+
+/*
+** luti2_z28_z16_1:
+** luti2 {z28\.h - z31\.h}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_1, svfloat16x4_t, svuint8_t, z28,
+ svluti2_lane_zt_f16_x4 (0, z16, 1),
+ svluti2_lane_zt_f16_x4 (0, z16, 1))
+
+/*
+** luti2_z24_z23_0:
+** luti2 {z24\.h - z27\.h}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_0, svfloat16x4_t, svuint8_t, z24,
+ svluti2_lane_zt_f16_x4 (0, z23, 0),
+ svluti2_lane_zt_f16_x4 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f32.c
new file mode 100644
index 0000000..17cf957
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f32.c
@@ -0,0 +1,48 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 z1\.s, zt0, z0\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svfloat32_t, svuint8_t, z1,
+ svluti2_lane_zt_f32 (0, z0, 0),
+ svluti2_lane_zt_f32 (0, z0, 0))
+
+/*
+** luti2_z18_z5_15:
+** luti2 z18\.s, zt0, z5\[15\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_15, svfloat32_t, svuint8_t, z18,
+ svluti2_lane_zt_f32 (0, z5, 15),
+ svluti2_lane_zt_f32 (0, z5, 15))
+
+/*
+** luti2_z24_z7_13:
+** luti2 z24\.s, zt0, z7\[13\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_13, svfloat32_t, svuint8_t, z24,
+ svluti2_lane_zt_f32 (0, z7, 13),
+ svluti2_lane_zt_f32 (0, z7, 13))
+
+/*
+** luti2_z28_z16_11:
+** luti2 z28\.s, zt0, z16\[11\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_11, svfloat32_t, svuint8_t, z28,
+ svluti2_lane_zt_f32 (0, z16, 11),
+ svluti2_lane_zt_f32 (0, z16, 11))
+
+/*
+** luti2_z24_z23_1:
+** luti2 z24\.s, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_1, svfloat32_t, svuint8_t, z24,
+ svluti2_lane_zt_f32 (0, z23, 1),
+ svluti2_lane_zt_f32 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f32_x2.c
new file mode 100644
index 0000000..f391529
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f32_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svfloat32x2_t, svuint8_t, z1,
+ svluti2_lane_zt_f32_x2 (0, z0, 0),
+ svluti2_lane_zt_f32_x2 (0, z0, 0))
+
+/*
+** luti2_z18_z5_7:
+** luti2 {z18\.s - z19\.s}, zt0, z5\[7\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_7, svfloat32x2_t, svuint8_t, z18,
+ svluti2_lane_zt_f32_x2 (0, z5, 7),
+ svluti2_lane_zt_f32_x2 (0, z5, 7))
+
+/*
+** luti2_z24_z7_6:
+** luti2 {z24\.s - z25\.s}, zt0, z7\[6\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_6, svfloat32x2_t, svuint8_t, z24,
+ svluti2_lane_zt_f32_x2 (0, z7, 6),
+ svluti2_lane_zt_f32_x2 (0, z7, 6))
+
+/*
+** luti2_z28_z16_3:
+** luti2 {z28\.s - z29\.s}, zt0, z16\[3\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_3, svfloat32x2_t, svuint8_t, z28,
+ svluti2_lane_zt_f32_x2 (0, z16, 3),
+ svluti2_lane_zt_f32_x2 (0, z16, 3))
+
+/*
+** luti2_z24_z23_1:
+** luti2 {z24\.s - z25\.s}, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_1, svfloat32x2_t, svuint8_t, z24,
+ svluti2_lane_zt_f32_x2 (0, z23, 1),
+ svluti2_lane_zt_f32_x2 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f32_x4.c
new file mode 100644
index 0000000..ad08c84
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_f32_x4.c
@@ -0,0 +1,56 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svfloat32x4_t, svuint8_t, z1,
+ svluti2_lane_zt_f32_x4 (0, z0, 0),
+ svluti2_lane_zt_f32_x4 (0, z0, 0))
+
+/*
+** luti2_z18_z5_3:
+** luti2 {[^\n]+}, zt0, z5\[3\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_3, svfloat32x4_t, svuint8_t, z18,
+ svluti2_lane_zt_f32_x4 (0, z5, 3),
+ svluti2_lane_zt_f32_x4 (0, z5, 3))
+
+/*
+** luti2_z24_z7_2:
+** luti2 {z24\.s - z27\.s}, zt0, z7\[2\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_2, svfloat32x4_t, svuint8_t, z24,
+ svluti2_lane_zt_f32_x4 (0, z7, 2),
+ svluti2_lane_zt_f32_x4 (0, z7, 2))
+
+/*
+** luti2_z28_z16_1:
+** luti2 {z28\.s - z31\.s}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_1, svfloat32x4_t, svuint8_t, z28,
+ svluti2_lane_zt_f32_x4 (0, z16, 1),
+ svluti2_lane_zt_f32_x4 (0, z16, 1))
+
+/*
+** luti2_z24_z23_0:
+** luti2 {z24\.s - z27\.s}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_0, svfloat32x4_t, svuint8_t, z24,
+ svluti2_lane_zt_f32_x4 (0, z23, 0),
+ svluti2_lane_zt_f32_x4 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s16.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s16.c
new file mode 100644
index 0000000..5d802cf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s16.c
@@ -0,0 +1,48 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 z1\.h, zt0, z0\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svint16_t, svuint8_t, z1,
+ svluti2_lane_zt_s16 (0, z0, 0),
+ svluti2_lane_zt_s16 (0, z0, 0))
+
+/*
+** luti2_z18_z5_15:
+** luti2 z18\.h, zt0, z5\[15\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_15, svint16_t, svuint8_t, z18,
+ svluti2_lane_zt_s16 (0, z5, 15),
+ svluti2_lane_zt_s16 (0, z5, 15))
+
+/*
+** luti2_z24_z7_13:
+** luti2 z24\.h, zt0, z7\[13\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_13, svint16_t, svuint8_t, z24,
+ svluti2_lane_zt_s16 (0, z7, 13),
+ svluti2_lane_zt_s16 (0, z7, 13))
+
+/*
+** luti2_z28_z16_11:
+** luti2 z28\.h, zt0, z16\[11\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_11, svint16_t, svuint8_t, z28,
+ svluti2_lane_zt_s16 (0, z16, 11),
+ svluti2_lane_zt_s16 (0, z16, 11))
+
+/*
+** luti2_z24_z23_1:
+** luti2 z24\.h, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_1, svint16_t, svuint8_t, z24,
+ svluti2_lane_zt_s16 (0, z23, 1),
+ svluti2_lane_zt_s16 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s16_x2.c
new file mode 100644
index 0000000..35100f8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s16_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svint16x2_t, svuint8_t, z1,
+ svluti2_lane_zt_s16_x2 (0, z0, 0),
+ svluti2_lane_zt_s16_x2 (0, z0, 0))
+
+/*
+** luti2_z18_z5_7:
+** luti2 {z18\.h - z19\.h}, zt0, z5\[7\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_7, svint16x2_t, svuint8_t, z18,
+ svluti2_lane_zt_s16_x2 (0, z5, 7),
+ svluti2_lane_zt_s16_x2 (0, z5, 7))
+
+/*
+** luti2_z24_z7_6:
+** luti2 {z24\.h - z25\.h}, zt0, z7\[6\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_6, svint16x2_t, svuint8_t, z24,
+ svluti2_lane_zt_s16_x2 (0, z7, 6),
+ svluti2_lane_zt_s16_x2 (0, z7, 6))
+
+/*
+** luti2_z28_z16_3:
+** luti2 {z28\.h - z29\.h}, zt0, z16\[3\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_3, svint16x2_t, svuint8_t, z28,
+ svluti2_lane_zt_s16_x2 (0, z16, 3),
+ svluti2_lane_zt_s16_x2 (0, z16, 3))
+
+/*
+** luti2_z24_z23_1:
+** luti2 {z24\.h - z25\.h}, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_1, svint16x2_t, svuint8_t, z24,
+ svluti2_lane_zt_s16_x2 (0, z23, 1),
+ svluti2_lane_zt_s16_x2 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s16_x4.c
new file mode 100644
index 0000000..6870070
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s16_x4.c
@@ -0,0 +1,56 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svint16x4_t, svuint8_t, z1,
+ svluti2_lane_zt_s16_x4 (0, z0, 0),
+ svluti2_lane_zt_s16_x4 (0, z0, 0))
+
+/*
+** luti2_z18_z5_3:
+** luti2 {[^\n]+}, zt0, z5\[3\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_3, svint16x4_t, svuint8_t, z18,
+ svluti2_lane_zt_s16_x4 (0, z5, 3),
+ svluti2_lane_zt_s16_x4 (0, z5, 3))
+
+/*
+** luti2_z24_z7_2:
+** luti2 {z24\.h - z27\.h}, zt0, z7\[2\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_2, svint16x4_t, svuint8_t, z24,
+ svluti2_lane_zt_s16_x4 (0, z7, 2),
+ svluti2_lane_zt_s16_x4 (0, z7, 2))
+
+/*
+** luti2_z28_z16_1:
+** luti2 {z28\.h - z31\.h}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_1, svint16x4_t, svuint8_t, z28,
+ svluti2_lane_zt_s16_x4 (0, z16, 1),
+ svluti2_lane_zt_s16_x4 (0, z16, 1))
+
+/*
+** luti2_z24_z23_0:
+** luti2 {z24\.h - z27\.h}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_0, svint16x4_t, svuint8_t, z24,
+ svluti2_lane_zt_s16_x4 (0, z23, 0),
+ svluti2_lane_zt_s16_x4 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s32.c
new file mode 100644
index 0000000..5829dcb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s32.c
@@ -0,0 +1,48 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 z1\.s, zt0, z0\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svint32_t, svuint8_t, z1,
+ svluti2_lane_zt_s32 (0, z0, 0),
+ svluti2_lane_zt_s32 (0, z0, 0))
+
+/*
+** luti2_z18_z5_15:
+** luti2 z18\.s, zt0, z5\[15\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_15, svint32_t, svuint8_t, z18,
+ svluti2_lane_zt_s32 (0, z5, 15),
+ svluti2_lane_zt_s32 (0, z5, 15))
+
+/*
+** luti2_z24_z7_13:
+** luti2 z24\.s, zt0, z7\[13\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_13, svint32_t, svuint8_t, z24,
+ svluti2_lane_zt_s32 (0, z7, 13),
+ svluti2_lane_zt_s32 (0, z7, 13))
+
+/*
+** luti2_z28_z16_11:
+** luti2 z28\.s, zt0, z16\[11\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_11, svint32_t, svuint8_t, z28,
+ svluti2_lane_zt_s32 (0, z16, 11),
+ svluti2_lane_zt_s32 (0, z16, 11))
+
+/*
+** luti2_z24_z23_1:
+** luti2 z24\.s, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_1, svint32_t, svuint8_t, z24,
+ svluti2_lane_zt_s32 (0, z23, 1),
+ svluti2_lane_zt_s32 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s32_x2.c
new file mode 100644
index 0000000..b28b607
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s32_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svint32x2_t, svuint8_t, z1,
+ svluti2_lane_zt_s32_x2 (0, z0, 0),
+ svluti2_lane_zt_s32_x2 (0, z0, 0))
+
+/*
+** luti2_z18_z5_7:
+** luti2 {z18\.s - z19\.s}, zt0, z5\[7\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_7, svint32x2_t, svuint8_t, z18,
+ svluti2_lane_zt_s32_x2 (0, z5, 7),
+ svluti2_lane_zt_s32_x2 (0, z5, 7))
+
+/*
+** luti2_z24_z7_6:
+** luti2 {z24\.s - z25\.s}, zt0, z7\[6\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_6, svint32x2_t, svuint8_t, z24,
+ svluti2_lane_zt_s32_x2 (0, z7, 6),
+ svluti2_lane_zt_s32_x2 (0, z7, 6))
+
+/*
+** luti2_z28_z16_3:
+** luti2 {z28\.s - z29\.s}, zt0, z16\[3\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_3, svint32x2_t, svuint8_t, z28,
+ svluti2_lane_zt_s32_x2 (0, z16, 3),
+ svluti2_lane_zt_s32_x2 (0, z16, 3))
+
+/*
+** luti2_z24_z23_1:
+** luti2 {z24\.s - z25\.s}, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_1, svint32x2_t, svuint8_t, z24,
+ svluti2_lane_zt_s32_x2 (0, z23, 1),
+ svluti2_lane_zt_s32_x2 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s32_x4.c
new file mode 100644
index 0000000..9b3dc1d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s32_x4.c
@@ -0,0 +1,56 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svint32x4_t, svuint8_t, z1,
+ svluti2_lane_zt_s32_x4 (0, z0, 0),
+ svluti2_lane_zt_s32_x4 (0, z0, 0))
+
+/*
+** luti2_z18_z5_3:
+** luti2 {[^\n]+}, zt0, z5\[3\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_3, svint32x4_t, svuint8_t, z18,
+ svluti2_lane_zt_s32_x4 (0, z5, 3),
+ svluti2_lane_zt_s32_x4 (0, z5, 3))
+
+/*
+** luti2_z24_z7_2:
+** luti2 {z24\.s - z27\.s}, zt0, z7\[2\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_2, svint32x4_t, svuint8_t, z24,
+ svluti2_lane_zt_s32_x4 (0, z7, 2),
+ svluti2_lane_zt_s32_x4 (0, z7, 2))
+
+/*
+** luti2_z28_z16_1:
+** luti2 {z28\.s - z31\.s}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_1, svint32x4_t, svuint8_t, z28,
+ svluti2_lane_zt_s32_x4 (0, z16, 1),
+ svluti2_lane_zt_s32_x4 (0, z16, 1))
+
+/*
+** luti2_z24_z23_0:
+** luti2 {z24\.s - z27\.s}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_0, svint32x4_t, svuint8_t, z24,
+ svluti2_lane_zt_s32_x4 (0, z23, 0),
+ svluti2_lane_zt_s32_x4 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s8.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s8.c
new file mode 100644
index 0000000..5ff4671
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s8.c
@@ -0,0 +1,48 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 z1\.b, zt0, z0\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svint8_t, svuint8_t, z1,
+ svluti2_lane_zt_s8 (0, z0, 0),
+ svluti2_lane_zt_s8 (0, z0, 0))
+
+/*
+** luti2_z18_z5_15:
+** luti2 z18\.b, zt0, z5\[15\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_15, svint8_t, svuint8_t, z18,
+ svluti2_lane_zt_s8 (0, z5, 15),
+ svluti2_lane_zt_s8 (0, z5, 15))
+
+/*
+** luti2_z24_z7_13:
+** luti2 z24\.b, zt0, z7\[13\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_13, svint8_t, svuint8_t, z24,
+ svluti2_lane_zt_s8 (0, z7, 13),
+ svluti2_lane_zt_s8 (0, z7, 13))
+
+/*
+** luti2_z28_z16_11:
+** luti2 z28\.b, zt0, z16\[11\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_11, svint8_t, svuint8_t, z28,
+ svluti2_lane_zt_s8 (0, z16, 11),
+ svluti2_lane_zt_s8 (0, z16, 11))
+
+/*
+** luti2_z24_z23_1:
+** luti2 z24\.b, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_1, svint8_t, svuint8_t, z24,
+ svluti2_lane_zt_s8 (0, z23, 1),
+ svluti2_lane_zt_s8 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s8_x2.c
new file mode 100644
index 0000000..a6ff0cd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s8_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svint8x2_t, svuint8_t, z1,
+ svluti2_lane_zt_s8_x2 (0, z0, 0),
+ svluti2_lane_zt_s8_x2 (0, z0, 0))
+
+/*
+** luti2_z18_z5_7:
+** luti2 {z18\.b - z19\.b}, zt0, z5\[7\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_7, svint8x2_t, svuint8_t, z18,
+ svluti2_lane_zt_s8_x2 (0, z5, 7),
+ svluti2_lane_zt_s8_x2 (0, z5, 7))
+
+/*
+** luti2_z24_z7_6:
+** luti2 {z24\.b - z25\.b}, zt0, z7\[6\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_6, svint8x2_t, svuint8_t, z24,
+ svluti2_lane_zt_s8_x2 (0, z7, 6),
+ svluti2_lane_zt_s8_x2 (0, z7, 6))
+
+/*
+** luti2_z28_z16_3:
+** luti2 {z28\.b - z29\.b}, zt0, z16\[3\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_3, svint8x2_t, svuint8_t, z28,
+ svluti2_lane_zt_s8_x2 (0, z16, 3),
+ svluti2_lane_zt_s8_x2 (0, z16, 3))
+
+/*
+** luti2_z24_z23_1:
+** luti2 {z24\.b - z25\.b}, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_1, svint8x2_t, svuint8_t, z24,
+ svluti2_lane_zt_s8_x2 (0, z23, 1),
+ svluti2_lane_zt_s8_x2 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s8_x4.c
new file mode 100644
index 0000000..23dd23e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_s8_x4.c
@@ -0,0 +1,56 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svint8x4_t, svuint8_t, z1,
+ svluti2_lane_zt_s8_x4 (0, z0, 0),
+ svluti2_lane_zt_s8_x4 (0, z0, 0))
+
+/*
+** luti2_z18_z5_3:
+** luti2 {[^\n]+}, zt0, z5\[3\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_3, svint8x4_t, svuint8_t, z18,
+ svluti2_lane_zt_s8_x4 (0, z5, 3),
+ svluti2_lane_zt_s8_x4 (0, z5, 3))
+
+/*
+** luti2_z24_z7_2:
+** luti2 {z24\.b - z27\.b}, zt0, z7\[2\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_2, svint8x4_t, svuint8_t, z24,
+ svluti2_lane_zt_s8_x4 (0, z7, 2),
+ svluti2_lane_zt_s8_x4 (0, z7, 2))
+
+/*
+** luti2_z28_z16_1:
+** luti2 {z28\.b - z31\.b}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_1, svint8x4_t, svuint8_t, z28,
+ svluti2_lane_zt_s8_x4 (0, z16, 1),
+ svluti2_lane_zt_s8_x4 (0, z16, 1))
+
+/*
+** luti2_z24_z23_0:
+** luti2 {z24\.b - z27\.b}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_0, svint8x4_t, svuint8_t, z24,
+ svluti2_lane_zt_s8_x4 (0, z23, 0),
+ svluti2_lane_zt_s8_x4 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u16.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u16.c
new file mode 100644
index 0000000..56c9146
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u16.c
@@ -0,0 +1,48 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 z1\.h, zt0, z0\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svuint16_t, svuint8_t, z1,
+ svluti2_lane_zt_u16 (0, z0, 0),
+ svluti2_lane_zt_u16 (0, z0, 0))
+
+/*
+** luti2_z18_z5_15:
+** luti2 z18\.h, zt0, z5\[15\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_15, svuint16_t, svuint8_t, z18,
+ svluti2_lane_zt_u16 (0, z5, 15),
+ svluti2_lane_zt_u16 (0, z5, 15))
+
+/*
+** luti2_z24_z7_13:
+** luti2 z24\.h, zt0, z7\[13\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_13, svuint16_t, svuint8_t, z24,
+ svluti2_lane_zt_u16 (0, z7, 13),
+ svluti2_lane_zt_u16 (0, z7, 13))
+
+/*
+** luti2_z28_z16_11:
+** luti2 z28\.h, zt0, z16\[11\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_11, svuint16_t, svuint8_t, z28,
+ svluti2_lane_zt_u16 (0, z16, 11),
+ svluti2_lane_zt_u16 (0, z16, 11))
+
+/*
+** luti2_z24_z23_1:
+** luti2 z24\.h, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_1, svuint16_t, svuint8_t, z24,
+ svluti2_lane_zt_u16 (0, z23, 1),
+ svluti2_lane_zt_u16 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u16_x2.c
new file mode 100644
index 0000000..cda16d8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u16_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svuint16x2_t, svuint8_t, z1,
+ svluti2_lane_zt_u16_x2 (0, z0, 0),
+ svluti2_lane_zt_u16_x2 (0, z0, 0))
+
+/*
+** luti2_z18_z5_7:
+** luti2 {z18\.h - z19\.h}, zt0, z5\[7\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_7, svuint16x2_t, svuint8_t, z18,
+ svluti2_lane_zt_u16_x2 (0, z5, 7),
+ svluti2_lane_zt_u16_x2 (0, z5, 7))
+
+/*
+** luti2_z24_z7_6:
+** luti2 {z24\.h - z25\.h}, zt0, z7\[6\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_6, svuint16x2_t, svuint8_t, z24,
+ svluti2_lane_zt_u16_x2 (0, z7, 6),
+ svluti2_lane_zt_u16_x2 (0, z7, 6))
+
+/*
+** luti2_z28_z16_3:
+** luti2 {z28\.h - z29\.h}, zt0, z16\[3\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_3, svuint16x2_t, svuint8_t, z28,
+ svluti2_lane_zt_u16_x2 (0, z16, 3),
+ svluti2_lane_zt_u16_x2 (0, z16, 3))
+
+/*
+** luti2_z24_z23_1:
+** luti2 {z24\.h - z25\.h}, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_1, svuint16x2_t, svuint8_t, z24,
+ svluti2_lane_zt_u16_x2 (0, z23, 1),
+ svluti2_lane_zt_u16_x2 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u16_x4.c
new file mode 100644
index 0000000..deb5ca1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u16_x4.c
@@ -0,0 +1,56 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svuint16x4_t, svuint8_t, z1,
+ svluti2_lane_zt_u16_x4 (0, z0, 0),
+ svluti2_lane_zt_u16_x4 (0, z0, 0))
+
+/*
+** luti2_z18_z5_3:
+** luti2 {[^\n]+}, zt0, z5\[3\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_3, svuint16x4_t, svuint8_t, z18,
+ svluti2_lane_zt_u16_x4 (0, z5, 3),
+ svluti2_lane_zt_u16_x4 (0, z5, 3))
+
+/*
+** luti2_z24_z7_2:
+** luti2 {z24\.h - z27\.h}, zt0, z7\[2\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_2, svuint16x4_t, svuint8_t, z24,
+ svluti2_lane_zt_u16_x4 (0, z7, 2),
+ svluti2_lane_zt_u16_x4 (0, z7, 2))
+
+/*
+** luti2_z28_z16_1:
+** luti2 {z28\.h - z31\.h}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_1, svuint16x4_t, svuint8_t, z28,
+ svluti2_lane_zt_u16_x4 (0, z16, 1),
+ svluti2_lane_zt_u16_x4 (0, z16, 1))
+
+/*
+** luti2_z24_z23_0:
+** luti2 {z24\.h - z27\.h}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_0, svuint16x4_t, svuint8_t, z24,
+ svluti2_lane_zt_u16_x4 (0, z23, 0),
+ svluti2_lane_zt_u16_x4 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u32.c
new file mode 100644
index 0000000..f88d615
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u32.c
@@ -0,0 +1,48 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 z1\.s, zt0, z0\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svuint32_t, svuint8_t, z1,
+ svluti2_lane_zt_u32 (0, z0, 0),
+ svluti2_lane_zt_u32 (0, z0, 0))
+
+/*
+** luti2_z18_z5_15:
+** luti2 z18\.s, zt0, z5\[15\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_15, svuint32_t, svuint8_t, z18,
+ svluti2_lane_zt_u32 (0, z5, 15),
+ svluti2_lane_zt_u32 (0, z5, 15))
+
+/*
+** luti2_z24_z7_13:
+** luti2 z24\.s, zt0, z7\[13\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_13, svuint32_t, svuint8_t, z24,
+ svluti2_lane_zt_u32 (0, z7, 13),
+ svluti2_lane_zt_u32 (0, z7, 13))
+
+/*
+** luti2_z28_z16_11:
+** luti2 z28\.s, zt0, z16\[11\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_11, svuint32_t, svuint8_t, z28,
+ svluti2_lane_zt_u32 (0, z16, 11),
+ svluti2_lane_zt_u32 (0, z16, 11))
+
+/*
+** luti2_z24_z23_1:
+** luti2 z24\.s, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_1, svuint32_t, svuint8_t, z24,
+ svluti2_lane_zt_u32 (0, z23, 1),
+ svluti2_lane_zt_u32 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u32_x2.c
new file mode 100644
index 0000000..78d0159
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u32_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svuint32x2_t, svuint8_t, z1,
+ svluti2_lane_zt_u32_x2 (0, z0, 0),
+ svluti2_lane_zt_u32_x2 (0, z0, 0))
+
+/*
+** luti2_z18_z5_7:
+** luti2 {z18\.s - z19\.s}, zt0, z5\[7\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_7, svuint32x2_t, svuint8_t, z18,
+ svluti2_lane_zt_u32_x2 (0, z5, 7),
+ svluti2_lane_zt_u32_x2 (0, z5, 7))
+
+/*
+** luti2_z24_z7_6:
+** luti2 {z24\.s - z25\.s}, zt0, z7\[6\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_6, svuint32x2_t, svuint8_t, z24,
+ svluti2_lane_zt_u32_x2 (0, z7, 6),
+ svluti2_lane_zt_u32_x2 (0, z7, 6))
+
+/*
+** luti2_z28_z16_3:
+** luti2 {z28\.s - z29\.s}, zt0, z16\[3\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_3, svuint32x2_t, svuint8_t, z28,
+ svluti2_lane_zt_u32_x2 (0, z16, 3),
+ svluti2_lane_zt_u32_x2 (0, z16, 3))
+
+/*
+** luti2_z24_z23_1:
+** luti2 {z24\.s - z25\.s}, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_1, svuint32x2_t, svuint8_t, z24,
+ svluti2_lane_zt_u32_x2 (0, z23, 1),
+ svluti2_lane_zt_u32_x2 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u32_x4.c
new file mode 100644
index 0000000..f7e8b63
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u32_x4.c
@@ -0,0 +1,56 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svuint32x4_t, svuint8_t, z1,
+ svluti2_lane_zt_u32_x4 (0, z0, 0),
+ svluti2_lane_zt_u32_x4 (0, z0, 0))
+
+/*
+** luti2_z18_z5_3:
+** luti2 {[^\n]+}, zt0, z5\[3\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_3, svuint32x4_t, svuint8_t, z18,
+ svluti2_lane_zt_u32_x4 (0, z5, 3),
+ svluti2_lane_zt_u32_x4 (0, z5, 3))
+
+/*
+** luti2_z24_z7_2:
+** luti2 {z24\.s - z27\.s}, zt0, z7\[2\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_2, svuint32x4_t, svuint8_t, z24,
+ svluti2_lane_zt_u32_x4 (0, z7, 2),
+ svluti2_lane_zt_u32_x4 (0, z7, 2))
+
+/*
+** luti2_z28_z16_1:
+** luti2 {z28\.s - z31\.s}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_1, svuint32x4_t, svuint8_t, z28,
+ svluti2_lane_zt_u32_x4 (0, z16, 1),
+ svluti2_lane_zt_u32_x4 (0, z16, 1))
+
+/*
+** luti2_z24_z23_0:
+** luti2 {z24\.s - z27\.s}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_0, svuint32x4_t, svuint8_t, z24,
+ svluti2_lane_zt_u32_x4 (0, z23, 0),
+ svluti2_lane_zt_u32_x4 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u8.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u8.c
new file mode 100644
index 0000000..1cef4d5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u8.c
@@ -0,0 +1,48 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 z1\.b, zt0, z0\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svuint8_t, svuint8_t, z1,
+ svluti2_lane_zt_u8 (0, z0, 0),
+ svluti2_lane_zt_u8 (0, z0, 0))
+
+/*
+** luti2_z18_z5_15:
+** luti2 z18\.b, zt0, z5\[15\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_15, svuint8_t, svuint8_t, z18,
+ svluti2_lane_zt_u8 (0, z5, 15),
+ svluti2_lane_zt_u8 (0, z5, 15))
+
+/*
+** luti2_z24_z7_13:
+** luti2 z24\.b, zt0, z7\[13\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_13, svuint8_t, svuint8_t, z24,
+ svluti2_lane_zt_u8 (0, z7, 13),
+ svluti2_lane_zt_u8 (0, z7, 13))
+
+/*
+** luti2_z28_z16_11:
+** luti2 z28\.b, zt0, z16\[11\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_11, svuint8_t, svuint8_t, z28,
+ svluti2_lane_zt_u8 (0, z16, 11),
+ svluti2_lane_zt_u8 (0, z16, 11))
+
+/*
+** luti2_z24_z23_1:
+** luti2 z24\.b, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_1, svuint8_t, svuint8_t, z24,
+ svluti2_lane_zt_u8 (0, z23, 1),
+ svluti2_lane_zt_u8 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u8_x2.c
new file mode 100644
index 0000000..73447ec
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u8_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svuint8x2_t, svuint8_t, z1,
+ svluti2_lane_zt_u8_x2 (0, z0, 0),
+ svluti2_lane_zt_u8_x2 (0, z0, 0))
+
+/*
+** luti2_z18_z5_7:
+** luti2 {z18\.b - z19\.b}, zt0, z5\[7\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_7, svuint8x2_t, svuint8_t, z18,
+ svluti2_lane_zt_u8_x2 (0, z5, 7),
+ svluti2_lane_zt_u8_x2 (0, z5, 7))
+
+/*
+** luti2_z24_z7_6:
+** luti2 {z24\.b - z25\.b}, zt0, z7\[6\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_6, svuint8x2_t, svuint8_t, z24,
+ svluti2_lane_zt_u8_x2 (0, z7, 6),
+ svluti2_lane_zt_u8_x2 (0, z7, 6))
+
+/*
+** luti2_z28_z16_3:
+** luti2 {z28\.b - z29\.b}, zt0, z16\[3\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_3, svuint8x2_t, svuint8_t, z28,
+ svluti2_lane_zt_u8_x2 (0, z16, 3),
+ svluti2_lane_zt_u8_x2 (0, z16, 3))
+
+/*
+** luti2_z24_z23_1:
+** luti2 {z24\.b - z25\.b}, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_1, svuint8x2_t, svuint8_t, z24,
+ svluti2_lane_zt_u8_x2 (0, z23, 1),
+ svluti2_lane_zt_u8_x2 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u8_x4.c
new file mode 100644
index 0000000..3f64c61
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti2_u8_x4.c
@@ -0,0 +1,56 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti2_z1_z0_0:
+** luti2 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z1_z0_0, svuint8x4_t, svuint8_t, z1,
+ svluti2_lane_zt_u8_x4 (0, z0, 0),
+ svluti2_lane_zt_u8_x4 (0, z0, 0))
+
+/*
+** luti2_z18_z5_3:
+** luti2 {[^\n]+}, zt0, z5\[3\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti2_z18_z5_3, svuint8x4_t, svuint8_t, z18,
+ svluti2_lane_zt_u8_x4 (0, z5, 3),
+ svluti2_lane_zt_u8_x4 (0, z5, 3))
+
+/*
+** luti2_z24_z7_2:
+** luti2 {z24\.b - z27\.b}, zt0, z7\[2\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z7_2, svuint8x4_t, svuint8_t, z24,
+ svluti2_lane_zt_u8_x4 (0, z7, 2),
+ svluti2_lane_zt_u8_x4 (0, z7, 2))
+
+/*
+** luti2_z28_z16_1:
+** luti2 {z28\.b - z31\.b}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z28_z16_1, svuint8x4_t, svuint8_t, z28,
+ svluti2_lane_zt_u8_x4 (0, z16, 1),
+ svluti2_lane_zt_u8_x4 (0, z16, 1))
+
+/*
+** luti2_z24_z23_0:
+** luti2 {z24\.b - z27\.b}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti2_z24_z23_0, svuint8x4_t, svuint8_t, z24,
+ svluti2_lane_zt_u8_x4 (0, z23, 0),
+ svluti2_lane_zt_u8_x4 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_bf16.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_bf16.c
new file mode 100644
index 0000000..77d7b60
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_bf16.c
@@ -0,0 +1,48 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 z1\.h, zt0, z0\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svbfloat16_t, svuint8_t, z1,
+ svluti4_lane_zt_bf16 (0, z0, 0),
+ svluti4_lane_zt_bf16 (0, z0, 0))
+
+/*
+** luti4_z18_z5_7:
+** luti4 z18\.h, zt0, z5\[7\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_7, svbfloat16_t, svuint8_t, z18,
+ svluti4_lane_zt_bf16 (0, z5, 7),
+ svluti4_lane_zt_bf16 (0, z5, 7))
+
+/*
+** luti4_z24_z7_6:
+** luti4 z24\.h, zt0, z7\[6\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_6, svbfloat16_t, svuint8_t, z24,
+ svluti4_lane_zt_bf16 (0, z7, 6),
+ svluti4_lane_zt_bf16 (0, z7, 6))
+
+/*
+** luti4_z28_z16_4:
+** luti4 z28\.h, zt0, z16\[4\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_4, svbfloat16_t, svuint8_t, z28,
+ svluti4_lane_zt_bf16 (0, z16, 4),
+ svluti4_lane_zt_bf16 (0, z16, 4))
+
+/*
+** luti4_z24_z23_1:
+** luti4 z24\.h, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_1, svbfloat16_t, svuint8_t, z24,
+ svluti4_lane_zt_bf16 (0, z23, 1),
+ svluti4_lane_zt_bf16 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_bf16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_bf16_x2.c
new file mode 100644
index 0000000..b86b020
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_bf16_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svbfloat16x2_t, svuint8_t, z1,
+ svluti4_lane_zt_bf16_x2 (0, z0, 0),
+ svluti4_lane_zt_bf16_x2 (0, z0, 0))
+
+/*
+** luti4_z18_z5_3:
+** luti4 {z18\.h - z19\.h}, zt0, z5\[3\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_3, svbfloat16x2_t, svuint8_t, z18,
+ svluti4_lane_zt_bf16_x2 (0, z5, 3),
+ svluti4_lane_zt_bf16_x2 (0, z5, 3))
+
+/*
+** luti4_z24_z7_2:
+** luti4 {z24\.h - z25\.h}, zt0, z7\[2\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_2, svbfloat16x2_t, svuint8_t, z24,
+ svluti4_lane_zt_bf16_x2 (0, z7, 2),
+ svluti4_lane_zt_bf16_x2 (0, z7, 2))
+
+/*
+** luti4_z28_z16_1:
+** luti4 {z28\.h - z29\.h}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_1, svbfloat16x2_t, svuint8_t, z28,
+ svluti4_lane_zt_bf16_x2 (0, z16, 1),
+ svluti4_lane_zt_bf16_x2 (0, z16, 1))
+
+/*
+** luti4_z24_z23_0:
+** luti4 {z24\.h - z25\.h}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_0, svbfloat16x2_t, svuint8_t, z24,
+ svluti4_lane_zt_bf16_x2 (0, z23, 0),
+ svluti4_lane_zt_bf16_x2 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_bf16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_bf16_x4.c
new file mode 100644
index 0000000..148db5d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_bf16_x4.c
@@ -0,0 +1,56 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svbfloat16x4_t, svuint8_t, z1,
+ svluti4_lane_zt_bf16_x4 (0, z0, 0),
+ svluti4_lane_zt_bf16_x4 (0, z0, 0))
+
+/*
+** luti4_z18_z5_1:
+** luti4 {[^\n]+}, zt0, z5\[1\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_1, svbfloat16x4_t, svuint8_t, z18,
+ svluti4_lane_zt_bf16_x4 (0, z5, 1),
+ svluti4_lane_zt_bf16_x4 (0, z5, 1))
+
+/*
+** luti4_z24_z7_0:
+** luti4 {z24\.h - z27\.h}, zt0, z7\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_0, svbfloat16x4_t, svuint8_t, z24,
+ svluti4_lane_zt_bf16_x4 (0, z7, 0),
+ svluti4_lane_zt_bf16_x4 (0, z7, 0))
+
+/*
+** luti4_z28_z16_1:
+** luti4 {z28\.h - z31\.h}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_1, svbfloat16x4_t, svuint8_t, z28,
+ svluti4_lane_zt_bf16_x4 (0, z16, 1),
+ svluti4_lane_zt_bf16_x4 (0, z16, 1))
+
+/*
+** luti4_z24_z23_0:
+** luti4 {z24\.h - z27\.h}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_0, svbfloat16x4_t, svuint8_t, z24,
+ svluti4_lane_zt_bf16_x4 (0, z23, 0),
+ svluti4_lane_zt_bf16_x4 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f16.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f16.c
new file mode 100644
index 0000000..b8f6e06
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f16.c
@@ -0,0 +1,48 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 z1\.h, zt0, z0\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svfloat16_t, svuint8_t, z1,
+ svluti4_lane_zt_f16 (0, z0, 0),
+ svluti4_lane_zt_f16 (0, z0, 0))
+
+/*
+** luti4_z18_z5_7:
+** luti4 z18\.h, zt0, z5\[7\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_7, svfloat16_t, svuint8_t, z18,
+ svluti4_lane_zt_f16 (0, z5, 7),
+ svluti4_lane_zt_f16 (0, z5, 7))
+
+/*
+** luti4_z24_z7_6:
+** luti4 z24\.h, zt0, z7\[6\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_6, svfloat16_t, svuint8_t, z24,
+ svluti4_lane_zt_f16 (0, z7, 6),
+ svluti4_lane_zt_f16 (0, z7, 6))
+
+/*
+** luti4_z28_z16_4:
+** luti4 z28\.h, zt0, z16\[4\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_4, svfloat16_t, svuint8_t, z28,
+ svluti4_lane_zt_f16 (0, z16, 4),
+ svluti4_lane_zt_f16 (0, z16, 4))
+
+/*
+** luti4_z24_z23_1:
+** luti4 z24\.h, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_1, svfloat16_t, svuint8_t, z24,
+ svluti4_lane_zt_f16 (0, z23, 1),
+ svluti4_lane_zt_f16 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f16_x2.c
new file mode 100644
index 0000000..b6c5f95
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f16_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svfloat16x2_t, svuint8_t, z1,
+ svluti4_lane_zt_f16_x2 (0, z0, 0),
+ svluti4_lane_zt_f16_x2 (0, z0, 0))
+
+/*
+** luti4_z18_z5_3:
+** luti4 {z18\.h - z19\.h}, zt0, z5\[3\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_3, svfloat16x2_t, svuint8_t, z18,
+ svluti4_lane_zt_f16_x2 (0, z5, 3),
+ svluti4_lane_zt_f16_x2 (0, z5, 3))
+
+/*
+** luti4_z24_z7_2:
+** luti4 {z24\.h - z25\.h}, zt0, z7\[2\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_2, svfloat16x2_t, svuint8_t, z24,
+ svluti4_lane_zt_f16_x2 (0, z7, 2),
+ svluti4_lane_zt_f16_x2 (0, z7, 2))
+
+/*
+** luti4_z28_z16_1:
+** luti4 {z28\.h - z29\.h}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_1, svfloat16x2_t, svuint8_t, z28,
+ svluti4_lane_zt_f16_x2 (0, z16, 1),
+ svluti4_lane_zt_f16_x2 (0, z16, 1))
+
+/*
+** luti4_z24_z23_0:
+** luti4 {z24\.h - z25\.h}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_0, svfloat16x2_t, svuint8_t, z24,
+ svluti4_lane_zt_f16_x2 (0, z23, 0),
+ svluti4_lane_zt_f16_x2 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f16_x4.c
new file mode 100644
index 0000000..8cbad8b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f16_x4.c
@@ -0,0 +1,56 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svfloat16x4_t, svuint8_t, z1,
+ svluti4_lane_zt_f16_x4 (0, z0, 0),
+ svluti4_lane_zt_f16_x4 (0, z0, 0))
+
+/*
+** luti4_z18_z5_1:
+** luti4 {[^\n]+}, zt0, z5\[1\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_1, svfloat16x4_t, svuint8_t, z18,
+ svluti4_lane_zt_f16_x4 (0, z5, 1),
+ svluti4_lane_zt_f16_x4 (0, z5, 1))
+
+/*
+** luti4_z24_z7_0:
+** luti4 {z24\.h - z27\.h}, zt0, z7\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_0, svfloat16x4_t, svuint8_t, z24,
+ svluti4_lane_zt_f16_x4 (0, z7, 0),
+ svluti4_lane_zt_f16_x4 (0, z7, 0))
+
+/*
+** luti4_z28_z16_1:
+** luti4 {z28\.h - z31\.h}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_1, svfloat16x4_t, svuint8_t, z28,
+ svluti4_lane_zt_f16_x4 (0, z16, 1),
+ svluti4_lane_zt_f16_x4 (0, z16, 1))
+
+/*
+** luti4_z24_z23_0:
+** luti4 {z24\.h - z27\.h}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_0, svfloat16x4_t, svuint8_t, z24,
+ svluti4_lane_zt_f16_x4 (0, z23, 0),
+ svluti4_lane_zt_f16_x4 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f32.c
new file mode 100644
index 0000000..3dcc69c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f32.c
@@ -0,0 +1,48 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 z1\.s, zt0, z0\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svfloat32_t, svuint8_t, z1,
+ svluti4_lane_zt_f32 (0, z0, 0),
+ svluti4_lane_zt_f32 (0, z0, 0))
+
+/*
+** luti4_z18_z5_7:
+** luti4 z18\.s, zt0, z5\[7\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_7, svfloat32_t, svuint8_t, z18,
+ svluti4_lane_zt_f32 (0, z5, 7),
+ svluti4_lane_zt_f32 (0, z5, 7))
+
+/*
+** luti4_z24_z7_6:
+** luti4 z24\.s, zt0, z7\[6\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_6, svfloat32_t, svuint8_t, z24,
+ svluti4_lane_zt_f32 (0, z7, 6),
+ svluti4_lane_zt_f32 (0, z7, 6))
+
+/*
+** luti4_z28_z16_4:
+** luti4 z28\.s, zt0, z16\[4\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_4, svfloat32_t, svuint8_t, z28,
+ svluti4_lane_zt_f32 (0, z16, 4),
+ svluti4_lane_zt_f32 (0, z16, 4))
+
+/*
+** luti4_z24_z23_1:
+** luti4 z24\.s, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_1, svfloat32_t, svuint8_t, z24,
+ svluti4_lane_zt_f32 (0, z23, 1),
+ svluti4_lane_zt_f32 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f32_x2.c
new file mode 100644
index 0000000..7f97058
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f32_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svfloat32x2_t, svuint8_t, z1,
+ svluti4_lane_zt_f32_x2 (0, z0, 0),
+ svluti4_lane_zt_f32_x2 (0, z0, 0))
+
+/*
+** luti4_z18_z5_3:
+** luti4 {z18\.s - z19\.s}, zt0, z5\[3\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_3, svfloat32x2_t, svuint8_t, z18,
+ svluti4_lane_zt_f32_x2 (0, z5, 3),
+ svluti4_lane_zt_f32_x2 (0, z5, 3))
+
+/*
+** luti4_z24_z7_2:
+** luti4 {z24\.s - z25\.s}, zt0, z7\[2\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_2, svfloat32x2_t, svuint8_t, z24,
+ svluti4_lane_zt_f32_x2 (0, z7, 2),
+ svluti4_lane_zt_f32_x2 (0, z7, 2))
+
+/*
+** luti4_z28_z16_1:
+** luti4 {z28\.s - z29\.s}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_1, svfloat32x2_t, svuint8_t, z28,
+ svluti4_lane_zt_f32_x2 (0, z16, 1),
+ svluti4_lane_zt_f32_x2 (0, z16, 1))
+
+/*
+** luti4_z24_z23_0:
+** luti4 {z24\.s - z25\.s}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_0, svfloat32x2_t, svuint8_t, z24,
+ svluti4_lane_zt_f32_x2 (0, z23, 0),
+ svluti4_lane_zt_f32_x2 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f32_x4.c
new file mode 100644
index 0000000..c32c674
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_f32_x4.c
@@ -0,0 +1,56 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svfloat32x4_t, svuint8_t, z1,
+ svluti4_lane_zt_f32_x4 (0, z0, 0),
+ svluti4_lane_zt_f32_x4 (0, z0, 0))
+
+/*
+** luti4_z18_z5_1:
+** luti4 {[^\n]+}, zt0, z5\[1\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_1, svfloat32x4_t, svuint8_t, z18,
+ svluti4_lane_zt_f32_x4 (0, z5, 1),
+ svluti4_lane_zt_f32_x4 (0, z5, 1))
+
+/*
+** luti4_z24_z7_0:
+** luti4 {z24\.s - z27\.s}, zt0, z7\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_0, svfloat32x4_t, svuint8_t, z24,
+ svluti4_lane_zt_f32_x4 (0, z7, 0),
+ svluti4_lane_zt_f32_x4 (0, z7, 0))
+
+/*
+** luti4_z28_z16_1:
+** luti4 {z28\.s - z31\.s}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_1, svfloat32x4_t, svuint8_t, z28,
+ svluti4_lane_zt_f32_x4 (0, z16, 1),
+ svluti4_lane_zt_f32_x4 (0, z16, 1))
+
+/*
+** luti4_z24_z23_0:
+** luti4 {z24\.s - z27\.s}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_0, svfloat32x4_t, svuint8_t, z24,
+ svluti4_lane_zt_f32_x4 (0, z23, 0),
+ svluti4_lane_zt_f32_x4 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s16.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s16.c
new file mode 100644
index 0000000..792cf77
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s16.c
@@ -0,0 +1,48 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 z1\.h, zt0, z0\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svint16_t, svuint8_t, z1,
+ svluti4_lane_zt_s16 (0, z0, 0),
+ svluti4_lane_zt_s16 (0, z0, 0))
+
+/*
+** luti4_z18_z5_7:
+** luti4 z18\.h, zt0, z5\[7\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_7, svint16_t, svuint8_t, z18,
+ svluti4_lane_zt_s16 (0, z5, 7),
+ svluti4_lane_zt_s16 (0, z5, 7))
+
+/*
+** luti4_z24_z7_6:
+** luti4 z24\.h, zt0, z7\[6\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_6, svint16_t, svuint8_t, z24,
+ svluti4_lane_zt_s16 (0, z7, 6),
+ svluti4_lane_zt_s16 (0, z7, 6))
+
+/*
+** luti4_z28_z16_4:
+** luti4 z28\.h, zt0, z16\[4\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_4, svint16_t, svuint8_t, z28,
+ svluti4_lane_zt_s16 (0, z16, 4),
+ svluti4_lane_zt_s16 (0, z16, 4))
+
+/*
+** luti4_z24_z23_1:
+** luti4 z24\.h, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_1, svint16_t, svuint8_t, z24,
+ svluti4_lane_zt_s16 (0, z23, 1),
+ svluti4_lane_zt_s16 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s16_x2.c
new file mode 100644
index 0000000..d51852b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s16_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svint16x2_t, svuint8_t, z1,
+ svluti4_lane_zt_s16_x2 (0, z0, 0),
+ svluti4_lane_zt_s16_x2 (0, z0, 0))
+
+/*
+** luti4_z18_z5_3:
+** luti4 {z18\.h - z19\.h}, zt0, z5\[3\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_3, svint16x2_t, svuint8_t, z18,
+ svluti4_lane_zt_s16_x2 (0, z5, 3),
+ svluti4_lane_zt_s16_x2 (0, z5, 3))
+
+/*
+** luti4_z24_z7_2:
+** luti4 {z24\.h - z25\.h}, zt0, z7\[2\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_2, svint16x2_t, svuint8_t, z24,
+ svluti4_lane_zt_s16_x2 (0, z7, 2),
+ svluti4_lane_zt_s16_x2 (0, z7, 2))
+
+/*
+** luti4_z28_z16_1:
+** luti4 {z28\.h - z29\.h}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_1, svint16x2_t, svuint8_t, z28,
+ svluti4_lane_zt_s16_x2 (0, z16, 1),
+ svluti4_lane_zt_s16_x2 (0, z16, 1))
+
+/*
+** luti4_z24_z23_0:
+** luti4 {z24\.h - z25\.h}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_0, svint16x2_t, svuint8_t, z24,
+ svluti4_lane_zt_s16_x2 (0, z23, 0),
+ svluti4_lane_zt_s16_x2 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s16_x4.c
new file mode 100644
index 0000000..d964a24
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s16_x4.c
@@ -0,0 +1,56 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svint16x4_t, svuint8_t, z1,
+ svluti4_lane_zt_s16_x4 (0, z0, 0),
+ svluti4_lane_zt_s16_x4 (0, z0, 0))
+
+/*
+** luti4_z18_z5_1:
+** luti4 {[^\n]+}, zt0, z5\[1\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_1, svint16x4_t, svuint8_t, z18,
+ svluti4_lane_zt_s16_x4 (0, z5, 1),
+ svluti4_lane_zt_s16_x4 (0, z5, 1))
+
+/*
+** luti4_z24_z7_0:
+** luti4 {z24\.h - z27\.h}, zt0, z7\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_0, svint16x4_t, svuint8_t, z24,
+ svluti4_lane_zt_s16_x4 (0, z7, 0),
+ svluti4_lane_zt_s16_x4 (0, z7, 0))
+
+/*
+** luti4_z28_z16_1:
+** luti4 {z28\.h - z31\.h}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_1, svint16x4_t, svuint8_t, z28,
+ svluti4_lane_zt_s16_x4 (0, z16, 1),
+ svluti4_lane_zt_s16_x4 (0, z16, 1))
+
+/*
+** luti4_z24_z23_0:
+** luti4 {z24\.h - z27\.h}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_0, svint16x4_t, svuint8_t, z24,
+ svluti4_lane_zt_s16_x4 (0, z23, 0),
+ svluti4_lane_zt_s16_x4 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s32.c
new file mode 100644
index 0000000..36390ad
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s32.c
@@ -0,0 +1,48 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 z1\.s, zt0, z0\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svint32_t, svuint8_t, z1,
+ svluti4_lane_zt_s32 (0, z0, 0),
+ svluti4_lane_zt_s32 (0, z0, 0))
+
+/*
+** luti4_z18_z5_7:
+** luti4 z18\.s, zt0, z5\[7\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_7, svint32_t, svuint8_t, z18,
+ svluti4_lane_zt_s32 (0, z5, 7),
+ svluti4_lane_zt_s32 (0, z5, 7))
+
+/*
+** luti4_z24_z7_6:
+** luti4 z24\.s, zt0, z7\[6\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_6, svint32_t, svuint8_t, z24,
+ svluti4_lane_zt_s32 (0, z7, 6),
+ svluti4_lane_zt_s32 (0, z7, 6))
+
+/*
+** luti4_z28_z16_4:
+** luti4 z28\.s, zt0, z16\[4\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_4, svint32_t, svuint8_t, z28,
+ svluti4_lane_zt_s32 (0, z16, 4),
+ svluti4_lane_zt_s32 (0, z16, 4))
+
+/*
+** luti4_z24_z23_1:
+** luti4 z24\.s, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_1, svint32_t, svuint8_t, z24,
+ svluti4_lane_zt_s32 (0, z23, 1),
+ svluti4_lane_zt_s32 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s32_x2.c
new file mode 100644
index 0000000..7345a1d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s32_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svint32x2_t, svuint8_t, z1,
+ svluti4_lane_zt_s32_x2 (0, z0, 0),
+ svluti4_lane_zt_s32_x2 (0, z0, 0))
+
+/*
+** luti4_z18_z5_3:
+** luti4 {z18\.s - z19\.s}, zt0, z5\[3\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_3, svint32x2_t, svuint8_t, z18,
+ svluti4_lane_zt_s32_x2 (0, z5, 3),
+ svluti4_lane_zt_s32_x2 (0, z5, 3))
+
+/*
+** luti4_z24_z7_2:
+** luti4 {z24\.s - z25\.s}, zt0, z7\[2\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_2, svint32x2_t, svuint8_t, z24,
+ svluti4_lane_zt_s32_x2 (0, z7, 2),
+ svluti4_lane_zt_s32_x2 (0, z7, 2))
+
+/*
+** luti4_z28_z16_1:
+** luti4 {z28\.s - z29\.s}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_1, svint32x2_t, svuint8_t, z28,
+ svluti4_lane_zt_s32_x2 (0, z16, 1),
+ svluti4_lane_zt_s32_x2 (0, z16, 1))
+
+/*
+** luti4_z24_z23_0:
+** luti4 {z24\.s - z25\.s}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_0, svint32x2_t, svuint8_t, z24,
+ svluti4_lane_zt_s32_x2 (0, z23, 0),
+ svluti4_lane_zt_s32_x2 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s32_x4.c
new file mode 100644
index 0000000..13844ce
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s32_x4.c
@@ -0,0 +1,56 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svint32x4_t, svuint8_t, z1,
+ svluti4_lane_zt_s32_x4 (0, z0, 0),
+ svluti4_lane_zt_s32_x4 (0, z0, 0))
+
+/*
+** luti4_z18_z5_1:
+** luti4 {[^\n]+}, zt0, z5\[1\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_1, svint32x4_t, svuint8_t, z18,
+ svluti4_lane_zt_s32_x4 (0, z5, 1),
+ svluti4_lane_zt_s32_x4 (0, z5, 1))
+
+/*
+** luti4_z24_z7_0:
+** luti4 {z24\.s - z27\.s}, zt0, z7\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_0, svint32x4_t, svuint8_t, z24,
+ svluti4_lane_zt_s32_x4 (0, z7, 0),
+ svluti4_lane_zt_s32_x4 (0, z7, 0))
+
+/*
+** luti4_z28_z16_1:
+** luti4 {z28\.s - z31\.s}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_1, svint32x4_t, svuint8_t, z28,
+ svluti4_lane_zt_s32_x4 (0, z16, 1),
+ svluti4_lane_zt_s32_x4 (0, z16, 1))
+
+/*
+** luti4_z24_z23_0:
+** luti4 {z24\.s - z27\.s}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_0, svint32x4_t, svuint8_t, z24,
+ svluti4_lane_zt_s32_x4 (0, z23, 0),
+ svluti4_lane_zt_s32_x4 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s8.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s8.c
new file mode 100644
index 0000000..9c2573a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s8.c
@@ -0,0 +1,48 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 z1\.b, zt0, z0\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svint8_t, svuint8_t, z1,
+ svluti4_lane_zt_s8 (0, z0, 0),
+ svluti4_lane_zt_s8 (0, z0, 0))
+
+/*
+** luti4_z18_z5_7:
+** luti4 z18\.b, zt0, z5\[7\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_7, svint8_t, svuint8_t, z18,
+ svluti4_lane_zt_s8 (0, z5, 7),
+ svluti4_lane_zt_s8 (0, z5, 7))
+
+/*
+** luti4_z24_z7_6:
+** luti4 z24\.b, zt0, z7\[6\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_6, svint8_t, svuint8_t, z24,
+ svluti4_lane_zt_s8 (0, z7, 6),
+ svluti4_lane_zt_s8 (0, z7, 6))
+
+/*
+** luti4_z28_z16_4:
+** luti4 z28\.b, zt0, z16\[4\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_4, svint8_t, svuint8_t, z28,
+ svluti4_lane_zt_s8 (0, z16, 4),
+ svluti4_lane_zt_s8 (0, z16, 4))
+
+/*
+** luti4_z24_z23_1:
+** luti4 z24\.b, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_1, svint8_t, svuint8_t, z24,
+ svluti4_lane_zt_s8 (0, z23, 1),
+ svluti4_lane_zt_s8 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s8_x2.c
new file mode 100644
index 0000000..fcb74a6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_s8_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svint8x2_t, svuint8_t, z1,
+ svluti4_lane_zt_s8_x2 (0, z0, 0),
+ svluti4_lane_zt_s8_x2 (0, z0, 0))
+
+/*
+** luti4_z18_z5_3:
+** luti4 {z18\.b - z19\.b}, zt0, z5\[3\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_3, svint8x2_t, svuint8_t, z18,
+ svluti4_lane_zt_s8_x2 (0, z5, 3),
+ svluti4_lane_zt_s8_x2 (0, z5, 3))
+
+/*
+** luti4_z24_z7_2:
+** luti4 {z24\.b - z25\.b}, zt0, z7\[2\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_2, svint8x2_t, svuint8_t, z24,
+ svluti4_lane_zt_s8_x2 (0, z7, 2),
+ svluti4_lane_zt_s8_x2 (0, z7, 2))
+
+/*
+** luti4_z28_z16_1:
+** luti4 {z28\.b - z29\.b}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_1, svint8x2_t, svuint8_t, z28,
+ svluti4_lane_zt_s8_x2 (0, z16, 1),
+ svluti4_lane_zt_s8_x2 (0, z16, 1))
+
+/*
+** luti4_z24_z23_0:
+** luti4 {z24\.b - z25\.b}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_0, svint8x2_t, svuint8_t, z24,
+ svluti4_lane_zt_s8_x2 (0, z23, 0),
+ svluti4_lane_zt_s8_x2 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u16.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u16.c
new file mode 100644
index 0000000..c542051
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u16.c
@@ -0,0 +1,48 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 z1\.h, zt0, z0\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svuint16_t, svuint8_t, z1,
+ svluti4_lane_zt_u16 (0, z0, 0),
+ svluti4_lane_zt_u16 (0, z0, 0))
+
+/*
+** luti4_z18_z5_7:
+** luti4 z18\.h, zt0, z5\[7\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_7, svuint16_t, svuint8_t, z18,
+ svluti4_lane_zt_u16 (0, z5, 7),
+ svluti4_lane_zt_u16 (0, z5, 7))
+
+/*
+** luti4_z24_z7_6:
+** luti4 z24\.h, zt0, z7\[6\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_6, svuint16_t, svuint8_t, z24,
+ svluti4_lane_zt_u16 (0, z7, 6),
+ svluti4_lane_zt_u16 (0, z7, 6))
+
+/*
+** luti4_z28_z16_4:
+** luti4 z28\.h, zt0, z16\[4\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_4, svuint16_t, svuint8_t, z28,
+ svluti4_lane_zt_u16 (0, z16, 4),
+ svluti4_lane_zt_u16 (0, z16, 4))
+
+/*
+** luti4_z24_z23_1:
+** luti4 z24\.h, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_1, svuint16_t, svuint8_t, z24,
+ svluti4_lane_zt_u16 (0, z23, 1),
+ svluti4_lane_zt_u16 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u16_x2.c
new file mode 100644
index 0000000..df22fd1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u16_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svuint16x2_t, svuint8_t, z1,
+ svluti4_lane_zt_u16_x2 (0, z0, 0),
+ svluti4_lane_zt_u16_x2 (0, z0, 0))
+
+/*
+** luti4_z18_z5_3:
+** luti4 {z18\.h - z19\.h}, zt0, z5\[3\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_3, svuint16x2_t, svuint8_t, z18,
+ svluti4_lane_zt_u16_x2 (0, z5, 3),
+ svluti4_lane_zt_u16_x2 (0, z5, 3))
+
+/*
+** luti4_z24_z7_2:
+** luti4 {z24\.h - z25\.h}, zt0, z7\[2\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_2, svuint16x2_t, svuint8_t, z24,
+ svluti4_lane_zt_u16_x2 (0, z7, 2),
+ svluti4_lane_zt_u16_x2 (0, z7, 2))
+
+/*
+** luti4_z28_z16_1:
+** luti4 {z28\.h - z29\.h}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_1, svuint16x2_t, svuint8_t, z28,
+ svluti4_lane_zt_u16_x2 (0, z16, 1),
+ svluti4_lane_zt_u16_x2 (0, z16, 1))
+
+/*
+** luti4_z24_z23_0:
+** luti4 {z24\.h - z25\.h}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_0, svuint16x2_t, svuint8_t, z24,
+ svluti4_lane_zt_u16_x2 (0, z23, 0),
+ svluti4_lane_zt_u16_x2 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u16_x4.c
new file mode 100644
index 0000000..06180f6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u16_x4.c
@@ -0,0 +1,56 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svuint16x4_t, svuint8_t, z1,
+ svluti4_lane_zt_u16_x4 (0, z0, 0),
+ svluti4_lane_zt_u16_x4 (0, z0, 0))
+
+/*
+** luti4_z18_z5_1:
+** luti4 {[^\n]+}, zt0, z5\[1\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_1, svuint16x4_t, svuint8_t, z18,
+ svluti4_lane_zt_u16_x4 (0, z5, 1),
+ svluti4_lane_zt_u16_x4 (0, z5, 1))
+
+/*
+** luti4_z24_z7_0:
+** luti4 {z24\.h - z27\.h}, zt0, z7\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_0, svuint16x4_t, svuint8_t, z24,
+ svluti4_lane_zt_u16_x4 (0, z7, 0),
+ svluti4_lane_zt_u16_x4 (0, z7, 0))
+
+/*
+** luti4_z28_z16_1:
+** luti4 {z28\.h - z31\.h}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_1, svuint16x4_t, svuint8_t, z28,
+ svluti4_lane_zt_u16_x4 (0, z16, 1),
+ svluti4_lane_zt_u16_x4 (0, z16, 1))
+
+/*
+** luti4_z24_z23_0:
+** luti4 {z24\.h - z27\.h}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_0, svuint16x4_t, svuint8_t, z24,
+ svluti4_lane_zt_u16_x4 (0, z23, 0),
+ svluti4_lane_zt_u16_x4 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u32.c
new file mode 100644
index 0000000..6cba7fe
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u32.c
@@ -0,0 +1,48 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 z1\.s, zt0, z0\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svuint32_t, svuint8_t, z1,
+ svluti4_lane_zt_u32 (0, z0, 0),
+ svluti4_lane_zt_u32 (0, z0, 0))
+
+/*
+** luti4_z18_z5_7:
+** luti4 z18\.s, zt0, z5\[7\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_7, svuint32_t, svuint8_t, z18,
+ svluti4_lane_zt_u32 (0, z5, 7),
+ svluti4_lane_zt_u32 (0, z5, 7))
+
+/*
+** luti4_z24_z7_6:
+** luti4 z24\.s, zt0, z7\[6\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_6, svuint32_t, svuint8_t, z24,
+ svluti4_lane_zt_u32 (0, z7, 6),
+ svluti4_lane_zt_u32 (0, z7, 6))
+
+/*
+** luti4_z28_z16_4:
+** luti4 z28\.s, zt0, z16\[4\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_4, svuint32_t, svuint8_t, z28,
+ svluti4_lane_zt_u32 (0, z16, 4),
+ svluti4_lane_zt_u32 (0, z16, 4))
+
+/*
+** luti4_z24_z23_1:
+** luti4 z24\.s, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_1, svuint32_t, svuint8_t, z24,
+ svluti4_lane_zt_u32 (0, z23, 1),
+ svluti4_lane_zt_u32 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u32_x2.c
new file mode 100644
index 0000000..14bba18
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u32_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svuint32x2_t, svuint8_t, z1,
+ svluti4_lane_zt_u32_x2 (0, z0, 0),
+ svluti4_lane_zt_u32_x2 (0, z0, 0))
+
+/*
+** luti4_z18_z5_3:
+** luti4 {z18\.s - z19\.s}, zt0, z5\[3\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_3, svuint32x2_t, svuint8_t, z18,
+ svluti4_lane_zt_u32_x2 (0, z5, 3),
+ svluti4_lane_zt_u32_x2 (0, z5, 3))
+
+/*
+** luti4_z24_z7_2:
+** luti4 {z24\.s - z25\.s}, zt0, z7\[2\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_2, svuint32x2_t, svuint8_t, z24,
+ svluti4_lane_zt_u32_x2 (0, z7, 2),
+ svluti4_lane_zt_u32_x2 (0, z7, 2))
+
+/*
+** luti4_z28_z16_1:
+** luti4 {z28\.s - z29\.s}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_1, svuint32x2_t, svuint8_t, z28,
+ svluti4_lane_zt_u32_x2 (0, z16, 1),
+ svluti4_lane_zt_u32_x2 (0, z16, 1))
+
+/*
+** luti4_z24_z23_0:
+** luti4 {z24\.s - z25\.s}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_0, svuint32x2_t, svuint8_t, z24,
+ svluti4_lane_zt_u32_x2 (0, z23, 0),
+ svluti4_lane_zt_u32_x2 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u32_x4.c
new file mode 100644
index 0000000..841b9c0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u32_x4.c
@@ -0,0 +1,56 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svuint32x4_t, svuint8_t, z1,
+ svluti4_lane_zt_u32_x4 (0, z0, 0),
+ svluti4_lane_zt_u32_x4 (0, z0, 0))
+
+/*
+** luti4_z18_z5_1:
+** luti4 {[^\n]+}, zt0, z5\[1\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_1, svuint32x4_t, svuint8_t, z18,
+ svluti4_lane_zt_u32_x4 (0, z5, 1),
+ svluti4_lane_zt_u32_x4 (0, z5, 1))
+
+/*
+** luti4_z24_z7_0:
+** luti4 {z24\.s - z27\.s}, zt0, z7\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_0, svuint32x4_t, svuint8_t, z24,
+ svluti4_lane_zt_u32_x4 (0, z7, 0),
+ svluti4_lane_zt_u32_x4 (0, z7, 0))
+
+/*
+** luti4_z28_z16_1:
+** luti4 {z28\.s - z31\.s}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_1, svuint32x4_t, svuint8_t, z28,
+ svluti4_lane_zt_u32_x4 (0, z16, 1),
+ svluti4_lane_zt_u32_x4 (0, z16, 1))
+
+/*
+** luti4_z24_z23_0:
+** luti4 {z24\.s - z27\.s}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_0, svuint32x4_t, svuint8_t, z24,
+ svluti4_lane_zt_u32_x4 (0, z23, 0),
+ svluti4_lane_zt_u32_x4 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u8.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u8.c
new file mode 100644
index 0000000..9524795
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u8.c
@@ -0,0 +1,48 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 z1\.b, zt0, z0\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svuint8_t, svuint8_t, z1,
+ svluti4_lane_zt_u8 (0, z0, 0),
+ svluti4_lane_zt_u8 (0, z0, 0))
+
+/*
+** luti4_z18_z5_7:
+** luti4 z18\.b, zt0, z5\[7\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_7, svuint8_t, svuint8_t, z18,
+ svluti4_lane_zt_u8 (0, z5, 7),
+ svluti4_lane_zt_u8 (0, z5, 7))
+
+/*
+** luti4_z24_z7_6:
+** luti4 z24\.b, zt0, z7\[6\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_6, svuint8_t, svuint8_t, z24,
+ svluti4_lane_zt_u8 (0, z7, 6),
+ svluti4_lane_zt_u8 (0, z7, 6))
+
+/*
+** luti4_z28_z16_4:
+** luti4 z28\.b, zt0, z16\[4\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_4, svuint8_t, svuint8_t, z28,
+ svluti4_lane_zt_u8 (0, z16, 4),
+ svluti4_lane_zt_u8 (0, z16, 4))
+
+/*
+** luti4_z24_z23_1:
+** luti4 z24\.b, zt0, z23\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_1, svuint8_t, svuint8_t, z24,
+ svluti4_lane_zt_u8 (0, z23, 1),
+ svluti4_lane_zt_u8 (0, z23, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u8_x2.c
new file mode 100644
index 0000000..ce7a840
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/luti4_u8_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** luti4_z1_z0_0:
+** luti4 {[^\n]+}, zt0, z0\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (luti4_z1_z0_0, svuint8x2_t, svuint8_t, z1,
+ svluti4_lane_zt_u8_x2 (0, z0, 0),
+ svluti4_lane_zt_u8_x2 (0, z0, 0))
+
+/*
+** luti4_z18_z5_3:
+** luti4 {z18\.b - z19\.b}, zt0, z5\[3\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z18_z5_3, svuint8x2_t, svuint8_t, z18,
+ svluti4_lane_zt_u8_x2 (0, z5, 3),
+ svluti4_lane_zt_u8_x2 (0, z5, 3))
+
+/*
+** luti4_z24_z7_2:
+** luti4 {z24\.b - z25\.b}, zt0, z7\[2\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z7_2, svuint8x2_t, svuint8_t, z24,
+ svluti4_lane_zt_u8_x2 (0, z7, 2),
+ svluti4_lane_zt_u8_x2 (0, z7, 2))
+
+/*
+** luti4_z28_z16_1:
+** luti4 {z28\.b - z29\.b}, zt0, z16\[1\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z28_z16_1, svuint8x2_t, svuint8_t, z28,
+ svluti4_lane_zt_u8_x2 (0, z16, 1),
+ svluti4_lane_zt_u8_x2 (0, z16, 1))
+
+/*
+** luti4_z24_z23_0:
+** luti4 {z24\.b - z25\.b}, zt0, z23\[0\]
+** ret
+*/
+TEST_XN_SINGLE (luti4_z24_z23_0, svuint8x2_t, svuint8_t, z24,
+ svluti4_lane_zt_u8_x2 (0, z23, 0),
+ svluti4_lane_zt_u8_x2 (0, z23, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f16_x2.c
new file mode 100644
index 0000000..808e528
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f16_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** max_z0_z0_z4:
+** fmax {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (max_z0_z0_z4, svfloat16x2_t, z0,
+ svmax_f16_x2 (z0, z4),
+ svmax (z0, z4))
+
+/*
+** max_z0_z4_z0:
+** fmax {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (max_z0_z4_z0, svfloat16x2_t, z0,
+ svmax_f16_x2 (z4, z0),
+ svmax (z4, z0))
+
+/*
+** max_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmax [^\n]+, {z28\.h - z29\.h}
+** |
+** fmax [^\n]+, {z28\.h - z29\.h}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z4_z28, svfloat16x2_t, z0,
+ svmax_f16_x2 (z4, z28),
+ svmax (z4, z28))
+
+/*
+** max_z18_z18_z4:
+** fmax {z18\.h - z19\.h}, {z18\.h - z19\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (max_z18_z18_z4, svfloat16x2_t, z18,
+ svmax_f16_x2 (z18, z4),
+ svmax (z18, z4))
+
+/*
+** max_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** fmax [^\n]+, {z18\.h - z19\.h}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z23_z23_z18, svfloat16x2_t, z23,
+ svmax_f16_x2 (z23, z18),
+ svmax (z23, z18))
+
+/*
+** max_z28_z28_z0:
+** fmax {z28\.h - z29\.h}, {z28\.h - z29\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_XN (max_z28_z28_z0, svfloat16x2_t, z28,
+ svmax_f16_x2 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_z0_z0_z18:
+** fmax {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_XN (max_z0_z0_z18, svfloat16x2_t, z0,
+ svmax_f16_x2 (z0, z18),
+ svmax (z0, z18))
+
+/*
+** max_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmax {z4\.h - z5\.h}, {z4\.h - z5\.h}, [^\n]+
+** |
+** fmax {z4\.h - z5\.h}, {z4\.h - z5\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z4_z4_z23, svfloat16x2_t, z4,
+ svmax_f16_x2 (z4, z23),
+ svmax (z4, z23))
+
+/*
+** max_single_z24_z24_z0:
+** fmax {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z0, svfloat16x2_t, svfloat16_t, z24,
+ svmax_single_f16_x2 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmax {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** |
+** fmax {z28\.h - z29\.h}, {z28\.h - z29\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z28_z0, svfloat16x2_t, svfloat16_t, z24,
+ svmax_single_f16_x2 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** fmax {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z1_z0, svfloat16x2_t, svfloat16_t, z24,
+ svmax_single_f16_x2 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z1_z24_z0:
+** fmax {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z24_z0, svfloat16x2_t, svfloat16_t, z1,
+ svmax_single_f16_x2 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** fmax ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z1_z0, svfloat16x2_t, svfloat16_t, z1,
+ svmax_single_f16_x2 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z18_z18_z0:
+** fmax {z18\.h - z19\.h}, {z18\.h - z19\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (max_single_z18_z18_z0, svfloat16x2_t, svfloat16_t, z18,
+ svmax_single_f16_x2 (z18, z0),
+ svmax (z18, z0))
+
+/*
+** max_single_awkward:
+** ...
+** fmax ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (max_single_awkward, svfloat16x2_t, svfloat16_t,
+ z0_res = svmax_single_f16_x2 (z1, z0),
+ z0_res = svmax (z1, z0))
+
+/*
+** max_single_z0_z0_z15:
+** ...
+** fmax {z0\.h - z1\.h}, {z0\.h - z1\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (max_single_z0_z0_z15, svfloat16x2_t, svfloat16_t,
+ z0 = svmax_single_f16_x2 (z0, z15),
+ z0 = svmax (z0, z15))
+
+/*
+** max_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fmax {z24\.h - z25\.h}, {z24\.h - z25\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z16, svfloat16x2_t, svfloat16_t, z24,
+ svmax_single_f16_x2 (z24, z16),
+ svmax (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f16_x4.c
new file mode 100644
index 0000000..61cab28
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f16_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** max_z0_z0_z4:
+** fmax {z0\.h - z3\.h}, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (max_z0_z0_z4, svfloat16x4_t, z0,
+ svmax_f16_x4 (z0, z4),
+ svmax (z0, z4))
+
+/*
+** max_z0_z4_z0:
+** fmax {z0\.h - z3\.h}, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (max_z0_z4_z0, svfloat16x4_t, z0,
+ svmax_f16_x4 (z4, z0),
+ svmax (z4, z0))
+
+/*
+** max_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax [^\n]+, {z28\.h - z31\.h}
+** |
+** fmax [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z4_z28, svfloat16x4_t, z0,
+ svmax_f16_x4 (z4, z28),
+ svmax (z4, z28))
+
+/*
+** max_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax [^\n]+, {z4\.h - z7\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z18_z18_z4, svfloat16x4_t, z18,
+ svmax_f16_x4 (z18, z4),
+ svmax (z18, z4))
+
+/*
+** max_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z23_z23_z28, svfloat16x4_t, z23,
+ svmax_f16_x4 (z23, z28),
+ svmax (z23, z28))
+
+/*
+** max_z28_z28_z0:
+** fmax {z28\.h - z31\.h}, {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (max_z28_z28_z0, svfloat16x4_t, z28,
+ svmax_f16_x4 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax {z0\.h - z3\.h}, {z0\.h - z3\.h}, [^\n]+
+** |
+** fmax {z0\.h - z3\.h}, {z0\.h - z3\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z0_z18, svfloat16x4_t, z0,
+ svmax_f16_x4 (z0, z18),
+ svmax (z0, z18))
+
+/*
+** max_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax {z4\.h - z7\.h}, {z4\.h - z7\.h}, [^\n]+
+** |
+** fmax {z4\.h - z7\.h}, {z4\.h - z7\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z4_z4_z23, svfloat16x4_t, z4,
+ svmax_f16_x4 (z4, z23),
+ svmax (z4, z23))
+
+/*
+** max_single_z24_z24_z0:
+** fmax {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z0, svfloat16x4_t, svfloat16_t, z24,
+ svmax_single_f16_x4 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** |
+** fmax {z28\.h - z31\.h}, {z28\.h - z31\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z28_z0, svfloat16x4_t, svfloat16_t, z24,
+ svmax_single_f16_x4 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z1_z0, svfloat16x4_t, svfloat16_t, z24,
+ svmax_single_f16_x4 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z1_z24_z0:
+** fmax {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z24_z0, svfloat16x4_t, svfloat16_t, z1,
+ svmax_single_f16_x4 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z1_z0, svfloat16x4_t, svfloat16_t, z1,
+ svmax_single_f16_x4 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax [^\n]+, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z18_z18_z0, svfloat16x4_t, svfloat16_t, z18,
+ svmax_single_f16_x4 (z18, z0),
+ svmax (z18, z0))
+
+/*
+** max_single_awkward:
+** ...
+** fmax ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (max_single_awkward, svfloat16x4_t, svfloat16_t,
+ z0_res = svmax_single_f16_x4 (z1, z0),
+ z0_res = svmax (z1, z0))
+
+/*
+** max_single_z0_z0_z15:
+** ...
+** fmax {z0\.h - z3\.h}, {z0\.h - z3\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (max_single_z0_z0_z15, svfloat16x4_t, svfloat16_t,
+ z0 = svmax_single_f16_x4 (z0, z15),
+ z0 = svmax (z0, z15))
+
+/*
+** max_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fmax {z24\.h - z27\.h}, {z24\.h - z27\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z16, svfloat16x4_t, svfloat16_t, z24,
+ svmax_single_f16_x4 (z24, z16),
+ svmax (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f32_x2.c
new file mode 100644
index 0000000..d53878a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f32_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** max_z0_z0_z4:
+** fmax {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (max_z0_z0_z4, svfloat32x2_t, z0,
+ svmax_f32_x2 (z0, z4),
+ svmax (z0, z4))
+
+/*
+** max_z0_z4_z0:
+** fmax {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (max_z0_z4_z0, svfloat32x2_t, z0,
+ svmax_f32_x2 (z4, z0),
+ svmax (z4, z0))
+
+/*
+** max_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmax [^\n]+, {z28\.s - z29\.s}
+** |
+** fmax [^\n]+, {z28\.s - z29\.s}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z4_z28, svfloat32x2_t, z0,
+ svmax_f32_x2 (z4, z28),
+ svmax (z4, z28))
+
+/*
+** max_z18_z18_z4:
+** fmax {z18\.s - z19\.s}, {z18\.s - z19\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (max_z18_z18_z4, svfloat32x2_t, z18,
+ svmax_f32_x2 (z18, z4),
+ svmax (z18, z4))
+
+/*
+** max_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** fmax [^\n]+, {z18\.s - z19\.s}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z23_z23_z18, svfloat32x2_t, z23,
+ svmax_f32_x2 (z23, z18),
+ svmax (z23, z18))
+
+/*
+** max_z28_z28_z0:
+** fmax {z28\.s - z29\.s}, {z28\.s - z29\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_XN (max_z28_z28_z0, svfloat32x2_t, z28,
+ svmax_f32_x2 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_z0_z0_z18:
+** fmax {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z18\.s - z19\.s}
+** ret
+*/
+TEST_XN (max_z0_z0_z18, svfloat32x2_t, z0,
+ svmax_f32_x2 (z0, z18),
+ svmax (z0, z18))
+
+/*
+** max_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmax {z4\.s - z5\.s}, {z4\.s - z5\.s}, [^\n]+
+** |
+** fmax {z4\.s - z5\.s}, {z4\.s - z5\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z4_z4_z23, svfloat32x2_t, z4,
+ svmax_f32_x2 (z4, z23),
+ svmax (z4, z23))
+
+/*
+** max_single_z24_z24_z0:
+** fmax {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z0, svfloat32x2_t, svfloat32_t, z24,
+ svmax_single_f32_x2 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmax {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** |
+** fmax {z28\.s - z29\.s}, {z28\.s - z29\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z28_z0, svfloat32x2_t, svfloat32_t, z24,
+ svmax_single_f32_x2 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** fmax {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z1_z0, svfloat32x2_t, svfloat32_t, z24,
+ svmax_single_f32_x2 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z1_z24_z0:
+** fmax {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z24_z0, svfloat32x2_t, svfloat32_t, z1,
+ svmax_single_f32_x2 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** fmax ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z1_z0, svfloat32x2_t, svfloat32_t, z1,
+ svmax_single_f32_x2 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z18_z18_z0:
+** fmax {z18\.s - z19\.s}, {z18\.s - z19\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (max_single_z18_z18_z0, svfloat32x2_t, svfloat32_t, z18,
+ svmax_single_f32_x2 (z18, z0),
+ svmax (z18, z0))
+
+/*
+** max_single_awkward:
+** ...
+** fmax ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (max_single_awkward, svfloat32x2_t, svfloat32_t,
+ z0_res = svmax_single_f32_x2 (z1, z0),
+ z0_res = svmax (z1, z0))
+
+/*
+** max_single_z0_z0_z15:
+** ...
+** fmax {z0\.s - z1\.s}, {z0\.s - z1\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (max_single_z0_z0_z15, svfloat32x2_t, svfloat32_t,
+ z0 = svmax_single_f32_x2 (z0, z15),
+ z0 = svmax (z0, z15))
+
+/*
+** max_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fmax {z24\.s - z25\.s}, {z24\.s - z25\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z16, svfloat32x2_t, svfloat32_t, z24,
+ svmax_single_f32_x2 (z24, z16),
+ svmax (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f32_x4.c
new file mode 100644
index 0000000..d0a6598
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f32_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** max_z0_z0_z4:
+** fmax {z0\.s - z3\.s}, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (max_z0_z0_z4, svfloat32x4_t, z0,
+ svmax_f32_x4 (z0, z4),
+ svmax (z0, z4))
+
+/*
+** max_z0_z4_z0:
+** fmax {z0\.s - z3\.s}, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (max_z0_z4_z0, svfloat32x4_t, z0,
+ svmax_f32_x4 (z4, z0),
+ svmax (z4, z0))
+
+/*
+** max_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax [^\n]+, {z28\.s - z31\.s}
+** |
+** fmax [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z4_z28, svfloat32x4_t, z0,
+ svmax_f32_x4 (z4, z28),
+ svmax (z4, z28))
+
+/*
+** max_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax [^\n]+, {z4\.s - z7\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z18_z18_z4, svfloat32x4_t, z18,
+ svmax_f32_x4 (z18, z4),
+ svmax (z18, z4))
+
+/*
+** max_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z23_z23_z28, svfloat32x4_t, z23,
+ svmax_f32_x4 (z23, z28),
+ svmax (z23, z28))
+
+/*
+** max_z28_z28_z0:
+** fmax {z28\.s - z31\.s}, {z28\.s - z31\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (max_z28_z28_z0, svfloat32x4_t, z28,
+ svmax_f32_x4 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax {z0\.s - z3\.s}, {z0\.s - z3\.s}, [^\n]+
+** |
+** fmax {z0\.s - z3\.s}, {z0\.s - z3\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z0_z18, svfloat32x4_t, z0,
+ svmax_f32_x4 (z0, z18),
+ svmax (z0, z18))
+
+/*
+** max_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax {z4\.s - z7\.s}, {z4\.s - z7\.s}, [^\n]+
+** |
+** fmax {z4\.s - z7\.s}, {z4\.s - z7\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z4_z4_z23, svfloat32x4_t, z4,
+ svmax_f32_x4 (z4, z23),
+ svmax (z4, z23))
+
+/*
+** max_single_z24_z24_z0:
+** fmax {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z0, svfloat32x4_t, svfloat32_t, z24,
+ svmax_single_f32_x4 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** |
+** fmax {z28\.s - z31\.s}, {z28\.s - z31\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z28_z0, svfloat32x4_t, svfloat32_t, z24,
+ svmax_single_f32_x4 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z1_z0, svfloat32x4_t, svfloat32_t, z24,
+ svmax_single_f32_x4 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z1_z24_z0:
+** fmax {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z24_z0, svfloat32x4_t, svfloat32_t, z1,
+ svmax_single_f32_x4 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z1_z0, svfloat32x4_t, svfloat32_t, z1,
+ svmax_single_f32_x4 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax [^\n]+, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z18_z18_z0, svfloat32x4_t, svfloat32_t, z18,
+ svmax_single_f32_x4 (z18, z0),
+ svmax (z18, z0))
+
+/*
+** max_single_awkward:
+** ...
+** fmax ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (max_single_awkward, svfloat32x4_t, svfloat32_t,
+ z0_res = svmax_single_f32_x4 (z1, z0),
+ z0_res = svmax (z1, z0))
+
+/*
+** max_single_z0_z0_z15:
+** ...
+** fmax {z0\.s - z3\.s}, {z0\.s - z3\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (max_single_z0_z0_z15, svfloat32x4_t, svfloat32_t,
+ z0 = svmax_single_f32_x4 (z0, z15),
+ z0 = svmax (z0, z15))
+
+/*
+** max_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fmax {z24\.s - z27\.s}, {z24\.s - z27\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z16, svfloat32x4_t, svfloat32_t, z24,
+ svmax_single_f32_x4 (z24, z16),
+ svmax (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f64_x2.c
new file mode 100644
index 0000000..a51ceac
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f64_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** max_z0_z0_z4:
+** fmax {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (max_z0_z0_z4, svfloat64x2_t, z0,
+ svmax_f64_x2 (z0, z4),
+ svmax (z0, z4))
+
+/*
+** max_z0_z4_z0:
+** fmax {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (max_z0_z4_z0, svfloat64x2_t, z0,
+ svmax_f64_x2 (z4, z0),
+ svmax (z4, z0))
+
+/*
+** max_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmax [^\n]+, {z28\.d - z29\.d}
+** |
+** fmax [^\n]+, {z28\.d - z29\.d}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z4_z28, svfloat64x2_t, z0,
+ svmax_f64_x2 (z4, z28),
+ svmax (z4, z28))
+
+/*
+** max_z18_z18_z4:
+** fmax {z18\.d - z19\.d}, {z18\.d - z19\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (max_z18_z18_z4, svfloat64x2_t, z18,
+ svmax_f64_x2 (z18, z4),
+ svmax (z18, z4))
+
+/*
+** max_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** fmax [^\n]+, {z18\.d - z19\.d}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z23_z23_z18, svfloat64x2_t, z23,
+ svmax_f64_x2 (z23, z18),
+ svmax (z23, z18))
+
+/*
+** max_z28_z28_z0:
+** fmax {z28\.d - z29\.d}, {z28\.d - z29\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_XN (max_z28_z28_z0, svfloat64x2_t, z28,
+ svmax_f64_x2 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_z0_z0_z18:
+** fmax {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z18\.d - z19\.d}
+** ret
+*/
+TEST_XN (max_z0_z0_z18, svfloat64x2_t, z0,
+ svmax_f64_x2 (z0, z18),
+ svmax (z0, z18))
+
+/*
+** max_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmax {z4\.d - z5\.d}, {z4\.d - z5\.d}, [^\n]+
+** |
+** fmax {z4\.d - z5\.d}, {z4\.d - z5\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z4_z4_z23, svfloat64x2_t, z4,
+ svmax_f64_x2 (z4, z23),
+ svmax (z4, z23))
+
+/*
+** max_single_z24_z24_z0:
+** fmax {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z0, svfloat64x2_t, svfloat64_t, z24,
+ svmax_single_f64_x2 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmax {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** |
+** fmax {z28\.d - z29\.d}, {z28\.d - z29\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z28_z0, svfloat64x2_t, svfloat64_t, z24,
+ svmax_single_f64_x2 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** fmax {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z1_z0, svfloat64x2_t, svfloat64_t, z24,
+ svmax_single_f64_x2 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z1_z24_z0:
+** fmax {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z24_z0, svfloat64x2_t, svfloat64_t, z1,
+ svmax_single_f64_x2 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** fmax ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z1_z0, svfloat64x2_t, svfloat64_t, z1,
+ svmax_single_f64_x2 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z18_z18_z0:
+** fmax {z18\.d - z19\.d}, {z18\.d - z19\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (max_single_z18_z18_z0, svfloat64x2_t, svfloat64_t, z18,
+ svmax_single_f64_x2 (z18, z0),
+ svmax (z18, z0))
+
+/*
+** max_single_awkward:
+** ...
+** fmax ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (max_single_awkward, svfloat64x2_t, svfloat64_t,
+ z0_res = svmax_single_f64_x2 (z1, z0),
+ z0_res = svmax (z1, z0))
+
+/*
+** max_single_z0_z0_z15:
+** ...
+** fmax {z0\.d - z1\.d}, {z0\.d - z1\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (max_single_z0_z0_z15, svfloat64x2_t, svfloat64_t,
+ z0 = svmax_single_f64_x2 (z0, z15),
+ z0 = svmax (z0, z15))
+
+/*
+** max_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fmax {z24\.d - z25\.d}, {z24\.d - z25\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z16, svfloat64x2_t, svfloat64_t, z24,
+ svmax_single_f64_x2 (z24, z16),
+ svmax (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f64_x4.c
new file mode 100644
index 0000000..712b14e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_f64_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** max_z0_z0_z4:
+** fmax {z0\.d - z3\.d}, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (max_z0_z0_z4, svfloat64x4_t, z0,
+ svmax_f64_x4 (z0, z4),
+ svmax (z0, z4))
+
+/*
+** max_z0_z4_z0:
+** fmax {z0\.d - z3\.d}, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (max_z0_z4_z0, svfloat64x4_t, z0,
+ svmax_f64_x4 (z4, z0),
+ svmax (z4, z0))
+
+/*
+** max_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax [^\n]+, {z28\.d - z31\.d}
+** |
+** fmax [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z4_z28, svfloat64x4_t, z0,
+ svmax_f64_x4 (z4, z28),
+ svmax (z4, z28))
+
+/*
+** max_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax [^\n]+, {z4\.d - z7\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z18_z18_z4, svfloat64x4_t, z18,
+ svmax_f64_x4 (z18, z4),
+ svmax (z18, z4))
+
+/*
+** max_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z23_z23_z28, svfloat64x4_t, z23,
+ svmax_f64_x4 (z23, z28),
+ svmax (z23, z28))
+
+/*
+** max_z28_z28_z0:
+** fmax {z28\.d - z31\.d}, {z28\.d - z31\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (max_z28_z28_z0, svfloat64x4_t, z28,
+ svmax_f64_x4 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax {z0\.d - z3\.d}, {z0\.d - z3\.d}, [^\n]+
+** |
+** fmax {z0\.d - z3\.d}, {z0\.d - z3\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z0_z18, svfloat64x4_t, z0,
+ svmax_f64_x4 (z0, z18),
+ svmax (z0, z18))
+
+/*
+** max_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax {z4\.d - z7\.d}, {z4\.d - z7\.d}, [^\n]+
+** |
+** fmax {z4\.d - z7\.d}, {z4\.d - z7\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z4_z4_z23, svfloat64x4_t, z4,
+ svmax_f64_x4 (z4, z23),
+ svmax (z4, z23))
+
+/*
+** max_single_z24_z24_z0:
+** fmax {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z0, svfloat64x4_t, svfloat64_t, z24,
+ svmax_single_f64_x4 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** |
+** fmax {z28\.d - z31\.d}, {z28\.d - z31\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z28_z0, svfloat64x4_t, svfloat64_t, z24,
+ svmax_single_f64_x4 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z1_z0, svfloat64x4_t, svfloat64_t, z24,
+ svmax_single_f64_x4 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z1_z24_z0:
+** fmax {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z24_z0, svfloat64x4_t, svfloat64_t, z1,
+ svmax_single_f64_x4 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z1_z0, svfloat64x4_t, svfloat64_t, z1,
+ svmax_single_f64_x4 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmax [^\n]+, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z18_z18_z0, svfloat64x4_t, svfloat64_t, z18,
+ svmax_single_f64_x4 (z18, z0),
+ svmax (z18, z0))
+
+/*
+** max_single_awkward:
+** ...
+** fmax ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (max_single_awkward, svfloat64x4_t, svfloat64_t,
+ z0_res = svmax_single_f64_x4 (z1, z0),
+ z0_res = svmax (z1, z0))
+
+/*
+** max_single_z0_z0_z15:
+** ...
+** fmax {z0\.d - z3\.d}, {z0\.d - z3\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (max_single_z0_z0_z15, svfloat64x4_t, svfloat64_t,
+ z0 = svmax_single_f64_x4 (z0, z15),
+ z0 = svmax (z0, z15))
+
+/*
+** max_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fmax {z24\.d - z27\.d}, {z24\.d - z27\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z16, svfloat64x4_t, svfloat64_t, z24,
+ svmax_single_f64_x4 (z24, z16),
+ svmax (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s16_x2.c
new file mode 100644
index 0000000..34b4cdd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s16_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** max_z0_z0_z4:
+** smax {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (max_z0_z0_z4, svint16x2_t, z0,
+ svmax_s16_x2 (z0, z4),
+ svmax (z0, z4))
+
+/*
+** max_z0_z4_z0:
+** smax {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (max_z0_z4_z0, svint16x2_t, z0,
+ svmax_s16_x2 (z4, z0),
+ svmax (z4, z0))
+
+/*
+** max_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, {z28\.h - z29\.h}
+** |
+** smax [^\n]+, {z28\.h - z29\.h}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z4_z28, svint16x2_t, z0,
+ svmax_s16_x2 (z4, z28),
+ svmax (z4, z28))
+
+/*
+** max_z18_z18_z4:
+** smax {z18\.h - z19\.h}, {z18\.h - z19\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (max_z18_z18_z4, svint16x2_t, z18,
+ svmax_s16_x2 (z18, z4),
+ svmax (z18, z4))
+
+/*
+** max_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, {z18\.h - z19\.h}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z23_z23_z18, svint16x2_t, z23,
+ svmax_s16_x2 (z23, z18),
+ svmax (z23, z18))
+
+/*
+** max_z28_z28_z0:
+** smax {z28\.h - z29\.h}, {z28\.h - z29\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_XN (max_z28_z28_z0, svint16x2_t, z28,
+ svmax_s16_x2 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_z0_z0_z18:
+** smax {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_XN (max_z0_z0_z18, svint16x2_t, z0,
+ svmax_s16_x2 (z0, z18),
+ svmax (z0, z18))
+
+/*
+** max_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smax {z4\.h - z5\.h}, {z4\.h - z5\.h}, [^\n]+
+** |
+** smax {z4\.h - z5\.h}, {z4\.h - z5\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z4_z4_z23, svint16x2_t, z4,
+ svmax_s16_x2 (z4, z23),
+ svmax (z4, z23))
+
+/*
+** max_single_z24_z24_z0:
+** smax {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z0, svint16x2_t, svint16_t, z24,
+ svmax_single_s16_x2 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smax {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** |
+** smax {z28\.h - z29\.h}, {z28\.h - z29\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z28_z0, svint16x2_t, svint16_t, z24,
+ svmax_single_s16_x2 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** smax {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z1_z0, svint16x2_t, svint16_t, z24,
+ svmax_single_s16_x2 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z1_z24_z0:
+** smax {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z24_z0, svint16x2_t, svint16_t, z1,
+ svmax_single_s16_x2 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** smax ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z1_z0, svint16x2_t, svint16_t, z1,
+ svmax_single_s16_x2 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z18_z18_z0:
+** smax {z18\.h - z19\.h}, {z18\.h - z19\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (max_single_z18_z18_z0, svint16x2_t, svint16_t, z18,
+ svmax_single_s16_x2 (z18, z0),
+ svmax (z18, z0))
+
+/*
+** max_single_awkward:
+** ...
+** smax ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (max_single_awkward, svint16x2_t, svint16_t,
+ z0_res = svmax_single_s16_x2 (z1, z0),
+ z0_res = svmax (z1, z0))
+
+/*
+** max_single_z0_z0_z15:
+** ...
+** smax {z0\.h - z1\.h}, {z0\.h - z1\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (max_single_z0_z0_z15, svint16x2_t, svint16_t,
+ z0 = svmax_single_s16_x2 (z0, z15),
+ z0 = svmax (z0, z15))
+
+/*
+** max_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** smax {z24\.h - z25\.h}, {z24\.h - z25\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z16, svint16x2_t, svint16_t, z24,
+ svmax_single_s16_x2 (z24, z16),
+ svmax (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s16_x4.c
new file mode 100644
index 0000000..3b44ec8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s16_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** max_z0_z0_z4:
+** smax {z0\.h - z3\.h}, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (max_z0_z0_z4, svint16x4_t, z0,
+ svmax_s16_x4 (z0, z4),
+ svmax (z0, z4))
+
+/*
+** max_z0_z4_z0:
+** smax {z0\.h - z3\.h}, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (max_z0_z4_z0, svint16x4_t, z0,
+ svmax_s16_x4 (z4, z0),
+ svmax (z4, z0))
+
+/*
+** max_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, {z28\.h - z31\.h}
+** |
+** smax [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z4_z28, svint16x4_t, z0,
+ svmax_s16_x4 (z4, z28),
+ svmax (z4, z28))
+
+/*
+** max_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, {z4\.h - z7\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z18_z18_z4, svint16x4_t, z18,
+ svmax_s16_x4 (z18, z4),
+ svmax (z18, z4))
+
+/*
+** max_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z23_z23_z28, svint16x4_t, z23,
+ svmax_s16_x4 (z23, z28),
+ svmax (z23, z28))
+
+/*
+** max_z28_z28_z0:
+** smax {z28\.h - z31\.h}, {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (max_z28_z28_z0, svint16x4_t, z28,
+ svmax_s16_x4 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax {z0\.h - z3\.h}, {z0\.h - z3\.h}, [^\n]+
+** |
+** smax {z0\.h - z3\.h}, {z0\.h - z3\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z0_z18, svint16x4_t, z0,
+ svmax_s16_x4 (z0, z18),
+ svmax (z0, z18))
+
+/*
+** max_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax {z4\.h - z7\.h}, {z4\.h - z7\.h}, [^\n]+
+** |
+** smax {z4\.h - z7\.h}, {z4\.h - z7\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z4_z4_z23, svint16x4_t, z4,
+ svmax_s16_x4 (z4, z23),
+ svmax (z4, z23))
+
+/*
+** max_single_z24_z24_z0:
+** smax {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z0, svint16x4_t, svint16_t, z24,
+ svmax_single_s16_x4 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** |
+** smax {z28\.h - z31\.h}, {z28\.h - z31\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z28_z0, svint16x4_t, svint16_t, z24,
+ svmax_single_s16_x4 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z1_z0, svint16x4_t, svint16_t, z24,
+ svmax_single_s16_x4 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z1_z24_z0:
+** smax {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z24_z0, svint16x4_t, svint16_t, z1,
+ svmax_single_s16_x4 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z1_z0, svint16x4_t, svint16_t, z1,
+ svmax_single_s16_x4 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z18_z18_z0, svint16x4_t, svint16_t, z18,
+ svmax_single_s16_x4 (z18, z0),
+ svmax (z18, z0))
+
+/*
+** max_single_awkward:
+** ...
+** smax ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (max_single_awkward, svint16x4_t, svint16_t,
+ z0_res = svmax_single_s16_x4 (z1, z0),
+ z0_res = svmax (z1, z0))
+
+/*
+** max_single_z0_z0_z15:
+** ...
+** smax {z0\.h - z3\.h}, {z0\.h - z3\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (max_single_z0_z0_z15, svint16x4_t, svint16_t,
+ z0 = svmax_single_s16_x4 (z0, z15),
+ z0 = svmax (z0, z15))
+
+/*
+** max_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** smax {z24\.h - z27\.h}, {z24\.h - z27\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z16, svint16x4_t, svint16_t, z24,
+ svmax_single_s16_x4 (z24, z16),
+ svmax (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s32_x2.c
new file mode 100644
index 0000000..bb9fc22
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s32_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** max_z0_z0_z4:
+** smax {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (max_z0_z0_z4, svint32x2_t, z0,
+ svmax_s32_x2 (z0, z4),
+ svmax (z0, z4))
+
+/*
+** max_z0_z4_z0:
+** smax {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (max_z0_z4_z0, svint32x2_t, z0,
+ svmax_s32_x2 (z4, z0),
+ svmax (z4, z0))
+
+/*
+** max_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, {z28\.s - z29\.s}
+** |
+** smax [^\n]+, {z28\.s - z29\.s}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z4_z28, svint32x2_t, z0,
+ svmax_s32_x2 (z4, z28),
+ svmax (z4, z28))
+
+/*
+** max_z18_z18_z4:
+** smax {z18\.s - z19\.s}, {z18\.s - z19\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (max_z18_z18_z4, svint32x2_t, z18,
+ svmax_s32_x2 (z18, z4),
+ svmax (z18, z4))
+
+/*
+** max_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, {z18\.s - z19\.s}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z23_z23_z18, svint32x2_t, z23,
+ svmax_s32_x2 (z23, z18),
+ svmax (z23, z18))
+
+/*
+** max_z28_z28_z0:
+** smax {z28\.s - z29\.s}, {z28\.s - z29\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_XN (max_z28_z28_z0, svint32x2_t, z28,
+ svmax_s32_x2 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_z0_z0_z18:
+** smax {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z18\.s - z19\.s}
+** ret
+*/
+TEST_XN (max_z0_z0_z18, svint32x2_t, z0,
+ svmax_s32_x2 (z0, z18),
+ svmax (z0, z18))
+
+/*
+** max_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smax {z4\.s - z5\.s}, {z4\.s - z5\.s}, [^\n]+
+** |
+** smax {z4\.s - z5\.s}, {z4\.s - z5\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z4_z4_z23, svint32x2_t, z4,
+ svmax_s32_x2 (z4, z23),
+ svmax (z4, z23))
+
+/*
+** max_single_z24_z24_z0:
+** smax {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z0, svint32x2_t, svint32_t, z24,
+ svmax_single_s32_x2 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smax {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** |
+** smax {z28\.s - z29\.s}, {z28\.s - z29\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z28_z0, svint32x2_t, svint32_t, z24,
+ svmax_single_s32_x2 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** smax {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z1_z0, svint32x2_t, svint32_t, z24,
+ svmax_single_s32_x2 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z1_z24_z0:
+** smax {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z24_z0, svint32x2_t, svint32_t, z1,
+ svmax_single_s32_x2 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** smax ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z1_z0, svint32x2_t, svint32_t, z1,
+ svmax_single_s32_x2 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z18_z18_z0:
+** smax {z18\.s - z19\.s}, {z18\.s - z19\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (max_single_z18_z18_z0, svint32x2_t, svint32_t, z18,
+ svmax_single_s32_x2 (z18, z0),
+ svmax (z18, z0))
+
+/*
+** max_single_awkward:
+** ...
+** smax ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (max_single_awkward, svint32x2_t, svint32_t,
+ z0_res = svmax_single_s32_x2 (z1, z0),
+ z0_res = svmax (z1, z0))
+
+/*
+** max_single_z0_z0_z15:
+** ...
+** smax {z0\.s - z1\.s}, {z0\.s - z1\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (max_single_z0_z0_z15, svint32x2_t, svint32_t,
+ z0 = svmax_single_s32_x2 (z0, z15),
+ z0 = svmax (z0, z15))
+
+/*
+** max_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** smax {z24\.s - z25\.s}, {z24\.s - z25\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z16, svint32x2_t, svint32_t, z24,
+ svmax_single_s32_x2 (z24, z16),
+ svmax (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s32_x4.c
new file mode 100644
index 0000000..f3db66a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s32_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** max_z0_z0_z4:
+** smax {z0\.s - z3\.s}, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (max_z0_z0_z4, svint32x4_t, z0,
+ svmax_s32_x4 (z0, z4),
+ svmax (z0, z4))
+
+/*
+** max_z0_z4_z0:
+** smax {z0\.s - z3\.s}, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (max_z0_z4_z0, svint32x4_t, z0,
+ svmax_s32_x4 (z4, z0),
+ svmax (z4, z0))
+
+/*
+** max_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, {z28\.s - z31\.s}
+** |
+** smax [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z4_z28, svint32x4_t, z0,
+ svmax_s32_x4 (z4, z28),
+ svmax (z4, z28))
+
+/*
+** max_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, {z4\.s - z7\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z18_z18_z4, svint32x4_t, z18,
+ svmax_s32_x4 (z18, z4),
+ svmax (z18, z4))
+
+/*
+** max_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z23_z23_z28, svint32x4_t, z23,
+ svmax_s32_x4 (z23, z28),
+ svmax (z23, z28))
+
+/*
+** max_z28_z28_z0:
+** smax {z28\.s - z31\.s}, {z28\.s - z31\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (max_z28_z28_z0, svint32x4_t, z28,
+ svmax_s32_x4 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax {z0\.s - z3\.s}, {z0\.s - z3\.s}, [^\n]+
+** |
+** smax {z0\.s - z3\.s}, {z0\.s - z3\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z0_z18, svint32x4_t, z0,
+ svmax_s32_x4 (z0, z18),
+ svmax (z0, z18))
+
+/*
+** max_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax {z4\.s - z7\.s}, {z4\.s - z7\.s}, [^\n]+
+** |
+** smax {z4\.s - z7\.s}, {z4\.s - z7\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z4_z4_z23, svint32x4_t, z4,
+ svmax_s32_x4 (z4, z23),
+ svmax (z4, z23))
+
+/*
+** max_single_z24_z24_z0:
+** smax {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z0, svint32x4_t, svint32_t, z24,
+ svmax_single_s32_x4 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** |
+** smax {z28\.s - z31\.s}, {z28\.s - z31\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z28_z0, svint32x4_t, svint32_t, z24,
+ svmax_single_s32_x4 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z1_z0, svint32x4_t, svint32_t, z24,
+ svmax_single_s32_x4 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z1_z24_z0:
+** smax {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z24_z0, svint32x4_t, svint32_t, z1,
+ svmax_single_s32_x4 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z1_z0, svint32x4_t, svint32_t, z1,
+ svmax_single_s32_x4 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z18_z18_z0, svint32x4_t, svint32_t, z18,
+ svmax_single_s32_x4 (z18, z0),
+ svmax (z18, z0))
+
+/*
+** max_single_awkward:
+** ...
+** smax ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (max_single_awkward, svint32x4_t, svint32_t,
+ z0_res = svmax_single_s32_x4 (z1, z0),
+ z0_res = svmax (z1, z0))
+
+/*
+** max_single_z0_z0_z15:
+** ...
+** smax {z0\.s - z3\.s}, {z0\.s - z3\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (max_single_z0_z0_z15, svint32x4_t, svint32_t,
+ z0 = svmax_single_s32_x4 (z0, z15),
+ z0 = svmax (z0, z15))
+
+/*
+** max_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** smax {z24\.s - z27\.s}, {z24\.s - z27\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z16, svint32x4_t, svint32_t, z24,
+ svmax_single_s32_x4 (z24, z16),
+ svmax (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s64_x2.c
new file mode 100644
index 0000000..384dd6e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s64_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** max_z0_z0_z4:
+** smax {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (max_z0_z0_z4, svint64x2_t, z0,
+ svmax_s64_x2 (z0, z4),
+ svmax (z0, z4))
+
+/*
+** max_z0_z4_z0:
+** smax {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (max_z0_z4_z0, svint64x2_t, z0,
+ svmax_s64_x2 (z4, z0),
+ svmax (z4, z0))
+
+/*
+** max_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, {z28\.d - z29\.d}
+** |
+** smax [^\n]+, {z28\.d - z29\.d}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z4_z28, svint64x2_t, z0,
+ svmax_s64_x2 (z4, z28),
+ svmax (z4, z28))
+
+/*
+** max_z18_z18_z4:
+** smax {z18\.d - z19\.d}, {z18\.d - z19\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (max_z18_z18_z4, svint64x2_t, z18,
+ svmax_s64_x2 (z18, z4),
+ svmax (z18, z4))
+
+/*
+** max_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, {z18\.d - z19\.d}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z23_z23_z18, svint64x2_t, z23,
+ svmax_s64_x2 (z23, z18),
+ svmax (z23, z18))
+
+/*
+** max_z28_z28_z0:
+** smax {z28\.d - z29\.d}, {z28\.d - z29\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_XN (max_z28_z28_z0, svint64x2_t, z28,
+ svmax_s64_x2 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_z0_z0_z18:
+** smax {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z18\.d - z19\.d}
+** ret
+*/
+TEST_XN (max_z0_z0_z18, svint64x2_t, z0,
+ svmax_s64_x2 (z0, z18),
+ svmax (z0, z18))
+
+/*
+** max_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smax {z4\.d - z5\.d}, {z4\.d - z5\.d}, [^\n]+
+** |
+** smax {z4\.d - z5\.d}, {z4\.d - z5\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z4_z4_z23, svint64x2_t, z4,
+ svmax_s64_x2 (z4, z23),
+ svmax (z4, z23))
+
+/*
+** max_single_z24_z24_z0:
+** smax {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z0, svint64x2_t, svint64_t, z24,
+ svmax_single_s64_x2 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smax {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** |
+** smax {z28\.d - z29\.d}, {z28\.d - z29\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z28_z0, svint64x2_t, svint64_t, z24,
+ svmax_single_s64_x2 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** smax {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z1_z0, svint64x2_t, svint64_t, z24,
+ svmax_single_s64_x2 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z1_z24_z0:
+** smax {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z24_z0, svint64x2_t, svint64_t, z1,
+ svmax_single_s64_x2 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** smax ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z1_z0, svint64x2_t, svint64_t, z1,
+ svmax_single_s64_x2 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z18_z18_z0:
+** smax {z18\.d - z19\.d}, {z18\.d - z19\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (max_single_z18_z18_z0, svint64x2_t, svint64_t, z18,
+ svmax_single_s64_x2 (z18, z0),
+ svmax (z18, z0))
+
+/*
+** max_single_awkward:
+** ...
+** smax ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (max_single_awkward, svint64x2_t, svint64_t,
+ z0_res = svmax_single_s64_x2 (z1, z0),
+ z0_res = svmax (z1, z0))
+
+/*
+** max_single_z0_z0_z15:
+** ...
+** smax {z0\.d - z1\.d}, {z0\.d - z1\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (max_single_z0_z0_z15, svint64x2_t, svint64_t,
+ z0 = svmax_single_s64_x2 (z0, z15),
+ z0 = svmax (z0, z15))
+
+/*
+** max_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** smax {z24\.d - z25\.d}, {z24\.d - z25\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z16, svint64x2_t, svint64_t, z24,
+ svmax_single_s64_x2 (z24, z16),
+ svmax (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s64_x4.c
new file mode 100644
index 0000000..666c79b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s64_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** max_z0_z0_z4:
+** smax {z0\.d - z3\.d}, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (max_z0_z0_z4, svint64x4_t, z0,
+ svmax_s64_x4 (z0, z4),
+ svmax (z0, z4))
+
+/*
+** max_z0_z4_z0:
+** smax {z0\.d - z3\.d}, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (max_z0_z4_z0, svint64x4_t, z0,
+ svmax_s64_x4 (z4, z0),
+ svmax (z4, z0))
+
+/*
+** max_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, {z28\.d - z31\.d}
+** |
+** smax [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z4_z28, svint64x4_t, z0,
+ svmax_s64_x4 (z4, z28),
+ svmax (z4, z28))
+
+/*
+** max_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, {z4\.d - z7\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z18_z18_z4, svint64x4_t, z18,
+ svmax_s64_x4 (z18, z4),
+ svmax (z18, z4))
+
+/*
+** max_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z23_z23_z28, svint64x4_t, z23,
+ svmax_s64_x4 (z23, z28),
+ svmax (z23, z28))
+
+/*
+** max_z28_z28_z0:
+** smax {z28\.d - z31\.d}, {z28\.d - z31\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (max_z28_z28_z0, svint64x4_t, z28,
+ svmax_s64_x4 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax {z0\.d - z3\.d}, {z0\.d - z3\.d}, [^\n]+
+** |
+** smax {z0\.d - z3\.d}, {z0\.d - z3\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z0_z18, svint64x4_t, z0,
+ svmax_s64_x4 (z0, z18),
+ svmax (z0, z18))
+
+/*
+** max_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax {z4\.d - z7\.d}, {z4\.d - z7\.d}, [^\n]+
+** |
+** smax {z4\.d - z7\.d}, {z4\.d - z7\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z4_z4_z23, svint64x4_t, z4,
+ svmax_s64_x4 (z4, z23),
+ svmax (z4, z23))
+
+/*
+** max_single_z24_z24_z0:
+** smax {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z0, svint64x4_t, svint64_t, z24,
+ svmax_single_s64_x4 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** |
+** smax {z28\.d - z31\.d}, {z28\.d - z31\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z28_z0, svint64x4_t, svint64_t, z24,
+ svmax_single_s64_x4 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z1_z0, svint64x4_t, svint64_t, z24,
+ svmax_single_s64_x4 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z1_z24_z0:
+** smax {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z24_z0, svint64x4_t, svint64_t, z1,
+ svmax_single_s64_x4 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z1_z0, svint64x4_t, svint64_t, z1,
+ svmax_single_s64_x4 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z18_z18_z0, svint64x4_t, svint64_t, z18,
+ svmax_single_s64_x4 (z18, z0),
+ svmax (z18, z0))
+
+/*
+** max_single_awkward:
+** ...
+** smax ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (max_single_awkward, svint64x4_t, svint64_t,
+ z0_res = svmax_single_s64_x4 (z1, z0),
+ z0_res = svmax (z1, z0))
+
+/*
+** max_single_z0_z0_z15:
+** ...
+** smax {z0\.d - z3\.d}, {z0\.d - z3\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (max_single_z0_z0_z15, svint64x4_t, svint64_t,
+ z0 = svmax_single_s64_x4 (z0, z15),
+ z0 = svmax (z0, z15))
+
+/*
+** max_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** smax {z24\.d - z27\.d}, {z24\.d - z27\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z16, svint64x4_t, svint64_t, z24,
+ svmax_single_s64_x4 (z24, z16),
+ svmax (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s8_x2.c
new file mode 100644
index 0000000..76144b8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s8_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** max_z0_z0_z4:
+** smax {z0\.b - z1\.b}, {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (max_z0_z0_z4, svint8x2_t, z0,
+ svmax_s8_x2 (z0, z4),
+ svmax (z0, z4))
+
+/*
+** max_z0_z4_z0:
+** smax {z0\.b - z1\.b}, {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (max_z0_z4_z0, svint8x2_t, z0,
+ svmax_s8_x2 (z4, z0),
+ svmax (z4, z0))
+
+/*
+** max_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, {z28\.b - z29\.b}
+** |
+** smax [^\n]+, {z28\.b - z29\.b}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z4_z28, svint8x2_t, z0,
+ svmax_s8_x2 (z4, z28),
+ svmax (z4, z28))
+
+/*
+** max_z18_z18_z4:
+** smax {z18\.b - z19\.b}, {z18\.b - z19\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (max_z18_z18_z4, svint8x2_t, z18,
+ svmax_s8_x2 (z18, z4),
+ svmax (z18, z4))
+
+/*
+** max_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, {z18\.b - z19\.b}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z23_z23_z18, svint8x2_t, z23,
+ svmax_s8_x2 (z23, z18),
+ svmax (z23, z18))
+
+/*
+** max_z28_z28_z0:
+** smax {z28\.b - z29\.b}, {z28\.b - z29\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_XN (max_z28_z28_z0, svint8x2_t, z28,
+ svmax_s8_x2 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_z0_z0_z18:
+** smax {z0\.b - z1\.b}, {z0\.b - z1\.b}, {z18\.b - z19\.b}
+** ret
+*/
+TEST_XN (max_z0_z0_z18, svint8x2_t, z0,
+ svmax_s8_x2 (z0, z18),
+ svmax (z0, z18))
+
+/*
+** max_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smax {z4\.b - z5\.b}, {z4\.b - z5\.b}, [^\n]+
+** |
+** smax {z4\.b - z5\.b}, {z4\.b - z5\.b}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z4_z4_z23, svint8x2_t, z4,
+ svmax_s8_x2 (z4, z23),
+ svmax (z4, z23))
+
+/*
+** max_single_z24_z24_z0:
+** smax {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z0, svint8x2_t, svint8_t, z24,
+ svmax_single_s8_x2 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smax {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** |
+** smax {z28\.b - z29\.b}, {z28\.b - z29\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z28_z0, svint8x2_t, svint8_t, z24,
+ svmax_single_s8_x2 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** smax {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z1_z0, svint8x2_t, svint8_t, z24,
+ svmax_single_s8_x2 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z1_z24_z0:
+** smax {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z24_z0, svint8x2_t, svint8_t, z1,
+ svmax_single_s8_x2 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** smax ({z[0-9]+\.b - z[0-9]+\.b}), \1, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z1_z0, svint8x2_t, svint8_t, z1,
+ svmax_single_s8_x2 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z18_z18_z0:
+** smax {z18\.b - z19\.b}, {z18\.b - z19\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (max_single_z18_z18_z0, svint8x2_t, svint8_t, z18,
+ svmax_single_s8_x2 (z18, z0),
+ svmax (z18, z0))
+
+/*
+** max_single_awkward:
+** ...
+** smax ({z[0-9]+\.b - z[0-9]+\.b}), \1, z[0-9]+\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (max_single_awkward, svint8x2_t, svint8_t,
+ z0_res = svmax_single_s8_x2 (z1, z0),
+ z0_res = svmax (z1, z0))
+
+/*
+** max_single_z0_z0_z15:
+** ...
+** smax {z0\.b - z1\.b}, {z0\.b - z1\.b}, z15\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (max_single_z0_z0_z15, svint8x2_t, svint8_t,
+ z0 = svmax_single_s8_x2 (z0, z15),
+ z0 = svmax (z0, z15))
+
+/*
+** max_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** smax {z24\.b - z25\.b}, {z24\.b - z25\.b}, \1\.b
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z16, svint8x2_t, svint8_t, z24,
+ svmax_single_s8_x2 (z24, z16),
+ svmax (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s8_x4.c
new file mode 100644
index 0000000..f15f3a7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_s8_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** max_z0_z0_z4:
+** smax {z0\.b - z3\.b}, {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_XN (max_z0_z0_z4, svint8x4_t, z0,
+ svmax_s8_x4 (z0, z4),
+ svmax (z0, z4))
+
+/*
+** max_z0_z4_z0:
+** smax {z0\.b - z3\.b}, {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_XN (max_z0_z4_z0, svint8x4_t, z0,
+ svmax_s8_x4 (z4, z0),
+ svmax (z4, z0))
+
+/*
+** max_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, {z28\.b - z31\.b}
+** |
+** smax [^\n]+, {z28\.b - z31\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z4_z28, svint8x4_t, z0,
+ svmax_s8_x4 (z4, z28),
+ svmax (z4, z28))
+
+/*
+** max_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, {z4\.b - z7\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z18_z18_z4, svint8x4_t, z18,
+ svmax_s8_x4 (z18, z4),
+ svmax (z18, z4))
+
+/*
+** max_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, {z28\.b - z31\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z23_z23_z28, svint8x4_t, z23,
+ svmax_s8_x4 (z23, z28),
+ svmax (z23, z28))
+
+/*
+** max_z28_z28_z0:
+** smax {z28\.b - z31\.b}, {z28\.b - z31\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_XN (max_z28_z28_z0, svint8x4_t, z28,
+ svmax_s8_x4 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax {z0\.b - z3\.b}, {z0\.b - z3\.b}, [^\n]+
+** |
+** smax {z0\.b - z3\.b}, {z0\.b - z3\.b}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z0_z18, svint8x4_t, z0,
+ svmax_s8_x4 (z0, z18),
+ svmax (z0, z18))
+
+/*
+** max_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax {z4\.b - z7\.b}, {z4\.b - z7\.b}, [^\n]+
+** |
+** smax {z4\.b - z7\.b}, {z4\.b - z7\.b}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z4_z4_z23, svint8x4_t, z4,
+ svmax_s8_x4 (z4, z23),
+ svmax (z4, z23))
+
+/*
+** max_single_z24_z24_z0:
+** smax {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z0, svint8x4_t, svint8_t, z24,
+ svmax_single_s8_x4 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** |
+** smax {z28\.b - z31\.b}, {z28\.b - z31\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z28_z0, svint8x4_t, svint8_t, z24,
+ svmax_single_s8_x4 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z1_z0, svint8x4_t, svint8_t, z24,
+ svmax_single_s8_x4 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z1_z24_z0:
+** smax {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z24_z0, svint8x4_t, svint8_t, z1,
+ svmax_single_s8_x4 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax ({z[0-9]+\.b - z[0-9]+\.b}), \1, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z1_z0, svint8x4_t, svint8_t, z1,
+ svmax_single_s8_x4 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smax [^\n]+, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z18_z18_z0, svint8x4_t, svint8_t, z18,
+ svmax_single_s8_x4 (z18, z0),
+ svmax (z18, z0))
+
+/*
+** max_single_awkward:
+** ...
+** smax ({z[0-9]+\.b - z[0-9]+\.b}), \1, z[0-9]+\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (max_single_awkward, svint8x4_t, svint8_t,
+ z0_res = svmax_single_s8_x4 (z1, z0),
+ z0_res = svmax (z1, z0))
+
+/*
+** max_single_z0_z0_z15:
+** ...
+** smax {z0\.b - z3\.b}, {z0\.b - z3\.b}, z15\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (max_single_z0_z0_z15, svint8x4_t, svint8_t,
+ z0 = svmax_single_s8_x4 (z0, z15),
+ z0 = svmax (z0, z15))
+
+/*
+** max_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** smax {z24\.b - z27\.b}, {z24\.b - z27\.b}, \1\.b
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z16, svint8x4_t, svint8_t, z24,
+ svmax_single_s8_x4 (z24, z16),
+ svmax (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u16_x2.c
new file mode 100644
index 0000000..cf9d862
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u16_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** max_z0_z0_z4:
+** umax {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (max_z0_z0_z4, svuint16x2_t, z0,
+ svmax_u16_x2 (z0, z4),
+ svmax (z0, z4))
+
+/*
+** max_z0_z4_z0:
+** umax {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (max_z0_z4_z0, svuint16x2_t, z0,
+ svmax_u16_x2 (z4, z0),
+ svmax (z4, z0))
+
+/*
+** max_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, {z28\.h - z29\.h}
+** |
+** umax [^\n]+, {z28\.h - z29\.h}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z4_z28, svuint16x2_t, z0,
+ svmax_u16_x2 (z4, z28),
+ svmax (z4, z28))
+
+/*
+** max_z18_z18_z4:
+** umax {z18\.h - z19\.h}, {z18\.h - z19\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (max_z18_z18_z4, svuint16x2_t, z18,
+ svmax_u16_x2 (z18, z4),
+ svmax (z18, z4))
+
+/*
+** max_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, {z18\.h - z19\.h}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z23_z23_z18, svuint16x2_t, z23,
+ svmax_u16_x2 (z23, z18),
+ svmax (z23, z18))
+
+/*
+** max_z28_z28_z0:
+** umax {z28\.h - z29\.h}, {z28\.h - z29\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_XN (max_z28_z28_z0, svuint16x2_t, z28,
+ svmax_u16_x2 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_z0_z0_z18:
+** umax {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_XN (max_z0_z0_z18, svuint16x2_t, z0,
+ svmax_u16_x2 (z0, z18),
+ svmax (z0, z18))
+
+/*
+** max_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umax {z4\.h - z5\.h}, {z4\.h - z5\.h}, [^\n]+
+** |
+** umax {z4\.h - z5\.h}, {z4\.h - z5\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z4_z4_z23, svuint16x2_t, z4,
+ svmax_u16_x2 (z4, z23),
+ svmax (z4, z23))
+
+/*
+** max_single_z24_z24_z0:
+** umax {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z0, svuint16x2_t, svuint16_t, z24,
+ svmax_single_u16_x2 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umax {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** |
+** umax {z28\.h - z29\.h}, {z28\.h - z29\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z28_z0, svuint16x2_t, svuint16_t, z24,
+ svmax_single_u16_x2 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** umax {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z1_z0, svuint16x2_t, svuint16_t, z24,
+ svmax_single_u16_x2 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z1_z24_z0:
+** umax {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z24_z0, svuint16x2_t, svuint16_t, z1,
+ svmax_single_u16_x2 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** umax ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z1_z0, svuint16x2_t, svuint16_t, z1,
+ svmax_single_u16_x2 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z18_z18_z0:
+** umax {z18\.h - z19\.h}, {z18\.h - z19\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (max_single_z18_z18_z0, svuint16x2_t, svuint16_t, z18,
+ svmax_single_u16_x2 (z18, z0),
+ svmax (z18, z0))
+
+/*
+** max_single_awkward:
+** ...
+** umax ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (max_single_awkward, svuint16x2_t, svuint16_t,
+ z0_res = svmax_single_u16_x2 (z1, z0),
+ z0_res = svmax (z1, z0))
+
+/*
+** max_single_z0_z0_z15:
+** ...
+** umax {z0\.h - z1\.h}, {z0\.h - z1\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (max_single_z0_z0_z15, svuint16x2_t, svuint16_t,
+ z0 = svmax_single_u16_x2 (z0, z15),
+ z0 = svmax (z0, z15))
+
+/*
+** max_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** umax {z24\.h - z25\.h}, {z24\.h - z25\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z16, svuint16x2_t, svuint16_t, z24,
+ svmax_single_u16_x2 (z24, z16),
+ svmax (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u16_x4.c
new file mode 100644
index 0000000..b23738b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u16_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** max_z0_z0_z4:
+** umax {z0\.h - z3\.h}, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (max_z0_z0_z4, svuint16x4_t, z0,
+ svmax_u16_x4 (z0, z4),
+ svmax (z0, z4))
+
+/*
+** max_z0_z4_z0:
+** umax {z0\.h - z3\.h}, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (max_z0_z4_z0, svuint16x4_t, z0,
+ svmax_u16_x4 (z4, z0),
+ svmax (z4, z0))
+
+/*
+** max_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, {z28\.h - z31\.h}
+** |
+** umax [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z4_z28, svuint16x4_t, z0,
+ svmax_u16_x4 (z4, z28),
+ svmax (z4, z28))
+
+/*
+** max_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, {z4\.h - z7\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z18_z18_z4, svuint16x4_t, z18,
+ svmax_u16_x4 (z18, z4),
+ svmax (z18, z4))
+
+/*
+** max_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z23_z23_z28, svuint16x4_t, z23,
+ svmax_u16_x4 (z23, z28),
+ svmax (z23, z28))
+
+/*
+** max_z28_z28_z0:
+** umax {z28\.h - z31\.h}, {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (max_z28_z28_z0, svuint16x4_t, z28,
+ svmax_u16_x4 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax {z0\.h - z3\.h}, {z0\.h - z3\.h}, [^\n]+
+** |
+** umax {z0\.h - z3\.h}, {z0\.h - z3\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z0_z18, svuint16x4_t, z0,
+ svmax_u16_x4 (z0, z18),
+ svmax (z0, z18))
+
+/*
+** max_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax {z4\.h - z7\.h}, {z4\.h - z7\.h}, [^\n]+
+** |
+** umax {z4\.h - z7\.h}, {z4\.h - z7\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z4_z4_z23, svuint16x4_t, z4,
+ svmax_u16_x4 (z4, z23),
+ svmax (z4, z23))
+
+/*
+** max_single_z24_z24_z0:
+** umax {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z0, svuint16x4_t, svuint16_t, z24,
+ svmax_single_u16_x4 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** |
+** umax {z28\.h - z31\.h}, {z28\.h - z31\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z28_z0, svuint16x4_t, svuint16_t, z24,
+ svmax_single_u16_x4 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z1_z0, svuint16x4_t, svuint16_t, z24,
+ svmax_single_u16_x4 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z1_z24_z0:
+** umax {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z24_z0, svuint16x4_t, svuint16_t, z1,
+ svmax_single_u16_x4 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z1_z0, svuint16x4_t, svuint16_t, z1,
+ svmax_single_u16_x4 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z18_z18_z0, svuint16x4_t, svuint16_t, z18,
+ svmax_single_u16_x4 (z18, z0),
+ svmax (z18, z0))
+
+/*
+** max_single_awkward:
+** ...
+** umax ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (max_single_awkward, svuint16x4_t, svuint16_t,
+ z0_res = svmax_single_u16_x4 (z1, z0),
+ z0_res = svmax (z1, z0))
+
+/*
+** max_single_z0_z0_z15:
+** ...
+** umax {z0\.h - z3\.h}, {z0\.h - z3\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (max_single_z0_z0_z15, svuint16x4_t, svuint16_t,
+ z0 = svmax_single_u16_x4 (z0, z15),
+ z0 = svmax (z0, z15))
+
+/*
+** max_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** umax {z24\.h - z27\.h}, {z24\.h - z27\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z16, svuint16x4_t, svuint16_t, z24,
+ svmax_single_u16_x4 (z24, z16),
+ svmax (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u32_x2.c
new file mode 100644
index 0000000..fff3175
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u32_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** max_z0_z0_z4:
+** umax {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (max_z0_z0_z4, svuint32x2_t, z0,
+ svmax_u32_x2 (z0, z4),
+ svmax (z0, z4))
+
+/*
+** max_z0_z4_z0:
+** umax {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (max_z0_z4_z0, svuint32x2_t, z0,
+ svmax_u32_x2 (z4, z0),
+ svmax (z4, z0))
+
+/*
+** max_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, {z28\.s - z29\.s}
+** |
+** umax [^\n]+, {z28\.s - z29\.s}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z4_z28, svuint32x2_t, z0,
+ svmax_u32_x2 (z4, z28),
+ svmax (z4, z28))
+
+/*
+** max_z18_z18_z4:
+** umax {z18\.s - z19\.s}, {z18\.s - z19\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (max_z18_z18_z4, svuint32x2_t, z18,
+ svmax_u32_x2 (z18, z4),
+ svmax (z18, z4))
+
+/*
+** max_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, {z18\.s - z19\.s}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z23_z23_z18, svuint32x2_t, z23,
+ svmax_u32_x2 (z23, z18),
+ svmax (z23, z18))
+
+/*
+** max_z28_z28_z0:
+** umax {z28\.s - z29\.s}, {z28\.s - z29\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_XN (max_z28_z28_z0, svuint32x2_t, z28,
+ svmax_u32_x2 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_z0_z0_z18:
+** umax {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z18\.s - z19\.s}
+** ret
+*/
+TEST_XN (max_z0_z0_z18, svuint32x2_t, z0,
+ svmax_u32_x2 (z0, z18),
+ svmax (z0, z18))
+
+/*
+** max_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umax {z4\.s - z5\.s}, {z4\.s - z5\.s}, [^\n]+
+** |
+** umax {z4\.s - z5\.s}, {z4\.s - z5\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z4_z4_z23, svuint32x2_t, z4,
+ svmax_u32_x2 (z4, z23),
+ svmax (z4, z23))
+
+/*
+** max_single_z24_z24_z0:
+** umax {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z0, svuint32x2_t, svuint32_t, z24,
+ svmax_single_u32_x2 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umax {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** |
+** umax {z28\.s - z29\.s}, {z28\.s - z29\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z28_z0, svuint32x2_t, svuint32_t, z24,
+ svmax_single_u32_x2 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** umax {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z1_z0, svuint32x2_t, svuint32_t, z24,
+ svmax_single_u32_x2 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z1_z24_z0:
+** umax {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z24_z0, svuint32x2_t, svuint32_t, z1,
+ svmax_single_u32_x2 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** umax ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z1_z0, svuint32x2_t, svuint32_t, z1,
+ svmax_single_u32_x2 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z18_z18_z0:
+** umax {z18\.s - z19\.s}, {z18\.s - z19\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (max_single_z18_z18_z0, svuint32x2_t, svuint32_t, z18,
+ svmax_single_u32_x2 (z18, z0),
+ svmax (z18, z0))
+
+/*
+** max_single_awkward:
+** ...
+** umax ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (max_single_awkward, svuint32x2_t, svuint32_t,
+ z0_res = svmax_single_u32_x2 (z1, z0),
+ z0_res = svmax (z1, z0))
+
+/*
+** max_single_z0_z0_z15:
+** ...
+** umax {z0\.s - z1\.s}, {z0\.s - z1\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (max_single_z0_z0_z15, svuint32x2_t, svuint32_t,
+ z0 = svmax_single_u32_x2 (z0, z15),
+ z0 = svmax (z0, z15))
+
+/*
+** max_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** umax {z24\.s - z25\.s}, {z24\.s - z25\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z16, svuint32x2_t, svuint32_t, z24,
+ svmax_single_u32_x2 (z24, z16),
+ svmax (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u32_x4.c
new file mode 100644
index 0000000..e5b32a2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u32_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** max_z0_z0_z4:
+** umax {z0\.s - z3\.s}, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (max_z0_z0_z4, svuint32x4_t, z0,
+ svmax_u32_x4 (z0, z4),
+ svmax (z0, z4))
+
+/*
+** max_z0_z4_z0:
+** umax {z0\.s - z3\.s}, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (max_z0_z4_z0, svuint32x4_t, z0,
+ svmax_u32_x4 (z4, z0),
+ svmax (z4, z0))
+
+/*
+** max_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, {z28\.s - z31\.s}
+** |
+** umax [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z4_z28, svuint32x4_t, z0,
+ svmax_u32_x4 (z4, z28),
+ svmax (z4, z28))
+
+/*
+** max_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, {z4\.s - z7\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z18_z18_z4, svuint32x4_t, z18,
+ svmax_u32_x4 (z18, z4),
+ svmax (z18, z4))
+
+/*
+** max_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z23_z23_z28, svuint32x4_t, z23,
+ svmax_u32_x4 (z23, z28),
+ svmax (z23, z28))
+
+/*
+** max_z28_z28_z0:
+** umax {z28\.s - z31\.s}, {z28\.s - z31\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (max_z28_z28_z0, svuint32x4_t, z28,
+ svmax_u32_x4 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax {z0\.s - z3\.s}, {z0\.s - z3\.s}, [^\n]+
+** |
+** umax {z0\.s - z3\.s}, {z0\.s - z3\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z0_z18, svuint32x4_t, z0,
+ svmax_u32_x4 (z0, z18),
+ svmax (z0, z18))
+
+/*
+** max_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax {z4\.s - z7\.s}, {z4\.s - z7\.s}, [^\n]+
+** |
+** umax {z4\.s - z7\.s}, {z4\.s - z7\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z4_z4_z23, svuint32x4_t, z4,
+ svmax_u32_x4 (z4, z23),
+ svmax (z4, z23))
+
+/*
+** max_single_z24_z24_z0:
+** umax {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z0, svuint32x4_t, svuint32_t, z24,
+ svmax_single_u32_x4 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** |
+** umax {z28\.s - z31\.s}, {z28\.s - z31\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z28_z0, svuint32x4_t, svuint32_t, z24,
+ svmax_single_u32_x4 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z1_z0, svuint32x4_t, svuint32_t, z24,
+ svmax_single_u32_x4 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z1_z24_z0:
+** umax {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z24_z0, svuint32x4_t, svuint32_t, z1,
+ svmax_single_u32_x4 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z1_z0, svuint32x4_t, svuint32_t, z1,
+ svmax_single_u32_x4 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z18_z18_z0, svuint32x4_t, svuint32_t, z18,
+ svmax_single_u32_x4 (z18, z0),
+ svmax (z18, z0))
+
+/*
+** max_single_awkward:
+** ...
+** umax ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (max_single_awkward, svuint32x4_t, svuint32_t,
+ z0_res = svmax_single_u32_x4 (z1, z0),
+ z0_res = svmax (z1, z0))
+
+/*
+** max_single_z0_z0_z15:
+** ...
+** umax {z0\.s - z3\.s}, {z0\.s - z3\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (max_single_z0_z0_z15, svuint32x4_t, svuint32_t,
+ z0 = svmax_single_u32_x4 (z0, z15),
+ z0 = svmax (z0, z15))
+
+/*
+** max_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** umax {z24\.s - z27\.s}, {z24\.s - z27\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z16, svuint32x4_t, svuint32_t, z24,
+ svmax_single_u32_x4 (z24, z16),
+ svmax (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u64_x2.c
new file mode 100644
index 0000000..12a4576
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u64_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** max_z0_z0_z4:
+** umax {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (max_z0_z0_z4, svuint64x2_t, z0,
+ svmax_u64_x2 (z0, z4),
+ svmax (z0, z4))
+
+/*
+** max_z0_z4_z0:
+** umax {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (max_z0_z4_z0, svuint64x2_t, z0,
+ svmax_u64_x2 (z4, z0),
+ svmax (z4, z0))
+
+/*
+** max_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, {z28\.d - z29\.d}
+** |
+** umax [^\n]+, {z28\.d - z29\.d}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z4_z28, svuint64x2_t, z0,
+ svmax_u64_x2 (z4, z28),
+ svmax (z4, z28))
+
+/*
+** max_z18_z18_z4:
+** umax {z18\.d - z19\.d}, {z18\.d - z19\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (max_z18_z18_z4, svuint64x2_t, z18,
+ svmax_u64_x2 (z18, z4),
+ svmax (z18, z4))
+
+/*
+** max_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, {z18\.d - z19\.d}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z23_z23_z18, svuint64x2_t, z23,
+ svmax_u64_x2 (z23, z18),
+ svmax (z23, z18))
+
+/*
+** max_z28_z28_z0:
+** umax {z28\.d - z29\.d}, {z28\.d - z29\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_XN (max_z28_z28_z0, svuint64x2_t, z28,
+ svmax_u64_x2 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_z0_z0_z18:
+** umax {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z18\.d - z19\.d}
+** ret
+*/
+TEST_XN (max_z0_z0_z18, svuint64x2_t, z0,
+ svmax_u64_x2 (z0, z18),
+ svmax (z0, z18))
+
+/*
+** max_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umax {z4\.d - z5\.d}, {z4\.d - z5\.d}, [^\n]+
+** |
+** umax {z4\.d - z5\.d}, {z4\.d - z5\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z4_z4_z23, svuint64x2_t, z4,
+ svmax_u64_x2 (z4, z23),
+ svmax (z4, z23))
+
+/*
+** max_single_z24_z24_z0:
+** umax {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z0, svuint64x2_t, svuint64_t, z24,
+ svmax_single_u64_x2 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umax {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** |
+** umax {z28\.d - z29\.d}, {z28\.d - z29\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z28_z0, svuint64x2_t, svuint64_t, z24,
+ svmax_single_u64_x2 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** umax {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z1_z0, svuint64x2_t, svuint64_t, z24,
+ svmax_single_u64_x2 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z1_z24_z0:
+** umax {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z24_z0, svuint64x2_t, svuint64_t, z1,
+ svmax_single_u64_x2 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** umax ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z1_z0, svuint64x2_t, svuint64_t, z1,
+ svmax_single_u64_x2 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z18_z18_z0:
+** umax {z18\.d - z19\.d}, {z18\.d - z19\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (max_single_z18_z18_z0, svuint64x2_t, svuint64_t, z18,
+ svmax_single_u64_x2 (z18, z0),
+ svmax (z18, z0))
+
+/*
+** max_single_awkward:
+** ...
+** umax ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (max_single_awkward, svuint64x2_t, svuint64_t,
+ z0_res = svmax_single_u64_x2 (z1, z0),
+ z0_res = svmax (z1, z0))
+
+/*
+** max_single_z0_z0_z15:
+** ...
+** umax {z0\.d - z1\.d}, {z0\.d - z1\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (max_single_z0_z0_z15, svuint64x2_t, svuint64_t,
+ z0 = svmax_single_u64_x2 (z0, z15),
+ z0 = svmax (z0, z15))
+
+/*
+** max_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** umax {z24\.d - z25\.d}, {z24\.d - z25\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z16, svuint64x2_t, svuint64_t, z24,
+ svmax_single_u64_x2 (z24, z16),
+ svmax (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u64_x4.c
new file mode 100644
index 0000000..1f100cb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u64_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** max_z0_z0_z4:
+** umax {z0\.d - z3\.d}, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (max_z0_z0_z4, svuint64x4_t, z0,
+ svmax_u64_x4 (z0, z4),
+ svmax (z0, z4))
+
+/*
+** max_z0_z4_z0:
+** umax {z0\.d - z3\.d}, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (max_z0_z4_z0, svuint64x4_t, z0,
+ svmax_u64_x4 (z4, z0),
+ svmax (z4, z0))
+
+/*
+** max_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, {z28\.d - z31\.d}
+** |
+** umax [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z4_z28, svuint64x4_t, z0,
+ svmax_u64_x4 (z4, z28),
+ svmax (z4, z28))
+
+/*
+** max_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, {z4\.d - z7\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z18_z18_z4, svuint64x4_t, z18,
+ svmax_u64_x4 (z18, z4),
+ svmax (z18, z4))
+
+/*
+** max_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z23_z23_z28, svuint64x4_t, z23,
+ svmax_u64_x4 (z23, z28),
+ svmax (z23, z28))
+
+/*
+** max_z28_z28_z0:
+** umax {z28\.d - z31\.d}, {z28\.d - z31\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (max_z28_z28_z0, svuint64x4_t, z28,
+ svmax_u64_x4 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax {z0\.d - z3\.d}, {z0\.d - z3\.d}, [^\n]+
+** |
+** umax {z0\.d - z3\.d}, {z0\.d - z3\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z0_z18, svuint64x4_t, z0,
+ svmax_u64_x4 (z0, z18),
+ svmax (z0, z18))
+
+/*
+** max_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax {z4\.d - z7\.d}, {z4\.d - z7\.d}, [^\n]+
+** |
+** umax {z4\.d - z7\.d}, {z4\.d - z7\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z4_z4_z23, svuint64x4_t, z4,
+ svmax_u64_x4 (z4, z23),
+ svmax (z4, z23))
+
+/*
+** max_single_z24_z24_z0:
+** umax {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z0, svuint64x4_t, svuint64_t, z24,
+ svmax_single_u64_x4 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** |
+** umax {z28\.d - z31\.d}, {z28\.d - z31\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z28_z0, svuint64x4_t, svuint64_t, z24,
+ svmax_single_u64_x4 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z1_z0, svuint64x4_t, svuint64_t, z24,
+ svmax_single_u64_x4 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z1_z24_z0:
+** umax {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z24_z0, svuint64x4_t, svuint64_t, z1,
+ svmax_single_u64_x4 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z1_z0, svuint64x4_t, svuint64_t, z1,
+ svmax_single_u64_x4 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z18_z18_z0, svuint64x4_t, svuint64_t, z18,
+ svmax_single_u64_x4 (z18, z0),
+ svmax (z18, z0))
+
+/*
+** max_single_awkward:
+** ...
+** umax ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (max_single_awkward, svuint64x4_t, svuint64_t,
+ z0_res = svmax_single_u64_x4 (z1, z0),
+ z0_res = svmax (z1, z0))
+
+/*
+** max_single_z0_z0_z15:
+** ...
+** umax {z0\.d - z3\.d}, {z0\.d - z3\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (max_single_z0_z0_z15, svuint64x4_t, svuint64_t,
+ z0 = svmax_single_u64_x4 (z0, z15),
+ z0 = svmax (z0, z15))
+
+/*
+** max_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** umax {z24\.d - z27\.d}, {z24\.d - z27\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z16, svuint64x4_t, svuint64_t, z24,
+ svmax_single_u64_x4 (z24, z16),
+ svmax (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u8_x2.c
new file mode 100644
index 0000000..836985a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u8_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** max_z0_z0_z4:
+** umax {z0\.b - z1\.b}, {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (max_z0_z0_z4, svuint8x2_t, z0,
+ svmax_u8_x2 (z0, z4),
+ svmax (z0, z4))
+
+/*
+** max_z0_z4_z0:
+** umax {z0\.b - z1\.b}, {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (max_z0_z4_z0, svuint8x2_t, z0,
+ svmax_u8_x2 (z4, z0),
+ svmax (z4, z0))
+
+/*
+** max_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, {z28\.b - z29\.b}
+** |
+** umax [^\n]+, {z28\.b - z29\.b}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z4_z28, svuint8x2_t, z0,
+ svmax_u8_x2 (z4, z28),
+ svmax (z4, z28))
+
+/*
+** max_z18_z18_z4:
+** umax {z18\.b - z19\.b}, {z18\.b - z19\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (max_z18_z18_z4, svuint8x2_t, z18,
+ svmax_u8_x2 (z18, z4),
+ svmax (z18, z4))
+
+/*
+** max_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, {z18\.b - z19\.b}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z23_z23_z18, svuint8x2_t, z23,
+ svmax_u8_x2 (z23, z18),
+ svmax (z23, z18))
+
+/*
+** max_z28_z28_z0:
+** umax {z28\.b - z29\.b}, {z28\.b - z29\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_XN (max_z28_z28_z0, svuint8x2_t, z28,
+ svmax_u8_x2 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_z0_z0_z18:
+** umax {z0\.b - z1\.b}, {z0\.b - z1\.b}, {z18\.b - z19\.b}
+** ret
+*/
+TEST_XN (max_z0_z0_z18, svuint8x2_t, z0,
+ svmax_u8_x2 (z0, z18),
+ svmax (z0, z18))
+
+/*
+** max_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umax {z4\.b - z5\.b}, {z4\.b - z5\.b}, [^\n]+
+** |
+** umax {z4\.b - z5\.b}, {z4\.b - z5\.b}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z4_z4_z23, svuint8x2_t, z4,
+ svmax_u8_x2 (z4, z23),
+ svmax (z4, z23))
+
+/*
+** max_single_z24_z24_z0:
+** umax {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z0, svuint8x2_t, svuint8_t, z24,
+ svmax_single_u8_x2 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umax {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** |
+** umax {z28\.b - z29\.b}, {z28\.b - z29\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z28_z0, svuint8x2_t, svuint8_t, z24,
+ svmax_single_u8_x2 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** umax {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z1_z0, svuint8x2_t, svuint8_t, z24,
+ svmax_single_u8_x2 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z1_z24_z0:
+** umax {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z24_z0, svuint8x2_t, svuint8_t, z1,
+ svmax_single_u8_x2 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** umax ({z[0-9]+\.b - z[0-9]+\.b}), \1, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z1_z0, svuint8x2_t, svuint8_t, z1,
+ svmax_single_u8_x2 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z18_z18_z0:
+** umax {z18\.b - z19\.b}, {z18\.b - z19\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (max_single_z18_z18_z0, svuint8x2_t, svuint8_t, z18,
+ svmax_single_u8_x2 (z18, z0),
+ svmax (z18, z0))
+
+/*
+** max_single_awkward:
+** ...
+** umax ({z[0-9]+\.b - z[0-9]+\.b}), \1, z[0-9]+\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (max_single_awkward, svuint8x2_t, svuint8_t,
+ z0_res = svmax_single_u8_x2 (z1, z0),
+ z0_res = svmax (z1, z0))
+
+/*
+** max_single_z0_z0_z15:
+** ...
+** umax {z0\.b - z1\.b}, {z0\.b - z1\.b}, z15\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (max_single_z0_z0_z15, svuint8x2_t, svuint8_t,
+ z0 = svmax_single_u8_x2 (z0, z15),
+ z0 = svmax (z0, z15))
+
+/*
+** max_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** umax {z24\.b - z25\.b}, {z24\.b - z25\.b}, \1\.b
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z16, svuint8x2_t, svuint8_t, z24,
+ svmax_single_u8_x2 (z24, z16),
+ svmax (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u8_x4.c
new file mode 100644
index 0000000..f61a762
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/max_u8_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** max_z0_z0_z4:
+** umax {z0\.b - z3\.b}, {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_XN (max_z0_z0_z4, svuint8x4_t, z0,
+ svmax_u8_x4 (z0, z4),
+ svmax (z0, z4))
+
+/*
+** max_z0_z4_z0:
+** umax {z0\.b - z3\.b}, {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_XN (max_z0_z4_z0, svuint8x4_t, z0,
+ svmax_u8_x4 (z4, z0),
+ svmax (z4, z0))
+
+/*
+** max_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, {z28\.b - z31\.b}
+** |
+** umax [^\n]+, {z28\.b - z31\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z4_z28, svuint8x4_t, z0,
+ svmax_u8_x4 (z4, z28),
+ svmax (z4, z28))
+
+/*
+** max_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, {z4\.b - z7\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z18_z18_z4, svuint8x4_t, z18,
+ svmax_u8_x4 (z18, z4),
+ svmax (z18, z4))
+
+/*
+** max_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, {z28\.b - z31\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (max_z23_z23_z28, svuint8x4_t, z23,
+ svmax_u8_x4 (z23, z28),
+ svmax (z23, z28))
+
+/*
+** max_z28_z28_z0:
+** umax {z28\.b - z31\.b}, {z28\.b - z31\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_XN (max_z28_z28_z0, svuint8x4_t, z28,
+ svmax_u8_x4 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax {z0\.b - z3\.b}, {z0\.b - z3\.b}, [^\n]+
+** |
+** umax {z0\.b - z3\.b}, {z0\.b - z3\.b}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z0_z0_z18, svuint8x4_t, z0,
+ svmax_u8_x4 (z0, z18),
+ svmax (z0, z18))
+
+/*
+** max_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax {z4\.b - z7\.b}, {z4\.b - z7\.b}, [^\n]+
+** |
+** umax {z4\.b - z7\.b}, {z4\.b - z7\.b}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (max_z4_z4_z23, svuint8x4_t, z4,
+ svmax_u8_x4 (z4, z23),
+ svmax (z4, z23))
+
+/*
+** max_single_z24_z24_z0:
+** umax {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z0, svuint8x4_t, svuint8_t, z24,
+ svmax_single_u8_x4 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** |
+** umax {z28\.b - z31\.b}, {z28\.b - z31\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z28_z0, svuint8x4_t, svuint8_t, z24,
+ svmax_single_u8_x4 (z28, z0),
+ svmax (z28, z0))
+
+/*
+** max_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z1_z0, svuint8x4_t, svuint8_t, z24,
+ svmax_single_u8_x4 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z1_z24_z0:
+** umax {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z24_z0, svuint8x4_t, svuint8_t, z1,
+ svmax_single_u8_x4 (z24, z0),
+ svmax (z24, z0))
+
+/*
+** max_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax ({z[0-9]+\.b - z[0-9]+\.b}), \1, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z1_z1_z0, svuint8x4_t, svuint8_t, z1,
+ svmax_single_u8_x4 (z1, z0),
+ svmax (z1, z0))
+
+/*
+** max_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umax [^\n]+, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (max_single_z18_z18_z0, svuint8x4_t, svuint8_t, z18,
+ svmax_single_u8_x4 (z18, z0),
+ svmax (z18, z0))
+
+/*
+** max_single_awkward:
+** ...
+** umax ({z[0-9]+\.b - z[0-9]+\.b}), \1, z[0-9]+\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (max_single_awkward, svuint8x4_t, svuint8_t,
+ z0_res = svmax_single_u8_x4 (z1, z0),
+ z0_res = svmax (z1, z0))
+
+/*
+** max_single_z0_z0_z15:
+** ...
+** umax {z0\.b - z3\.b}, {z0\.b - z3\.b}, z15\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (max_single_z0_z0_z15, svuint8x4_t, svuint8_t,
+ z0 = svmax_single_u8_x4 (z0, z15),
+ z0 = svmax (z0, z15))
+
+/*
+** max_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** umax {z24\.b - z27\.b}, {z24\.b - z27\.b}, \1\.b
+** ret
+*/
+TEST_XN_SINGLE (max_single_z24_z24_z16, svuint8x4_t, svuint8_t, z24,
+ svmax_single_u8_x4 (z24, z16),
+ svmax (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f16_x2.c
new file mode 100644
index 0000000..5b962fb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f16_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** maxnm_z0_z0_z4:
+** fmaxnm {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (maxnm_z0_z0_z4, svfloat16x2_t, z0,
+ svmaxnm_f16_x2 (z0, z4),
+ svmaxnm (z0, z4))
+
+/*
+** maxnm_z0_z4_z0:
+** fmaxnm {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (maxnm_z0_z4_z0, svfloat16x2_t, z0,
+ svmaxnm_f16_x2 (z4, z0),
+ svmaxnm (z4, z0))
+
+/*
+** maxnm_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm [^\n]+, {z28\.h - z29\.h}
+** |
+** fmaxnm [^\n]+, {z28\.h - z29\.h}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (maxnm_z0_z4_z28, svfloat16x2_t, z0,
+ svmaxnm_f16_x2 (z4, z28),
+ svmaxnm (z4, z28))
+
+/*
+** maxnm_z18_z18_z4:
+** fmaxnm {z18\.h - z19\.h}, {z18\.h - z19\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (maxnm_z18_z18_z4, svfloat16x2_t, z18,
+ svmaxnm_f16_x2 (z18, z4),
+ svmaxnm (z18, z4))
+
+/*
+** maxnm_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm [^\n]+, {z18\.h - z19\.h}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (maxnm_z23_z23_z18, svfloat16x2_t, z23,
+ svmaxnm_f16_x2 (z23, z18),
+ svmaxnm (z23, z18))
+
+/*
+** maxnm_z28_z28_z0:
+** fmaxnm {z28\.h - z29\.h}, {z28\.h - z29\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_XN (maxnm_z28_z28_z0, svfloat16x2_t, z28,
+ svmaxnm_f16_x2 (z28, z0),
+ svmaxnm (z28, z0))
+
+/*
+** maxnm_z0_z0_z18:
+** fmaxnm {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_XN (maxnm_z0_z0_z18, svfloat16x2_t, z0,
+ svmaxnm_f16_x2 (z0, z18),
+ svmaxnm (z0, z18))
+
+/*
+** maxnm_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm {z4\.h - z5\.h}, {z4\.h - z5\.h}, [^\n]+
+** |
+** fmaxnm {z4\.h - z5\.h}, {z4\.h - z5\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (maxnm_z4_z4_z23, svfloat16x2_t, z4,
+ svmaxnm_f16_x2 (z4, z23),
+ svmaxnm (z4, z23))
+
+/*
+** maxnm_single_z24_z24_z0:
+** fmaxnm {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z24_z0, svfloat16x2_t, svfloat16_t, z24,
+ svmaxnm_single_f16_x2 (z24, z0),
+ svmaxnm (z24, z0))
+
+/*
+** maxnm_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** |
+** fmaxnm {z28\.h - z29\.h}, {z28\.h - z29\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z28_z0, svfloat16x2_t, svfloat16_t, z24,
+ svmaxnm_single_f16_x2 (z28, z0),
+ svmaxnm (z28, z0))
+
+/*
+** maxnm_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** fmaxnm {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z1_z0, svfloat16x2_t, svfloat16_t, z24,
+ svmaxnm_single_f16_x2 (z1, z0),
+ svmaxnm (z1, z0))
+
+/*
+** maxnm_single_z1_z24_z0:
+** fmaxnm {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z1_z24_z0, svfloat16x2_t, svfloat16_t, z1,
+ svmaxnm_single_f16_x2 (z24, z0),
+ svmaxnm (z24, z0))
+
+/*
+** maxnm_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z1_z1_z0, svfloat16x2_t, svfloat16_t, z1,
+ svmaxnm_single_f16_x2 (z1, z0),
+ svmaxnm (z1, z0))
+
+/*
+** maxnm_single_z18_z18_z0:
+** fmaxnm {z18\.h - z19\.h}, {z18\.h - z19\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z18_z18_z0, svfloat16x2_t, svfloat16_t, z18,
+ svmaxnm_single_f16_x2 (z18, z0),
+ svmaxnm (z18, z0))
+
+/*
+** maxnm_single_awkward:
+** ...
+** fmaxnm ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (maxnm_single_awkward, svfloat16x2_t, svfloat16_t,
+ z0_res = svmaxnm_single_f16_x2 (z1, z0),
+ z0_res = svmaxnm (z1, z0))
+
+/*
+** maxnm_single_z0_z0_z15:
+** ...
+** fmaxnm {z0\.h - z1\.h}, {z0\.h - z1\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (maxnm_single_z0_z0_z15, svfloat16x2_t, svfloat16_t,
+ z0 = svmaxnm_single_f16_x2 (z0, z15),
+ z0 = svmaxnm (z0, z15))
+
+/*
+** maxnm_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fmaxnm {z24\.h - z25\.h}, {z24\.h - z25\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z24_z16, svfloat16x2_t, svfloat16_t, z24,
+ svmaxnm_single_f16_x2 (z24, z16),
+ svmaxnm (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f16_x4.c
new file mode 100644
index 0000000..9012280
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f16_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** maxnm_z0_z0_z4:
+** fmaxnm {z0\.h - z3\.h}, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (maxnm_z0_z0_z4, svfloat16x4_t, z0,
+ svmaxnm_f16_x4 (z0, z4),
+ svmaxnm (z0, z4))
+
+/*
+** maxnm_z0_z4_z0:
+** fmaxnm {z0\.h - z3\.h}, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (maxnm_z0_z4_z0, svfloat16x4_t, z0,
+ svmaxnm_f16_x4 (z4, z0),
+ svmaxnm (z4, z0))
+
+/*
+** maxnm_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm [^\n]+, {z28\.h - z31\.h}
+** |
+** fmaxnm [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (maxnm_z0_z4_z28, svfloat16x4_t, z0,
+ svmaxnm_f16_x4 (z4, z28),
+ svmaxnm (z4, z28))
+
+/*
+** maxnm_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm [^\n]+, {z4\.h - z7\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (maxnm_z18_z18_z4, svfloat16x4_t, z18,
+ svmaxnm_f16_x4 (z18, z4),
+ svmaxnm (z18, z4))
+
+/*
+** maxnm_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (maxnm_z23_z23_z28, svfloat16x4_t, z23,
+ svmaxnm_f16_x4 (z23, z28),
+ svmaxnm (z23, z28))
+
+/*
+** maxnm_z28_z28_z0:
+** fmaxnm {z28\.h - z31\.h}, {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (maxnm_z28_z28_z0, svfloat16x4_t, z28,
+ svmaxnm_f16_x4 (z28, z0),
+ svmaxnm (z28, z0))
+
+/*
+** maxnm_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm {z0\.h - z3\.h}, {z0\.h - z3\.h}, [^\n]+
+** |
+** fmaxnm {z0\.h - z3\.h}, {z0\.h - z3\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (maxnm_z0_z0_z18, svfloat16x4_t, z0,
+ svmaxnm_f16_x4 (z0, z18),
+ svmaxnm (z0, z18))
+
+/*
+** maxnm_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm {z4\.h - z7\.h}, {z4\.h - z7\.h}, [^\n]+
+** |
+** fmaxnm {z4\.h - z7\.h}, {z4\.h - z7\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (maxnm_z4_z4_z23, svfloat16x4_t, z4,
+ svmaxnm_f16_x4 (z4, z23),
+ svmaxnm (z4, z23))
+
+/*
+** maxnm_single_z24_z24_z0:
+** fmaxnm {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z24_z0, svfloat16x4_t, svfloat16_t, z24,
+ svmaxnm_single_f16_x4 (z24, z0),
+ svmaxnm (z24, z0))
+
+/*
+** maxnm_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** |
+** fmaxnm {z28\.h - z31\.h}, {z28\.h - z31\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z28_z0, svfloat16x4_t, svfloat16_t, z24,
+ svmaxnm_single_f16_x4 (z28, z0),
+ svmaxnm (z28, z0))
+
+/*
+** maxnm_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z1_z0, svfloat16x4_t, svfloat16_t, z24,
+ svmaxnm_single_f16_x4 (z1, z0),
+ svmaxnm (z1, z0))
+
+/*
+** maxnm_single_z1_z24_z0:
+** fmaxnm {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z1_z24_z0, svfloat16x4_t, svfloat16_t, z1,
+ svmaxnm_single_f16_x4 (z24, z0),
+ svmaxnm (z24, z0))
+
+/*
+** maxnm_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z1_z1_z0, svfloat16x4_t, svfloat16_t, z1,
+ svmaxnm_single_f16_x4 (z1, z0),
+ svmaxnm (z1, z0))
+
+/*
+** maxnm_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm [^\n]+, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z18_z18_z0, svfloat16x4_t, svfloat16_t, z18,
+ svmaxnm_single_f16_x4 (z18, z0),
+ svmaxnm (z18, z0))
+
+/*
+** maxnm_single_awkward:
+** ...
+** fmaxnm ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (maxnm_single_awkward, svfloat16x4_t, svfloat16_t,
+ z0_res = svmaxnm_single_f16_x4 (z1, z0),
+ z0_res = svmaxnm (z1, z0))
+
+/*
+** maxnm_single_z0_z0_z15:
+** ...
+** fmaxnm {z0\.h - z3\.h}, {z0\.h - z3\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (maxnm_single_z0_z0_z15, svfloat16x4_t, svfloat16_t,
+ z0 = svmaxnm_single_f16_x4 (z0, z15),
+ z0 = svmaxnm (z0, z15))
+
+/*
+** maxnm_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fmaxnm {z24\.h - z27\.h}, {z24\.h - z27\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z24_z16, svfloat16x4_t, svfloat16_t, z24,
+ svmaxnm_single_f16_x4 (z24, z16),
+ svmaxnm (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f32_x2.c
new file mode 100644
index 0000000..729729e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f32_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** maxnm_z0_z0_z4:
+** fmaxnm {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (maxnm_z0_z0_z4, svfloat32x2_t, z0,
+ svmaxnm_f32_x2 (z0, z4),
+ svmaxnm (z0, z4))
+
+/*
+** maxnm_z0_z4_z0:
+** fmaxnm {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (maxnm_z0_z4_z0, svfloat32x2_t, z0,
+ svmaxnm_f32_x2 (z4, z0),
+ svmaxnm (z4, z0))
+
+/*
+** maxnm_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm [^\n]+, {z28\.s - z29\.s}
+** |
+** fmaxnm [^\n]+, {z28\.s - z29\.s}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (maxnm_z0_z4_z28, svfloat32x2_t, z0,
+ svmaxnm_f32_x2 (z4, z28),
+ svmaxnm (z4, z28))
+
+/*
+** maxnm_z18_z18_z4:
+** fmaxnm {z18\.s - z19\.s}, {z18\.s - z19\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (maxnm_z18_z18_z4, svfloat32x2_t, z18,
+ svmaxnm_f32_x2 (z18, z4),
+ svmaxnm (z18, z4))
+
+/*
+** maxnm_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm [^\n]+, {z18\.s - z19\.s}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (maxnm_z23_z23_z18, svfloat32x2_t, z23,
+ svmaxnm_f32_x2 (z23, z18),
+ svmaxnm (z23, z18))
+
+/*
+** maxnm_z28_z28_z0:
+** fmaxnm {z28\.s - z29\.s}, {z28\.s - z29\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_XN (maxnm_z28_z28_z0, svfloat32x2_t, z28,
+ svmaxnm_f32_x2 (z28, z0),
+ svmaxnm (z28, z0))
+
+/*
+** maxnm_z0_z0_z18:
+** fmaxnm {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z18\.s - z19\.s}
+** ret
+*/
+TEST_XN (maxnm_z0_z0_z18, svfloat32x2_t, z0,
+ svmaxnm_f32_x2 (z0, z18),
+ svmaxnm (z0, z18))
+
+/*
+** maxnm_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm {z4\.s - z5\.s}, {z4\.s - z5\.s}, [^\n]+
+** |
+** fmaxnm {z4\.s - z5\.s}, {z4\.s - z5\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (maxnm_z4_z4_z23, svfloat32x2_t, z4,
+ svmaxnm_f32_x2 (z4, z23),
+ svmaxnm (z4, z23))
+
+/*
+** maxnm_single_z24_z24_z0:
+** fmaxnm {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z24_z0, svfloat32x2_t, svfloat32_t, z24,
+ svmaxnm_single_f32_x2 (z24, z0),
+ svmaxnm (z24, z0))
+
+/*
+** maxnm_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** |
+** fmaxnm {z28\.s - z29\.s}, {z28\.s - z29\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z28_z0, svfloat32x2_t, svfloat32_t, z24,
+ svmaxnm_single_f32_x2 (z28, z0),
+ svmaxnm (z28, z0))
+
+/*
+** maxnm_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** fmaxnm {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z1_z0, svfloat32x2_t, svfloat32_t, z24,
+ svmaxnm_single_f32_x2 (z1, z0),
+ svmaxnm (z1, z0))
+
+/*
+** maxnm_single_z1_z24_z0:
+** fmaxnm {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z1_z24_z0, svfloat32x2_t, svfloat32_t, z1,
+ svmaxnm_single_f32_x2 (z24, z0),
+ svmaxnm (z24, z0))
+
+/*
+** maxnm_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z1_z1_z0, svfloat32x2_t, svfloat32_t, z1,
+ svmaxnm_single_f32_x2 (z1, z0),
+ svmaxnm (z1, z0))
+
+/*
+** maxnm_single_z18_z18_z0:
+** fmaxnm {z18\.s - z19\.s}, {z18\.s - z19\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z18_z18_z0, svfloat32x2_t, svfloat32_t, z18,
+ svmaxnm_single_f32_x2 (z18, z0),
+ svmaxnm (z18, z0))
+
+/*
+** maxnm_single_awkward:
+** ...
+** fmaxnm ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (maxnm_single_awkward, svfloat32x2_t, svfloat32_t,
+ z0_res = svmaxnm_single_f32_x2 (z1, z0),
+ z0_res = svmaxnm (z1, z0))
+
+/*
+** maxnm_single_z0_z0_z15:
+** ...
+** fmaxnm {z0\.s - z1\.s}, {z0\.s - z1\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (maxnm_single_z0_z0_z15, svfloat32x2_t, svfloat32_t,
+ z0 = svmaxnm_single_f32_x2 (z0, z15),
+ z0 = svmaxnm (z0, z15))
+
+/*
+** maxnm_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fmaxnm {z24\.s - z25\.s}, {z24\.s - z25\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z24_z16, svfloat32x2_t, svfloat32_t, z24,
+ svmaxnm_single_f32_x2 (z24, z16),
+ svmaxnm (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f32_x4.c
new file mode 100644
index 0000000..dda0b1c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f32_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** maxnm_z0_z0_z4:
+** fmaxnm {z0\.s - z3\.s}, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (maxnm_z0_z0_z4, svfloat32x4_t, z0,
+ svmaxnm_f32_x4 (z0, z4),
+ svmaxnm (z0, z4))
+
+/*
+** maxnm_z0_z4_z0:
+** fmaxnm {z0\.s - z3\.s}, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (maxnm_z0_z4_z0, svfloat32x4_t, z0,
+ svmaxnm_f32_x4 (z4, z0),
+ svmaxnm (z4, z0))
+
+/*
+** maxnm_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm [^\n]+, {z28\.s - z31\.s}
+** |
+** fmaxnm [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (maxnm_z0_z4_z28, svfloat32x4_t, z0,
+ svmaxnm_f32_x4 (z4, z28),
+ svmaxnm (z4, z28))
+
+/*
+** maxnm_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm [^\n]+, {z4\.s - z7\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (maxnm_z18_z18_z4, svfloat32x4_t, z18,
+ svmaxnm_f32_x4 (z18, z4),
+ svmaxnm (z18, z4))
+
+/*
+** maxnm_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (maxnm_z23_z23_z28, svfloat32x4_t, z23,
+ svmaxnm_f32_x4 (z23, z28),
+ svmaxnm (z23, z28))
+
+/*
+** maxnm_z28_z28_z0:
+** fmaxnm {z28\.s - z31\.s}, {z28\.s - z31\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (maxnm_z28_z28_z0, svfloat32x4_t, z28,
+ svmaxnm_f32_x4 (z28, z0),
+ svmaxnm (z28, z0))
+
+/*
+** maxnm_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm {z0\.s - z3\.s}, {z0\.s - z3\.s}, [^\n]+
+** |
+** fmaxnm {z0\.s - z3\.s}, {z0\.s - z3\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (maxnm_z0_z0_z18, svfloat32x4_t, z0,
+ svmaxnm_f32_x4 (z0, z18),
+ svmaxnm (z0, z18))
+
+/*
+** maxnm_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm {z4\.s - z7\.s}, {z4\.s - z7\.s}, [^\n]+
+** |
+** fmaxnm {z4\.s - z7\.s}, {z4\.s - z7\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (maxnm_z4_z4_z23, svfloat32x4_t, z4,
+ svmaxnm_f32_x4 (z4, z23),
+ svmaxnm (z4, z23))
+
+/*
+** maxnm_single_z24_z24_z0:
+** fmaxnm {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z24_z0, svfloat32x4_t, svfloat32_t, z24,
+ svmaxnm_single_f32_x4 (z24, z0),
+ svmaxnm (z24, z0))
+
+/*
+** maxnm_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** |
+** fmaxnm {z28\.s - z31\.s}, {z28\.s - z31\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z28_z0, svfloat32x4_t, svfloat32_t, z24,
+ svmaxnm_single_f32_x4 (z28, z0),
+ svmaxnm (z28, z0))
+
+/*
+** maxnm_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z1_z0, svfloat32x4_t, svfloat32_t, z24,
+ svmaxnm_single_f32_x4 (z1, z0),
+ svmaxnm (z1, z0))
+
+/*
+** maxnm_single_z1_z24_z0:
+** fmaxnm {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z1_z24_z0, svfloat32x4_t, svfloat32_t, z1,
+ svmaxnm_single_f32_x4 (z24, z0),
+ svmaxnm (z24, z0))
+
+/*
+** maxnm_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z1_z1_z0, svfloat32x4_t, svfloat32_t, z1,
+ svmaxnm_single_f32_x4 (z1, z0),
+ svmaxnm (z1, z0))
+
+/*
+** maxnm_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm [^\n]+, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z18_z18_z0, svfloat32x4_t, svfloat32_t, z18,
+ svmaxnm_single_f32_x4 (z18, z0),
+ svmaxnm (z18, z0))
+
+/*
+** maxnm_single_awkward:
+** ...
+** fmaxnm ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (maxnm_single_awkward, svfloat32x4_t, svfloat32_t,
+ z0_res = svmaxnm_single_f32_x4 (z1, z0),
+ z0_res = svmaxnm (z1, z0))
+
+/*
+** maxnm_single_z0_z0_z15:
+** ...
+** fmaxnm {z0\.s - z3\.s}, {z0\.s - z3\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (maxnm_single_z0_z0_z15, svfloat32x4_t, svfloat32_t,
+ z0 = svmaxnm_single_f32_x4 (z0, z15),
+ z0 = svmaxnm (z0, z15))
+
+/*
+** maxnm_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fmaxnm {z24\.s - z27\.s}, {z24\.s - z27\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z24_z16, svfloat32x4_t, svfloat32_t, z24,
+ svmaxnm_single_f32_x4 (z24, z16),
+ svmaxnm (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f64_x2.c
new file mode 100644
index 0000000..cbffc43
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f64_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** maxnm_z0_z0_z4:
+** fmaxnm {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (maxnm_z0_z0_z4, svfloat64x2_t, z0,
+ svmaxnm_f64_x2 (z0, z4),
+ svmaxnm (z0, z4))
+
+/*
+** maxnm_z0_z4_z0:
+** fmaxnm {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (maxnm_z0_z4_z0, svfloat64x2_t, z0,
+ svmaxnm_f64_x2 (z4, z0),
+ svmaxnm (z4, z0))
+
+/*
+** maxnm_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm [^\n]+, {z28\.d - z29\.d}
+** |
+** fmaxnm [^\n]+, {z28\.d - z29\.d}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (maxnm_z0_z4_z28, svfloat64x2_t, z0,
+ svmaxnm_f64_x2 (z4, z28),
+ svmaxnm (z4, z28))
+
+/*
+** maxnm_z18_z18_z4:
+** fmaxnm {z18\.d - z19\.d}, {z18\.d - z19\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (maxnm_z18_z18_z4, svfloat64x2_t, z18,
+ svmaxnm_f64_x2 (z18, z4),
+ svmaxnm (z18, z4))
+
+/*
+** maxnm_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm [^\n]+, {z18\.d - z19\.d}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (maxnm_z23_z23_z18, svfloat64x2_t, z23,
+ svmaxnm_f64_x2 (z23, z18),
+ svmaxnm (z23, z18))
+
+/*
+** maxnm_z28_z28_z0:
+** fmaxnm {z28\.d - z29\.d}, {z28\.d - z29\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_XN (maxnm_z28_z28_z0, svfloat64x2_t, z28,
+ svmaxnm_f64_x2 (z28, z0),
+ svmaxnm (z28, z0))
+
+/*
+** maxnm_z0_z0_z18:
+** fmaxnm {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z18\.d - z19\.d}
+** ret
+*/
+TEST_XN (maxnm_z0_z0_z18, svfloat64x2_t, z0,
+ svmaxnm_f64_x2 (z0, z18),
+ svmaxnm (z0, z18))
+
+/*
+** maxnm_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm {z4\.d - z5\.d}, {z4\.d - z5\.d}, [^\n]+
+** |
+** fmaxnm {z4\.d - z5\.d}, {z4\.d - z5\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (maxnm_z4_z4_z23, svfloat64x2_t, z4,
+ svmaxnm_f64_x2 (z4, z23),
+ svmaxnm (z4, z23))
+
+/*
+** maxnm_single_z24_z24_z0:
+** fmaxnm {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z24_z0, svfloat64x2_t, svfloat64_t, z24,
+ svmaxnm_single_f64_x2 (z24, z0),
+ svmaxnm (z24, z0))
+
+/*
+** maxnm_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** |
+** fmaxnm {z28\.d - z29\.d}, {z28\.d - z29\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z28_z0, svfloat64x2_t, svfloat64_t, z24,
+ svmaxnm_single_f64_x2 (z28, z0),
+ svmaxnm (z28, z0))
+
+/*
+** maxnm_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** fmaxnm {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z1_z0, svfloat64x2_t, svfloat64_t, z24,
+ svmaxnm_single_f64_x2 (z1, z0),
+ svmaxnm (z1, z0))
+
+/*
+** maxnm_single_z1_z24_z0:
+** fmaxnm {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z1_z24_z0, svfloat64x2_t, svfloat64_t, z1,
+ svmaxnm_single_f64_x2 (z24, z0),
+ svmaxnm (z24, z0))
+
+/*
+** maxnm_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z1_z1_z0, svfloat64x2_t, svfloat64_t, z1,
+ svmaxnm_single_f64_x2 (z1, z0),
+ svmaxnm (z1, z0))
+
+/*
+** maxnm_single_z18_z18_z0:
+** fmaxnm {z18\.d - z19\.d}, {z18\.d - z19\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z18_z18_z0, svfloat64x2_t, svfloat64_t, z18,
+ svmaxnm_single_f64_x2 (z18, z0),
+ svmaxnm (z18, z0))
+
+/*
+** maxnm_single_awkward:
+** ...
+** fmaxnm ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (maxnm_single_awkward, svfloat64x2_t, svfloat64_t,
+ z0_res = svmaxnm_single_f64_x2 (z1, z0),
+ z0_res = svmaxnm (z1, z0))
+
+/*
+** maxnm_single_z0_z0_z15:
+** ...
+** fmaxnm {z0\.d - z1\.d}, {z0\.d - z1\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (maxnm_single_z0_z0_z15, svfloat64x2_t, svfloat64_t,
+ z0 = svmaxnm_single_f64_x2 (z0, z15),
+ z0 = svmaxnm (z0, z15))
+
+/*
+** maxnm_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fmaxnm {z24\.d - z25\.d}, {z24\.d - z25\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z24_z16, svfloat64x2_t, svfloat64_t, z24,
+ svmaxnm_single_f64_x2 (z24, z16),
+ svmaxnm (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f64_x4.c
new file mode 100644
index 0000000..ba78edf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/maxnm_f64_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** maxnm_z0_z0_z4:
+** fmaxnm {z0\.d - z3\.d}, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (maxnm_z0_z0_z4, svfloat64x4_t, z0,
+ svmaxnm_f64_x4 (z0, z4),
+ svmaxnm (z0, z4))
+
+/*
+** maxnm_z0_z4_z0:
+** fmaxnm {z0\.d - z3\.d}, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (maxnm_z0_z4_z0, svfloat64x4_t, z0,
+ svmaxnm_f64_x4 (z4, z0),
+ svmaxnm (z4, z0))
+
+/*
+** maxnm_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm [^\n]+, {z28\.d - z31\.d}
+** |
+** fmaxnm [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (maxnm_z0_z4_z28, svfloat64x4_t, z0,
+ svmaxnm_f64_x4 (z4, z28),
+ svmaxnm (z4, z28))
+
+/*
+** maxnm_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm [^\n]+, {z4\.d - z7\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (maxnm_z18_z18_z4, svfloat64x4_t, z18,
+ svmaxnm_f64_x4 (z18, z4),
+ svmaxnm (z18, z4))
+
+/*
+** maxnm_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (maxnm_z23_z23_z28, svfloat64x4_t, z23,
+ svmaxnm_f64_x4 (z23, z28),
+ svmaxnm (z23, z28))
+
+/*
+** maxnm_z28_z28_z0:
+** fmaxnm {z28\.d - z31\.d}, {z28\.d - z31\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (maxnm_z28_z28_z0, svfloat64x4_t, z28,
+ svmaxnm_f64_x4 (z28, z0),
+ svmaxnm (z28, z0))
+
+/*
+** maxnm_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm {z0\.d - z3\.d}, {z0\.d - z3\.d}, [^\n]+
+** |
+** fmaxnm {z0\.d - z3\.d}, {z0\.d - z3\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (maxnm_z0_z0_z18, svfloat64x4_t, z0,
+ svmaxnm_f64_x4 (z0, z18),
+ svmaxnm (z0, z18))
+
+/*
+** maxnm_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm {z4\.d - z7\.d}, {z4\.d - z7\.d}, [^\n]+
+** |
+** fmaxnm {z4\.d - z7\.d}, {z4\.d - z7\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (maxnm_z4_z4_z23, svfloat64x4_t, z4,
+ svmaxnm_f64_x4 (z4, z23),
+ svmaxnm (z4, z23))
+
+/*
+** maxnm_single_z24_z24_z0:
+** fmaxnm {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z24_z0, svfloat64x4_t, svfloat64_t, z24,
+ svmaxnm_single_f64_x4 (z24, z0),
+ svmaxnm (z24, z0))
+
+/*
+** maxnm_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** |
+** fmaxnm {z28\.d - z31\.d}, {z28\.d - z31\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z28_z0, svfloat64x4_t, svfloat64_t, z24,
+ svmaxnm_single_f64_x4 (z28, z0),
+ svmaxnm (z28, z0))
+
+/*
+** maxnm_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z1_z0, svfloat64x4_t, svfloat64_t, z24,
+ svmaxnm_single_f64_x4 (z1, z0),
+ svmaxnm (z1, z0))
+
+/*
+** maxnm_single_z1_z24_z0:
+** fmaxnm {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z1_z24_z0, svfloat64x4_t, svfloat64_t, z1,
+ svmaxnm_single_f64_x4 (z24, z0),
+ svmaxnm (z24, z0))
+
+/*
+** maxnm_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z1_z1_z0, svfloat64x4_t, svfloat64_t, z1,
+ svmaxnm_single_f64_x4 (z1, z0),
+ svmaxnm (z1, z0))
+
+/*
+** maxnm_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmaxnm [^\n]+, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z18_z18_z0, svfloat64x4_t, svfloat64_t, z18,
+ svmaxnm_single_f64_x4 (z18, z0),
+ svmaxnm (z18, z0))
+
+/*
+** maxnm_single_awkward:
+** ...
+** fmaxnm ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (maxnm_single_awkward, svfloat64x4_t, svfloat64_t,
+ z0_res = svmaxnm_single_f64_x4 (z1, z0),
+ z0_res = svmaxnm (z1, z0))
+
+/*
+** maxnm_single_z0_z0_z15:
+** ...
+** fmaxnm {z0\.d - z3\.d}, {z0\.d - z3\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (maxnm_single_z0_z0_z15, svfloat64x4_t, svfloat64_t,
+ z0 = svmaxnm_single_f64_x4 (z0, z15),
+ z0 = svmaxnm (z0, z15))
+
+/*
+** maxnm_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fmaxnm {z24\.d - z27\.d}, {z24\.d - z27\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (maxnm_single_z24_z24_z16, svfloat64x4_t, svfloat64_t, z24,
+ svmaxnm_single_f64_x4 (z24, z16),
+ svmaxnm (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f16_x2.c
new file mode 100644
index 0000000..91b6524
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f16_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** min_z0_z0_z4:
+** fmin {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (min_z0_z0_z4, svfloat16x2_t, z0,
+ svmin_f16_x2 (z0, z4),
+ svmin (z0, z4))
+
+/*
+** min_z0_z4_z0:
+** fmin {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (min_z0_z4_z0, svfloat16x2_t, z0,
+ svmin_f16_x2 (z4, z0),
+ svmin (z4, z0))
+
+/*
+** min_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmin [^\n]+, {z28\.h - z29\.h}
+** |
+** fmin [^\n]+, {z28\.h - z29\.h}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z4_z28, svfloat16x2_t, z0,
+ svmin_f16_x2 (z4, z28),
+ svmin (z4, z28))
+
+/*
+** min_z18_z18_z4:
+** fmin {z18\.h - z19\.h}, {z18\.h - z19\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (min_z18_z18_z4, svfloat16x2_t, z18,
+ svmin_f16_x2 (z18, z4),
+ svmin (z18, z4))
+
+/*
+** min_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** fmin [^\n]+, {z18\.h - z19\.h}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z23_z23_z18, svfloat16x2_t, z23,
+ svmin_f16_x2 (z23, z18),
+ svmin (z23, z18))
+
+/*
+** min_z28_z28_z0:
+** fmin {z28\.h - z29\.h}, {z28\.h - z29\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_XN (min_z28_z28_z0, svfloat16x2_t, z28,
+ svmin_f16_x2 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_z0_z0_z18:
+** fmin {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_XN (min_z0_z0_z18, svfloat16x2_t, z0,
+ svmin_f16_x2 (z0, z18),
+ svmin (z0, z18))
+
+/*
+** min_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmin {z4\.h - z5\.h}, {z4\.h - z5\.h}, [^\n]+
+** |
+** fmin {z4\.h - z5\.h}, {z4\.h - z5\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z4_z4_z23, svfloat16x2_t, z4,
+ svmin_f16_x2 (z4, z23),
+ svmin (z4, z23))
+
+/*
+** min_single_z24_z24_z0:
+** fmin {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z0, svfloat16x2_t, svfloat16_t, z24,
+ svmin_single_f16_x2 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmin {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** |
+** fmin {z28\.h - z29\.h}, {z28\.h - z29\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z28_z0, svfloat16x2_t, svfloat16_t, z24,
+ svmin_single_f16_x2 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** fmin {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z1_z0, svfloat16x2_t, svfloat16_t, z24,
+ svmin_single_f16_x2 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z1_z24_z0:
+** fmin {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z24_z0, svfloat16x2_t, svfloat16_t, z1,
+ svmin_single_f16_x2 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** fmin ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z1_z0, svfloat16x2_t, svfloat16_t, z1,
+ svmin_single_f16_x2 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z18_z18_z0:
+** fmin {z18\.h - z19\.h}, {z18\.h - z19\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (min_single_z18_z18_z0, svfloat16x2_t, svfloat16_t, z18,
+ svmin_single_f16_x2 (z18, z0),
+ svmin (z18, z0))
+
+/*
+** min_single_awkward:
+** ...
+** fmin ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (min_single_awkward, svfloat16x2_t, svfloat16_t,
+ z0_res = svmin_single_f16_x2 (z1, z0),
+ z0_res = svmin (z1, z0))
+
+/*
+** min_single_z0_z0_z15:
+** ...
+** fmin {z0\.h - z1\.h}, {z0\.h - z1\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (min_single_z0_z0_z15, svfloat16x2_t, svfloat16_t,
+ z0 = svmin_single_f16_x2 (z0, z15),
+ z0 = svmin (z0, z15))
+
+/*
+** min_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fmin {z24\.h - z25\.h}, {z24\.h - z25\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z16, svfloat16x2_t, svfloat16_t, z24,
+ svmin_single_f16_x2 (z24, z16),
+ svmin (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f16_x4.c
new file mode 100644
index 0000000..b3763e6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f16_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** min_z0_z0_z4:
+** fmin {z0\.h - z3\.h}, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (min_z0_z0_z4, svfloat16x4_t, z0,
+ svmin_f16_x4 (z0, z4),
+ svmin (z0, z4))
+
+/*
+** min_z0_z4_z0:
+** fmin {z0\.h - z3\.h}, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (min_z0_z4_z0, svfloat16x4_t, z0,
+ svmin_f16_x4 (z4, z0),
+ svmin (z4, z0))
+
+/*
+** min_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin [^\n]+, {z28\.h - z31\.h}
+** |
+** fmin [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z4_z28, svfloat16x4_t, z0,
+ svmin_f16_x4 (z4, z28),
+ svmin (z4, z28))
+
+/*
+** min_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin [^\n]+, {z4\.h - z7\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z18_z18_z4, svfloat16x4_t, z18,
+ svmin_f16_x4 (z18, z4),
+ svmin (z18, z4))
+
+/*
+** min_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z23_z23_z28, svfloat16x4_t, z23,
+ svmin_f16_x4 (z23, z28),
+ svmin (z23, z28))
+
+/*
+** min_z28_z28_z0:
+** fmin {z28\.h - z31\.h}, {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (min_z28_z28_z0, svfloat16x4_t, z28,
+ svmin_f16_x4 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin {z0\.h - z3\.h}, {z0\.h - z3\.h}, [^\n]+
+** |
+** fmin {z0\.h - z3\.h}, {z0\.h - z3\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z0_z18, svfloat16x4_t, z0,
+ svmin_f16_x4 (z0, z18),
+ svmin (z0, z18))
+
+/*
+** min_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin {z4\.h - z7\.h}, {z4\.h - z7\.h}, [^\n]+
+** |
+** fmin {z4\.h - z7\.h}, {z4\.h - z7\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z4_z4_z23, svfloat16x4_t, z4,
+ svmin_f16_x4 (z4, z23),
+ svmin (z4, z23))
+
+/*
+** min_single_z24_z24_z0:
+** fmin {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z0, svfloat16x4_t, svfloat16_t, z24,
+ svmin_single_f16_x4 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** |
+** fmin {z28\.h - z31\.h}, {z28\.h - z31\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z28_z0, svfloat16x4_t, svfloat16_t, z24,
+ svmin_single_f16_x4 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z1_z0, svfloat16x4_t, svfloat16_t, z24,
+ svmin_single_f16_x4 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z1_z24_z0:
+** fmin {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z24_z0, svfloat16x4_t, svfloat16_t, z1,
+ svmin_single_f16_x4 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z1_z0, svfloat16x4_t, svfloat16_t, z1,
+ svmin_single_f16_x4 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin [^\n]+, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z18_z18_z0, svfloat16x4_t, svfloat16_t, z18,
+ svmin_single_f16_x4 (z18, z0),
+ svmin (z18, z0))
+
+/*
+** min_single_awkward:
+** ...
+** fmin ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (min_single_awkward, svfloat16x4_t, svfloat16_t,
+ z0_res = svmin_single_f16_x4 (z1, z0),
+ z0_res = svmin (z1, z0))
+
+/*
+** min_single_z0_z0_z15:
+** ...
+** fmin {z0\.h - z3\.h}, {z0\.h - z3\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (min_single_z0_z0_z15, svfloat16x4_t, svfloat16_t,
+ z0 = svmin_single_f16_x4 (z0, z15),
+ z0 = svmin (z0, z15))
+
+/*
+** min_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fmin {z24\.h - z27\.h}, {z24\.h - z27\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z16, svfloat16x4_t, svfloat16_t, z24,
+ svmin_single_f16_x4 (z24, z16),
+ svmin (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f32_x2.c
new file mode 100644
index 0000000..4b97345
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f32_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** min_z0_z0_z4:
+** fmin {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (min_z0_z0_z4, svfloat32x2_t, z0,
+ svmin_f32_x2 (z0, z4),
+ svmin (z0, z4))
+
+/*
+** min_z0_z4_z0:
+** fmin {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (min_z0_z4_z0, svfloat32x2_t, z0,
+ svmin_f32_x2 (z4, z0),
+ svmin (z4, z0))
+
+/*
+** min_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmin [^\n]+, {z28\.s - z29\.s}
+** |
+** fmin [^\n]+, {z28\.s - z29\.s}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z4_z28, svfloat32x2_t, z0,
+ svmin_f32_x2 (z4, z28),
+ svmin (z4, z28))
+
+/*
+** min_z18_z18_z4:
+** fmin {z18\.s - z19\.s}, {z18\.s - z19\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (min_z18_z18_z4, svfloat32x2_t, z18,
+ svmin_f32_x2 (z18, z4),
+ svmin (z18, z4))
+
+/*
+** min_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** fmin [^\n]+, {z18\.s - z19\.s}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z23_z23_z18, svfloat32x2_t, z23,
+ svmin_f32_x2 (z23, z18),
+ svmin (z23, z18))
+
+/*
+** min_z28_z28_z0:
+** fmin {z28\.s - z29\.s}, {z28\.s - z29\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_XN (min_z28_z28_z0, svfloat32x2_t, z28,
+ svmin_f32_x2 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_z0_z0_z18:
+** fmin {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z18\.s - z19\.s}
+** ret
+*/
+TEST_XN (min_z0_z0_z18, svfloat32x2_t, z0,
+ svmin_f32_x2 (z0, z18),
+ svmin (z0, z18))
+
+/*
+** min_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmin {z4\.s - z5\.s}, {z4\.s - z5\.s}, [^\n]+
+** |
+** fmin {z4\.s - z5\.s}, {z4\.s - z5\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z4_z4_z23, svfloat32x2_t, z4,
+ svmin_f32_x2 (z4, z23),
+ svmin (z4, z23))
+
+/*
+** min_single_z24_z24_z0:
+** fmin {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z0, svfloat32x2_t, svfloat32_t, z24,
+ svmin_single_f32_x2 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmin {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** |
+** fmin {z28\.s - z29\.s}, {z28\.s - z29\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z28_z0, svfloat32x2_t, svfloat32_t, z24,
+ svmin_single_f32_x2 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** fmin {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z1_z0, svfloat32x2_t, svfloat32_t, z24,
+ svmin_single_f32_x2 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z1_z24_z0:
+** fmin {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z24_z0, svfloat32x2_t, svfloat32_t, z1,
+ svmin_single_f32_x2 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** fmin ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z1_z0, svfloat32x2_t, svfloat32_t, z1,
+ svmin_single_f32_x2 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z18_z18_z0:
+** fmin {z18\.s - z19\.s}, {z18\.s - z19\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (min_single_z18_z18_z0, svfloat32x2_t, svfloat32_t, z18,
+ svmin_single_f32_x2 (z18, z0),
+ svmin (z18, z0))
+
+/*
+** min_single_awkward:
+** ...
+** fmin ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (min_single_awkward, svfloat32x2_t, svfloat32_t,
+ z0_res = svmin_single_f32_x2 (z1, z0),
+ z0_res = svmin (z1, z0))
+
+/*
+** min_single_z0_z0_z15:
+** ...
+** fmin {z0\.s - z1\.s}, {z0\.s - z1\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (min_single_z0_z0_z15, svfloat32x2_t, svfloat32_t,
+ z0 = svmin_single_f32_x2 (z0, z15),
+ z0 = svmin (z0, z15))
+
+/*
+** min_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fmin {z24\.s - z25\.s}, {z24\.s - z25\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z16, svfloat32x2_t, svfloat32_t, z24,
+ svmin_single_f32_x2 (z24, z16),
+ svmin (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f32_x4.c
new file mode 100644
index 0000000..d556270
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f32_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** min_z0_z0_z4:
+** fmin {z0\.s - z3\.s}, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (min_z0_z0_z4, svfloat32x4_t, z0,
+ svmin_f32_x4 (z0, z4),
+ svmin (z0, z4))
+
+/*
+** min_z0_z4_z0:
+** fmin {z0\.s - z3\.s}, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (min_z0_z4_z0, svfloat32x4_t, z0,
+ svmin_f32_x4 (z4, z0),
+ svmin (z4, z0))
+
+/*
+** min_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin [^\n]+, {z28\.s - z31\.s}
+** |
+** fmin [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z4_z28, svfloat32x4_t, z0,
+ svmin_f32_x4 (z4, z28),
+ svmin (z4, z28))
+
+/*
+** min_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin [^\n]+, {z4\.s - z7\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z18_z18_z4, svfloat32x4_t, z18,
+ svmin_f32_x4 (z18, z4),
+ svmin (z18, z4))
+
+/*
+** min_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z23_z23_z28, svfloat32x4_t, z23,
+ svmin_f32_x4 (z23, z28),
+ svmin (z23, z28))
+
+/*
+** min_z28_z28_z0:
+** fmin {z28\.s - z31\.s}, {z28\.s - z31\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (min_z28_z28_z0, svfloat32x4_t, z28,
+ svmin_f32_x4 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin {z0\.s - z3\.s}, {z0\.s - z3\.s}, [^\n]+
+** |
+** fmin {z0\.s - z3\.s}, {z0\.s - z3\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z0_z18, svfloat32x4_t, z0,
+ svmin_f32_x4 (z0, z18),
+ svmin (z0, z18))
+
+/*
+** min_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin {z4\.s - z7\.s}, {z4\.s - z7\.s}, [^\n]+
+** |
+** fmin {z4\.s - z7\.s}, {z4\.s - z7\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z4_z4_z23, svfloat32x4_t, z4,
+ svmin_f32_x4 (z4, z23),
+ svmin (z4, z23))
+
+/*
+** min_single_z24_z24_z0:
+** fmin {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z0, svfloat32x4_t, svfloat32_t, z24,
+ svmin_single_f32_x4 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** |
+** fmin {z28\.s - z31\.s}, {z28\.s - z31\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z28_z0, svfloat32x4_t, svfloat32_t, z24,
+ svmin_single_f32_x4 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z1_z0, svfloat32x4_t, svfloat32_t, z24,
+ svmin_single_f32_x4 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z1_z24_z0:
+** fmin {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z24_z0, svfloat32x4_t, svfloat32_t, z1,
+ svmin_single_f32_x4 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z1_z0, svfloat32x4_t, svfloat32_t, z1,
+ svmin_single_f32_x4 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin [^\n]+, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z18_z18_z0, svfloat32x4_t, svfloat32_t, z18,
+ svmin_single_f32_x4 (z18, z0),
+ svmin (z18, z0))
+
+/*
+** min_single_awkward:
+** ...
+** fmin ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (min_single_awkward, svfloat32x4_t, svfloat32_t,
+ z0_res = svmin_single_f32_x4 (z1, z0),
+ z0_res = svmin (z1, z0))
+
+/*
+** min_single_z0_z0_z15:
+** ...
+** fmin {z0\.s - z3\.s}, {z0\.s - z3\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (min_single_z0_z0_z15, svfloat32x4_t, svfloat32_t,
+ z0 = svmin_single_f32_x4 (z0, z15),
+ z0 = svmin (z0, z15))
+
+/*
+** min_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fmin {z24\.s - z27\.s}, {z24\.s - z27\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z16, svfloat32x4_t, svfloat32_t, z24,
+ svmin_single_f32_x4 (z24, z16),
+ svmin (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f64_x2.c
new file mode 100644
index 0000000..d8596bc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f64_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** min_z0_z0_z4:
+** fmin {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (min_z0_z0_z4, svfloat64x2_t, z0,
+ svmin_f64_x2 (z0, z4),
+ svmin (z0, z4))
+
+/*
+** min_z0_z4_z0:
+** fmin {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (min_z0_z4_z0, svfloat64x2_t, z0,
+ svmin_f64_x2 (z4, z0),
+ svmin (z4, z0))
+
+/*
+** min_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmin [^\n]+, {z28\.d - z29\.d}
+** |
+** fmin [^\n]+, {z28\.d - z29\.d}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z4_z28, svfloat64x2_t, z0,
+ svmin_f64_x2 (z4, z28),
+ svmin (z4, z28))
+
+/*
+** min_z18_z18_z4:
+** fmin {z18\.d - z19\.d}, {z18\.d - z19\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (min_z18_z18_z4, svfloat64x2_t, z18,
+ svmin_f64_x2 (z18, z4),
+ svmin (z18, z4))
+
+/*
+** min_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** fmin [^\n]+, {z18\.d - z19\.d}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z23_z23_z18, svfloat64x2_t, z23,
+ svmin_f64_x2 (z23, z18),
+ svmin (z23, z18))
+
+/*
+** min_z28_z28_z0:
+** fmin {z28\.d - z29\.d}, {z28\.d - z29\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_XN (min_z28_z28_z0, svfloat64x2_t, z28,
+ svmin_f64_x2 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_z0_z0_z18:
+** fmin {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z18\.d - z19\.d}
+** ret
+*/
+TEST_XN (min_z0_z0_z18, svfloat64x2_t, z0,
+ svmin_f64_x2 (z0, z18),
+ svmin (z0, z18))
+
+/*
+** min_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmin {z4\.d - z5\.d}, {z4\.d - z5\.d}, [^\n]+
+** |
+** fmin {z4\.d - z5\.d}, {z4\.d - z5\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z4_z4_z23, svfloat64x2_t, z4,
+ svmin_f64_x2 (z4, z23),
+ svmin (z4, z23))
+
+/*
+** min_single_z24_z24_z0:
+** fmin {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z0, svfloat64x2_t, svfloat64_t, z24,
+ svmin_single_f64_x2 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fmin {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** |
+** fmin {z28\.d - z29\.d}, {z28\.d - z29\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z28_z0, svfloat64x2_t, svfloat64_t, z24,
+ svmin_single_f64_x2 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** fmin {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z1_z0, svfloat64x2_t, svfloat64_t, z24,
+ svmin_single_f64_x2 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z1_z24_z0:
+** fmin {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z24_z0, svfloat64x2_t, svfloat64_t, z1,
+ svmin_single_f64_x2 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** fmin ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z1_z0, svfloat64x2_t, svfloat64_t, z1,
+ svmin_single_f64_x2 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z18_z18_z0:
+** fmin {z18\.d - z19\.d}, {z18\.d - z19\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (min_single_z18_z18_z0, svfloat64x2_t, svfloat64_t, z18,
+ svmin_single_f64_x2 (z18, z0),
+ svmin (z18, z0))
+
+/*
+** min_single_awkward:
+** ...
+** fmin ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (min_single_awkward, svfloat64x2_t, svfloat64_t,
+ z0_res = svmin_single_f64_x2 (z1, z0),
+ z0_res = svmin (z1, z0))
+
+/*
+** min_single_z0_z0_z15:
+** ...
+** fmin {z0\.d - z1\.d}, {z0\.d - z1\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (min_single_z0_z0_z15, svfloat64x2_t, svfloat64_t,
+ z0 = svmin_single_f64_x2 (z0, z15),
+ z0 = svmin (z0, z15))
+
+/*
+** min_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fmin {z24\.d - z25\.d}, {z24\.d - z25\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z16, svfloat64x2_t, svfloat64_t, z24,
+ svmin_single_f64_x2 (z24, z16),
+ svmin (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f64_x4.c
new file mode 100644
index 0000000..a7668dc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_f64_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** min_z0_z0_z4:
+** fmin {z0\.d - z3\.d}, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (min_z0_z0_z4, svfloat64x4_t, z0,
+ svmin_f64_x4 (z0, z4),
+ svmin (z0, z4))
+
+/*
+** min_z0_z4_z0:
+** fmin {z0\.d - z3\.d}, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (min_z0_z4_z0, svfloat64x4_t, z0,
+ svmin_f64_x4 (z4, z0),
+ svmin (z4, z0))
+
+/*
+** min_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin [^\n]+, {z28\.d - z31\.d}
+** |
+** fmin [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z4_z28, svfloat64x4_t, z0,
+ svmin_f64_x4 (z4, z28),
+ svmin (z4, z28))
+
+/*
+** min_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin [^\n]+, {z4\.d - z7\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z18_z18_z4, svfloat64x4_t, z18,
+ svmin_f64_x4 (z18, z4),
+ svmin (z18, z4))
+
+/*
+** min_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z23_z23_z28, svfloat64x4_t, z23,
+ svmin_f64_x4 (z23, z28),
+ svmin (z23, z28))
+
+/*
+** min_z28_z28_z0:
+** fmin {z28\.d - z31\.d}, {z28\.d - z31\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (min_z28_z28_z0, svfloat64x4_t, z28,
+ svmin_f64_x4 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin {z0\.d - z3\.d}, {z0\.d - z3\.d}, [^\n]+
+** |
+** fmin {z0\.d - z3\.d}, {z0\.d - z3\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z0_z18, svfloat64x4_t, z0,
+ svmin_f64_x4 (z0, z18),
+ svmin (z0, z18))
+
+/*
+** min_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin {z4\.d - z7\.d}, {z4\.d - z7\.d}, [^\n]+
+** |
+** fmin {z4\.d - z7\.d}, {z4\.d - z7\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z4_z4_z23, svfloat64x4_t, z4,
+ svmin_f64_x4 (z4, z23),
+ svmin (z4, z23))
+
+/*
+** min_single_z24_z24_z0:
+** fmin {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z0, svfloat64x4_t, svfloat64_t, z24,
+ svmin_single_f64_x4 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** |
+** fmin {z28\.d - z31\.d}, {z28\.d - z31\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z28_z0, svfloat64x4_t, svfloat64_t, z24,
+ svmin_single_f64_x4 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z1_z0, svfloat64x4_t, svfloat64_t, z24,
+ svmin_single_f64_x4 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z1_z24_z0:
+** fmin {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z24_z0, svfloat64x4_t, svfloat64_t, z1,
+ svmin_single_f64_x4 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z1_z0, svfloat64x4_t, svfloat64_t, z1,
+ svmin_single_f64_x4 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmin [^\n]+, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z18_z18_z0, svfloat64x4_t, svfloat64_t, z18,
+ svmin_single_f64_x4 (z18, z0),
+ svmin (z18, z0))
+
+/*
+** min_single_awkward:
+** ...
+** fmin ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (min_single_awkward, svfloat64x4_t, svfloat64_t,
+ z0_res = svmin_single_f64_x4 (z1, z0),
+ z0_res = svmin (z1, z0))
+
+/*
+** min_single_z0_z0_z15:
+** ...
+** fmin {z0\.d - z3\.d}, {z0\.d - z3\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (min_single_z0_z0_z15, svfloat64x4_t, svfloat64_t,
+ z0 = svmin_single_f64_x4 (z0, z15),
+ z0 = svmin (z0, z15))
+
+/*
+** min_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fmin {z24\.d - z27\.d}, {z24\.d - z27\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z16, svfloat64x4_t, svfloat64_t, z24,
+ svmin_single_f64_x4 (z24, z16),
+ svmin (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s16_x2.c
new file mode 100644
index 0000000..5a0c192
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s16_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** min_z0_z0_z4:
+** smin {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (min_z0_z0_z4, svint16x2_t, z0,
+ svmin_s16_x2 (z0, z4),
+ svmin (z0, z4))
+
+/*
+** min_z0_z4_z0:
+** smin {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (min_z0_z4_z0, svint16x2_t, z0,
+ svmin_s16_x2 (z4, z0),
+ svmin (z4, z0))
+
+/*
+** min_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, {z28\.h - z29\.h}
+** |
+** smin [^\n]+, {z28\.h - z29\.h}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z4_z28, svint16x2_t, z0,
+ svmin_s16_x2 (z4, z28),
+ svmin (z4, z28))
+
+/*
+** min_z18_z18_z4:
+** smin {z18\.h - z19\.h}, {z18\.h - z19\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (min_z18_z18_z4, svint16x2_t, z18,
+ svmin_s16_x2 (z18, z4),
+ svmin (z18, z4))
+
+/*
+** min_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, {z18\.h - z19\.h}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z23_z23_z18, svint16x2_t, z23,
+ svmin_s16_x2 (z23, z18),
+ svmin (z23, z18))
+
+/*
+** min_z28_z28_z0:
+** smin {z28\.h - z29\.h}, {z28\.h - z29\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_XN (min_z28_z28_z0, svint16x2_t, z28,
+ svmin_s16_x2 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_z0_z0_z18:
+** smin {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_XN (min_z0_z0_z18, svint16x2_t, z0,
+ svmin_s16_x2 (z0, z18),
+ svmin (z0, z18))
+
+/*
+** min_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smin {z4\.h - z5\.h}, {z4\.h - z5\.h}, [^\n]+
+** |
+** smin {z4\.h - z5\.h}, {z4\.h - z5\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z4_z4_z23, svint16x2_t, z4,
+ svmin_s16_x2 (z4, z23),
+ svmin (z4, z23))
+
+/*
+** min_single_z24_z24_z0:
+** smin {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z0, svint16x2_t, svint16_t, z24,
+ svmin_single_s16_x2 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smin {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** |
+** smin {z28\.h - z29\.h}, {z28\.h - z29\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z28_z0, svint16x2_t, svint16_t, z24,
+ svmin_single_s16_x2 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** smin {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z1_z0, svint16x2_t, svint16_t, z24,
+ svmin_single_s16_x2 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z1_z24_z0:
+** smin {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z24_z0, svint16x2_t, svint16_t, z1,
+ svmin_single_s16_x2 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** smin ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z1_z0, svint16x2_t, svint16_t, z1,
+ svmin_single_s16_x2 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z18_z18_z0:
+** smin {z18\.h - z19\.h}, {z18\.h - z19\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (min_single_z18_z18_z0, svint16x2_t, svint16_t, z18,
+ svmin_single_s16_x2 (z18, z0),
+ svmin (z18, z0))
+
+/*
+** min_single_awkward:
+** ...
+** smin ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (min_single_awkward, svint16x2_t, svint16_t,
+ z0_res = svmin_single_s16_x2 (z1, z0),
+ z0_res = svmin (z1, z0))
+
+/*
+** min_single_z0_z0_z15:
+** ...
+** smin {z0\.h - z1\.h}, {z0\.h - z1\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (min_single_z0_z0_z15, svint16x2_t, svint16_t,
+ z0 = svmin_single_s16_x2 (z0, z15),
+ z0 = svmin (z0, z15))
+
+/*
+** min_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** smin {z24\.h - z25\.h}, {z24\.h - z25\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z16, svint16x2_t, svint16_t, z24,
+ svmin_single_s16_x2 (z24, z16),
+ svmin (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s16_x4.c
new file mode 100644
index 0000000..6fc0e90
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s16_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** min_z0_z0_z4:
+** smin {z0\.h - z3\.h}, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (min_z0_z0_z4, svint16x4_t, z0,
+ svmin_s16_x4 (z0, z4),
+ svmin (z0, z4))
+
+/*
+** min_z0_z4_z0:
+** smin {z0\.h - z3\.h}, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (min_z0_z4_z0, svint16x4_t, z0,
+ svmin_s16_x4 (z4, z0),
+ svmin (z4, z0))
+
+/*
+** min_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, {z28\.h - z31\.h}
+** |
+** smin [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z4_z28, svint16x4_t, z0,
+ svmin_s16_x4 (z4, z28),
+ svmin (z4, z28))
+
+/*
+** min_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, {z4\.h - z7\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z18_z18_z4, svint16x4_t, z18,
+ svmin_s16_x4 (z18, z4),
+ svmin (z18, z4))
+
+/*
+** min_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z23_z23_z28, svint16x4_t, z23,
+ svmin_s16_x4 (z23, z28),
+ svmin (z23, z28))
+
+/*
+** min_z28_z28_z0:
+** smin {z28\.h - z31\.h}, {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (min_z28_z28_z0, svint16x4_t, z28,
+ svmin_s16_x4 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin {z0\.h - z3\.h}, {z0\.h - z3\.h}, [^\n]+
+** |
+** smin {z0\.h - z3\.h}, {z0\.h - z3\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z0_z18, svint16x4_t, z0,
+ svmin_s16_x4 (z0, z18),
+ svmin (z0, z18))
+
+/*
+** min_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin {z4\.h - z7\.h}, {z4\.h - z7\.h}, [^\n]+
+** |
+** smin {z4\.h - z7\.h}, {z4\.h - z7\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z4_z4_z23, svint16x4_t, z4,
+ svmin_s16_x4 (z4, z23),
+ svmin (z4, z23))
+
+/*
+** min_single_z24_z24_z0:
+** smin {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z0, svint16x4_t, svint16_t, z24,
+ svmin_single_s16_x4 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** |
+** smin {z28\.h - z31\.h}, {z28\.h - z31\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z28_z0, svint16x4_t, svint16_t, z24,
+ svmin_single_s16_x4 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z1_z0, svint16x4_t, svint16_t, z24,
+ svmin_single_s16_x4 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z1_z24_z0:
+** smin {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z24_z0, svint16x4_t, svint16_t, z1,
+ svmin_single_s16_x4 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z1_z0, svint16x4_t, svint16_t, z1,
+ svmin_single_s16_x4 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z18_z18_z0, svint16x4_t, svint16_t, z18,
+ svmin_single_s16_x4 (z18, z0),
+ svmin (z18, z0))
+
+/*
+** min_single_awkward:
+** ...
+** smin ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (min_single_awkward, svint16x4_t, svint16_t,
+ z0_res = svmin_single_s16_x4 (z1, z0),
+ z0_res = svmin (z1, z0))
+
+/*
+** min_single_z0_z0_z15:
+** ...
+** smin {z0\.h - z3\.h}, {z0\.h - z3\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (min_single_z0_z0_z15, svint16x4_t, svint16_t,
+ z0 = svmin_single_s16_x4 (z0, z15),
+ z0 = svmin (z0, z15))
+
+/*
+** min_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** smin {z24\.h - z27\.h}, {z24\.h - z27\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z16, svint16x4_t, svint16_t, z24,
+ svmin_single_s16_x4 (z24, z16),
+ svmin (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s32_x2.c
new file mode 100644
index 0000000..a8a9e10
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s32_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** min_z0_z0_z4:
+** smin {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (min_z0_z0_z4, svint32x2_t, z0,
+ svmin_s32_x2 (z0, z4),
+ svmin (z0, z4))
+
+/*
+** min_z0_z4_z0:
+** smin {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (min_z0_z4_z0, svint32x2_t, z0,
+ svmin_s32_x2 (z4, z0),
+ svmin (z4, z0))
+
+/*
+** min_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, {z28\.s - z29\.s}
+** |
+** smin [^\n]+, {z28\.s - z29\.s}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z4_z28, svint32x2_t, z0,
+ svmin_s32_x2 (z4, z28),
+ svmin (z4, z28))
+
+/*
+** min_z18_z18_z4:
+** smin {z18\.s - z19\.s}, {z18\.s - z19\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (min_z18_z18_z4, svint32x2_t, z18,
+ svmin_s32_x2 (z18, z4),
+ svmin (z18, z4))
+
+/*
+** min_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, {z18\.s - z19\.s}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z23_z23_z18, svint32x2_t, z23,
+ svmin_s32_x2 (z23, z18),
+ svmin (z23, z18))
+
+/*
+** min_z28_z28_z0:
+** smin {z28\.s - z29\.s}, {z28\.s - z29\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_XN (min_z28_z28_z0, svint32x2_t, z28,
+ svmin_s32_x2 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_z0_z0_z18:
+** smin {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z18\.s - z19\.s}
+** ret
+*/
+TEST_XN (min_z0_z0_z18, svint32x2_t, z0,
+ svmin_s32_x2 (z0, z18),
+ svmin (z0, z18))
+
+/*
+** min_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smin {z4\.s - z5\.s}, {z4\.s - z5\.s}, [^\n]+
+** |
+** smin {z4\.s - z5\.s}, {z4\.s - z5\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z4_z4_z23, svint32x2_t, z4,
+ svmin_s32_x2 (z4, z23),
+ svmin (z4, z23))
+
+/*
+** min_single_z24_z24_z0:
+** smin {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z0, svint32x2_t, svint32_t, z24,
+ svmin_single_s32_x2 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smin {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** |
+** smin {z28\.s - z29\.s}, {z28\.s - z29\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z28_z0, svint32x2_t, svint32_t, z24,
+ svmin_single_s32_x2 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** smin {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z1_z0, svint32x2_t, svint32_t, z24,
+ svmin_single_s32_x2 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z1_z24_z0:
+** smin {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z24_z0, svint32x2_t, svint32_t, z1,
+ svmin_single_s32_x2 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** smin ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z1_z0, svint32x2_t, svint32_t, z1,
+ svmin_single_s32_x2 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z18_z18_z0:
+** smin {z18\.s - z19\.s}, {z18\.s - z19\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (min_single_z18_z18_z0, svint32x2_t, svint32_t, z18,
+ svmin_single_s32_x2 (z18, z0),
+ svmin (z18, z0))
+
+/*
+** min_single_awkward:
+** ...
+** smin ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (min_single_awkward, svint32x2_t, svint32_t,
+ z0_res = svmin_single_s32_x2 (z1, z0),
+ z0_res = svmin (z1, z0))
+
+/*
+** min_single_z0_z0_z15:
+** ...
+** smin {z0\.s - z1\.s}, {z0\.s - z1\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (min_single_z0_z0_z15, svint32x2_t, svint32_t,
+ z0 = svmin_single_s32_x2 (z0, z15),
+ z0 = svmin (z0, z15))
+
+/*
+** min_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** smin {z24\.s - z25\.s}, {z24\.s - z25\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z16, svint32x2_t, svint32_t, z24,
+ svmin_single_s32_x2 (z24, z16),
+ svmin (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s32_x4.c
new file mode 100644
index 0000000..df5bf89
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s32_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** min_z0_z0_z4:
+** smin {z0\.s - z3\.s}, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (min_z0_z0_z4, svint32x4_t, z0,
+ svmin_s32_x4 (z0, z4),
+ svmin (z0, z4))
+
+/*
+** min_z0_z4_z0:
+** smin {z0\.s - z3\.s}, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (min_z0_z4_z0, svint32x4_t, z0,
+ svmin_s32_x4 (z4, z0),
+ svmin (z4, z0))
+
+/*
+** min_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, {z28\.s - z31\.s}
+** |
+** smin [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z4_z28, svint32x4_t, z0,
+ svmin_s32_x4 (z4, z28),
+ svmin (z4, z28))
+
+/*
+** min_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, {z4\.s - z7\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z18_z18_z4, svint32x4_t, z18,
+ svmin_s32_x4 (z18, z4),
+ svmin (z18, z4))
+
+/*
+** min_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z23_z23_z28, svint32x4_t, z23,
+ svmin_s32_x4 (z23, z28),
+ svmin (z23, z28))
+
+/*
+** min_z28_z28_z0:
+** smin {z28\.s - z31\.s}, {z28\.s - z31\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (min_z28_z28_z0, svint32x4_t, z28,
+ svmin_s32_x4 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin {z0\.s - z3\.s}, {z0\.s - z3\.s}, [^\n]+
+** |
+** smin {z0\.s - z3\.s}, {z0\.s - z3\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z0_z18, svint32x4_t, z0,
+ svmin_s32_x4 (z0, z18),
+ svmin (z0, z18))
+
+/*
+** min_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin {z4\.s - z7\.s}, {z4\.s - z7\.s}, [^\n]+
+** |
+** smin {z4\.s - z7\.s}, {z4\.s - z7\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z4_z4_z23, svint32x4_t, z4,
+ svmin_s32_x4 (z4, z23),
+ svmin (z4, z23))
+
+/*
+** min_single_z24_z24_z0:
+** smin {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z0, svint32x4_t, svint32_t, z24,
+ svmin_single_s32_x4 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** |
+** smin {z28\.s - z31\.s}, {z28\.s - z31\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z28_z0, svint32x4_t, svint32_t, z24,
+ svmin_single_s32_x4 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z1_z0, svint32x4_t, svint32_t, z24,
+ svmin_single_s32_x4 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z1_z24_z0:
+** smin {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z24_z0, svint32x4_t, svint32_t, z1,
+ svmin_single_s32_x4 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z1_z0, svint32x4_t, svint32_t, z1,
+ svmin_single_s32_x4 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z18_z18_z0, svint32x4_t, svint32_t, z18,
+ svmin_single_s32_x4 (z18, z0),
+ svmin (z18, z0))
+
+/*
+** min_single_awkward:
+** ...
+** smin ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (min_single_awkward, svint32x4_t, svint32_t,
+ z0_res = svmin_single_s32_x4 (z1, z0),
+ z0_res = svmin (z1, z0))
+
+/*
+** min_single_z0_z0_z15:
+** ...
+** smin {z0\.s - z3\.s}, {z0\.s - z3\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (min_single_z0_z0_z15, svint32x4_t, svint32_t,
+ z0 = svmin_single_s32_x4 (z0, z15),
+ z0 = svmin (z0, z15))
+
+/*
+** min_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** smin {z24\.s - z27\.s}, {z24\.s - z27\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z16, svint32x4_t, svint32_t, z24,
+ svmin_single_s32_x4 (z24, z16),
+ svmin (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s64_x2.c
new file mode 100644
index 0000000..5b16c46
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s64_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** min_z0_z0_z4:
+** smin {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (min_z0_z0_z4, svint64x2_t, z0,
+ svmin_s64_x2 (z0, z4),
+ svmin (z0, z4))
+
+/*
+** min_z0_z4_z0:
+** smin {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (min_z0_z4_z0, svint64x2_t, z0,
+ svmin_s64_x2 (z4, z0),
+ svmin (z4, z0))
+
+/*
+** min_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, {z28\.d - z29\.d}
+** |
+** smin [^\n]+, {z28\.d - z29\.d}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z4_z28, svint64x2_t, z0,
+ svmin_s64_x2 (z4, z28),
+ svmin (z4, z28))
+
+/*
+** min_z18_z18_z4:
+** smin {z18\.d - z19\.d}, {z18\.d - z19\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (min_z18_z18_z4, svint64x2_t, z18,
+ svmin_s64_x2 (z18, z4),
+ svmin (z18, z4))
+
+/*
+** min_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, {z18\.d - z19\.d}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z23_z23_z18, svint64x2_t, z23,
+ svmin_s64_x2 (z23, z18),
+ svmin (z23, z18))
+
+/*
+** min_z28_z28_z0:
+** smin {z28\.d - z29\.d}, {z28\.d - z29\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_XN (min_z28_z28_z0, svint64x2_t, z28,
+ svmin_s64_x2 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_z0_z0_z18:
+** smin {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z18\.d - z19\.d}
+** ret
+*/
+TEST_XN (min_z0_z0_z18, svint64x2_t, z0,
+ svmin_s64_x2 (z0, z18),
+ svmin (z0, z18))
+
+/*
+** min_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smin {z4\.d - z5\.d}, {z4\.d - z5\.d}, [^\n]+
+** |
+** smin {z4\.d - z5\.d}, {z4\.d - z5\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z4_z4_z23, svint64x2_t, z4,
+ svmin_s64_x2 (z4, z23),
+ svmin (z4, z23))
+
+/*
+** min_single_z24_z24_z0:
+** smin {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z0, svint64x2_t, svint64_t, z24,
+ svmin_single_s64_x2 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smin {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** |
+** smin {z28\.d - z29\.d}, {z28\.d - z29\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z28_z0, svint64x2_t, svint64_t, z24,
+ svmin_single_s64_x2 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** smin {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z1_z0, svint64x2_t, svint64_t, z24,
+ svmin_single_s64_x2 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z1_z24_z0:
+** smin {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z24_z0, svint64x2_t, svint64_t, z1,
+ svmin_single_s64_x2 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** smin ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z1_z0, svint64x2_t, svint64_t, z1,
+ svmin_single_s64_x2 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z18_z18_z0:
+** smin {z18\.d - z19\.d}, {z18\.d - z19\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (min_single_z18_z18_z0, svint64x2_t, svint64_t, z18,
+ svmin_single_s64_x2 (z18, z0),
+ svmin (z18, z0))
+
+/*
+** min_single_awkward:
+** ...
+** smin ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (min_single_awkward, svint64x2_t, svint64_t,
+ z0_res = svmin_single_s64_x2 (z1, z0),
+ z0_res = svmin (z1, z0))
+
+/*
+** min_single_z0_z0_z15:
+** ...
+** smin {z0\.d - z1\.d}, {z0\.d - z1\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (min_single_z0_z0_z15, svint64x2_t, svint64_t,
+ z0 = svmin_single_s64_x2 (z0, z15),
+ z0 = svmin (z0, z15))
+
+/*
+** min_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** smin {z24\.d - z25\.d}, {z24\.d - z25\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z16, svint64x2_t, svint64_t, z24,
+ svmin_single_s64_x2 (z24, z16),
+ svmin (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s64_x4.c
new file mode 100644
index 0000000..4b926d2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s64_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** min_z0_z0_z4:
+** smin {z0\.d - z3\.d}, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (min_z0_z0_z4, svint64x4_t, z0,
+ svmin_s64_x4 (z0, z4),
+ svmin (z0, z4))
+
+/*
+** min_z0_z4_z0:
+** smin {z0\.d - z3\.d}, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (min_z0_z4_z0, svint64x4_t, z0,
+ svmin_s64_x4 (z4, z0),
+ svmin (z4, z0))
+
+/*
+** min_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, {z28\.d - z31\.d}
+** |
+** smin [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z4_z28, svint64x4_t, z0,
+ svmin_s64_x4 (z4, z28),
+ svmin (z4, z28))
+
+/*
+** min_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, {z4\.d - z7\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z18_z18_z4, svint64x4_t, z18,
+ svmin_s64_x4 (z18, z4),
+ svmin (z18, z4))
+
+/*
+** min_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z23_z23_z28, svint64x4_t, z23,
+ svmin_s64_x4 (z23, z28),
+ svmin (z23, z28))
+
+/*
+** min_z28_z28_z0:
+** smin {z28\.d - z31\.d}, {z28\.d - z31\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (min_z28_z28_z0, svint64x4_t, z28,
+ svmin_s64_x4 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin {z0\.d - z3\.d}, {z0\.d - z3\.d}, [^\n]+
+** |
+** smin {z0\.d - z3\.d}, {z0\.d - z3\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z0_z18, svint64x4_t, z0,
+ svmin_s64_x4 (z0, z18),
+ svmin (z0, z18))
+
+/*
+** min_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin {z4\.d - z7\.d}, {z4\.d - z7\.d}, [^\n]+
+** |
+** smin {z4\.d - z7\.d}, {z4\.d - z7\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z4_z4_z23, svint64x4_t, z4,
+ svmin_s64_x4 (z4, z23),
+ svmin (z4, z23))
+
+/*
+** min_single_z24_z24_z0:
+** smin {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z0, svint64x4_t, svint64_t, z24,
+ svmin_single_s64_x4 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** |
+** smin {z28\.d - z31\.d}, {z28\.d - z31\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z28_z0, svint64x4_t, svint64_t, z24,
+ svmin_single_s64_x4 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z1_z0, svint64x4_t, svint64_t, z24,
+ svmin_single_s64_x4 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z1_z24_z0:
+** smin {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z24_z0, svint64x4_t, svint64_t, z1,
+ svmin_single_s64_x4 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z1_z0, svint64x4_t, svint64_t, z1,
+ svmin_single_s64_x4 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z18_z18_z0, svint64x4_t, svint64_t, z18,
+ svmin_single_s64_x4 (z18, z0),
+ svmin (z18, z0))
+
+/*
+** min_single_awkward:
+** ...
+** smin ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (min_single_awkward, svint64x4_t, svint64_t,
+ z0_res = svmin_single_s64_x4 (z1, z0),
+ z0_res = svmin (z1, z0))
+
+/*
+** min_single_z0_z0_z15:
+** ...
+** smin {z0\.d - z3\.d}, {z0\.d - z3\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (min_single_z0_z0_z15, svint64x4_t, svint64_t,
+ z0 = svmin_single_s64_x4 (z0, z15),
+ z0 = svmin (z0, z15))
+
+/*
+** min_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** smin {z24\.d - z27\.d}, {z24\.d - z27\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z16, svint64x4_t, svint64_t, z24,
+ svmin_single_s64_x4 (z24, z16),
+ svmin (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s8_x2.c
new file mode 100644
index 0000000..9082ef7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s8_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** min_z0_z0_z4:
+** smin {z0\.b - z1\.b}, {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (min_z0_z0_z4, svint8x2_t, z0,
+ svmin_s8_x2 (z0, z4),
+ svmin (z0, z4))
+
+/*
+** min_z0_z4_z0:
+** smin {z0\.b - z1\.b}, {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (min_z0_z4_z0, svint8x2_t, z0,
+ svmin_s8_x2 (z4, z0),
+ svmin (z4, z0))
+
+/*
+** min_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, {z28\.b - z29\.b}
+** |
+** smin [^\n]+, {z28\.b - z29\.b}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z4_z28, svint8x2_t, z0,
+ svmin_s8_x2 (z4, z28),
+ svmin (z4, z28))
+
+/*
+** min_z18_z18_z4:
+** smin {z18\.b - z19\.b}, {z18\.b - z19\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (min_z18_z18_z4, svint8x2_t, z18,
+ svmin_s8_x2 (z18, z4),
+ svmin (z18, z4))
+
+/*
+** min_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, {z18\.b - z19\.b}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z23_z23_z18, svint8x2_t, z23,
+ svmin_s8_x2 (z23, z18),
+ svmin (z23, z18))
+
+/*
+** min_z28_z28_z0:
+** smin {z28\.b - z29\.b}, {z28\.b - z29\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_XN (min_z28_z28_z0, svint8x2_t, z28,
+ svmin_s8_x2 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_z0_z0_z18:
+** smin {z0\.b - z1\.b}, {z0\.b - z1\.b}, {z18\.b - z19\.b}
+** ret
+*/
+TEST_XN (min_z0_z0_z18, svint8x2_t, z0,
+ svmin_s8_x2 (z0, z18),
+ svmin (z0, z18))
+
+/*
+** min_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smin {z4\.b - z5\.b}, {z4\.b - z5\.b}, [^\n]+
+** |
+** smin {z4\.b - z5\.b}, {z4\.b - z5\.b}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z4_z4_z23, svint8x2_t, z4,
+ svmin_s8_x2 (z4, z23),
+ svmin (z4, z23))
+
+/*
+** min_single_z24_z24_z0:
+** smin {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z0, svint8x2_t, svint8_t, z24,
+ svmin_single_s8_x2 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** smin {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** |
+** smin {z28\.b - z29\.b}, {z28\.b - z29\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z28_z0, svint8x2_t, svint8_t, z24,
+ svmin_single_s8_x2 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** smin {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z1_z0, svint8x2_t, svint8_t, z24,
+ svmin_single_s8_x2 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z1_z24_z0:
+** smin {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z24_z0, svint8x2_t, svint8_t, z1,
+ svmin_single_s8_x2 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** smin ({z[0-9]+\.b - z[0-9]+\.b}), \1, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z1_z0, svint8x2_t, svint8_t, z1,
+ svmin_single_s8_x2 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z18_z18_z0:
+** smin {z18\.b - z19\.b}, {z18\.b - z19\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (min_single_z18_z18_z0, svint8x2_t, svint8_t, z18,
+ svmin_single_s8_x2 (z18, z0),
+ svmin (z18, z0))
+
+/*
+** min_single_awkward:
+** ...
+** smin ({z[0-9]+\.b - z[0-9]+\.b}), \1, z[0-9]+\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (min_single_awkward, svint8x2_t, svint8_t,
+ z0_res = svmin_single_s8_x2 (z1, z0),
+ z0_res = svmin (z1, z0))
+
+/*
+** min_single_z0_z0_z15:
+** ...
+** smin {z0\.b - z1\.b}, {z0\.b - z1\.b}, z15\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (min_single_z0_z0_z15, svint8x2_t, svint8_t,
+ z0 = svmin_single_s8_x2 (z0, z15),
+ z0 = svmin (z0, z15))
+
+/*
+** min_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** smin {z24\.b - z25\.b}, {z24\.b - z25\.b}, \1\.b
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z16, svint8x2_t, svint8_t, z24,
+ svmin_single_s8_x2 (z24, z16),
+ svmin (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s8_x4.c
new file mode 100644
index 0000000..e322966
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_s8_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** min_z0_z0_z4:
+** smin {z0\.b - z3\.b}, {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_XN (min_z0_z0_z4, svint8x4_t, z0,
+ svmin_s8_x4 (z0, z4),
+ svmin (z0, z4))
+
+/*
+** min_z0_z4_z0:
+** smin {z0\.b - z3\.b}, {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_XN (min_z0_z4_z0, svint8x4_t, z0,
+ svmin_s8_x4 (z4, z0),
+ svmin (z4, z0))
+
+/*
+** min_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, {z28\.b - z31\.b}
+** |
+** smin [^\n]+, {z28\.b - z31\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z4_z28, svint8x4_t, z0,
+ svmin_s8_x4 (z4, z28),
+ svmin (z4, z28))
+
+/*
+** min_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, {z4\.b - z7\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z18_z18_z4, svint8x4_t, z18,
+ svmin_s8_x4 (z18, z4),
+ svmin (z18, z4))
+
+/*
+** min_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, {z28\.b - z31\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z23_z23_z28, svint8x4_t, z23,
+ svmin_s8_x4 (z23, z28),
+ svmin (z23, z28))
+
+/*
+** min_z28_z28_z0:
+** smin {z28\.b - z31\.b}, {z28\.b - z31\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_XN (min_z28_z28_z0, svint8x4_t, z28,
+ svmin_s8_x4 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin {z0\.b - z3\.b}, {z0\.b - z3\.b}, [^\n]+
+** |
+** smin {z0\.b - z3\.b}, {z0\.b - z3\.b}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z0_z18, svint8x4_t, z0,
+ svmin_s8_x4 (z0, z18),
+ svmin (z0, z18))
+
+/*
+** min_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin {z4\.b - z7\.b}, {z4\.b - z7\.b}, [^\n]+
+** |
+** smin {z4\.b - z7\.b}, {z4\.b - z7\.b}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z4_z4_z23, svint8x4_t, z4,
+ svmin_s8_x4 (z4, z23),
+ svmin (z4, z23))
+
+/*
+** min_single_z24_z24_z0:
+** smin {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z0, svint8x4_t, svint8_t, z24,
+ svmin_single_s8_x4 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** |
+** smin {z28\.b - z31\.b}, {z28\.b - z31\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z28_z0, svint8x4_t, svint8_t, z24,
+ svmin_single_s8_x4 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z1_z0, svint8x4_t, svint8_t, z24,
+ svmin_single_s8_x4 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z1_z24_z0:
+** smin {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z24_z0, svint8x4_t, svint8_t, z1,
+ svmin_single_s8_x4 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin ({z[0-9]+\.b - z[0-9]+\.b}), \1, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z1_z0, svint8x4_t, svint8_t, z1,
+ svmin_single_s8_x4 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smin [^\n]+, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z18_z18_z0, svint8x4_t, svint8_t, z18,
+ svmin_single_s8_x4 (z18, z0),
+ svmin (z18, z0))
+
+/*
+** min_single_awkward:
+** ...
+** smin ({z[0-9]+\.b - z[0-9]+\.b}), \1, z[0-9]+\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (min_single_awkward, svint8x4_t, svint8_t,
+ z0_res = svmin_single_s8_x4 (z1, z0),
+ z0_res = svmin (z1, z0))
+
+/*
+** min_single_z0_z0_z15:
+** ...
+** smin {z0\.b - z3\.b}, {z0\.b - z3\.b}, z15\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (min_single_z0_z0_z15, svint8x4_t, svint8_t,
+ z0 = svmin_single_s8_x4 (z0, z15),
+ z0 = svmin (z0, z15))
+
+/*
+** min_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** smin {z24\.b - z27\.b}, {z24\.b - z27\.b}, \1\.b
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z16, svint8x4_t, svint8_t, z24,
+ svmin_single_s8_x4 (z24, z16),
+ svmin (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u16_x2.c
new file mode 100644
index 0000000..40c41d1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u16_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** min_z0_z0_z4:
+** umin {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (min_z0_z0_z4, svuint16x2_t, z0,
+ svmin_u16_x2 (z0, z4),
+ svmin (z0, z4))
+
+/*
+** min_z0_z4_z0:
+** umin {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (min_z0_z4_z0, svuint16x2_t, z0,
+ svmin_u16_x2 (z4, z0),
+ svmin (z4, z0))
+
+/*
+** min_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, {z28\.h - z29\.h}
+** |
+** umin [^\n]+, {z28\.h - z29\.h}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z4_z28, svuint16x2_t, z0,
+ svmin_u16_x2 (z4, z28),
+ svmin (z4, z28))
+
+/*
+** min_z18_z18_z4:
+** umin {z18\.h - z19\.h}, {z18\.h - z19\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (min_z18_z18_z4, svuint16x2_t, z18,
+ svmin_u16_x2 (z18, z4),
+ svmin (z18, z4))
+
+/*
+** min_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, {z18\.h - z19\.h}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z23_z23_z18, svuint16x2_t, z23,
+ svmin_u16_x2 (z23, z18),
+ svmin (z23, z18))
+
+/*
+** min_z28_z28_z0:
+** umin {z28\.h - z29\.h}, {z28\.h - z29\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_XN (min_z28_z28_z0, svuint16x2_t, z28,
+ svmin_u16_x2 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_z0_z0_z18:
+** umin {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_XN (min_z0_z0_z18, svuint16x2_t, z0,
+ svmin_u16_x2 (z0, z18),
+ svmin (z0, z18))
+
+/*
+** min_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umin {z4\.h - z5\.h}, {z4\.h - z5\.h}, [^\n]+
+** |
+** umin {z4\.h - z5\.h}, {z4\.h - z5\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z4_z4_z23, svuint16x2_t, z4,
+ svmin_u16_x2 (z4, z23),
+ svmin (z4, z23))
+
+/*
+** min_single_z24_z24_z0:
+** umin {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z0, svuint16x2_t, svuint16_t, z24,
+ svmin_single_u16_x2 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umin {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** |
+** umin {z28\.h - z29\.h}, {z28\.h - z29\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z28_z0, svuint16x2_t, svuint16_t, z24,
+ svmin_single_u16_x2 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** umin {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z1_z0, svuint16x2_t, svuint16_t, z24,
+ svmin_single_u16_x2 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z1_z24_z0:
+** umin {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z24_z0, svuint16x2_t, svuint16_t, z1,
+ svmin_single_u16_x2 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** umin ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z1_z0, svuint16x2_t, svuint16_t, z1,
+ svmin_single_u16_x2 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z18_z18_z0:
+** umin {z18\.h - z19\.h}, {z18\.h - z19\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (min_single_z18_z18_z0, svuint16x2_t, svuint16_t, z18,
+ svmin_single_u16_x2 (z18, z0),
+ svmin (z18, z0))
+
+/*
+** min_single_awkward:
+** ...
+** umin ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (min_single_awkward, svuint16x2_t, svuint16_t,
+ z0_res = svmin_single_u16_x2 (z1, z0),
+ z0_res = svmin (z1, z0))
+
+/*
+** min_single_z0_z0_z15:
+** ...
+** umin {z0\.h - z1\.h}, {z0\.h - z1\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (min_single_z0_z0_z15, svuint16x2_t, svuint16_t,
+ z0 = svmin_single_u16_x2 (z0, z15),
+ z0 = svmin (z0, z15))
+
+/*
+** min_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** umin {z24\.h - z25\.h}, {z24\.h - z25\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z16, svuint16x2_t, svuint16_t, z24,
+ svmin_single_u16_x2 (z24, z16),
+ svmin (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u16_x4.c
new file mode 100644
index 0000000..ebe8da1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u16_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** min_z0_z0_z4:
+** umin {z0\.h - z3\.h}, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (min_z0_z0_z4, svuint16x4_t, z0,
+ svmin_u16_x4 (z0, z4),
+ svmin (z0, z4))
+
+/*
+** min_z0_z4_z0:
+** umin {z0\.h - z3\.h}, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (min_z0_z4_z0, svuint16x4_t, z0,
+ svmin_u16_x4 (z4, z0),
+ svmin (z4, z0))
+
+/*
+** min_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, {z28\.h - z31\.h}
+** |
+** umin [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z4_z28, svuint16x4_t, z0,
+ svmin_u16_x4 (z4, z28),
+ svmin (z4, z28))
+
+/*
+** min_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, {z4\.h - z7\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z18_z18_z4, svuint16x4_t, z18,
+ svmin_u16_x4 (z18, z4),
+ svmin (z18, z4))
+
+/*
+** min_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z23_z23_z28, svuint16x4_t, z23,
+ svmin_u16_x4 (z23, z28),
+ svmin (z23, z28))
+
+/*
+** min_z28_z28_z0:
+** umin {z28\.h - z31\.h}, {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (min_z28_z28_z0, svuint16x4_t, z28,
+ svmin_u16_x4 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin {z0\.h - z3\.h}, {z0\.h - z3\.h}, [^\n]+
+** |
+** umin {z0\.h - z3\.h}, {z0\.h - z3\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z0_z18, svuint16x4_t, z0,
+ svmin_u16_x4 (z0, z18),
+ svmin (z0, z18))
+
+/*
+** min_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin {z4\.h - z7\.h}, {z4\.h - z7\.h}, [^\n]+
+** |
+** umin {z4\.h - z7\.h}, {z4\.h - z7\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z4_z4_z23, svuint16x4_t, z4,
+ svmin_u16_x4 (z4, z23),
+ svmin (z4, z23))
+
+/*
+** min_single_z24_z24_z0:
+** umin {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z0, svuint16x4_t, svuint16_t, z24,
+ svmin_single_u16_x4 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** |
+** umin {z28\.h - z31\.h}, {z28\.h - z31\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z28_z0, svuint16x4_t, svuint16_t, z24,
+ svmin_single_u16_x4 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z1_z0, svuint16x4_t, svuint16_t, z24,
+ svmin_single_u16_x4 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z1_z24_z0:
+** umin {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z24_z0, svuint16x4_t, svuint16_t, z1,
+ svmin_single_u16_x4 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z1_z0, svuint16x4_t, svuint16_t, z1,
+ svmin_single_u16_x4 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z18_z18_z0, svuint16x4_t, svuint16_t, z18,
+ svmin_single_u16_x4 (z18, z0),
+ svmin (z18, z0))
+
+/*
+** min_single_awkward:
+** ...
+** umin ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (min_single_awkward, svuint16x4_t, svuint16_t,
+ z0_res = svmin_single_u16_x4 (z1, z0),
+ z0_res = svmin (z1, z0))
+
+/*
+** min_single_z0_z0_z15:
+** ...
+** umin {z0\.h - z3\.h}, {z0\.h - z3\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (min_single_z0_z0_z15, svuint16x4_t, svuint16_t,
+ z0 = svmin_single_u16_x4 (z0, z15),
+ z0 = svmin (z0, z15))
+
+/*
+** min_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** umin {z24\.h - z27\.h}, {z24\.h - z27\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z16, svuint16x4_t, svuint16_t, z24,
+ svmin_single_u16_x4 (z24, z16),
+ svmin (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u32_x2.c
new file mode 100644
index 0000000..5173e22
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u32_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** min_z0_z0_z4:
+** umin {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (min_z0_z0_z4, svuint32x2_t, z0,
+ svmin_u32_x2 (z0, z4),
+ svmin (z0, z4))
+
+/*
+** min_z0_z4_z0:
+** umin {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (min_z0_z4_z0, svuint32x2_t, z0,
+ svmin_u32_x2 (z4, z0),
+ svmin (z4, z0))
+
+/*
+** min_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, {z28\.s - z29\.s}
+** |
+** umin [^\n]+, {z28\.s - z29\.s}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z4_z28, svuint32x2_t, z0,
+ svmin_u32_x2 (z4, z28),
+ svmin (z4, z28))
+
+/*
+** min_z18_z18_z4:
+** umin {z18\.s - z19\.s}, {z18\.s - z19\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (min_z18_z18_z4, svuint32x2_t, z18,
+ svmin_u32_x2 (z18, z4),
+ svmin (z18, z4))
+
+/*
+** min_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, {z18\.s - z19\.s}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z23_z23_z18, svuint32x2_t, z23,
+ svmin_u32_x2 (z23, z18),
+ svmin (z23, z18))
+
+/*
+** min_z28_z28_z0:
+** umin {z28\.s - z29\.s}, {z28\.s - z29\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_XN (min_z28_z28_z0, svuint32x2_t, z28,
+ svmin_u32_x2 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_z0_z0_z18:
+** umin {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z18\.s - z19\.s}
+** ret
+*/
+TEST_XN (min_z0_z0_z18, svuint32x2_t, z0,
+ svmin_u32_x2 (z0, z18),
+ svmin (z0, z18))
+
+/*
+** min_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umin {z4\.s - z5\.s}, {z4\.s - z5\.s}, [^\n]+
+** |
+** umin {z4\.s - z5\.s}, {z4\.s - z5\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z4_z4_z23, svuint32x2_t, z4,
+ svmin_u32_x2 (z4, z23),
+ svmin (z4, z23))
+
+/*
+** min_single_z24_z24_z0:
+** umin {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z0, svuint32x2_t, svuint32_t, z24,
+ svmin_single_u32_x2 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umin {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** |
+** umin {z28\.s - z29\.s}, {z28\.s - z29\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z28_z0, svuint32x2_t, svuint32_t, z24,
+ svmin_single_u32_x2 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** umin {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z1_z0, svuint32x2_t, svuint32_t, z24,
+ svmin_single_u32_x2 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z1_z24_z0:
+** umin {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z24_z0, svuint32x2_t, svuint32_t, z1,
+ svmin_single_u32_x2 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** umin ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z1_z0, svuint32x2_t, svuint32_t, z1,
+ svmin_single_u32_x2 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z18_z18_z0:
+** umin {z18\.s - z19\.s}, {z18\.s - z19\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (min_single_z18_z18_z0, svuint32x2_t, svuint32_t, z18,
+ svmin_single_u32_x2 (z18, z0),
+ svmin (z18, z0))
+
+/*
+** min_single_awkward:
+** ...
+** umin ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (min_single_awkward, svuint32x2_t, svuint32_t,
+ z0_res = svmin_single_u32_x2 (z1, z0),
+ z0_res = svmin (z1, z0))
+
+/*
+** min_single_z0_z0_z15:
+** ...
+** umin {z0\.s - z1\.s}, {z0\.s - z1\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (min_single_z0_z0_z15, svuint32x2_t, svuint32_t,
+ z0 = svmin_single_u32_x2 (z0, z15),
+ z0 = svmin (z0, z15))
+
+/*
+** min_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** umin {z24\.s - z25\.s}, {z24\.s - z25\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z16, svuint32x2_t, svuint32_t, z24,
+ svmin_single_u32_x2 (z24, z16),
+ svmin (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u32_x4.c
new file mode 100644
index 0000000..f2e4079
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u32_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** min_z0_z0_z4:
+** umin {z0\.s - z3\.s}, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (min_z0_z0_z4, svuint32x4_t, z0,
+ svmin_u32_x4 (z0, z4),
+ svmin (z0, z4))
+
+/*
+** min_z0_z4_z0:
+** umin {z0\.s - z3\.s}, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (min_z0_z4_z0, svuint32x4_t, z0,
+ svmin_u32_x4 (z4, z0),
+ svmin (z4, z0))
+
+/*
+** min_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, {z28\.s - z31\.s}
+** |
+** umin [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z4_z28, svuint32x4_t, z0,
+ svmin_u32_x4 (z4, z28),
+ svmin (z4, z28))
+
+/*
+** min_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, {z4\.s - z7\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z18_z18_z4, svuint32x4_t, z18,
+ svmin_u32_x4 (z18, z4),
+ svmin (z18, z4))
+
+/*
+** min_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z23_z23_z28, svuint32x4_t, z23,
+ svmin_u32_x4 (z23, z28),
+ svmin (z23, z28))
+
+/*
+** min_z28_z28_z0:
+** umin {z28\.s - z31\.s}, {z28\.s - z31\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (min_z28_z28_z0, svuint32x4_t, z28,
+ svmin_u32_x4 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin {z0\.s - z3\.s}, {z0\.s - z3\.s}, [^\n]+
+** |
+** umin {z0\.s - z3\.s}, {z0\.s - z3\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z0_z18, svuint32x4_t, z0,
+ svmin_u32_x4 (z0, z18),
+ svmin (z0, z18))
+
+/*
+** min_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin {z4\.s - z7\.s}, {z4\.s - z7\.s}, [^\n]+
+** |
+** umin {z4\.s - z7\.s}, {z4\.s - z7\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z4_z4_z23, svuint32x4_t, z4,
+ svmin_u32_x4 (z4, z23),
+ svmin (z4, z23))
+
+/*
+** min_single_z24_z24_z0:
+** umin {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z0, svuint32x4_t, svuint32_t, z24,
+ svmin_single_u32_x4 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** |
+** umin {z28\.s - z31\.s}, {z28\.s - z31\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z28_z0, svuint32x4_t, svuint32_t, z24,
+ svmin_single_u32_x4 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z1_z0, svuint32x4_t, svuint32_t, z24,
+ svmin_single_u32_x4 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z1_z24_z0:
+** umin {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z24_z0, svuint32x4_t, svuint32_t, z1,
+ svmin_single_u32_x4 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z1_z0, svuint32x4_t, svuint32_t, z1,
+ svmin_single_u32_x4 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z18_z18_z0, svuint32x4_t, svuint32_t, z18,
+ svmin_single_u32_x4 (z18, z0),
+ svmin (z18, z0))
+
+/*
+** min_single_awkward:
+** ...
+** umin ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (min_single_awkward, svuint32x4_t, svuint32_t,
+ z0_res = svmin_single_u32_x4 (z1, z0),
+ z0_res = svmin (z1, z0))
+
+/*
+** min_single_z0_z0_z15:
+** ...
+** umin {z0\.s - z3\.s}, {z0\.s - z3\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (min_single_z0_z0_z15, svuint32x4_t, svuint32_t,
+ z0 = svmin_single_u32_x4 (z0, z15),
+ z0 = svmin (z0, z15))
+
+/*
+** min_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** umin {z24\.s - z27\.s}, {z24\.s - z27\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z16, svuint32x4_t, svuint32_t, z24,
+ svmin_single_u32_x4 (z24, z16),
+ svmin (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u64_x2.c
new file mode 100644
index 0000000..6babb78
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u64_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** min_z0_z0_z4:
+** umin {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (min_z0_z0_z4, svuint64x2_t, z0,
+ svmin_u64_x2 (z0, z4),
+ svmin (z0, z4))
+
+/*
+** min_z0_z4_z0:
+** umin {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (min_z0_z4_z0, svuint64x2_t, z0,
+ svmin_u64_x2 (z4, z0),
+ svmin (z4, z0))
+
+/*
+** min_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, {z28\.d - z29\.d}
+** |
+** umin [^\n]+, {z28\.d - z29\.d}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z4_z28, svuint64x2_t, z0,
+ svmin_u64_x2 (z4, z28),
+ svmin (z4, z28))
+
+/*
+** min_z18_z18_z4:
+** umin {z18\.d - z19\.d}, {z18\.d - z19\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (min_z18_z18_z4, svuint64x2_t, z18,
+ svmin_u64_x2 (z18, z4),
+ svmin (z18, z4))
+
+/*
+** min_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, {z18\.d - z19\.d}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z23_z23_z18, svuint64x2_t, z23,
+ svmin_u64_x2 (z23, z18),
+ svmin (z23, z18))
+
+/*
+** min_z28_z28_z0:
+** umin {z28\.d - z29\.d}, {z28\.d - z29\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_XN (min_z28_z28_z0, svuint64x2_t, z28,
+ svmin_u64_x2 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_z0_z0_z18:
+** umin {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z18\.d - z19\.d}
+** ret
+*/
+TEST_XN (min_z0_z0_z18, svuint64x2_t, z0,
+ svmin_u64_x2 (z0, z18),
+ svmin (z0, z18))
+
+/*
+** min_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umin {z4\.d - z5\.d}, {z4\.d - z5\.d}, [^\n]+
+** |
+** umin {z4\.d - z5\.d}, {z4\.d - z5\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z4_z4_z23, svuint64x2_t, z4,
+ svmin_u64_x2 (z4, z23),
+ svmin (z4, z23))
+
+/*
+** min_single_z24_z24_z0:
+** umin {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z0, svuint64x2_t, svuint64_t, z24,
+ svmin_single_u64_x2 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umin {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** |
+** umin {z28\.d - z29\.d}, {z28\.d - z29\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z28_z0, svuint64x2_t, svuint64_t, z24,
+ svmin_single_u64_x2 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** umin {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z1_z0, svuint64x2_t, svuint64_t, z24,
+ svmin_single_u64_x2 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z1_z24_z0:
+** umin {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z24_z0, svuint64x2_t, svuint64_t, z1,
+ svmin_single_u64_x2 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** umin ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z1_z0, svuint64x2_t, svuint64_t, z1,
+ svmin_single_u64_x2 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z18_z18_z0:
+** umin {z18\.d - z19\.d}, {z18\.d - z19\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (min_single_z18_z18_z0, svuint64x2_t, svuint64_t, z18,
+ svmin_single_u64_x2 (z18, z0),
+ svmin (z18, z0))
+
+/*
+** min_single_awkward:
+** ...
+** umin ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (min_single_awkward, svuint64x2_t, svuint64_t,
+ z0_res = svmin_single_u64_x2 (z1, z0),
+ z0_res = svmin (z1, z0))
+
+/*
+** min_single_z0_z0_z15:
+** ...
+** umin {z0\.d - z1\.d}, {z0\.d - z1\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (min_single_z0_z0_z15, svuint64x2_t, svuint64_t,
+ z0 = svmin_single_u64_x2 (z0, z15),
+ z0 = svmin (z0, z15))
+
+/*
+** min_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** umin {z24\.d - z25\.d}, {z24\.d - z25\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z16, svuint64x2_t, svuint64_t, z24,
+ svmin_single_u64_x2 (z24, z16),
+ svmin (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u64_x4.c
new file mode 100644
index 0000000..b3dba47
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u64_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** min_z0_z0_z4:
+** umin {z0\.d - z3\.d}, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (min_z0_z0_z4, svuint64x4_t, z0,
+ svmin_u64_x4 (z0, z4),
+ svmin (z0, z4))
+
+/*
+** min_z0_z4_z0:
+** umin {z0\.d - z3\.d}, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (min_z0_z4_z0, svuint64x4_t, z0,
+ svmin_u64_x4 (z4, z0),
+ svmin (z4, z0))
+
+/*
+** min_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, {z28\.d - z31\.d}
+** |
+** umin [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z4_z28, svuint64x4_t, z0,
+ svmin_u64_x4 (z4, z28),
+ svmin (z4, z28))
+
+/*
+** min_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, {z4\.d - z7\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z18_z18_z4, svuint64x4_t, z18,
+ svmin_u64_x4 (z18, z4),
+ svmin (z18, z4))
+
+/*
+** min_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z23_z23_z28, svuint64x4_t, z23,
+ svmin_u64_x4 (z23, z28),
+ svmin (z23, z28))
+
+/*
+** min_z28_z28_z0:
+** umin {z28\.d - z31\.d}, {z28\.d - z31\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (min_z28_z28_z0, svuint64x4_t, z28,
+ svmin_u64_x4 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin {z0\.d - z3\.d}, {z0\.d - z3\.d}, [^\n]+
+** |
+** umin {z0\.d - z3\.d}, {z0\.d - z3\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z0_z18, svuint64x4_t, z0,
+ svmin_u64_x4 (z0, z18),
+ svmin (z0, z18))
+
+/*
+** min_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin {z4\.d - z7\.d}, {z4\.d - z7\.d}, [^\n]+
+** |
+** umin {z4\.d - z7\.d}, {z4\.d - z7\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z4_z4_z23, svuint64x4_t, z4,
+ svmin_u64_x4 (z4, z23),
+ svmin (z4, z23))
+
+/*
+** min_single_z24_z24_z0:
+** umin {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z0, svuint64x4_t, svuint64_t, z24,
+ svmin_single_u64_x4 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** |
+** umin {z28\.d - z31\.d}, {z28\.d - z31\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z28_z0, svuint64x4_t, svuint64_t, z24,
+ svmin_single_u64_x4 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z1_z0, svuint64x4_t, svuint64_t, z24,
+ svmin_single_u64_x4 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z1_z24_z0:
+** umin {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z24_z0, svuint64x4_t, svuint64_t, z1,
+ svmin_single_u64_x4 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z1_z0, svuint64x4_t, svuint64_t, z1,
+ svmin_single_u64_x4 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z18_z18_z0, svuint64x4_t, svuint64_t, z18,
+ svmin_single_u64_x4 (z18, z0),
+ svmin (z18, z0))
+
+/*
+** min_single_awkward:
+** ...
+** umin ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (min_single_awkward, svuint64x4_t, svuint64_t,
+ z0_res = svmin_single_u64_x4 (z1, z0),
+ z0_res = svmin (z1, z0))
+
+/*
+** min_single_z0_z0_z15:
+** ...
+** umin {z0\.d - z3\.d}, {z0\.d - z3\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (min_single_z0_z0_z15, svuint64x4_t, svuint64_t,
+ z0 = svmin_single_u64_x4 (z0, z15),
+ z0 = svmin (z0, z15))
+
+/*
+** min_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** umin {z24\.d - z27\.d}, {z24\.d - z27\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z16, svuint64x4_t, svuint64_t, z24,
+ svmin_single_u64_x4 (z24, z16),
+ svmin (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u8_x2.c
new file mode 100644
index 0000000..4b4c723
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u8_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** min_z0_z0_z4:
+** umin {z0\.b - z1\.b}, {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (min_z0_z0_z4, svuint8x2_t, z0,
+ svmin_u8_x2 (z0, z4),
+ svmin (z0, z4))
+
+/*
+** min_z0_z4_z0:
+** umin {z0\.b - z1\.b}, {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (min_z0_z4_z0, svuint8x2_t, z0,
+ svmin_u8_x2 (z4, z0),
+ svmin (z4, z0))
+
+/*
+** min_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, {z28\.b - z29\.b}
+** |
+** umin [^\n]+, {z28\.b - z29\.b}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z4_z28, svuint8x2_t, z0,
+ svmin_u8_x2 (z4, z28),
+ svmin (z4, z28))
+
+/*
+** min_z18_z18_z4:
+** umin {z18\.b - z19\.b}, {z18\.b - z19\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (min_z18_z18_z4, svuint8x2_t, z18,
+ svmin_u8_x2 (z18, z4),
+ svmin (z18, z4))
+
+/*
+** min_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, {z18\.b - z19\.b}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z23_z23_z18, svuint8x2_t, z23,
+ svmin_u8_x2 (z23, z18),
+ svmin (z23, z18))
+
+/*
+** min_z28_z28_z0:
+** umin {z28\.b - z29\.b}, {z28\.b - z29\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_XN (min_z28_z28_z0, svuint8x2_t, z28,
+ svmin_u8_x2 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_z0_z0_z18:
+** umin {z0\.b - z1\.b}, {z0\.b - z1\.b}, {z18\.b - z19\.b}
+** ret
+*/
+TEST_XN (min_z0_z0_z18, svuint8x2_t, z0,
+ svmin_u8_x2 (z0, z18),
+ svmin (z0, z18))
+
+/*
+** min_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umin {z4\.b - z5\.b}, {z4\.b - z5\.b}, [^\n]+
+** |
+** umin {z4\.b - z5\.b}, {z4\.b - z5\.b}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z4_z4_z23, svuint8x2_t, z4,
+ svmin_u8_x2 (z4, z23),
+ svmin (z4, z23))
+
+/*
+** min_single_z24_z24_z0:
+** umin {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z0, svuint8x2_t, svuint8_t, z24,
+ svmin_single_u8_x2 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** umin {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** |
+** umin {z28\.b - z29\.b}, {z28\.b - z29\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z28_z0, svuint8x2_t, svuint8_t, z24,
+ svmin_single_u8_x2 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** umin {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z1_z0, svuint8x2_t, svuint8_t, z24,
+ svmin_single_u8_x2 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z1_z24_z0:
+** umin {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z24_z0, svuint8x2_t, svuint8_t, z1,
+ svmin_single_u8_x2 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** umin ({z[0-9]+\.b - z[0-9]+\.b}), \1, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z1_z0, svuint8x2_t, svuint8_t, z1,
+ svmin_single_u8_x2 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z18_z18_z0:
+** umin {z18\.b - z19\.b}, {z18\.b - z19\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (min_single_z18_z18_z0, svuint8x2_t, svuint8_t, z18,
+ svmin_single_u8_x2 (z18, z0),
+ svmin (z18, z0))
+
+/*
+** min_single_awkward:
+** ...
+** umin ({z[0-9]+\.b - z[0-9]+\.b}), \1, z[0-9]+\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (min_single_awkward, svuint8x2_t, svuint8_t,
+ z0_res = svmin_single_u8_x2 (z1, z0),
+ z0_res = svmin (z1, z0))
+
+/*
+** min_single_z0_z0_z15:
+** ...
+** umin {z0\.b - z1\.b}, {z0\.b - z1\.b}, z15\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (min_single_z0_z0_z15, svuint8x2_t, svuint8_t,
+ z0 = svmin_single_u8_x2 (z0, z15),
+ z0 = svmin (z0, z15))
+
+/*
+** min_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** umin {z24\.b - z25\.b}, {z24\.b - z25\.b}, \1\.b
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z16, svuint8x2_t, svuint8_t, z24,
+ svmin_single_u8_x2 (z24, z16),
+ svmin (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u8_x4.c
new file mode 100644
index 0000000..81243fd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/min_u8_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** min_z0_z0_z4:
+** umin {z0\.b - z3\.b}, {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_XN (min_z0_z0_z4, svuint8x4_t, z0,
+ svmin_u8_x4 (z0, z4),
+ svmin (z0, z4))
+
+/*
+** min_z0_z4_z0:
+** umin {z0\.b - z3\.b}, {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_XN (min_z0_z4_z0, svuint8x4_t, z0,
+ svmin_u8_x4 (z4, z0),
+ svmin (z4, z0))
+
+/*
+** min_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, {z28\.b - z31\.b}
+** |
+** umin [^\n]+, {z28\.b - z31\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z4_z28, svuint8x4_t, z0,
+ svmin_u8_x4 (z4, z28),
+ svmin (z4, z28))
+
+/*
+** min_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, {z4\.b - z7\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z18_z18_z4, svuint8x4_t, z18,
+ svmin_u8_x4 (z18, z4),
+ svmin (z18, z4))
+
+/*
+** min_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, {z28\.b - z31\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (min_z23_z23_z28, svuint8x4_t, z23,
+ svmin_u8_x4 (z23, z28),
+ svmin (z23, z28))
+
+/*
+** min_z28_z28_z0:
+** umin {z28\.b - z31\.b}, {z28\.b - z31\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_XN (min_z28_z28_z0, svuint8x4_t, z28,
+ svmin_u8_x4 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin {z0\.b - z3\.b}, {z0\.b - z3\.b}, [^\n]+
+** |
+** umin {z0\.b - z3\.b}, {z0\.b - z3\.b}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z0_z0_z18, svuint8x4_t, z0,
+ svmin_u8_x4 (z0, z18),
+ svmin (z0, z18))
+
+/*
+** min_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin {z4\.b - z7\.b}, {z4\.b - z7\.b}, [^\n]+
+** |
+** umin {z4\.b - z7\.b}, {z4\.b - z7\.b}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (min_z4_z4_z23, svuint8x4_t, z4,
+ svmin_u8_x4 (z4, z23),
+ svmin (z4, z23))
+
+/*
+** min_single_z24_z24_z0:
+** umin {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z0, svuint8x4_t, svuint8_t, z24,
+ svmin_single_u8_x4 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** |
+** umin {z28\.b - z31\.b}, {z28\.b - z31\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z28_z0, svuint8x4_t, svuint8_t, z24,
+ svmin_single_u8_x4 (z28, z0),
+ svmin (z28, z0))
+
+/*
+** min_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z1_z0, svuint8x4_t, svuint8_t, z24,
+ svmin_single_u8_x4 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z1_z24_z0:
+** umin {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z24_z0, svuint8x4_t, svuint8_t, z1,
+ svmin_single_u8_x4 (z24, z0),
+ svmin (z24, z0))
+
+/*
+** min_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin ({z[0-9]+\.b - z[0-9]+\.b}), \1, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z1_z1_z0, svuint8x4_t, svuint8_t, z1,
+ svmin_single_u8_x4 (z1, z0),
+ svmin (z1, z0))
+
+/*
+** min_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umin [^\n]+, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (min_single_z18_z18_z0, svuint8x4_t, svuint8_t, z18,
+ svmin_single_u8_x4 (z18, z0),
+ svmin (z18, z0))
+
+/*
+** min_single_awkward:
+** ...
+** umin ({z[0-9]+\.b - z[0-9]+\.b}), \1, z[0-9]+\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (min_single_awkward, svuint8x4_t, svuint8_t,
+ z0_res = svmin_single_u8_x4 (z1, z0),
+ z0_res = svmin (z1, z0))
+
+/*
+** min_single_z0_z0_z15:
+** ...
+** umin {z0\.b - z3\.b}, {z0\.b - z3\.b}, z15\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (min_single_z0_z0_z15, svuint8x4_t, svuint8_t,
+ z0 = svmin_single_u8_x4 (z0, z15),
+ z0 = svmin (z0, z15))
+
+/*
+** min_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** umin {z24\.b - z27\.b}, {z24\.b - z27\.b}, \1\.b
+** ret
+*/
+TEST_XN_SINGLE (min_single_z24_z24_z16, svuint8x4_t, svuint8_t, z24,
+ svmin_single_u8_x4 (z24, z16),
+ svmin (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f16_x2.c
new file mode 100644
index 0000000..62c13d2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f16_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** minnm_z0_z0_z4:
+** fminnm {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (minnm_z0_z0_z4, svfloat16x2_t, z0,
+ svminnm_f16_x2 (z0, z4),
+ svminnm (z0, z4))
+
+/*
+** minnm_z0_z4_z0:
+** fminnm {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (minnm_z0_z4_z0, svfloat16x2_t, z0,
+ svminnm_f16_x2 (z4, z0),
+ svminnm (z4, z0))
+
+/*
+** minnm_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fminnm [^\n]+, {z28\.h - z29\.h}
+** |
+** fminnm [^\n]+, {z28\.h - z29\.h}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (minnm_z0_z4_z28, svfloat16x2_t, z0,
+ svminnm_f16_x2 (z4, z28),
+ svminnm (z4, z28))
+
+/*
+** minnm_z18_z18_z4:
+** fminnm {z18\.h - z19\.h}, {z18\.h - z19\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (minnm_z18_z18_z4, svfloat16x2_t, z18,
+ svminnm_f16_x2 (z18, z4),
+ svminnm (z18, z4))
+
+/*
+** minnm_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** fminnm [^\n]+, {z18\.h - z19\.h}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (minnm_z23_z23_z18, svfloat16x2_t, z23,
+ svminnm_f16_x2 (z23, z18),
+ svminnm (z23, z18))
+
+/*
+** minnm_z28_z28_z0:
+** fminnm {z28\.h - z29\.h}, {z28\.h - z29\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_XN (minnm_z28_z28_z0, svfloat16x2_t, z28,
+ svminnm_f16_x2 (z28, z0),
+ svminnm (z28, z0))
+
+/*
+** minnm_z0_z0_z18:
+** fminnm {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_XN (minnm_z0_z0_z18, svfloat16x2_t, z0,
+ svminnm_f16_x2 (z0, z18),
+ svminnm (z0, z18))
+
+/*
+** minnm_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fminnm {z4\.h - z5\.h}, {z4\.h - z5\.h}, [^\n]+
+** |
+** fminnm {z4\.h - z5\.h}, {z4\.h - z5\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (minnm_z4_z4_z23, svfloat16x2_t, z4,
+ svminnm_f16_x2 (z4, z23),
+ svminnm (z4, z23))
+
+/*
+** minnm_single_z24_z24_z0:
+** fminnm {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z24_z0, svfloat16x2_t, svfloat16_t, z24,
+ svminnm_single_f16_x2 (z24, z0),
+ svminnm (z24, z0))
+
+/*
+** minnm_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fminnm {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** |
+** fminnm {z28\.h - z29\.h}, {z28\.h - z29\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z28_z0, svfloat16x2_t, svfloat16_t, z24,
+ svminnm_single_f16_x2 (z28, z0),
+ svminnm (z28, z0))
+
+/*
+** minnm_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** fminnm {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z1_z0, svfloat16x2_t, svfloat16_t, z24,
+ svminnm_single_f16_x2 (z1, z0),
+ svminnm (z1, z0))
+
+/*
+** minnm_single_z1_z24_z0:
+** fminnm {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z1_z24_z0, svfloat16x2_t, svfloat16_t, z1,
+ svminnm_single_f16_x2 (z24, z0),
+ svminnm (z24, z0))
+
+/*
+** minnm_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** fminnm ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z1_z1_z0, svfloat16x2_t, svfloat16_t, z1,
+ svminnm_single_f16_x2 (z1, z0),
+ svminnm (z1, z0))
+
+/*
+** minnm_single_z18_z18_z0:
+** fminnm {z18\.h - z19\.h}, {z18\.h - z19\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z18_z18_z0, svfloat16x2_t, svfloat16_t, z18,
+ svminnm_single_f16_x2 (z18, z0),
+ svminnm (z18, z0))
+
+/*
+** minnm_single_awkward:
+** ...
+** fminnm ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (minnm_single_awkward, svfloat16x2_t, svfloat16_t,
+ z0_res = svminnm_single_f16_x2 (z1, z0),
+ z0_res = svminnm (z1, z0))
+
+/*
+** minnm_single_z0_z0_z15:
+** ...
+** fminnm {z0\.h - z1\.h}, {z0\.h - z1\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (minnm_single_z0_z0_z15, svfloat16x2_t, svfloat16_t,
+ z0 = svminnm_single_f16_x2 (z0, z15),
+ z0 = svminnm (z0, z15))
+
+/*
+** minnm_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fminnm {z24\.h - z25\.h}, {z24\.h - z25\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z24_z16, svfloat16x2_t, svfloat16_t, z24,
+ svminnm_single_f16_x2 (z24, z16),
+ svminnm (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f16_x4.c
new file mode 100644
index 0000000..6afb754
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f16_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** minnm_z0_z0_z4:
+** fminnm {z0\.h - z3\.h}, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (minnm_z0_z0_z4, svfloat16x4_t, z0,
+ svminnm_f16_x4 (z0, z4),
+ svminnm (z0, z4))
+
+/*
+** minnm_z0_z4_z0:
+** fminnm {z0\.h - z3\.h}, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (minnm_z0_z4_z0, svfloat16x4_t, z0,
+ svminnm_f16_x4 (z4, z0),
+ svminnm (z4, z0))
+
+/*
+** minnm_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm [^\n]+, {z28\.h - z31\.h}
+** |
+** fminnm [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (minnm_z0_z4_z28, svfloat16x4_t, z0,
+ svminnm_f16_x4 (z4, z28),
+ svminnm (z4, z28))
+
+/*
+** minnm_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm [^\n]+, {z4\.h - z7\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (minnm_z18_z18_z4, svfloat16x4_t, z18,
+ svminnm_f16_x4 (z18, z4),
+ svminnm (z18, z4))
+
+/*
+** minnm_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (minnm_z23_z23_z28, svfloat16x4_t, z23,
+ svminnm_f16_x4 (z23, z28),
+ svminnm (z23, z28))
+
+/*
+** minnm_z28_z28_z0:
+** fminnm {z28\.h - z31\.h}, {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (minnm_z28_z28_z0, svfloat16x4_t, z28,
+ svminnm_f16_x4 (z28, z0),
+ svminnm (z28, z0))
+
+/*
+** minnm_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm {z0\.h - z3\.h}, {z0\.h - z3\.h}, [^\n]+
+** |
+** fminnm {z0\.h - z3\.h}, {z0\.h - z3\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (minnm_z0_z0_z18, svfloat16x4_t, z0,
+ svminnm_f16_x4 (z0, z18),
+ svminnm (z0, z18))
+
+/*
+** minnm_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm {z4\.h - z7\.h}, {z4\.h - z7\.h}, [^\n]+
+** |
+** fminnm {z4\.h - z7\.h}, {z4\.h - z7\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (minnm_z4_z4_z23, svfloat16x4_t, z4,
+ svminnm_f16_x4 (z4, z23),
+ svminnm (z4, z23))
+
+/*
+** minnm_single_z24_z24_z0:
+** fminnm {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z24_z0, svfloat16x4_t, svfloat16_t, z24,
+ svminnm_single_f16_x4 (z24, z0),
+ svminnm (z24, z0))
+
+/*
+** minnm_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** |
+** fminnm {z28\.h - z31\.h}, {z28\.h - z31\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z28_z0, svfloat16x4_t, svfloat16_t, z24,
+ svminnm_single_f16_x4 (z28, z0),
+ svminnm (z28, z0))
+
+/*
+** minnm_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z1_z0, svfloat16x4_t, svfloat16_t, z24,
+ svminnm_single_f16_x4 (z1, z0),
+ svminnm (z1, z0))
+
+/*
+** minnm_single_z1_z24_z0:
+** fminnm {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z1_z24_z0, svfloat16x4_t, svfloat16_t, z1,
+ svminnm_single_f16_x4 (z24, z0),
+ svminnm (z24, z0))
+
+/*
+** minnm_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z1_z1_z0, svfloat16x4_t, svfloat16_t, z1,
+ svminnm_single_f16_x4 (z1, z0),
+ svminnm (z1, z0))
+
+/*
+** minnm_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm [^\n]+, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z18_z18_z0, svfloat16x4_t, svfloat16_t, z18,
+ svminnm_single_f16_x4 (z18, z0),
+ svminnm (z18, z0))
+
+/*
+** minnm_single_awkward:
+** ...
+** fminnm ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (minnm_single_awkward, svfloat16x4_t, svfloat16_t,
+ z0_res = svminnm_single_f16_x4 (z1, z0),
+ z0_res = svminnm (z1, z0))
+
+/*
+** minnm_single_z0_z0_z15:
+** ...
+** fminnm {z0\.h - z3\.h}, {z0\.h - z3\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (minnm_single_z0_z0_z15, svfloat16x4_t, svfloat16_t,
+ z0 = svminnm_single_f16_x4 (z0, z15),
+ z0 = svminnm (z0, z15))
+
+/*
+** minnm_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fminnm {z24\.h - z27\.h}, {z24\.h - z27\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z24_z16, svfloat16x4_t, svfloat16_t, z24,
+ svminnm_single_f16_x4 (z24, z16),
+ svminnm (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f32_x2.c
new file mode 100644
index 0000000..19149fe
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f32_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** minnm_z0_z0_z4:
+** fminnm {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (minnm_z0_z0_z4, svfloat32x2_t, z0,
+ svminnm_f32_x2 (z0, z4),
+ svminnm (z0, z4))
+
+/*
+** minnm_z0_z4_z0:
+** fminnm {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (minnm_z0_z4_z0, svfloat32x2_t, z0,
+ svminnm_f32_x2 (z4, z0),
+ svminnm (z4, z0))
+
+/*
+** minnm_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fminnm [^\n]+, {z28\.s - z29\.s}
+** |
+** fminnm [^\n]+, {z28\.s - z29\.s}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (minnm_z0_z4_z28, svfloat32x2_t, z0,
+ svminnm_f32_x2 (z4, z28),
+ svminnm (z4, z28))
+
+/*
+** minnm_z18_z18_z4:
+** fminnm {z18\.s - z19\.s}, {z18\.s - z19\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (minnm_z18_z18_z4, svfloat32x2_t, z18,
+ svminnm_f32_x2 (z18, z4),
+ svminnm (z18, z4))
+
+/*
+** minnm_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** fminnm [^\n]+, {z18\.s - z19\.s}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (minnm_z23_z23_z18, svfloat32x2_t, z23,
+ svminnm_f32_x2 (z23, z18),
+ svminnm (z23, z18))
+
+/*
+** minnm_z28_z28_z0:
+** fminnm {z28\.s - z29\.s}, {z28\.s - z29\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_XN (minnm_z28_z28_z0, svfloat32x2_t, z28,
+ svminnm_f32_x2 (z28, z0),
+ svminnm (z28, z0))
+
+/*
+** minnm_z0_z0_z18:
+** fminnm {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z18\.s - z19\.s}
+** ret
+*/
+TEST_XN (minnm_z0_z0_z18, svfloat32x2_t, z0,
+ svminnm_f32_x2 (z0, z18),
+ svminnm (z0, z18))
+
+/*
+** minnm_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fminnm {z4\.s - z5\.s}, {z4\.s - z5\.s}, [^\n]+
+** |
+** fminnm {z4\.s - z5\.s}, {z4\.s - z5\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (minnm_z4_z4_z23, svfloat32x2_t, z4,
+ svminnm_f32_x2 (z4, z23),
+ svminnm (z4, z23))
+
+/*
+** minnm_single_z24_z24_z0:
+** fminnm {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z24_z0, svfloat32x2_t, svfloat32_t, z24,
+ svminnm_single_f32_x2 (z24, z0),
+ svminnm (z24, z0))
+
+/*
+** minnm_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fminnm {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** |
+** fminnm {z28\.s - z29\.s}, {z28\.s - z29\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z28_z0, svfloat32x2_t, svfloat32_t, z24,
+ svminnm_single_f32_x2 (z28, z0),
+ svminnm (z28, z0))
+
+/*
+** minnm_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** fminnm {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z1_z0, svfloat32x2_t, svfloat32_t, z24,
+ svminnm_single_f32_x2 (z1, z0),
+ svminnm (z1, z0))
+
+/*
+** minnm_single_z1_z24_z0:
+** fminnm {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z1_z24_z0, svfloat32x2_t, svfloat32_t, z1,
+ svminnm_single_f32_x2 (z24, z0),
+ svminnm (z24, z0))
+
+/*
+** minnm_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** fminnm ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z1_z1_z0, svfloat32x2_t, svfloat32_t, z1,
+ svminnm_single_f32_x2 (z1, z0),
+ svminnm (z1, z0))
+
+/*
+** minnm_single_z18_z18_z0:
+** fminnm {z18\.s - z19\.s}, {z18\.s - z19\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z18_z18_z0, svfloat32x2_t, svfloat32_t, z18,
+ svminnm_single_f32_x2 (z18, z0),
+ svminnm (z18, z0))
+
+/*
+** minnm_single_awkward:
+** ...
+** fminnm ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (minnm_single_awkward, svfloat32x2_t, svfloat32_t,
+ z0_res = svminnm_single_f32_x2 (z1, z0),
+ z0_res = svminnm (z1, z0))
+
+/*
+** minnm_single_z0_z0_z15:
+** ...
+** fminnm {z0\.s - z1\.s}, {z0\.s - z1\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (minnm_single_z0_z0_z15, svfloat32x2_t, svfloat32_t,
+ z0 = svminnm_single_f32_x2 (z0, z15),
+ z0 = svminnm (z0, z15))
+
+/*
+** minnm_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fminnm {z24\.s - z25\.s}, {z24\.s - z25\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z24_z16, svfloat32x2_t, svfloat32_t, z24,
+ svminnm_single_f32_x2 (z24, z16),
+ svminnm (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f32_x4.c
new file mode 100644
index 0000000..aa1079a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f32_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** minnm_z0_z0_z4:
+** fminnm {z0\.s - z3\.s}, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (minnm_z0_z0_z4, svfloat32x4_t, z0,
+ svminnm_f32_x4 (z0, z4),
+ svminnm (z0, z4))
+
+/*
+** minnm_z0_z4_z0:
+** fminnm {z0\.s - z3\.s}, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (minnm_z0_z4_z0, svfloat32x4_t, z0,
+ svminnm_f32_x4 (z4, z0),
+ svminnm (z4, z0))
+
+/*
+** minnm_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm [^\n]+, {z28\.s - z31\.s}
+** |
+** fminnm [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (minnm_z0_z4_z28, svfloat32x4_t, z0,
+ svminnm_f32_x4 (z4, z28),
+ svminnm (z4, z28))
+
+/*
+** minnm_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm [^\n]+, {z4\.s - z7\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (minnm_z18_z18_z4, svfloat32x4_t, z18,
+ svminnm_f32_x4 (z18, z4),
+ svminnm (z18, z4))
+
+/*
+** minnm_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (minnm_z23_z23_z28, svfloat32x4_t, z23,
+ svminnm_f32_x4 (z23, z28),
+ svminnm (z23, z28))
+
+/*
+** minnm_z28_z28_z0:
+** fminnm {z28\.s - z31\.s}, {z28\.s - z31\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (minnm_z28_z28_z0, svfloat32x4_t, z28,
+ svminnm_f32_x4 (z28, z0),
+ svminnm (z28, z0))
+
+/*
+** minnm_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm {z0\.s - z3\.s}, {z0\.s - z3\.s}, [^\n]+
+** |
+** fminnm {z0\.s - z3\.s}, {z0\.s - z3\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (minnm_z0_z0_z18, svfloat32x4_t, z0,
+ svminnm_f32_x4 (z0, z18),
+ svminnm (z0, z18))
+
+/*
+** minnm_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm {z4\.s - z7\.s}, {z4\.s - z7\.s}, [^\n]+
+** |
+** fminnm {z4\.s - z7\.s}, {z4\.s - z7\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (minnm_z4_z4_z23, svfloat32x4_t, z4,
+ svminnm_f32_x4 (z4, z23),
+ svminnm (z4, z23))
+
+/*
+** minnm_single_z24_z24_z0:
+** fminnm {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z24_z0, svfloat32x4_t, svfloat32_t, z24,
+ svminnm_single_f32_x4 (z24, z0),
+ svminnm (z24, z0))
+
+/*
+** minnm_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** |
+** fminnm {z28\.s - z31\.s}, {z28\.s - z31\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z28_z0, svfloat32x4_t, svfloat32_t, z24,
+ svminnm_single_f32_x4 (z28, z0),
+ svminnm (z28, z0))
+
+/*
+** minnm_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z1_z0, svfloat32x4_t, svfloat32_t, z24,
+ svminnm_single_f32_x4 (z1, z0),
+ svminnm (z1, z0))
+
+/*
+** minnm_single_z1_z24_z0:
+** fminnm {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z1_z24_z0, svfloat32x4_t, svfloat32_t, z1,
+ svminnm_single_f32_x4 (z24, z0),
+ svminnm (z24, z0))
+
+/*
+** minnm_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z1_z1_z0, svfloat32x4_t, svfloat32_t, z1,
+ svminnm_single_f32_x4 (z1, z0),
+ svminnm (z1, z0))
+
+/*
+** minnm_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm [^\n]+, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z18_z18_z0, svfloat32x4_t, svfloat32_t, z18,
+ svminnm_single_f32_x4 (z18, z0),
+ svminnm (z18, z0))
+
+/*
+** minnm_single_awkward:
+** ...
+** fminnm ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (minnm_single_awkward, svfloat32x4_t, svfloat32_t,
+ z0_res = svminnm_single_f32_x4 (z1, z0),
+ z0_res = svminnm (z1, z0))
+
+/*
+** minnm_single_z0_z0_z15:
+** ...
+** fminnm {z0\.s - z3\.s}, {z0\.s - z3\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (minnm_single_z0_z0_z15, svfloat32x4_t, svfloat32_t,
+ z0 = svminnm_single_f32_x4 (z0, z15),
+ z0 = svminnm (z0, z15))
+
+/*
+** minnm_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fminnm {z24\.s - z27\.s}, {z24\.s - z27\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z24_z16, svfloat32x4_t, svfloat32_t, z24,
+ svminnm_single_f32_x4 (z24, z16),
+ svminnm (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f64_x2.c
new file mode 100644
index 0000000..b2fe2f0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f64_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** minnm_z0_z0_z4:
+** fminnm {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (minnm_z0_z0_z4, svfloat64x2_t, z0,
+ svminnm_f64_x2 (z0, z4),
+ svminnm (z0, z4))
+
+/*
+** minnm_z0_z4_z0:
+** fminnm {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (minnm_z0_z4_z0, svfloat64x2_t, z0,
+ svminnm_f64_x2 (z4, z0),
+ svminnm (z4, z0))
+
+/*
+** minnm_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fminnm [^\n]+, {z28\.d - z29\.d}
+** |
+** fminnm [^\n]+, {z28\.d - z29\.d}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (minnm_z0_z4_z28, svfloat64x2_t, z0,
+ svminnm_f64_x2 (z4, z28),
+ svminnm (z4, z28))
+
+/*
+** minnm_z18_z18_z4:
+** fminnm {z18\.d - z19\.d}, {z18\.d - z19\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (minnm_z18_z18_z4, svfloat64x2_t, z18,
+ svminnm_f64_x2 (z18, z4),
+ svminnm (z18, z4))
+
+/*
+** minnm_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** fminnm [^\n]+, {z18\.d - z19\.d}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (minnm_z23_z23_z18, svfloat64x2_t, z23,
+ svminnm_f64_x2 (z23, z18),
+ svminnm (z23, z18))
+
+/*
+** minnm_z28_z28_z0:
+** fminnm {z28\.d - z29\.d}, {z28\.d - z29\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_XN (minnm_z28_z28_z0, svfloat64x2_t, z28,
+ svminnm_f64_x2 (z28, z0),
+ svminnm (z28, z0))
+
+/*
+** minnm_z0_z0_z18:
+** fminnm {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z18\.d - z19\.d}
+** ret
+*/
+TEST_XN (minnm_z0_z0_z18, svfloat64x2_t, z0,
+ svminnm_f64_x2 (z0, z18),
+ svminnm (z0, z18))
+
+/*
+** minnm_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fminnm {z4\.d - z5\.d}, {z4\.d - z5\.d}, [^\n]+
+** |
+** fminnm {z4\.d - z5\.d}, {z4\.d - z5\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (minnm_z4_z4_z23, svfloat64x2_t, z4,
+ svminnm_f64_x2 (z4, z23),
+ svminnm (z4, z23))
+
+/*
+** minnm_single_z24_z24_z0:
+** fminnm {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z24_z0, svfloat64x2_t, svfloat64_t, z24,
+ svminnm_single_f64_x2 (z24, z0),
+ svminnm (z24, z0))
+
+/*
+** minnm_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** fminnm {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** |
+** fminnm {z28\.d - z29\.d}, {z28\.d - z29\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z28_z0, svfloat64x2_t, svfloat64_t, z24,
+ svminnm_single_f64_x2 (z28, z0),
+ svminnm (z28, z0))
+
+/*
+** minnm_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** fminnm {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z1_z0, svfloat64x2_t, svfloat64_t, z24,
+ svminnm_single_f64_x2 (z1, z0),
+ svminnm (z1, z0))
+
+/*
+** minnm_single_z1_z24_z0:
+** fminnm {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z1_z24_z0, svfloat64x2_t, svfloat64_t, z1,
+ svminnm_single_f64_x2 (z24, z0),
+ svminnm (z24, z0))
+
+/*
+** minnm_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** fminnm ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z1_z1_z0, svfloat64x2_t, svfloat64_t, z1,
+ svminnm_single_f64_x2 (z1, z0),
+ svminnm (z1, z0))
+
+/*
+** minnm_single_z18_z18_z0:
+** fminnm {z18\.d - z19\.d}, {z18\.d - z19\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z18_z18_z0, svfloat64x2_t, svfloat64_t, z18,
+ svminnm_single_f64_x2 (z18, z0),
+ svminnm (z18, z0))
+
+/*
+** minnm_single_awkward:
+** ...
+** fminnm ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (minnm_single_awkward, svfloat64x2_t, svfloat64_t,
+ z0_res = svminnm_single_f64_x2 (z1, z0),
+ z0_res = svminnm (z1, z0))
+
+/*
+** minnm_single_z0_z0_z15:
+** ...
+** fminnm {z0\.d - z1\.d}, {z0\.d - z1\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (minnm_single_z0_z0_z15, svfloat64x2_t, svfloat64_t,
+ z0 = svminnm_single_f64_x2 (z0, z15),
+ z0 = svminnm (z0, z15))
+
+/*
+** minnm_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fminnm {z24\.d - z25\.d}, {z24\.d - z25\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z24_z16, svfloat64x2_t, svfloat64_t, z24,
+ svminnm_single_f64_x2 (z24, z16),
+ svminnm (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f64_x4.c
new file mode 100644
index 0000000..22e659c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/minnm_f64_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** minnm_z0_z0_z4:
+** fminnm {z0\.d - z3\.d}, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (minnm_z0_z0_z4, svfloat64x4_t, z0,
+ svminnm_f64_x4 (z0, z4),
+ svminnm (z0, z4))
+
+/*
+** minnm_z0_z4_z0:
+** fminnm {z0\.d - z3\.d}, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (minnm_z0_z4_z0, svfloat64x4_t, z0,
+ svminnm_f64_x4 (z4, z0),
+ svminnm (z4, z0))
+
+/*
+** minnm_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm [^\n]+, {z28\.d - z31\.d}
+** |
+** fminnm [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (minnm_z0_z4_z28, svfloat64x4_t, z0,
+ svminnm_f64_x4 (z4, z28),
+ svminnm (z4, z28))
+
+/*
+** minnm_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm [^\n]+, {z4\.d - z7\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (minnm_z18_z18_z4, svfloat64x4_t, z18,
+ svminnm_f64_x4 (z18, z4),
+ svminnm (z18, z4))
+
+/*
+** minnm_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (minnm_z23_z23_z28, svfloat64x4_t, z23,
+ svminnm_f64_x4 (z23, z28),
+ svminnm (z23, z28))
+
+/*
+** minnm_z28_z28_z0:
+** fminnm {z28\.d - z31\.d}, {z28\.d - z31\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (minnm_z28_z28_z0, svfloat64x4_t, z28,
+ svminnm_f64_x4 (z28, z0),
+ svminnm (z28, z0))
+
+/*
+** minnm_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm {z0\.d - z3\.d}, {z0\.d - z3\.d}, [^\n]+
+** |
+** fminnm {z0\.d - z3\.d}, {z0\.d - z3\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (minnm_z0_z0_z18, svfloat64x4_t, z0,
+ svminnm_f64_x4 (z0, z18),
+ svminnm (z0, z18))
+
+/*
+** minnm_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm {z4\.d - z7\.d}, {z4\.d - z7\.d}, [^\n]+
+** |
+** fminnm {z4\.d - z7\.d}, {z4\.d - z7\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (minnm_z4_z4_z23, svfloat64x4_t, z4,
+ svminnm_f64_x4 (z4, z23),
+ svminnm (z4, z23))
+
+/*
+** minnm_single_z24_z24_z0:
+** fminnm {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z24_z0, svfloat64x4_t, svfloat64_t, z24,
+ svminnm_single_f64_x4 (z24, z0),
+ svminnm (z24, z0))
+
+/*
+** minnm_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** |
+** fminnm {z28\.d - z31\.d}, {z28\.d - z31\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z28_z0, svfloat64x4_t, svfloat64_t, z24,
+ svminnm_single_f64_x4 (z28, z0),
+ svminnm (z28, z0))
+
+/*
+** minnm_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z1_z0, svfloat64x4_t, svfloat64_t, z24,
+ svminnm_single_f64_x4 (z1, z0),
+ svminnm (z1, z0))
+
+/*
+** minnm_single_z1_z24_z0:
+** fminnm {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z1_z24_z0, svfloat64x4_t, svfloat64_t, z1,
+ svminnm_single_f64_x4 (z24, z0),
+ svminnm (z24, z0))
+
+/*
+** minnm_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z1_z1_z0, svfloat64x4_t, svfloat64_t, z1,
+ svminnm_single_f64_x4 (z1, z0),
+ svminnm (z1, z0))
+
+/*
+** minnm_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fminnm [^\n]+, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z18_z18_z0, svfloat64x4_t, svfloat64_t, z18,
+ svminnm_single_f64_x4 (z18, z0),
+ svminnm (z18, z0))
+
+/*
+** minnm_single_awkward:
+** ...
+** fminnm ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (minnm_single_awkward, svfloat64x4_t, svfloat64_t,
+ z0_res = svminnm_single_f64_x4 (z1, z0),
+ z0_res = svminnm (z1, z0))
+
+/*
+** minnm_single_z0_z0_z15:
+** ...
+** fminnm {z0\.d - z3\.d}, {z0\.d - z3\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (minnm_single_z0_z0_z15, svfloat64x4_t, svfloat64_t,
+ z0 = svminnm_single_f64_x4 (z0, z15),
+ z0 = svminnm (z0, z15))
+
+/*
+** minnm_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** fminnm {z24\.d - z27\.d}, {z24\.d - z27\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (minnm_single_z24_z24_z16, svfloat64x4_t, svfloat64_t, z24,
+ svminnm_single_f64_x4 (z24, z16),
+ svminnm (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_bf16_vg2x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_bf16_vg2x1.c
new file mode 100644
index 0000000..c61f638
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_bf16_vg2x1.c
@@ -0,0 +1,148 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z0_0:
+** mov (w8|w9|w10|w11), #?0
+** bfmlal za\.s\[\1, 0:1\], z0\.h, z0\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_0_z0_z0_0, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x1 (0, z0, z0, 0),
+ svmla_lane_za32_vg2x1 (0, z0, z0, 0))
+
+/*
+** mla_lane_w0_z0_z3_1:
+** mov (w8|w9|w10|w11), w0
+** bfmlal za\.s\[\1, 0:1\], z0\.h, z3\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w0_z0_z3_1, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x1 (w0, z0, z3, 1),
+ svmla_lane_za32_vg2x1 (w0, z0, z3, 1))
+
+/*
+** mla_lane_w7_z0_z3_2:
+** mov (w8|w9|w10|w11), w7
+** bfmlal za\.s\[\1, 0:1\], z0\.h, z3\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w7_z0_z3_2, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x1 (w7, z0, z3, 2),
+ svmla_lane_za32_vg2x1 (w7, z0, z3, 2))
+
+/*
+** mla_lane_w8_z7_z3_3:
+** bfmlal za\.s\[w8, 0:1\], z7\.h, z3\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8_z7_z3_3, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x1 (w8, z7, z3, 3),
+ svmla_lane_za32_vg2x1 (w8, z7, z3, 3))
+
+/*
+** mla_lane_w8_z31_z16_4:
+** mov (z[0-7])\.d, z16\.d
+** bfmlal za\.s\[w8, 0:1\], z31\.h. \1\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8_z31_z16_4, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x1 (w8, z31, z16, 4),
+ svmla_lane_za32_vg2x1 (w8, z31, z16, 4))
+
+/*
+** mla_lane_w8p1_z0_z0_5:
+** add (w8|w9|w10|w11), w8, #?1
+** bfmlal za\.s\[\1, 0:1\], z0\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p1_z0_z0_5, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x1 (w8 + 1, z0, z0, 5),
+ svmla_lane_za32_vg2x1 (w8 + 1, z0, z0, 5))
+
+/*
+** mla_lane_w8p2_z23_z0_6:
+** bfmlal za\.s\[w8, 2:3\], z23\.h, z0\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p2_z23_z0_6, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x1 (w8 + 2, z23, z0, 6),
+ svmla_lane_za32_vg2x1 (w8 + 2, z23, z0, 6))
+
+/*
+** mla_lane_w11p6_z23_z0_7:
+** bfmlal za\.s\[w11, 6:7\], z23\.h, z0\.h\[7\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w11p6_z23_z0_7, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x1 (w11 + 6, z23, z0, 7),
+ svmla_lane_za32_vg2x1 (w11 + 6, z23, z0, 7))
+
+/*
+** mla_lane_w8p7_z7_z7_0:
+** add (w8|w9|w10|w11), w8, #?7
+** bfmlal za\.s\[\1, 0:1\], z7\.h, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p7_z7_z7_0, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x1 (w8 + 7, z7, z7, 0),
+ svmla_lane_za32_vg2x1 (w8 + 7, z7, z7, 0))
+
+/*
+** mla_lane_w11p10_z23_z0_1:
+** bfmlal za\.s\[w11, 10:11\], z23\.h, z0\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w11p10_z23_z0_1, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x1 (w11 + 10, z23, z0, 1),
+ svmla_lane_za32_vg2x1 (w11 + 10, z23, z0, 1))
+
+/*
+** mla_lane_w8p14_z23_z0_2:
+** bfmlal za\.s\[w8, 14:15\], z23\.h, z0\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p14_z23_z0_2, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x1 (w8 + 14, z23, z0, 2),
+ svmla_lane_za32_vg2x1 (w8 + 14, z23, z0, 2))
+
+/*
+** mla_lane_w8p15_z7_z7_3:
+** add (w8|w9|w10|w11), w8, #?15
+** bfmlal za\.s\[\1, 0:1\], z7\.h, z7\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p15_z7_z7_3, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x1 (w8 + 15, z7, z7, 3),
+ svmla_lane_za32_vg2x1 (w8 + 15, z7, z7, 3))
+
+/*
+** mla_lane_w8p16_z7_z7_4:
+** add (w8|w9|w10|w11), w8, #?16
+** bfmlal za\.s\[\1, 0:1\], z7\.h, z7\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p16_z7_z7_4, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x1 (w8 + 16, z7, z7, 4),
+ svmla_lane_za32_vg2x1 (w8 + 16, z7, z7, 4))
+
+/*
+** mla_lane_w8m1_z16_z0_5:
+** sub (w8|w9|w10|w11), w8, #?1
+** bfmlal za\.s\[\1, 0:1\], z16\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8m1_z16_z0_5, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x1 (w8 - 1, z16, z0, 5),
+ svmla_lane_za32_vg2x1 (w8 - 1, z16, z0, 5))
+
+/*
+** mla_lane_w12_z0_z3_6:
+** mov (w8|w9|w10|w11), w12
+** bfmlal za\.s\[\1, 0:1\], z0\.h, z3\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w12_z0_z3_6, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x1 (w12, z0, z3, 6),
+ svmla_lane_za32_vg2x1 (w12, z0, z3, 6))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_bf16_vg2x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_bf16_vg2x2.c
new file mode 100644
index 0000000..357ef6c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_bf16_vg2x2.c
@@ -0,0 +1,112 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** bfmlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_0_z0_z4_0, svbfloat16x2_t, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x2 (0, z0, z4, 0),
+ svmla_lane_za32_vg2x2 (0, z0, z4, 0))
+
+/*
+** mla_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** bfmlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0_z0_z7_1, svbfloat16x2_t, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x2 (w0, z0, z7, 1),
+ svmla_lane_za32_vg2x2 (w0, z0, z7, 1))
+
+/*
+** mla_lane_w8_z28_z4_2:
+** bfmlal za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z4_2, svbfloat16x2_t, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x2 (w8, z28, z4, 2),
+ svmla_lane_za32_vg2x2 (w8, z28, z4, 2))
+
+/*
+** mla_lane_w8p6_z0_z4_7:
+** bfmlal za\.s\[w8, 6:7, vgx2\], {z0\.h - z1\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p6_z0_z4_7, svbfloat16x2_t, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x2 (w8 + 6, z0, z4, 7),
+ svmla_lane_za32_vg2x2 (w8 + 6, z0, z4, 7))
+
+/*
+** mla_lane_w8p7_z0_z4_3:
+** add (w8|w9|w10|w11), w8, #?7
+** bfmlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p7_z0_z4_3, svbfloat16x2_t, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x2 (w8 + 7, z0, z4, 3),
+ svmla_lane_za32_vg2x2 (w8 + 7, z0, z4, 3))
+
+/*
+** mla_lane_w8p8_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?8
+** bfmlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p8_z0_z4_4, svbfloat16x2_t, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x2 (w8 + 8, z0, z4, 4),
+ svmla_lane_za32_vg2x2 (w8 + 8, z0, z4, 4))
+
+/*
+** mla_lane_w0m1_z0_z4_5:
+** sub (w8|w9|w10|w11), w0, #?1
+** bfmlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0m1_z0_z4_5, svbfloat16x2_t, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x2 (w0 - 1, z0, z4, 5),
+ svmla_lane_za32_vg2x2 (w0 - 1, z0, z4, 5))
+
+/*
+** mla_lane_w8_z4_z15_6:
+** str d15, \[sp, #?-16\]!
+** bfmlal za\.s\[w8, 0:1, vgx2\], {z4\.h - z5\.h}, z15\.h\[6\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mla_lane_w8_z4_z15_6, svbfloat16x2_t, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x2 (w8, z4, z15, 6),
+ svmla_lane_za32_vg2x2 (w8, z4, z15, 6))
+
+/*
+** mla_lane_w8_z28_z16_7:
+** mov (z[0-7]).d, z16.d
+** bfmlal za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, \1\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z16_7, svbfloat16x2_t, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x2 (w8, z28, z16, 7),
+ svmla_lane_za32_vg2x2 (w8, z28, z16, 7))
+
+/*
+** mla_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** bfmlal za\.s\[w8, 0:1, vgx2\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z17_z7_0, svbfloat16x2_t, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x2 (w8, z17, z7, 0),
+ svmla_lane_za32_vg2x2 (w8, z17, z7, 0))
+
+/*
+** mla_lane_w8_z22_z4_1:
+** bfmlal za\.s\[w8, 0:1, vgx2\], {z22\.h - z23\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z22_z4_1, svbfloat16x2_t, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x2 (w8, z22, z4, 1),
+ svmla_lane_za32_vg2x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_bf16_vg2x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_bf16_vg2x4.c
new file mode 100644
index 0000000..9a06ff4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_bf16_vg2x4.c
@@ -0,0 +1,118 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** bfmlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_0_z0_z4_0, svbfloat16x4_t, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x4 (0, z0, z4, 0),
+ svmla_lane_za32_vg2x4 (0, z0, z4, 0))
+
+/*
+** mla_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** bfmlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0_z0_z7_1, svbfloat16x4_t, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x4 (w0, z0, z7, 1),
+ svmla_lane_za32_vg2x4 (w0, z0, z7, 1))
+
+/*
+** mla_lane_w8_z28_z4_2:
+** bfmlal za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z4_2, svbfloat16x4_t, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x4 (w8, z28, z4, 2),
+ svmla_lane_za32_vg2x4 (w8, z28, z4, 2))
+
+/*
+** mla_lane_w8p6_z0_z4_7:
+** bfmlal za\.s\[w8, 6:7, vgx4\], {z0\.h - z3\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p6_z0_z4_7, svbfloat16x4_t, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x4 (w8 + 6, z0, z4, 7),
+ svmla_lane_za32_vg2x4 (w8 + 6, z0, z4, 7))
+
+/*
+** mla_lane_w8p7_z0_z4_3:
+** add (w8|w9|w10|w11), w8, #?7
+** bfmlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p7_z0_z4_3, svbfloat16x4_t, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x4 (w8 + 7, z0, z4, 3),
+ svmla_lane_za32_vg2x4 (w8 + 7, z0, z4, 3))
+
+/*
+** mla_lane_w8p8_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?8
+** bfmlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p8_z0_z4_4, svbfloat16x4_t, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x4 (w8 + 8, z0, z4, 4),
+ svmla_lane_za32_vg2x4 (w8 + 8, z0, z4, 4))
+
+/*
+** mla_lane_w0m1_z0_z4_5:
+** sub (w8|w9|w10|w11), w0, #?1
+** bfmlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0m1_z0_z4_5, svbfloat16x4_t, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x4 (w0 - 1, z0, z4, 5),
+ svmla_lane_za32_vg2x4 (w0 - 1, z0, z4, 5))
+
+/*
+** mla_lane_w8_z4_z15_6:
+** str d15, \[sp, #?-16\]!
+** bfmlal za\.s\[w8, 0:1, vgx4\], {z4\.h - z7\.h}, z15\.h\[6\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mla_lane_w8_z4_z15_6, svbfloat16x4_t, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x4 (w8, z4, z15, 6),
+ svmla_lane_za32_vg2x4 (w8, z4, z15, 6))
+
+/*
+** mla_lane_w8_z28_z16_7:
+** mov (z[0-7]).d, z16.d
+** bfmlal za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, \1\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z16_7, svbfloat16x4_t, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x4 (w8, z28, z16, 7),
+ svmla_lane_za32_vg2x4 (w8, z28, z16, 7))
+
+/*
+** mla_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** bfmlal za\.s\[w8, 0:1, vgx4\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z17_z7_0, svbfloat16x4_t, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x4 (w8, z17, z7, 0),
+ svmla_lane_za32_vg2x4 (w8, z17, z7, 0))
+
+/*
+** mla_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** bfmlal za\.s\[w8, 0:1, vgx4\], [^\n]+, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z22_z4_1, svbfloat16x4_t, svbfloat16_t,
+ svmla_lane_za32_bf16_vg2x4 (w8, z22, z4, 1),
+ svmla_lane_za32_vg2x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f16_vg2x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f16_vg2x1.c
new file mode 100644
index 0000000..8c2214b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f16_vg2x1.c
@@ -0,0 +1,148 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z0_0:
+** mov (w8|w9|w10|w11), #?0
+** fmlal za\.s\[\1, 0:1\], z0\.h, z0\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_0_z0_z0_0, svfloat16_t,
+ svmla_lane_za32_f16_vg2x1 (0, z0, z0, 0),
+ svmla_lane_za32_vg2x1 (0, z0, z0, 0))
+
+/*
+** mla_lane_w0_z0_z3_1:
+** mov (w8|w9|w10|w11), w0
+** fmlal za\.s\[\1, 0:1\], z0\.h, z3\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w0_z0_z3_1, svfloat16_t,
+ svmla_lane_za32_f16_vg2x1 (w0, z0, z3, 1),
+ svmla_lane_za32_vg2x1 (w0, z0, z3, 1))
+
+/*
+** mla_lane_w7_z0_z3_2:
+** mov (w8|w9|w10|w11), w7
+** fmlal za\.s\[\1, 0:1\], z0\.h, z3\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w7_z0_z3_2, svfloat16_t,
+ svmla_lane_za32_f16_vg2x1 (w7, z0, z3, 2),
+ svmla_lane_za32_vg2x1 (w7, z0, z3, 2))
+
+/*
+** mla_lane_w8_z7_z3_3:
+** fmlal za\.s\[w8, 0:1\], z7\.h, z3\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8_z7_z3_3, svfloat16_t,
+ svmla_lane_za32_f16_vg2x1 (w8, z7, z3, 3),
+ svmla_lane_za32_vg2x1 (w8, z7, z3, 3))
+
+/*
+** mla_lane_w8_z31_z16_4:
+** mov (z[0-7])\.d, z16\.d
+** fmlal za\.s\[w8, 0:1\], z31\.h. \1\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8_z31_z16_4, svfloat16_t,
+ svmla_lane_za32_f16_vg2x1 (w8, z31, z16, 4),
+ svmla_lane_za32_vg2x1 (w8, z31, z16, 4))
+
+/*
+** mla_lane_w8p1_z0_z0_5:
+** add (w8|w9|w10|w11), w8, #?1
+** fmlal za\.s\[\1, 0:1\], z0\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p1_z0_z0_5, svfloat16_t,
+ svmla_lane_za32_f16_vg2x1 (w8 + 1, z0, z0, 5),
+ svmla_lane_za32_vg2x1 (w8 + 1, z0, z0, 5))
+
+/*
+** mla_lane_w8p2_z23_z0_6:
+** fmlal za\.s\[w8, 2:3\], z23\.h, z0\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p2_z23_z0_6, svfloat16_t,
+ svmla_lane_za32_f16_vg2x1 (w8 + 2, z23, z0, 6),
+ svmla_lane_za32_vg2x1 (w8 + 2, z23, z0, 6))
+
+/*
+** mla_lane_w11p6_z23_z0_7:
+** fmlal za\.s\[w11, 6:7\], z23\.h, z0\.h\[7\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w11p6_z23_z0_7, svfloat16_t,
+ svmla_lane_za32_f16_vg2x1 (w11 + 6, z23, z0, 7),
+ svmla_lane_za32_vg2x1 (w11 + 6, z23, z0, 7))
+
+/*
+** mla_lane_w8p7_z7_z7_0:
+** add (w8|w9|w10|w11), w8, #?7
+** fmlal za\.s\[\1, 0:1\], z7\.h, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p7_z7_z7_0, svfloat16_t,
+ svmla_lane_za32_f16_vg2x1 (w8 + 7, z7, z7, 0),
+ svmla_lane_za32_vg2x1 (w8 + 7, z7, z7, 0))
+
+/*
+** mla_lane_w11p10_z23_z0_1:
+** fmlal za\.s\[w11, 10:11\], z23\.h, z0\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w11p10_z23_z0_1, svfloat16_t,
+ svmla_lane_za32_f16_vg2x1 (w11 + 10, z23, z0, 1),
+ svmla_lane_za32_vg2x1 (w11 + 10, z23, z0, 1))
+
+/*
+** mla_lane_w8p14_z23_z0_2:
+** fmlal za\.s\[w8, 14:15\], z23\.h, z0\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p14_z23_z0_2, svfloat16_t,
+ svmla_lane_za32_f16_vg2x1 (w8 + 14, z23, z0, 2),
+ svmla_lane_za32_vg2x1 (w8 + 14, z23, z0, 2))
+
+/*
+** mla_lane_w8p15_z7_z7_3:
+** add (w8|w9|w10|w11), w8, #?15
+** fmlal za\.s\[\1, 0:1\], z7\.h, z7\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p15_z7_z7_3, svfloat16_t,
+ svmla_lane_za32_f16_vg2x1 (w8 + 15, z7, z7, 3),
+ svmla_lane_za32_vg2x1 (w8 + 15, z7, z7, 3))
+
+/*
+** mla_lane_w8p16_z7_z7_4:
+** add (w8|w9|w10|w11), w8, #?16
+** fmlal za\.s\[\1, 0:1\], z7\.h, z7\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p16_z7_z7_4, svfloat16_t,
+ svmla_lane_za32_f16_vg2x1 (w8 + 16, z7, z7, 4),
+ svmla_lane_za32_vg2x1 (w8 + 16, z7, z7, 4))
+
+/*
+** mla_lane_w8m1_z16_z0_5:
+** sub (w8|w9|w10|w11), w8, #?1
+** fmlal za\.s\[\1, 0:1\], z16\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8m1_z16_z0_5, svfloat16_t,
+ svmla_lane_za32_f16_vg2x1 (w8 - 1, z16, z0, 5),
+ svmla_lane_za32_vg2x1 (w8 - 1, z16, z0, 5))
+
+/*
+** mla_lane_w12_z0_z3_6:
+** mov (w8|w9|w10|w11), w12
+** fmlal za\.s\[\1, 0:1\], z0\.h, z3\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w12_z0_z3_6, svfloat16_t,
+ svmla_lane_za32_f16_vg2x1 (w12, z0, z3, 6),
+ svmla_lane_za32_vg2x1 (w12, z0, z3, 6))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f16_vg2x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f16_vg2x2.c
new file mode 100644
index 0000000..6c77355
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f16_vg2x2.c
@@ -0,0 +1,112 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** fmlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_0_z0_z4_0, svfloat16x2_t, svfloat16_t,
+ svmla_lane_za32_f16_vg2x2 (0, z0, z4, 0),
+ svmla_lane_za32_vg2x2 (0, z0, z4, 0))
+
+/*
+** mla_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** fmlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0_z0_z7_1, svfloat16x2_t, svfloat16_t,
+ svmla_lane_za32_f16_vg2x2 (w0, z0, z7, 1),
+ svmla_lane_za32_vg2x2 (w0, z0, z7, 1))
+
+/*
+** mla_lane_w8_z28_z4_2:
+** fmlal za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z4_2, svfloat16x2_t, svfloat16_t,
+ svmla_lane_za32_f16_vg2x2 (w8, z28, z4, 2),
+ svmla_lane_za32_vg2x2 (w8, z28, z4, 2))
+
+/*
+** mla_lane_w8p6_z0_z4_7:
+** fmlal za\.s\[w8, 6:7, vgx2\], {z0\.h - z1\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p6_z0_z4_7, svfloat16x2_t, svfloat16_t,
+ svmla_lane_za32_f16_vg2x2 (w8 + 6, z0, z4, 7),
+ svmla_lane_za32_vg2x2 (w8 + 6, z0, z4, 7))
+
+/*
+** mla_lane_w8p7_z0_z4_3:
+** add (w8|w9|w10|w11), w8, #?7
+** fmlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p7_z0_z4_3, svfloat16x2_t, svfloat16_t,
+ svmla_lane_za32_f16_vg2x2 (w8 + 7, z0, z4, 3),
+ svmla_lane_za32_vg2x2 (w8 + 7, z0, z4, 3))
+
+/*
+** mla_lane_w8p8_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?8
+** fmlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p8_z0_z4_4, svfloat16x2_t, svfloat16_t,
+ svmla_lane_za32_f16_vg2x2 (w8 + 8, z0, z4, 4),
+ svmla_lane_za32_vg2x2 (w8 + 8, z0, z4, 4))
+
+/*
+** mla_lane_w0m1_z0_z4_5:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0m1_z0_z4_5, svfloat16x2_t, svfloat16_t,
+ svmla_lane_za32_f16_vg2x2 (w0 - 1, z0, z4, 5),
+ svmla_lane_za32_vg2x2 (w0 - 1, z0, z4, 5))
+
+/*
+** mla_lane_w8_z4_z15_6:
+** str d15, \[sp, #?-16\]!
+** fmlal za\.s\[w8, 0:1, vgx2\], {z4\.h - z5\.h}, z15\.h\[6\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mla_lane_w8_z4_z15_6, svfloat16x2_t, svfloat16_t,
+ svmla_lane_za32_f16_vg2x2 (w8, z4, z15, 6),
+ svmla_lane_za32_vg2x2 (w8, z4, z15, 6))
+
+/*
+** mla_lane_w8_z28_z16_7:
+** mov (z[0-7]).d, z16.d
+** fmlal za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, \1\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z16_7, svfloat16x2_t, svfloat16_t,
+ svmla_lane_za32_f16_vg2x2 (w8, z28, z16, 7),
+ svmla_lane_za32_vg2x2 (w8, z28, z16, 7))
+
+/*
+** mla_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** fmlal za\.s\[w8, 0:1, vgx2\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z17_z7_0, svfloat16x2_t, svfloat16_t,
+ svmla_lane_za32_f16_vg2x2 (w8, z17, z7, 0),
+ svmla_lane_za32_vg2x2 (w8, z17, z7, 0))
+
+/*
+** mla_lane_w8_z22_z4_1:
+** fmlal za\.s\[w8, 0:1, vgx2\], {z22\.h - z23\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z22_z4_1, svfloat16x2_t, svfloat16_t,
+ svmla_lane_za32_f16_vg2x2 (w8, z22, z4, 1),
+ svmla_lane_za32_vg2x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f16_vg2x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f16_vg2x4.c
new file mode 100644
index 0000000..4c9f545
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f16_vg2x4.c
@@ -0,0 +1,118 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** fmlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_0_z0_z4_0, svfloat16x4_t, svfloat16_t,
+ svmla_lane_za32_f16_vg2x4 (0, z0, z4, 0),
+ svmla_lane_za32_vg2x4 (0, z0, z4, 0))
+
+/*
+** mla_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** fmlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0_z0_z7_1, svfloat16x4_t, svfloat16_t,
+ svmla_lane_za32_f16_vg2x4 (w0, z0, z7, 1),
+ svmla_lane_za32_vg2x4 (w0, z0, z7, 1))
+
+/*
+** mla_lane_w8_z28_z4_2:
+** fmlal za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z4_2, svfloat16x4_t, svfloat16_t,
+ svmla_lane_za32_f16_vg2x4 (w8, z28, z4, 2),
+ svmla_lane_za32_vg2x4 (w8, z28, z4, 2))
+
+/*
+** mla_lane_w8p6_z0_z4_7:
+** fmlal za\.s\[w8, 6:7, vgx4\], {z0\.h - z3\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p6_z0_z4_7, svfloat16x4_t, svfloat16_t,
+ svmla_lane_za32_f16_vg2x4 (w8 + 6, z0, z4, 7),
+ svmla_lane_za32_vg2x4 (w8 + 6, z0, z4, 7))
+
+/*
+** mla_lane_w8p7_z0_z4_3:
+** add (w8|w9|w10|w11), w8, #?7
+** fmlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p7_z0_z4_3, svfloat16x4_t, svfloat16_t,
+ svmla_lane_za32_f16_vg2x4 (w8 + 7, z0, z4, 3),
+ svmla_lane_za32_vg2x4 (w8 + 7, z0, z4, 3))
+
+/*
+** mla_lane_w8p8_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?8
+** fmlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p8_z0_z4_4, svfloat16x4_t, svfloat16_t,
+ svmla_lane_za32_f16_vg2x4 (w8 + 8, z0, z4, 4),
+ svmla_lane_za32_vg2x4 (w8 + 8, z0, z4, 4))
+
+/*
+** mla_lane_w0m1_z0_z4_5:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0m1_z0_z4_5, svfloat16x4_t, svfloat16_t,
+ svmla_lane_za32_f16_vg2x4 (w0 - 1, z0, z4, 5),
+ svmla_lane_za32_vg2x4 (w0 - 1, z0, z4, 5))
+
+/*
+** mla_lane_w8_z4_z15_6:
+** str d15, \[sp, #?-16\]!
+** fmlal za\.s\[w8, 0:1, vgx4\], {z4\.h - z7\.h}, z15\.h\[6\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mla_lane_w8_z4_z15_6, svfloat16x4_t, svfloat16_t,
+ svmla_lane_za32_f16_vg2x4 (w8, z4, z15, 6),
+ svmla_lane_za32_vg2x4 (w8, z4, z15, 6))
+
+/*
+** mla_lane_w8_z28_z16_7:
+** mov (z[0-7]).d, z16.d
+** fmlal za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, \1\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z16_7, svfloat16x4_t, svfloat16_t,
+ svmla_lane_za32_f16_vg2x4 (w8, z28, z16, 7),
+ svmla_lane_za32_vg2x4 (w8, z28, z16, 7))
+
+/*
+** mla_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmlal za\.s\[w8, 0:1, vgx4\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z17_z7_0, svfloat16x4_t, svfloat16_t,
+ svmla_lane_za32_f16_vg2x4 (w8, z17, z7, 0),
+ svmla_lane_za32_vg2x4 (w8, z17, z7, 0))
+
+/*
+** mla_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmlal za\.s\[w8, 0:1, vgx4\], [^\n]+, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z22_z4_1, svfloat16x4_t, svfloat16_t,
+ svmla_lane_za32_f16_vg2x4 (w8, z22, z4, 1),
+ svmla_lane_za32_vg2x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f32_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f32_vg1x2.c
new file mode 100644
index 0000000..e99f36e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f32_vg1x2.c
@@ -0,0 +1,102 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** fmla za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}, z4\.s\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_0_z0_z4_0, svfloat32x2_t, svfloat32_t,
+ svmla_lane_za32_f32_vg1x2 (0, z0, z4, 0),
+ svmla_lane_za32_vg1x2 (0, z0, z4, 0))
+
+/*
+** mla_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** fmla za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}, z7\.s\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0_z0_z7_1, svfloat32x2_t, svfloat32_t,
+ svmla_lane_za32_f32_vg1x2 (w0, z0, z7, 1),
+ svmla_lane_za32_vg1x2 (w0, z0, z7, 1))
+
+/*
+** mla_lane_w8_z28_z4_2:
+** fmla za\.s\[w8, 0, vgx2\], {z28\.s - z29\.s}, z4\.s\[2\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z4_2, svfloat32x2_t, svfloat32_t,
+ svmla_lane_za32_f32_vg1x2 (w8, z28, z4, 2),
+ svmla_lane_za32_vg1x2 (w8, z28, z4, 2))
+
+/*
+** mla_lane_w8p7_z0_z4_3:
+** fmla za\.s\[w8, 7, vgx2\], {z0\.s - z1\.s}, z4\.s\[3\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p7_z0_z4_3, svfloat32x2_t, svfloat32_t,
+ svmla_lane_za32_f32_vg1x2 (w8 + 7, z0, z4, 3),
+ svmla_lane_za32_vg1x2 (w8 + 7, z0, z4, 3))
+
+/*
+** mla_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** fmla za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}, z4\.s\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p8_z0_z4_0, svfloat32x2_t, svfloat32_t,
+ svmla_lane_za32_f32_vg1x2 (w8 + 8, z0, z4, 0),
+ svmla_lane_za32_vg1x2 (w8 + 8, z0, z4, 0))
+
+/*
+** mla_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmla za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}, z4\.s\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0m1_z0_z4_1, svfloat32x2_t, svfloat32_t,
+ svmla_lane_za32_f32_vg1x2 (w0 - 1, z0, z4, 1),
+ svmla_lane_za32_vg1x2 (w0 - 1, z0, z4, 1))
+
+/*
+** mla_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** fmla za\.s\[w8, 0, vgx2\], {z4\.s - z5\.s}, z15\.s\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mla_lane_w8_z4_z15_2, svfloat32x2_t, svfloat32_t,
+ svmla_lane_za32_f32_vg1x2 (w8, z4, z15, 2),
+ svmla_lane_za32_vg1x2 (w8, z4, z15, 2))
+
+/*
+** mla_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** fmla za\.s\[w8, 0, vgx2\], {z28\.s - z29\.s}, \1\.s\[3\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z16_3, svfloat32x2_t, svfloat32_t,
+ svmla_lane_za32_f32_vg1x2 (w8, z28, z16, 3),
+ svmla_lane_za32_vg1x2 (w8, z28, z16, 3))
+
+/*
+** mla_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** fmla za\.s\[w8, 0, vgx2\], [^\n]+, z7\.s\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z17_z7_0, svfloat32x2_t, svfloat32_t,
+ svmla_lane_za32_f32_vg1x2 (w8, z17, z7, 0),
+ svmla_lane_za32_vg1x2 (w8, z17, z7, 0))
+
+/*
+** mla_lane_w8_z22_z4_1:
+** fmla za\.s\[w8, 0, vgx2\], {z22\.s - z23\.s}, z4\.s\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z22_z4_1, svfloat32x2_t, svfloat32_t,
+ svmla_lane_za32_f32_vg1x2 (w8, z22, z4, 1),
+ svmla_lane_za32_vg1x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f32_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f32_vg1x4.c
new file mode 100644
index 0000000..86dd661
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_f32_vg1x4.c
@@ -0,0 +1,108 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** fmla za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}, z4\.s\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_0_z0_z4_0, svfloat32x4_t, svfloat32_t,
+ svmla_lane_za32_f32_vg1x4 (0, z0, z4, 0),
+ svmla_lane_za32_vg1x4 (0, z0, z4, 0))
+
+/*
+** mla_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** fmla za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}, z7\.s\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0_z0_z7_1, svfloat32x4_t, svfloat32_t,
+ svmla_lane_za32_f32_vg1x4 (w0, z0, z7, 1),
+ svmla_lane_za32_vg1x4 (w0, z0, z7, 1))
+
+/*
+** mla_lane_w8_z28_z4_2:
+** fmla za\.s\[w8, 0, vgx4\], {z28\.s - z31\.s}, z4\.s\[2\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z4_2, svfloat32x4_t, svfloat32_t,
+ svmla_lane_za32_f32_vg1x4 (w8, z28, z4, 2),
+ svmla_lane_za32_vg1x4 (w8, z28, z4, 2))
+
+/*
+** mla_lane_w8p7_z0_z4_3:
+** fmla za\.s\[w8, 7, vgx4\], {z0\.s - z3\.s}, z4\.s\[3\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p7_z0_z4_3, svfloat32x4_t, svfloat32_t,
+ svmla_lane_za32_f32_vg1x4 (w8 + 7, z0, z4, 3),
+ svmla_lane_za32_vg1x4 (w8 + 7, z0, z4, 3))
+
+/*
+** mla_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** fmla za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}, z4\.s\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p8_z0_z4_0, svfloat32x4_t, svfloat32_t,
+ svmla_lane_za32_f32_vg1x4 (w8 + 8, z0, z4, 0),
+ svmla_lane_za32_vg1x4 (w8 + 8, z0, z4, 0))
+
+/*
+** mla_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmla za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}, z4\.s\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0m1_z0_z4_1, svfloat32x4_t, svfloat32_t,
+ svmla_lane_za32_f32_vg1x4 (w0 - 1, z0, z4, 1),
+ svmla_lane_za32_vg1x4 (w0 - 1, z0, z4, 1))
+
+/*
+** mla_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** fmla za\.s\[w8, 0, vgx4\], {z4\.s - z7\.s}, z15\.s\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mla_lane_w8_z4_z15_2, svfloat32x4_t, svfloat32_t,
+ svmla_lane_za32_f32_vg1x4 (w8, z4, z15, 2),
+ svmla_lane_za32_vg1x4 (w8, z4, z15, 2))
+
+/*
+** mla_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** fmla za\.s\[w8, 0, vgx4\], {z28\.s - z31\.s}, \1\.s\[3\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z16_3, svfloat32x4_t, svfloat32_t,
+ svmla_lane_za32_f32_vg1x4 (w8, z28, z16, 3),
+ svmla_lane_za32_vg1x4 (w8, z28, z16, 3))
+
+/*
+** mla_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmla za\.s\[w8, 0, vgx4\], [^\n]+, z7\.s\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z17_z7_0, svfloat32x4_t, svfloat32_t,
+ svmla_lane_za32_f32_vg1x4 (w8, z17, z7, 0),
+ svmla_lane_za32_vg1x4 (w8, z17, z7, 0))
+
+/*
+** mla_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmla za\.s\[w8, 0, vgx4\], [^\n]+, z4\.s\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z22_z4_1, svfloat32x4_t, svfloat32_t,
+ svmla_lane_za32_f32_vg1x4 (w8, z22, z4, 1),
+ svmla_lane_za32_vg1x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s16_vg2x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s16_vg2x1.c
new file mode 100644
index 0000000..e21f6f2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s16_vg2x1.c
@@ -0,0 +1,148 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z0_0:
+** mov (w8|w9|w10|w11), #?0
+** smlal za\.s\[\1, 0:1\], z0\.h, z0\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_0_z0_z0_0, svint16_t,
+ svmla_lane_za32_s16_vg2x1 (0, z0, z0, 0),
+ svmla_lane_za32_vg2x1 (0, z0, z0, 0))
+
+/*
+** mla_lane_w0_z0_z3_1:
+** mov (w8|w9|w10|w11), w0
+** smlal za\.s\[\1, 0:1\], z0\.h, z3\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w0_z0_z3_1, svint16_t,
+ svmla_lane_za32_s16_vg2x1 (w0, z0, z3, 1),
+ svmla_lane_za32_vg2x1 (w0, z0, z3, 1))
+
+/*
+** mla_lane_w7_z0_z3_2:
+** mov (w8|w9|w10|w11), w7
+** smlal za\.s\[\1, 0:1\], z0\.h, z3\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w7_z0_z3_2, svint16_t,
+ svmla_lane_za32_s16_vg2x1 (w7, z0, z3, 2),
+ svmla_lane_za32_vg2x1 (w7, z0, z3, 2))
+
+/*
+** mla_lane_w8_z7_z3_3:
+** smlal za\.s\[w8, 0:1\], z7\.h, z3\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8_z7_z3_3, svint16_t,
+ svmla_lane_za32_s16_vg2x1 (w8, z7, z3, 3),
+ svmla_lane_za32_vg2x1 (w8, z7, z3, 3))
+
+/*
+** mla_lane_w8_z31_z16_4:
+** mov (z[0-7])\.d, z16\.d
+** smlal za\.s\[w8, 0:1\], z31\.h. \1\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8_z31_z16_4, svint16_t,
+ svmla_lane_za32_s16_vg2x1 (w8, z31, z16, 4),
+ svmla_lane_za32_vg2x1 (w8, z31, z16, 4))
+
+/*
+** mla_lane_w8p1_z0_z0_5:
+** add (w8|w9|w10|w11), w8, #?1
+** smlal za\.s\[\1, 0:1\], z0\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p1_z0_z0_5, svint16_t,
+ svmla_lane_za32_s16_vg2x1 (w8 + 1, z0, z0, 5),
+ svmla_lane_za32_vg2x1 (w8 + 1, z0, z0, 5))
+
+/*
+** mla_lane_w8p2_z23_z0_6:
+** smlal za\.s\[w8, 2:3\], z23\.h, z0\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p2_z23_z0_6, svint16_t,
+ svmla_lane_za32_s16_vg2x1 (w8 + 2, z23, z0, 6),
+ svmla_lane_za32_vg2x1 (w8 + 2, z23, z0, 6))
+
+/*
+** mla_lane_w11p6_z23_z0_7:
+** smlal za\.s\[w11, 6:7\], z23\.h, z0\.h\[7\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w11p6_z23_z0_7, svint16_t,
+ svmla_lane_za32_s16_vg2x1 (w11 + 6, z23, z0, 7),
+ svmla_lane_za32_vg2x1 (w11 + 6, z23, z0, 7))
+
+/*
+** mla_lane_w8p7_z7_z7_0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlal za\.s\[\1, 0:1\], z7\.h, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p7_z7_z7_0, svint16_t,
+ svmla_lane_za32_s16_vg2x1 (w8 + 7, z7, z7, 0),
+ svmla_lane_za32_vg2x1 (w8 + 7, z7, z7, 0))
+
+/*
+** mla_lane_w11p10_z23_z0_1:
+** smlal za\.s\[w11, 10:11\], z23\.h, z0\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w11p10_z23_z0_1, svint16_t,
+ svmla_lane_za32_s16_vg2x1 (w11 + 10, z23, z0, 1),
+ svmla_lane_za32_vg2x1 (w11 + 10, z23, z0, 1))
+
+/*
+** mla_lane_w8p14_z23_z0_2:
+** smlal za\.s\[w8, 14:15\], z23\.h, z0\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p14_z23_z0_2, svint16_t,
+ svmla_lane_za32_s16_vg2x1 (w8 + 14, z23, z0, 2),
+ svmla_lane_za32_vg2x1 (w8 + 14, z23, z0, 2))
+
+/*
+** mla_lane_w8p15_z7_z7_3:
+** add (w8|w9|w10|w11), w8, #?15
+** smlal za\.s\[\1, 0:1\], z7\.h, z7\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p15_z7_z7_3, svint16_t,
+ svmla_lane_za32_s16_vg2x1 (w8 + 15, z7, z7, 3),
+ svmla_lane_za32_vg2x1 (w8 + 15, z7, z7, 3))
+
+/*
+** mla_lane_w8p16_z7_z7_4:
+** add (w8|w9|w10|w11), w8, #?16
+** smlal za\.s\[\1, 0:1\], z7\.h, z7\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p16_z7_z7_4, svint16_t,
+ svmla_lane_za32_s16_vg2x1 (w8 + 16, z7, z7, 4),
+ svmla_lane_za32_vg2x1 (w8 + 16, z7, z7, 4))
+
+/*
+** mla_lane_w8m1_z16_z0_5:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlal za\.s\[\1, 0:1\], z16\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8m1_z16_z0_5, svint16_t,
+ svmla_lane_za32_s16_vg2x1 (w8 - 1, z16, z0, 5),
+ svmla_lane_za32_vg2x1 (w8 - 1, z16, z0, 5))
+
+/*
+** mla_lane_w12_z0_z3_6:
+** mov (w8|w9|w10|w11), w12
+** smlal za\.s\[\1, 0:1\], z0\.h, z3\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w12_z0_z3_6, svint16_t,
+ svmla_lane_za32_s16_vg2x1 (w12, z0, z3, 6),
+ svmla_lane_za32_vg2x1 (w12, z0, z3, 6))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s16_vg2x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s16_vg2x2.c
new file mode 100644
index 0000000..f679871
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s16_vg2x2.c
@@ -0,0 +1,112 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** smlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_0_z0_z4_0, svint16x2_t, svint16_t,
+ svmla_lane_za32_s16_vg2x2 (0, z0, z4, 0),
+ svmla_lane_za32_vg2x2 (0, z0, z4, 0))
+
+/*
+** mla_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** smlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0_z0_z7_1, svint16x2_t, svint16_t,
+ svmla_lane_za32_s16_vg2x2 (w0, z0, z7, 1),
+ svmla_lane_za32_vg2x2 (w0, z0, z7, 1))
+
+/*
+** mla_lane_w8_z28_z4_2:
+** smlal za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z4_2, svint16x2_t, svint16_t,
+ svmla_lane_za32_s16_vg2x2 (w8, z28, z4, 2),
+ svmla_lane_za32_vg2x2 (w8, z28, z4, 2))
+
+/*
+** mla_lane_w8p6_z0_z4_7:
+** smlal za\.s\[w8, 6:7, vgx2\], {z0\.h - z1\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p6_z0_z4_7, svint16x2_t, svint16_t,
+ svmla_lane_za32_s16_vg2x2 (w8 + 6, z0, z4, 7),
+ svmla_lane_za32_vg2x2 (w8 + 6, z0, z4, 7))
+
+/*
+** mla_lane_w8p7_z0_z4_3:
+** add (w8|w9|w10|w11), w8, #?7
+** smlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p7_z0_z4_3, svint16x2_t, svint16_t,
+ svmla_lane_za32_s16_vg2x2 (w8 + 7, z0, z4, 3),
+ svmla_lane_za32_vg2x2 (w8 + 7, z0, z4, 3))
+
+/*
+** mla_lane_w8p8_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?8
+** smlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p8_z0_z4_4, svint16x2_t, svint16_t,
+ svmla_lane_za32_s16_vg2x2 (w8 + 8, z0, z4, 4),
+ svmla_lane_za32_vg2x2 (w8 + 8, z0, z4, 4))
+
+/*
+** mla_lane_w0m1_z0_z4_5:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0m1_z0_z4_5, svint16x2_t, svint16_t,
+ svmla_lane_za32_s16_vg2x2 (w0 - 1, z0, z4, 5),
+ svmla_lane_za32_vg2x2 (w0 - 1, z0, z4, 5))
+
+/*
+** mla_lane_w8_z4_z15_6:
+** str d15, \[sp, #?-16\]!
+** smlal za\.s\[w8, 0:1, vgx2\], {z4\.h - z5\.h}, z15\.h\[6\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mla_lane_w8_z4_z15_6, svint16x2_t, svint16_t,
+ svmla_lane_za32_s16_vg2x2 (w8, z4, z15, 6),
+ svmla_lane_za32_vg2x2 (w8, z4, z15, 6))
+
+/*
+** mla_lane_w8_z28_z16_7:
+** mov (z[0-7]).d, z16.d
+** smlal za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, \1\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z16_7, svint16x2_t, svint16_t,
+ svmla_lane_za32_s16_vg2x2 (w8, z28, z16, 7),
+ svmla_lane_za32_vg2x2 (w8, z28, z16, 7))
+
+/*
+** mla_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** smlal za\.s\[w8, 0:1, vgx2\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z17_z7_0, svint16x2_t, svint16_t,
+ svmla_lane_za32_s16_vg2x2 (w8, z17, z7, 0),
+ svmla_lane_za32_vg2x2 (w8, z17, z7, 0))
+
+/*
+** mla_lane_w8_z22_z4_1:
+** smlal za\.s\[w8, 0:1, vgx2\], {z22\.h - z23\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z22_z4_1, svint16x2_t, svint16_t,
+ svmla_lane_za32_s16_vg2x2 (w8, z22, z4, 1),
+ svmla_lane_za32_vg2x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s16_vg2x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s16_vg2x4.c
new file mode 100644
index 0000000..81d87de
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s16_vg2x4.c
@@ -0,0 +1,118 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** smlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_0_z0_z4_0, svint16x4_t, svint16_t,
+ svmla_lane_za32_s16_vg2x4 (0, z0, z4, 0),
+ svmla_lane_za32_vg2x4 (0, z0, z4, 0))
+
+/*
+** mla_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** smlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0_z0_z7_1, svint16x4_t, svint16_t,
+ svmla_lane_za32_s16_vg2x4 (w0, z0, z7, 1),
+ svmla_lane_za32_vg2x4 (w0, z0, z7, 1))
+
+/*
+** mla_lane_w8_z28_z4_2:
+** smlal za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z4_2, svint16x4_t, svint16_t,
+ svmla_lane_za32_s16_vg2x4 (w8, z28, z4, 2),
+ svmla_lane_za32_vg2x4 (w8, z28, z4, 2))
+
+/*
+** mla_lane_w8p6_z0_z4_7:
+** smlal za\.s\[w8, 6:7, vgx4\], {z0\.h - z3\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p6_z0_z4_7, svint16x4_t, svint16_t,
+ svmla_lane_za32_s16_vg2x4 (w8 + 6, z0, z4, 7),
+ svmla_lane_za32_vg2x4 (w8 + 6, z0, z4, 7))
+
+/*
+** mla_lane_w8p7_z0_z4_3:
+** add (w8|w9|w10|w11), w8, #?7
+** smlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p7_z0_z4_3, svint16x4_t, svint16_t,
+ svmla_lane_za32_s16_vg2x4 (w8 + 7, z0, z4, 3),
+ svmla_lane_za32_vg2x4 (w8 + 7, z0, z4, 3))
+
+/*
+** mla_lane_w8p8_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?8
+** smlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p8_z0_z4_4, svint16x4_t, svint16_t,
+ svmla_lane_za32_s16_vg2x4 (w8 + 8, z0, z4, 4),
+ svmla_lane_za32_vg2x4 (w8 + 8, z0, z4, 4))
+
+/*
+** mla_lane_w0m1_z0_z4_5:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0m1_z0_z4_5, svint16x4_t, svint16_t,
+ svmla_lane_za32_s16_vg2x4 (w0 - 1, z0, z4, 5),
+ svmla_lane_za32_vg2x4 (w0 - 1, z0, z4, 5))
+
+/*
+** mla_lane_w8_z4_z15_6:
+** str d15, \[sp, #?-16\]!
+** smlal za\.s\[w8, 0:1, vgx4\], {z4\.h - z7\.h}, z15\.h\[6\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mla_lane_w8_z4_z15_6, svint16x4_t, svint16_t,
+ svmla_lane_za32_s16_vg2x4 (w8, z4, z15, 6),
+ svmla_lane_za32_vg2x4 (w8, z4, z15, 6))
+
+/*
+** mla_lane_w8_z28_z16_7:
+** mov (z[0-7]).d, z16.d
+** smlal za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, \1\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z16_7, svint16x4_t, svint16_t,
+ svmla_lane_za32_s16_vg2x4 (w8, z28, z16, 7),
+ svmla_lane_za32_vg2x4 (w8, z28, z16, 7))
+
+/*
+** mla_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smlal za\.s\[w8, 0:1, vgx4\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z17_z7_0, svint16x4_t, svint16_t,
+ svmla_lane_za32_s16_vg2x4 (w8, z17, z7, 0),
+ svmla_lane_za32_vg2x4 (w8, z17, z7, 0))
+
+/*
+** mla_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smlal za\.s\[w8, 0:1, vgx4\], [^\n]+, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z22_z4_1, svint16x4_t, svint16_t,
+ svmla_lane_za32_s16_vg2x4 (w8, z22, z4, 1),
+ svmla_lane_za32_vg2x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s8_vg4x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s8_vg4x1.c
new file mode 100644
index 0000000..7bdd9b6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s8_vg4x1.c
@@ -0,0 +1,150 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z0_0:
+** mov (w8|w9|w10|w11), #?0
+** smlall za\.s\[\1, 0:3\], z0\.b, z0\.b\[0\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_0_z0_z0_0, svint8_t,
+ svmla_lane_za32_s8_vg4x1 (0, z0, z0, 0),
+ svmla_lane_za32_vg4x1 (0, z0, z0, 0))
+
+/*
+** mla_lane_w0_z0_z3_1:
+** mov (w8|w9|w10|w11), w0
+** smlall za\.s\[\1, 0:3\], z0\.b, z3\.b\[1\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w0_z0_z3_1, svint8_t,
+ svmla_lane_za32_s8_vg4x1 (w0, z0, z3, 1),
+ svmla_lane_za32_vg4x1 (w0, z0, z3, 1))
+
+/*
+** mla_lane_w7_z0_z3_2:
+** mov (w8|w9|w10|w11), w7
+** smlall za\.s\[\1, 0:3\], z0\.b, z3\.b\[2\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w7_z0_z3_2, svint8_t,
+ svmla_lane_za32_s8_vg4x1 (w7, z0, z3, 2),
+ svmla_lane_za32_vg4x1 (w7, z0, z3, 2))
+
+/*
+** mla_lane_w8_z7_z3_3:
+** smlall za\.s\[w8, 0:3\], z7\.b, z3\.b\[3\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8_z7_z3_3, svint8_t,
+ svmla_lane_za32_s8_vg4x1 (w8, z7, z3, 3),
+ svmla_lane_za32_vg4x1 (w8, z7, z3, 3))
+
+/*
+** mla_lane_w8_z31_z16_4:
+** mov (z[0-7])\.d, z16\.d
+** smlall za\.s\[w8, 0:3\], z31\.b. \1\.b\[4\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8_z31_z16_4, svint8_t,
+ svmla_lane_za32_s8_vg4x1 (w8, z31, z16, 4),
+ svmla_lane_za32_vg4x1 (w8, z31, z16, 4))
+
+/*
+** mla_lane_w8p1_z0_z0_5:
+** add (w8|w9|w10|w11), w8, #?1
+** smlall za\.s\[\1, 0:3\], z0\.b, z0\.b\[5\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p1_z0_z0_5, svint8_t,
+ svmla_lane_za32_s8_vg4x1 (w8 + 1, z0, z0, 5),
+ svmla_lane_za32_vg4x1 (w8 + 1, z0, z0, 5))
+
+/*
+** mla_lane_w8p2_z23_z0_6:
+** add (w8|w9|w10|w11), w8, #?2
+** smlall za\.s\[\1, 0:3\], z23\.b, z0\.b\[6\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p2_z23_z0_6, svint8_t,
+ svmla_lane_za32_s8_vg4x1 (w8 + 2, z23, z0, 6),
+ svmla_lane_za32_vg4x1 (w8 + 2, z23, z0, 6))
+
+/*
+** mla_lane_w11p4_z23_z0_7:
+** smlall za\.s\[w11, 4:7\], z23\.b, z0\.b\[7\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w11p4_z23_z0_7, svint8_t,
+ svmla_lane_za32_s8_vg4x1 (w11 + 4, z23, z0, 7),
+ svmla_lane_za32_vg4x1 (w11 + 4, z23, z0, 7))
+
+/*
+** mla_lane_w8p7_z7_z7_8:
+** add (w8|w9|w10|w11), w8, #?7
+** smlall za\.s\[\1, 0:3\], z7\.b, z7\.b\[8\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p7_z7_z7_8, svint8_t,
+ svmla_lane_za32_s8_vg4x1 (w8 + 7, z7, z7, 8),
+ svmla_lane_za32_vg4x1 (w8 + 7, z7, z7, 8))
+
+/*
+** mla_lane_w11p12_z23_z0_9:
+** smlall za\.s\[w11, 12:15\], z23\.b, z0\.b\[9\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w11p12_z23_z0_9, svint8_t,
+ svmla_lane_za32_s8_vg4x1 (w11 + 12, z23, z0, 9),
+ svmla_lane_za32_vg4x1 (w11 + 12, z23, z0, 9))
+
+/*
+** mla_lane_w8p14_z23_z0_10:
+** add (w8|w9|w10|w11), w8, #?14
+** smlall za\.s\[w8, 0:3\], z23\.b, z0\.b\[10\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p14_z23_z0_10, svint8_t,
+ svmla_lane_za32_s8_vg4x1 (w8 + 14, z23, z0, 10),
+ svmla_lane_za32_vg4x1 (w8 + 14, z23, z0, 10))
+
+/*
+** mla_lane_w8p15_z7_z7_11:
+** add (w8|w9|w10|w11), w8, #?15
+** smlall za\.s\[\1, 0:3\], z7\.b, z7\.b\[11\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p15_z7_z7_11, svint8_t,
+ svmla_lane_za32_s8_vg4x1 (w8 + 15, z7, z7, 11),
+ svmla_lane_za32_vg4x1 (w8 + 15, z7, z7, 11))
+
+/*
+** mla_lane_w8p16_z7_z7_12:
+** add (w8|w9|w10|w11), w8, #?16
+** smlall za\.s\[\1, 0:3\], z7\.b, z7\.b\[12\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p16_z7_z7_12, svint8_t,
+ svmla_lane_za32_s8_vg4x1 (w8 + 16, z7, z7, 12),
+ svmla_lane_za32_vg4x1 (w8 + 16, z7, z7, 12))
+
+/*
+** mla_lane_w8m1_z16_z0_13:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlall za\.s\[\1, 0:3\], z16\.b, z0\.b\[13\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8m1_z16_z0_13, svint8_t,
+ svmla_lane_za32_s8_vg4x1 (w8 - 1, z16, z0, 13),
+ svmla_lane_za32_vg4x1 (w8 - 1, z16, z0, 13))
+
+/*
+** mla_lane_w12_z0_z3_15:
+** mov (w8|w9|w10|w11), w12
+** smlall za\.s\[\1, 0:3\], z0\.b, z3\.b\[15\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w12_z0_z3_15, svint8_t,
+ svmla_lane_za32_s8_vg4x1 (w12, z0, z3, 15),
+ svmla_lane_za32_vg4x1 (w12, z0, z3, 15))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s8_vg4x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s8_vg4x2.c
new file mode 100644
index 0000000..e84706c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s8_vg4x2.c
@@ -0,0 +1,122 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** smlall za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_0_z0_z4_0, svint8x2_t, svint8_t,
+ svmla_lane_za32_s8_vg4x2 (0, z0, z4, 0),
+ svmla_lane_za32_vg4x2 (0, z0, z4, 0))
+
+/*
+** mla_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** smlall za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z7\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0_z0_z7_1, svint8x2_t, svint8_t,
+ svmla_lane_za32_s8_vg4x2 (w0, z0, z7, 1),
+ svmla_lane_za32_vg4x2 (w0, z0, z7, 1))
+
+/*
+** mla_lane_w8_z28_z4_2:
+** smlall za\.s\[w8, 0:3, vgx2\], {z28\.b - z29\.b}, z4\.b\[2\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z4_2, svint8x2_t, svint8_t,
+ svmla_lane_za32_s8_vg4x2 (w8, z28, z4, 2),
+ svmla_lane_za32_vg4x2 (w8, z28, z4, 2))
+
+/*
+** mla_lane_w11p4_z0_z4_3:
+** smlall za\.s\[w11, 4:7, vgx2\], {z0\.b - z1\.b}, z4\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w11p4_z0_z4_3, svint8x2_t, svint8_t,
+ svmla_lane_za32_s8_vg4x2 (w11 + 4, z0, z4, 3),
+ svmla_lane_za32_vg4x2 (w11 + 4, z0, z4, 3))
+
+/*
+** mla_lane_w8p6_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?6
+** smlall za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z4\.b\[4\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p6_z0_z4_4, svint8x2_t, svint8_t,
+ svmla_lane_za32_s8_vg4x2 (w8 + 6, z0, z4, 4),
+ svmla_lane_za32_vg4x2 (w8 + 6, z0, z4, 4))
+
+/*
+** mla_lane_w8p7_z0_z4_5:
+** add (w8|w9|w10|w11), w8, #?7
+** smlall za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z4\.b\[5\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p7_z0_z4_5, svint8x2_t, svint8_t,
+ svmla_lane_za32_s8_vg4x2 (w8 + 7, z0, z4, 5),
+ svmla_lane_za32_vg4x2 (w8 + 7, z0, z4, 5))
+
+/*
+** mla_lane_w8p8_z0_z4_7:
+** add (w8|w9|w10|w11), w8, #?8
+** smlall za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z4\.b\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p8_z0_z4_7, svint8x2_t, svint8_t,
+ svmla_lane_za32_s8_vg4x2 (w8 + 8, z0, z4, 7),
+ svmla_lane_za32_vg4x2 (w8 + 8, z0, z4, 7))
+
+/*
+** mla_lane_w0m1_z0_z4_9:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlall za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z4\.b\[9\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0m1_z0_z4_9, svint8x2_t, svint8_t,
+ svmla_lane_za32_s8_vg4x2 (w0 - 1, z0, z4, 9),
+ svmla_lane_za32_vg4x2 (w0 - 1, z0, z4, 9))
+
+/*
+** mla_lane_w8_z4_z15_10:
+** str d15, \[sp, #?-16\]!
+** smlall za\.s\[w8, 0:3, vgx2\], {z4\.b - z5\.b}, z15\.b\[10\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mla_lane_w8_z4_z15_10, svint8x2_t, svint8_t,
+ svmla_lane_za32_s8_vg4x2 (w8, z4, z15, 10),
+ svmla_lane_za32_vg4x2 (w8, z4, z15, 10))
+
+/*
+** mla_lane_w8_z28_z16_11:
+** mov (z[0-7]).d, z16.d
+** smlall za\.s\[w8, 0:3, vgx2\], {z28\.b - z29\.b}, \1\.b\[11\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z16_11, svint8x2_t, svint8_t,
+ svmla_lane_za32_s8_vg4x2 (w8, z28, z16, 11),
+ svmla_lane_za32_vg4x2 (w8, z28, z16, 11))
+
+/*
+** mla_lane_w8_z17_z7_13:
+** mov [^\n]+
+** mov [^\n]+
+** smlall za\.s\[w8, 0:3, vgx2\], [^\n]+, z7\.b\[13\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z17_z7_13, svint8x2_t, svint8_t,
+ svmla_lane_za32_s8_vg4x2 (w8, z17, z7, 13),
+ svmla_lane_za32_vg4x2 (w8, z17, z7, 13))
+
+/*
+** mla_lane_w8_z22_z4_15:
+** smlall za\.s\[w8, 0:3, vgx2\], {z22\.b - z23\.b}, z4\.b\[15\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z22_z4_15, svint8x2_t, svint8_t,
+ svmla_lane_za32_s8_vg4x2 (w8, z22, z4, 15),
+ svmla_lane_za32_vg4x2 (w8, z22, z4, 15))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s8_vg4x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s8_vg4x4.c
new file mode 100644
index 0000000..dbb64ef
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_s8_vg4x4.c
@@ -0,0 +1,128 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** smlall za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_0_z0_z4_0, svint8x4_t, svint8_t,
+ svmla_lane_za32_s8_vg4x4 (0, z0, z4, 0),
+ svmla_lane_za32_vg4x4 (0, z0, z4, 0))
+
+/*
+** mla_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** smlall za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z7\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0_z0_z7_1, svint8x4_t, svint8_t,
+ svmla_lane_za32_s8_vg4x4 (w0, z0, z7, 1),
+ svmla_lane_za32_vg4x4 (w0, z0, z7, 1))
+
+/*
+** mla_lane_w8_z28_z4_2:
+** smlall za\.s\[w8, 0:3, vgx4\], {z28\.b - z31\.b}, z4\.b\[2\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z4_2, svint8x4_t, svint8_t,
+ svmla_lane_za32_s8_vg4x4 (w8, z28, z4, 2),
+ svmla_lane_za32_vg4x4 (w8, z28, z4, 2))
+
+/*
+** mla_lane_w11p4_z0_z4_7:
+** smlall za\.s\[w11, 4:7, vgx4\], {z0\.b - z3\.b}, z4\.b\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w11p4_z0_z4_7, svint8x4_t, svint8_t,
+ svmla_lane_za32_s8_vg4x4 (w11 + 4, z0, z4, 7),
+ svmla_lane_za32_vg4x4 (w11 + 4, z0, z4, 7))
+
+/*
+** mla_lane_w8p6_z0_z4_8:
+** add (w8|w9|w10|w11), w8, #?6
+** smlall za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z4\.b\[8\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p6_z0_z4_8, svint8x4_t, svint8_t,
+ svmla_lane_za32_s8_vg4x4 (w8 + 6, z0, z4, 8),
+ svmla_lane_za32_vg4x4 (w8 + 6, z0, z4, 8))
+
+/*
+** mla_lane_w8p7_z0_z4_9:
+** add (w8|w9|w10|w11), w8, #?7
+** smlall za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z4\.b\[9\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p7_z0_z4_9, svint8x4_t, svint8_t,
+ svmla_lane_za32_s8_vg4x4 (w8 + 7, z0, z4, 9),
+ svmla_lane_za32_vg4x4 (w8 + 7, z0, z4, 9))
+
+/*
+** mla_lane_w8p8_z0_z4_10:
+** add (w8|w9|w10|w11), w8, #?8
+** smlall za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z4\.b\[10\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p8_z0_z4_10, svint8x4_t, svint8_t,
+ svmla_lane_za32_s8_vg4x4 (w8 + 8, z0, z4, 10),
+ svmla_lane_za32_vg4x4 (w8 + 8, z0, z4, 10))
+
+/*
+** mla_lane_w0m1_z0_z4_11:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlall za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z4\.b\[11\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0m1_z0_z4_11, svint8x4_t, svint8_t,
+ svmla_lane_za32_s8_vg4x4 (w0 - 1, z0, z4, 11),
+ svmla_lane_za32_vg4x4 (w0 - 1, z0, z4, 11))
+
+/*
+** mla_lane_w8_z4_z15_12:
+** str d15, \[sp, #?-16\]!
+** smlall za\.s\[w8, 0:3, vgx4\], {z4\.b - z7\.b}, z15\.b\[12\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mla_lane_w8_z4_z15_12, svint8x4_t, svint8_t,
+ svmla_lane_za32_s8_vg4x4 (w8, z4, z15, 12),
+ svmla_lane_za32_vg4x4 (w8, z4, z15, 12))
+
+/*
+** mla_lane_w8_z28_z16_13:
+** mov (z[0-7]).d, z16.d
+** smlall za\.s\[w8, 0:3, vgx4\], {z28\.b - z31\.b}, \1\.b\[13\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z16_13, svint8x4_t, svint8_t,
+ svmla_lane_za32_s8_vg4x4 (w8, z28, z16, 13),
+ svmla_lane_za32_vg4x4 (w8, z28, z16, 13))
+
+/*
+** mla_lane_w8_z17_z7_14:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smlall za\.s\[w8, 0:3, vgx4\], [^\n]+, z7\.b\[14\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z17_z7_14, svint8x4_t, svint8_t,
+ svmla_lane_za32_s8_vg4x4 (w8, z17, z7, 14),
+ svmla_lane_za32_vg4x4 (w8, z17, z7, 14))
+
+/*
+** mla_lane_w8_z22_z4_15:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smlall za\.s\[w8, 0:3, vgx4\], [^\n]+, z4\.b\[15\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z22_z4_15, svint8x4_t, svint8_t,
+ svmla_lane_za32_s8_vg4x4 (w8, z22, z4, 15),
+ svmla_lane_za32_vg4x4 (w8, z22, z4, 15))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u16_vg2x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u16_vg2x1.c
new file mode 100644
index 0000000..811272f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u16_vg2x1.c
@@ -0,0 +1,148 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z0_0:
+** mov (w8|w9|w10|w11), #?0
+** umlal za\.s\[\1, 0:1\], z0\.h, z0\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_0_z0_z0_0, svuint16_t,
+ svmla_lane_za32_u16_vg2x1 (0, z0, z0, 0),
+ svmla_lane_za32_vg2x1 (0, z0, z0, 0))
+
+/*
+** mla_lane_w0_z0_z3_1:
+** mov (w8|w9|w10|w11), w0
+** umlal za\.s\[\1, 0:1\], z0\.h, z3\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w0_z0_z3_1, svuint16_t,
+ svmla_lane_za32_u16_vg2x1 (w0, z0, z3, 1),
+ svmla_lane_za32_vg2x1 (w0, z0, z3, 1))
+
+/*
+** mla_lane_w7_z0_z3_2:
+** mov (w8|w9|w10|w11), w7
+** umlal za\.s\[\1, 0:1\], z0\.h, z3\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w7_z0_z3_2, svuint16_t,
+ svmla_lane_za32_u16_vg2x1 (w7, z0, z3, 2),
+ svmla_lane_za32_vg2x1 (w7, z0, z3, 2))
+
+/*
+** mla_lane_w8_z7_z3_3:
+** umlal za\.s\[w8, 0:1\], z7\.h, z3\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8_z7_z3_3, svuint16_t,
+ svmla_lane_za32_u16_vg2x1 (w8, z7, z3, 3),
+ svmla_lane_za32_vg2x1 (w8, z7, z3, 3))
+
+/*
+** mla_lane_w8_z31_z16_4:
+** mov (z[0-7])\.d, z16\.d
+** umlal za\.s\[w8, 0:1\], z31\.h. \1\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8_z31_z16_4, svuint16_t,
+ svmla_lane_za32_u16_vg2x1 (w8, z31, z16, 4),
+ svmla_lane_za32_vg2x1 (w8, z31, z16, 4))
+
+/*
+** mla_lane_w8p1_z0_z0_5:
+** add (w8|w9|w10|w11), w8, #?1
+** umlal za\.s\[\1, 0:1\], z0\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p1_z0_z0_5, svuint16_t,
+ svmla_lane_za32_u16_vg2x1 (w8 + 1, z0, z0, 5),
+ svmla_lane_za32_vg2x1 (w8 + 1, z0, z0, 5))
+
+/*
+** mla_lane_w8p2_z23_z0_6:
+** umlal za\.s\[w8, 2:3\], z23\.h, z0\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p2_z23_z0_6, svuint16_t,
+ svmla_lane_za32_u16_vg2x1 (w8 + 2, z23, z0, 6),
+ svmla_lane_za32_vg2x1 (w8 + 2, z23, z0, 6))
+
+/*
+** mla_lane_w11p6_z23_z0_7:
+** umlal za\.s\[w11, 6:7\], z23\.h, z0\.h\[7\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w11p6_z23_z0_7, svuint16_t,
+ svmla_lane_za32_u16_vg2x1 (w11 + 6, z23, z0, 7),
+ svmla_lane_za32_vg2x1 (w11 + 6, z23, z0, 7))
+
+/*
+** mla_lane_w8p7_z7_z7_0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlal za\.s\[\1, 0:1\], z7\.h, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p7_z7_z7_0, svuint16_t,
+ svmla_lane_za32_u16_vg2x1 (w8 + 7, z7, z7, 0),
+ svmla_lane_za32_vg2x1 (w8 + 7, z7, z7, 0))
+
+/*
+** mla_lane_w11p10_z23_z0_1:
+** umlal za\.s\[w11, 10:11\], z23\.h, z0\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w11p10_z23_z0_1, svuint16_t,
+ svmla_lane_za32_u16_vg2x1 (w11 + 10, z23, z0, 1),
+ svmla_lane_za32_vg2x1 (w11 + 10, z23, z0, 1))
+
+/*
+** mla_lane_w8p14_z23_z0_2:
+** umlal za\.s\[w8, 14:15\], z23\.h, z0\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p14_z23_z0_2, svuint16_t,
+ svmla_lane_za32_u16_vg2x1 (w8 + 14, z23, z0, 2),
+ svmla_lane_za32_vg2x1 (w8 + 14, z23, z0, 2))
+
+/*
+** mla_lane_w8p15_z7_z7_3:
+** add (w8|w9|w10|w11), w8, #?15
+** umlal za\.s\[\1, 0:1\], z7\.h, z7\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p15_z7_z7_3, svuint16_t,
+ svmla_lane_za32_u16_vg2x1 (w8 + 15, z7, z7, 3),
+ svmla_lane_za32_vg2x1 (w8 + 15, z7, z7, 3))
+
+/*
+** mla_lane_w8p16_z7_z7_4:
+** add (w8|w9|w10|w11), w8, #?16
+** umlal za\.s\[\1, 0:1\], z7\.h, z7\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p16_z7_z7_4, svuint16_t,
+ svmla_lane_za32_u16_vg2x1 (w8 + 16, z7, z7, 4),
+ svmla_lane_za32_vg2x1 (w8 + 16, z7, z7, 4))
+
+/*
+** mla_lane_w8m1_z16_z0_5:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlal za\.s\[\1, 0:1\], z16\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8m1_z16_z0_5, svuint16_t,
+ svmla_lane_za32_u16_vg2x1 (w8 - 1, z16, z0, 5),
+ svmla_lane_za32_vg2x1 (w8 - 1, z16, z0, 5))
+
+/*
+** mla_lane_w12_z0_z3_6:
+** mov (w8|w9|w10|w11), w12
+** umlal za\.s\[\1, 0:1\], z0\.h, z3\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w12_z0_z3_6, svuint16_t,
+ svmla_lane_za32_u16_vg2x1 (w12, z0, z3, 6),
+ svmla_lane_za32_vg2x1 (w12, z0, z3, 6))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u16_vg2x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u16_vg2x2.c
new file mode 100644
index 0000000..9b0f70bc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u16_vg2x2.c
@@ -0,0 +1,112 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** umlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_0_z0_z4_0, svuint16x2_t, svuint16_t,
+ svmla_lane_za32_u16_vg2x2 (0, z0, z4, 0),
+ svmla_lane_za32_vg2x2 (0, z0, z4, 0))
+
+/*
+** mla_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** umlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0_z0_z7_1, svuint16x2_t, svuint16_t,
+ svmla_lane_za32_u16_vg2x2 (w0, z0, z7, 1),
+ svmla_lane_za32_vg2x2 (w0, z0, z7, 1))
+
+/*
+** mla_lane_w8_z28_z4_2:
+** umlal za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z4_2, svuint16x2_t, svuint16_t,
+ svmla_lane_za32_u16_vg2x2 (w8, z28, z4, 2),
+ svmla_lane_za32_vg2x2 (w8, z28, z4, 2))
+
+/*
+** mla_lane_w8p6_z0_z4_7:
+** umlal za\.s\[w8, 6:7, vgx2\], {z0\.h - z1\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p6_z0_z4_7, svuint16x2_t, svuint16_t,
+ svmla_lane_za32_u16_vg2x2 (w8 + 6, z0, z4, 7),
+ svmla_lane_za32_vg2x2 (w8 + 6, z0, z4, 7))
+
+/*
+** mla_lane_w8p7_z0_z4_3:
+** add (w8|w9|w10|w11), w8, #?7
+** umlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p7_z0_z4_3, svuint16x2_t, svuint16_t,
+ svmla_lane_za32_u16_vg2x2 (w8 + 7, z0, z4, 3),
+ svmla_lane_za32_vg2x2 (w8 + 7, z0, z4, 3))
+
+/*
+** mla_lane_w8p8_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?8
+** umlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p8_z0_z4_4, svuint16x2_t, svuint16_t,
+ svmla_lane_za32_u16_vg2x2 (w8 + 8, z0, z4, 4),
+ svmla_lane_za32_vg2x2 (w8 + 8, z0, z4, 4))
+
+/*
+** mla_lane_w0m1_z0_z4_5:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0m1_z0_z4_5, svuint16x2_t, svuint16_t,
+ svmla_lane_za32_u16_vg2x2 (w0 - 1, z0, z4, 5),
+ svmla_lane_za32_vg2x2 (w0 - 1, z0, z4, 5))
+
+/*
+** mla_lane_w8_z4_z15_6:
+** str d15, \[sp, #?-16\]!
+** umlal za\.s\[w8, 0:1, vgx2\], {z4\.h - z5\.h}, z15\.h\[6\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mla_lane_w8_z4_z15_6, svuint16x2_t, svuint16_t,
+ svmla_lane_za32_u16_vg2x2 (w8, z4, z15, 6),
+ svmla_lane_za32_vg2x2 (w8, z4, z15, 6))
+
+/*
+** mla_lane_w8_z28_z16_7:
+** mov (z[0-7]).d, z16.d
+** umlal za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, \1\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z16_7, svuint16x2_t, svuint16_t,
+ svmla_lane_za32_u16_vg2x2 (w8, z28, z16, 7),
+ svmla_lane_za32_vg2x2 (w8, z28, z16, 7))
+
+/*
+** mla_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** umlal za\.s\[w8, 0:1, vgx2\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z17_z7_0, svuint16x2_t, svuint16_t,
+ svmla_lane_za32_u16_vg2x2 (w8, z17, z7, 0),
+ svmla_lane_za32_vg2x2 (w8, z17, z7, 0))
+
+/*
+** mla_lane_w8_z22_z4_1:
+** umlal za\.s\[w8, 0:1, vgx2\], {z22\.h - z23\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z22_z4_1, svuint16x2_t, svuint16_t,
+ svmla_lane_za32_u16_vg2x2 (w8, z22, z4, 1),
+ svmla_lane_za32_vg2x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u16_vg2x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u16_vg2x4.c
new file mode 100644
index 0000000..e8d8472
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u16_vg2x4.c
@@ -0,0 +1,118 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** umlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_0_z0_z4_0, svuint16x4_t, svuint16_t,
+ svmla_lane_za32_u16_vg2x4 (0, z0, z4, 0),
+ svmla_lane_za32_vg2x4 (0, z0, z4, 0))
+
+/*
+** mla_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** umlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0_z0_z7_1, svuint16x4_t, svuint16_t,
+ svmla_lane_za32_u16_vg2x4 (w0, z0, z7, 1),
+ svmla_lane_za32_vg2x4 (w0, z0, z7, 1))
+
+/*
+** mla_lane_w8_z28_z4_2:
+** umlal za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z4_2, svuint16x4_t, svuint16_t,
+ svmla_lane_za32_u16_vg2x4 (w8, z28, z4, 2),
+ svmla_lane_za32_vg2x4 (w8, z28, z4, 2))
+
+/*
+** mla_lane_w8p6_z0_z4_7:
+** umlal za\.s\[w8, 6:7, vgx4\], {z0\.h - z3\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p6_z0_z4_7, svuint16x4_t, svuint16_t,
+ svmla_lane_za32_u16_vg2x4 (w8 + 6, z0, z4, 7),
+ svmla_lane_za32_vg2x4 (w8 + 6, z0, z4, 7))
+
+/*
+** mla_lane_w8p7_z0_z4_3:
+** add (w8|w9|w10|w11), w8, #?7
+** umlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p7_z0_z4_3, svuint16x4_t, svuint16_t,
+ svmla_lane_za32_u16_vg2x4 (w8 + 7, z0, z4, 3),
+ svmla_lane_za32_vg2x4 (w8 + 7, z0, z4, 3))
+
+/*
+** mla_lane_w8p8_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?8
+** umlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p8_z0_z4_4, svuint16x4_t, svuint16_t,
+ svmla_lane_za32_u16_vg2x4 (w8 + 8, z0, z4, 4),
+ svmla_lane_za32_vg2x4 (w8 + 8, z0, z4, 4))
+
+/*
+** mla_lane_w0m1_z0_z4_5:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0m1_z0_z4_5, svuint16x4_t, svuint16_t,
+ svmla_lane_za32_u16_vg2x4 (w0 - 1, z0, z4, 5),
+ svmla_lane_za32_vg2x4 (w0 - 1, z0, z4, 5))
+
+/*
+** mla_lane_w8_z4_z15_6:
+** str d15, \[sp, #?-16\]!
+** umlal za\.s\[w8, 0:1, vgx4\], {z4\.h - z7\.h}, z15\.h\[6\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mla_lane_w8_z4_z15_6, svuint16x4_t, svuint16_t,
+ svmla_lane_za32_u16_vg2x4 (w8, z4, z15, 6),
+ svmla_lane_za32_vg2x4 (w8, z4, z15, 6))
+
+/*
+** mla_lane_w8_z28_z16_7:
+** mov (z[0-7]).d, z16.d
+** umlal za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, \1\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z16_7, svuint16x4_t, svuint16_t,
+ svmla_lane_za32_u16_vg2x4 (w8, z28, z16, 7),
+ svmla_lane_za32_vg2x4 (w8, z28, z16, 7))
+
+/*
+** mla_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umlal za\.s\[w8, 0:1, vgx4\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z17_z7_0, svuint16x4_t, svuint16_t,
+ svmla_lane_za32_u16_vg2x4 (w8, z17, z7, 0),
+ svmla_lane_za32_vg2x4 (w8, z17, z7, 0))
+
+/*
+** mla_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umlal za\.s\[w8, 0:1, vgx4\], [^\n]+, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z22_z4_1, svuint16x4_t, svuint16_t,
+ svmla_lane_za32_u16_vg2x4 (w8, z22, z4, 1),
+ svmla_lane_za32_vg2x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u8_vg4x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u8_vg4x1.c
new file mode 100644
index 0000000..c4ca534
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u8_vg4x1.c
@@ -0,0 +1,150 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z0_0:
+** mov (w8|w9|w10|w11), #?0
+** umlall za\.s\[\1, 0:3\], z0\.b, z0\.b\[0\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_0_z0_z0_0, svuint8_t,
+ svmla_lane_za32_u8_vg4x1 (0, z0, z0, 0),
+ svmla_lane_za32_vg4x1 (0, z0, z0, 0))
+
+/*
+** mla_lane_w0_z0_z3_1:
+** mov (w8|w9|w10|w11), w0
+** umlall za\.s\[\1, 0:3\], z0\.b, z3\.b\[1\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w0_z0_z3_1, svuint8_t,
+ svmla_lane_za32_u8_vg4x1 (w0, z0, z3, 1),
+ svmla_lane_za32_vg4x1 (w0, z0, z3, 1))
+
+/*
+** mla_lane_w7_z0_z3_2:
+** mov (w8|w9|w10|w11), w7
+** umlall za\.s\[\1, 0:3\], z0\.b, z3\.b\[2\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w7_z0_z3_2, svuint8_t,
+ svmla_lane_za32_u8_vg4x1 (w7, z0, z3, 2),
+ svmla_lane_za32_vg4x1 (w7, z0, z3, 2))
+
+/*
+** mla_lane_w8_z7_z3_3:
+** umlall za\.s\[w8, 0:3\], z7\.b, z3\.b\[3\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8_z7_z3_3, svuint8_t,
+ svmla_lane_za32_u8_vg4x1 (w8, z7, z3, 3),
+ svmla_lane_za32_vg4x1 (w8, z7, z3, 3))
+
+/*
+** mla_lane_w8_z31_z16_4:
+** mov (z[0-7])\.d, z16\.d
+** umlall za\.s\[w8, 0:3\], z31\.b, \1\.b\[4\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8_z31_z16_4, svuint8_t,
+ svmla_lane_za32_u8_vg4x1 (w8, z31, z16, 4),
+ svmla_lane_za32_vg4x1 (w8, z31, z16, 4))
+
+/*
+** mla_lane_w8p1_z0_z0_5:
+** add (w8|w9|w10|w11), w8, #?1
+** umlall za\.s\[\1, 0:3\], z0\.b, z0\.b\[5\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p1_z0_z0_5, svuint8_t,
+ svmla_lane_za32_u8_vg4x1 (w8 + 1, z0, z0, 5),
+ svmla_lane_za32_vg4x1 (w8 + 1, z0, z0, 5))
+
+/*
+** mla_lane_w8p2_z23_z0_6:
+** add (w8|w9|w10|w11), w8, #?2
+** umlall za\.s\[\1, 0:3\], z23\.b, z0\.b\[6\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p2_z23_z0_6, svuint8_t,
+ svmla_lane_za32_u8_vg4x1 (w8 + 2, z23, z0, 6),
+ svmla_lane_za32_vg4x1 (w8 + 2, z23, z0, 6))
+
+/*
+** mla_lane_w11p4_z23_z0_7:
+** umlall za\.s\[w11, 4:7\], z23\.b, z0\.b\[7\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w11p4_z23_z0_7, svuint8_t,
+ svmla_lane_za32_u8_vg4x1 (w11 + 4, z23, z0, 7),
+ svmla_lane_za32_vg4x1 (w11 + 4, z23, z0, 7))
+
+/*
+** mla_lane_w8p7_z7_z7_8:
+** add (w8|w9|w10|w11), w8, #?7
+** umlall za\.s\[\1, 0:3\], z7\.b, z7\.b\[8\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p7_z7_z7_8, svuint8_t,
+ svmla_lane_za32_u8_vg4x1 (w8 + 7, z7, z7, 8),
+ svmla_lane_za32_vg4x1 (w8 + 7, z7, z7, 8))
+
+/*
+** mla_lane_w11p12_z23_z0_9:
+** umlall za\.s\[w11, 12:15\], z23\.b, z0\.b\[9\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w11p12_z23_z0_9, svuint8_t,
+ svmla_lane_za32_u8_vg4x1 (w11 + 12, z23, z0, 9),
+ svmla_lane_za32_vg4x1 (w11 + 12, z23, z0, 9))
+
+/*
+** mla_lane_w8p14_z23_z0_10:
+** add (w8|w9|w10|w11), w8, #?14
+** umlall za\.s\[\1, 0:3\], z23\.b, z0\.b\[10\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p14_z23_z0_10, svuint8_t,
+ svmla_lane_za32_u8_vg4x1 (w8 + 14, z23, z0, 10),
+ svmla_lane_za32_vg4x1 (w8 + 14, z23, z0, 10))
+
+/*
+** mla_lane_w8p15_z7_z7_11:
+** add (w8|w9|w10|w11), w8, #?15
+** umlall za\.s\[\1, 0:3\], z7\.b, z7\.b\[11\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p15_z7_z7_11, svuint8_t,
+ svmla_lane_za32_u8_vg4x1 (w8 + 15, z7, z7, 11),
+ svmla_lane_za32_vg4x1 (w8 + 15, z7, z7, 11))
+
+/*
+** mla_lane_w8p16_z7_z7_12:
+** add (w8|w9|w10|w11), w8, #?16
+** umlall za\.s\[\1, 0:3\], z7\.b, z7\.b\[12\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p16_z7_z7_12, svuint8_t,
+ svmla_lane_za32_u8_vg4x1 (w8 + 16, z7, z7, 12),
+ svmla_lane_za32_vg4x1 (w8 + 16, z7, z7, 12))
+
+/*
+** mla_lane_w8m1_z16_z0_13:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlall za\.s\[\1, 0:3\], z16\.b, z0\.b\[13\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8m1_z16_z0_13, svuint8_t,
+ svmla_lane_za32_u8_vg4x1 (w8 - 1, z16, z0, 13),
+ svmla_lane_za32_vg4x1 (w8 - 1, z16, z0, 13))
+
+/*
+** mla_lane_w12_z0_z3_15:
+** mov (w8|w9|w10|w11), w12
+** umlall za\.s\[\1, 0:3\], z0\.b, z3\.b\[15\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w12_z0_z3_15, svuint8_t,
+ svmla_lane_za32_u8_vg4x1 (w12, z0, z3, 15),
+ svmla_lane_za32_vg4x1 (w12, z0, z3, 15))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u8_vg4x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u8_vg4x2.c
new file mode 100644
index 0000000..1bee8b5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u8_vg4x2.c
@@ -0,0 +1,122 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** umlall za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_0_z0_z4_0, svuint8x2_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x2 (0, z0, z4, 0),
+ svmla_lane_za32_vg4x2 (0, z0, z4, 0))
+
+/*
+** mla_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** umlall za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z7\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0_z0_z7_1, svuint8x2_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x2 (w0, z0, z7, 1),
+ svmla_lane_za32_vg4x2 (w0, z0, z7, 1))
+
+/*
+** mla_lane_w8_z28_z4_2:
+** umlall za\.s\[w8, 0:3, vgx2\], {z28\.b - z29\.b}, z4\.b\[2\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z4_2, svuint8x2_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x2 (w8, z28, z4, 2),
+ svmla_lane_za32_vg4x2 (w8, z28, z4, 2))
+
+/*
+** mla_lane_w11p4_z0_z4_3:
+** umlall za\.s\[w11, 4:7, vgx2\], {z0\.b - z1\.b}, z4\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w11p4_z0_z4_3, svuint8x2_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x2 (w11 + 4, z0, z4, 3),
+ svmla_lane_za32_vg4x2 (w11 + 4, z0, z4, 3))
+
+/*
+** mla_lane_w8p6_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?6
+** umlall za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z4\.b\[4\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p6_z0_z4_4, svuint8x2_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x2 (w8 + 6, z0, z4, 4),
+ svmla_lane_za32_vg4x2 (w8 + 6, z0, z4, 4))
+
+/*
+** mla_lane_w8p7_z0_z4_5:
+** add (w8|w9|w10|w11), w8, #?7
+** umlall za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z4\.b\[5\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p7_z0_z4_5, svuint8x2_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x2 (w8 + 7, z0, z4, 5),
+ svmla_lane_za32_vg4x2 (w8 + 7, z0, z4, 5))
+
+/*
+** mla_lane_w8p8_z0_z4_7:
+** add (w8|w9|w10|w11), w8, #?8
+** umlall za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z4\.b\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p8_z0_z4_7, svuint8x2_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x2 (w8 + 8, z0, z4, 7),
+ svmla_lane_za32_vg4x2 (w8 + 8, z0, z4, 7))
+
+/*
+** mla_lane_w0m1_z0_z4_9:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlall za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z4\.b\[9\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0m1_z0_z4_9, svuint8x2_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x2 (w0 - 1, z0, z4, 9),
+ svmla_lane_za32_vg4x2 (w0 - 1, z0, z4, 9))
+
+/*
+** mla_lane_w8_z4_z15_10:
+** str d15, \[sp, #?-16\]!
+** umlall za\.s\[w8, 0:3, vgx2\], {z4\.b - z5\.b}, z15\.b\[10\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mla_lane_w8_z4_z15_10, svuint8x2_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x2 (w8, z4, z15, 10),
+ svmla_lane_za32_vg4x2 (w8, z4, z15, 10))
+
+/*
+** mla_lane_w8_z28_z16_11:
+** mov (z[0-7]).d, z16.d
+** umlall za\.s\[w8, 0:3, vgx2\], {z28\.b - z29\.b}, \1\.b\[11\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z16_11, svuint8x2_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x2 (w8, z28, z16, 11),
+ svmla_lane_za32_vg4x2 (w8, z28, z16, 11))
+
+/*
+** mla_lane_w8_z17_z7_13:
+** mov [^\n]+
+** mov [^\n]+
+** umlall za\.s\[w8, 0:3, vgx2\], [^\n]+, z7\.b\[13\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z17_z7_13, svuint8x2_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x2 (w8, z17, z7, 13),
+ svmla_lane_za32_vg4x2 (w8, z17, z7, 13))
+
+/*
+** mla_lane_w8_z22_z4_15:
+** umlall za\.s\[w8, 0:3, vgx2\], {z22\.b - z23\.b}, z4\.b\[15\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z22_z4_15, svuint8x2_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x2 (w8, z22, z4, 15),
+ svmla_lane_za32_vg4x2 (w8, z22, z4, 15))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u8_vg4x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u8_vg4x4.c
new file mode 100644
index 0000000..e0e3a53
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za32_u8_vg4x4.c
@@ -0,0 +1,128 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** umlall za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_0_z0_z4_0, svuint8x4_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x4 (0, z0, z4, 0),
+ svmla_lane_za32_vg4x4 (0, z0, z4, 0))
+
+/*
+** mla_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** umlall za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z7\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0_z0_z7_1, svuint8x4_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x4 (w0, z0, z7, 1),
+ svmla_lane_za32_vg4x4 (w0, z0, z7, 1))
+
+/*
+** mla_lane_w8_z28_z4_2:
+** umlall za\.s\[w8, 0:3, vgx4\], {z28\.b - z31\.b}, z4\.b\[2\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z4_2, svuint8x4_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x4 (w8, z28, z4, 2),
+ svmla_lane_za32_vg4x4 (w8, z28, z4, 2))
+
+/*
+** mla_lane_w11p4_z0_z4_7:
+** umlall za\.s\[w11, 4:7, vgx4\], {z0\.b - z3\.b}, z4\.b\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w11p4_z0_z4_7, svuint8x4_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x4 (w11 + 4, z0, z4, 7),
+ svmla_lane_za32_vg4x4 (w11 + 4, z0, z4, 7))
+
+/*
+** mla_lane_w8p6_z0_z4_8:
+** add (w8|w9|w10|w11), w8, #?6
+** umlall za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z4\.b\[8\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p6_z0_z4_8, svuint8x4_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x4 (w8 + 6, z0, z4, 8),
+ svmla_lane_za32_vg4x4 (w8 + 6, z0, z4, 8))
+
+/*
+** mla_lane_w8p7_z0_z4_9:
+** add (w8|w9|w10|w11), w8, #?7
+** umlall za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z4\.b\[9\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p7_z0_z4_9, svuint8x4_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x4 (w8 + 7, z0, z4, 9),
+ svmla_lane_za32_vg4x4 (w8 + 7, z0, z4, 9))
+
+/*
+** mla_lane_w8p8_z0_z4_10:
+** add (w8|w9|w10|w11), w8, #?8
+** umlall za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z4\.b\[10\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p8_z0_z4_10, svuint8x4_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x4 (w8 + 8, z0, z4, 10),
+ svmla_lane_za32_vg4x4 (w8 + 8, z0, z4, 10))
+
+/*
+** mla_lane_w0m1_z0_z4_11:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlall za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z4\.b\[11\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0m1_z0_z4_11, svuint8x4_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x4 (w0 - 1, z0, z4, 11),
+ svmla_lane_za32_vg4x4 (w0 - 1, z0, z4, 11))
+
+/*
+** mla_lane_w8_z4_z15_12:
+** str d15, \[sp, #?-16\]!
+** umlall za\.s\[w8, 0:3, vgx4\], {z4\.b - z7\.b}, z15\.b\[12\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mla_lane_w8_z4_z15_12, svuint8x4_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x4 (w8, z4, z15, 12),
+ svmla_lane_za32_vg4x4 (w8, z4, z15, 12))
+
+/*
+** mla_lane_w8_z28_z16_13:
+** mov (z[0-7]).d, z16.d
+** umlall za\.s\[w8, 0:3, vgx4\], {z28\.b - z31\.b}, \1\.b\[13\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z16_13, svuint8x4_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x4 (w8, z28, z16, 13),
+ svmla_lane_za32_vg4x4 (w8, z28, z16, 13))
+
+/*
+** mla_lane_w8_z17_z7_14:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umlall za\.s\[w8, 0:3, vgx4\], [^\n]+, z7\.b\[14\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z17_z7_14, svuint8x4_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x4 (w8, z17, z7, 14),
+ svmla_lane_za32_vg4x4 (w8, z17, z7, 14))
+
+/*
+** mla_lane_w8_z22_z4_15:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umlall za\.s\[w8, 0:3, vgx4\], [^\n]+, z4\.b\[15\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z22_z4_15, svuint8x4_t, svuint8_t,
+ svmla_lane_za32_u8_vg4x4 (w8, z22, z4, 15),
+ svmla_lane_za32_vg4x4 (w8, z22, z4, 15))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_f64_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_f64_vg1x2.c
new file mode 100644
index 0000000..f1f7fc7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_f64_vg1x2.c
@@ -0,0 +1,104 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-f64f64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** fmla za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}, z4\.d\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_0_z0_z4_0, svfloat64x2_t, svfloat64_t,
+ svmla_lane_za64_f64_vg1x2 (0, z0, z4, 0),
+ svmla_lane_za64_vg1x2 (0, z0, z4, 0))
+
+/*
+** mla_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** fmla za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}, z7\.d\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0_z0_z7_1, svfloat64x2_t, svfloat64_t,
+ svmla_lane_za64_f64_vg1x2 (w0, z0, z7, 1),
+ svmla_lane_za64_vg1x2 (w0, z0, z7, 1))
+
+/*
+** mla_lane_w8_z28_z4_2:
+** fmla za\.d\[w8, 0, vgx2\], {z28\.d - z29\.d}, z4\.d\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z4_2, svfloat64x2_t, svfloat64_t,
+ svmla_lane_za64_f64_vg1x2 (w8, z28, z4, 0),
+ svmla_lane_za64_vg1x2 (w8, z28, z4, 0))
+
+/*
+** mla_lane_w8p7_z0_z4_3:
+** fmla za\.d\[w8, 7, vgx2\], {z0\.d - z1\.d}, z4\.d\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p7_z0_z4_3, svfloat64x2_t, svfloat64_t,
+ svmla_lane_za64_f64_vg1x2 (w8 + 7, z0, z4, 1),
+ svmla_lane_za64_vg1x2 (w8 + 7, z0, z4, 1))
+
+/*
+** mla_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** fmla za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}, z4\.d\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p8_z0_z4_0, svfloat64x2_t, svfloat64_t,
+ svmla_lane_za64_f64_vg1x2 (w8 + 8, z0, z4, 0),
+ svmla_lane_za64_vg1x2 (w8 + 8, z0, z4, 0))
+
+/*
+** mla_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmla za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}, z4\.d\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0m1_z0_z4_1, svfloat64x2_t, svfloat64_t,
+ svmla_lane_za64_f64_vg1x2 (w0 - 1, z0, z4, 1),
+ svmla_lane_za64_vg1x2 (w0 - 1, z0, z4, 1))
+
+/*
+** mla_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** fmla za\.d\[w8, 0, vgx2\], {z4\.d - z5\.d}, z15\.d\[0\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mla_lane_w8_z4_z15_2, svfloat64x2_t, svfloat64_t,
+ svmla_lane_za64_f64_vg1x2 (w8, z4, z15, 0),
+ svmla_lane_za64_vg1x2 (w8, z4, z15, 0))
+
+/*
+** mla_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** fmla za\.d\[w8, 0, vgx2\], {z28\.d - z29\.d}, \1\.d\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z16_3, svfloat64x2_t, svfloat64_t,
+ svmla_lane_za64_f64_vg1x2 (w8, z28, z16, 1),
+ svmla_lane_za64_vg1x2 (w8, z28, z16, 1))
+
+/*
+** mla_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** fmla za\.d\[w8, 0, vgx2\], [^\n]+, z7\.d\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z17_z7_0, svfloat64x2_t, svfloat64_t,
+ svmla_lane_za64_f64_vg1x2 (w8, z17, z7, 0),
+ svmla_lane_za64_vg1x2 (w8, z17, z7, 0))
+
+/*
+** mla_lane_w8_z22_z4_1:
+** fmla za\.d\[w8, 0, vgx2\], {z22\.d - z23\.d}, z4\.d\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z22_z4_1, svfloat64x2_t, svfloat64_t,
+ svmla_lane_za64_f64_vg1x2 (w8, z22, z4, 1),
+ svmla_lane_za64_vg1x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_f64_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_f64_vg1x4.c
new file mode 100644
index 0000000..3dbb7c0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_f64_vg1x4.c
@@ -0,0 +1,110 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-f64f64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** fmla za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}, z4\.d\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_0_z0_z4_0, svfloat64x4_t, svfloat64_t,
+ svmla_lane_za64_f64_vg1x4 (0, z0, z4, 0),
+ svmla_lane_za64_vg1x4 (0, z0, z4, 0))
+
+/*
+** mla_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** fmla za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}, z7\.d\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0_z0_z7_1, svfloat64x4_t, svfloat64_t,
+ svmla_lane_za64_f64_vg1x4 (w0, z0, z7, 1),
+ svmla_lane_za64_vg1x4 (w0, z0, z7, 1))
+
+/*
+** mla_lane_w8_z28_z4_2:
+** fmla za\.d\[w8, 0, vgx4\], {z28\.d - z31\.d}, z4\.d\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z4_2, svfloat64x4_t, svfloat64_t,
+ svmla_lane_za64_f64_vg1x4 (w8, z28, z4, 0),
+ svmla_lane_za64_vg1x4 (w8, z28, z4, 0))
+
+/*
+** mla_lane_w8p7_z0_z4_3:
+** fmla za\.d\[w8, 7, vgx4\], {z0\.d - z3\.d}, z4\.d\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p7_z0_z4_3, svfloat64x4_t, svfloat64_t,
+ svmla_lane_za64_f64_vg1x4 (w8 + 7, z0, z4, 1),
+ svmla_lane_za64_vg1x4 (w8 + 7, z0, z4, 1))
+
+/*
+** mla_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** fmla za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}, z4\.d\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p8_z0_z4_0, svfloat64x4_t, svfloat64_t,
+ svmla_lane_za64_f64_vg1x4 (w8 + 8, z0, z4, 0),
+ svmla_lane_za64_vg1x4 (w8 + 8, z0, z4, 0))
+
+/*
+** mla_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmla za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}, z4\.d\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0m1_z0_z4_1, svfloat64x4_t, svfloat64_t,
+ svmla_lane_za64_f64_vg1x4 (w0 - 1, z0, z4, 1),
+ svmla_lane_za64_vg1x4 (w0 - 1, z0, z4, 1))
+
+/*
+** mla_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** fmla za\.d\[w8, 0, vgx4\], {z4\.d - z7\.d}, z15\.d\[0\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mla_lane_w8_z4_z15_2, svfloat64x4_t, svfloat64_t,
+ svmla_lane_za64_f64_vg1x4 (w8, z4, z15, 0),
+ svmla_lane_za64_vg1x4 (w8, z4, z15, 0))
+
+/*
+** mla_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** fmla za\.d\[w8, 0, vgx4\], {z28\.d - z31\.d}, \1\.d\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z16_3, svfloat64x4_t, svfloat64_t,
+ svmla_lane_za64_f64_vg1x4 (w8, z28, z16, 1),
+ svmla_lane_za64_vg1x4 (w8, z28, z16, 1))
+
+/*
+** mla_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmla za\.d\[w8, 0, vgx4\], [^\n]+, z7\.d\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z17_z7_0, svfloat64x4_t, svfloat64_t,
+ svmla_lane_za64_f64_vg1x4 (w8, z17, z7, 0),
+ svmla_lane_za64_vg1x4 (w8, z17, z7, 0))
+
+/*
+** mla_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmla za\.d\[w8, 0, vgx4\], [^\n]+, z4\.d\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z22_z4_1, svfloat64x4_t, svfloat64_t,
+ svmla_lane_za64_f64_vg1x4 (w8, z22, z4, 1),
+ svmla_lane_za64_vg1x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_s16_vg4x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_s16_vg4x1.c
new file mode 100644
index 0000000..0ba8cae
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_s16_vg4x1.c
@@ -0,0 +1,152 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z0_0:
+** mov (w8|w9|w10|w11), #?0
+** smlall za\.d\[\1, 0:3\], z0\.h, z0\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_0_z0_z0_0, svint16_t,
+ svmla_lane_za64_s16_vg4x1 (0, z0, z0, 0),
+ svmla_lane_za64_vg4x1 (0, z0, z0, 0))
+
+/*
+** mla_lane_w0_z0_z3_1:
+** mov (w8|w9|w10|w11), w0
+** smlall za\.d\[\1, 0:3\], z0\.h, z3\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w0_z0_z3_1, svint16_t,
+ svmla_lane_za64_s16_vg4x1 (w0, z0, z3, 1),
+ svmla_lane_za64_vg4x1 (w0, z0, z3, 1))
+
+/*
+** mla_lane_w7_z0_z3_2:
+** mov (w8|w9|w10|w11), w7
+** smlall za\.d\[\1, 0:3\], z0\.h, z3\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w7_z0_z3_2, svint16_t,
+ svmla_lane_za64_s16_vg4x1 (w7, z0, z3, 2),
+ svmla_lane_za64_vg4x1 (w7, z0, z3, 2))
+
+/*
+** mla_lane_w8_z7_z3_3:
+** smlall za\.d\[w8, 0:3\], z7\.h, z3\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8_z7_z3_3, svint16_t,
+ svmla_lane_za64_s16_vg4x1 (w8, z7, z3, 3),
+ svmla_lane_za64_vg4x1 (w8, z7, z3, 3))
+
+/*
+** mla_lane_w8_z31_z16_4:
+** mov (z[0-7])\.d, z16\.d
+** smlall za\.d\[w8, 0:3\], z31\.h, \1\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8_z31_z16_4, svint16_t,
+ svmla_lane_za64_s16_vg4x1 (w8, z31, z16, 4),
+ svmla_lane_za64_vg4x1 (w8, z31, z16, 4))
+
+/*
+** mla_lane_w8p1_z0_z0_5:
+** add (w8|w9|w10|w11), w8, #?1
+** smlall za\.d\[\1, 0:3\], z0\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p1_z0_z0_5, svint16_t,
+ svmla_lane_za64_s16_vg4x1 (w8 + 1, z0, z0, 5),
+ svmla_lane_za64_vg4x1 (w8 + 1, z0, z0, 5))
+
+/*
+** mla_lane_w8p2_z23_z0_6:
+** add (w8|w9|w10|w11), w8, #?2
+** smlall za\.d\[\1, 0:3\], z23\.h, z0\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p2_z23_z0_6, svint16_t,
+ svmla_lane_za64_s16_vg4x1 (w8 + 2, z23, z0, 6),
+ svmla_lane_za64_vg4x1 (w8 + 2, z23, z0, 6))
+
+/*
+** mla_lane_w11p4_z23_z0_7:
+** smlall za\.d\[w11, 4:7\], z23\.h, z0\.h\[7\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w11p4_z23_z0_7, svint16_t,
+ svmla_lane_za64_s16_vg4x1 (w11 + 4, z23, z0, 7),
+ svmla_lane_za64_vg4x1 (w11 + 4, z23, z0, 7))
+
+/*
+** mla_lane_w8p7_z7_z7_0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlall za\.d\[\1, 0:3\], z7\.h, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p7_z7_z7_0, svint16_t,
+ svmla_lane_za64_s16_vg4x1 (w8 + 7, z7, z7, 0),
+ svmla_lane_za64_vg4x1 (w8 + 7, z7, z7, 0))
+
+/*
+** mla_lane_w11p12_z23_z0_1:
+** smlall za\.d\[w11, 12:15\], z23\.h, z0\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w11p12_z23_z0_1, svint16_t,
+ svmla_lane_za64_s16_vg4x1 (w11 + 12, z23, z0, 1),
+ svmla_lane_za64_vg4x1 (w11 + 12, z23, z0, 1))
+
+/*
+** mla_lane_w8p14_z23_z0_2:
+** add (w8|w9|w10|w11), w8, #?14
+** smlall za\.d\[\1, 0:3\], z23\.h, z0\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p14_z23_z0_2, svint16_t,
+ svmla_lane_za64_s16_vg4x1 (w8 + 14, z23, z0, 2),
+ svmla_lane_za64_vg4x1 (w8 + 14, z23, z0, 2))
+
+/*
+** mla_lane_w8p15_z7_z7_3:
+** add (w8|w9|w10|w11), w8, #?15
+** smlall za\.d\[\1, 0:3\], z7\.h, z7\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p15_z7_z7_3, svint16_t,
+ svmla_lane_za64_s16_vg4x1 (w8 + 15, z7, z7, 3),
+ svmla_lane_za64_vg4x1 (w8 + 15, z7, z7, 3))
+
+/*
+** mla_lane_w8p16_z7_z7_4:
+** add (w8|w9|w10|w11), w8, #?16
+** smlall za\.d\[\1, 0:3\], z7\.h, z7\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p16_z7_z7_4, svint16_t,
+ svmla_lane_za64_s16_vg4x1 (w8 + 16, z7, z7, 4),
+ svmla_lane_za64_vg4x1 (w8 + 16, z7, z7, 4))
+
+/*
+** mla_lane_w8m1_z16_z0_5:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlall za\.d\[\1, 0:3\], z16\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8m1_z16_z0_5, svint16_t,
+ svmla_lane_za64_s16_vg4x1 (w8 - 1, z16, z0, 5),
+ svmla_lane_za64_vg4x1 (w8 - 1, z16, z0, 5))
+
+/*
+** mla_lane_w12_z0_z3_6:
+** mov (w8|w9|w10|w11), w12
+** smlall za\.d\[\1, 0:3\], z0\.h, z3\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w12_z0_z3_6, svint16_t,
+ svmla_lane_za64_s16_vg4x1 (w12, z0, z3, 6),
+ svmla_lane_za64_vg4x1 (w12, z0, z3, 6))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_s16_vg4x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_s16_vg4x2.c
new file mode 100644
index 0000000..583f3a5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_s16_vg4x2.c
@@ -0,0 +1,124 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** smlall za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_0_z0_z4_0, svint16x2_t, svint16_t,
+ svmla_lane_za64_s16_vg4x2 (0, z0, z4, 0),
+ svmla_lane_za64_vg4x2 (0, z0, z4, 0))
+
+/*
+** mla_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** smlall za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0_z0_z7_1, svint16x2_t, svint16_t,
+ svmla_lane_za64_s16_vg4x2 (w0, z0, z7, 1),
+ svmla_lane_za64_vg4x2 (w0, z0, z7, 1))
+
+/*
+** mla_lane_w8_z28_z4_2:
+** smlall za\.d\[w8, 0:3, vgx2\], {z28\.h - z29\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z4_2, svint16x2_t, svint16_t,
+ svmla_lane_za64_s16_vg4x2 (w8, z28, z4, 2),
+ svmla_lane_za64_vg4x2 (w8, z28, z4, 2))
+
+/*
+** mla_lane_w11p4_z0_z4_3:
+** smlall za\.d\[w11, 4:7, vgx2\], {z0\.h - z1\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w11p4_z0_z4_3, svint16x2_t, svint16_t,
+ svmla_lane_za64_s16_vg4x2 (w11 + 4, z0, z4, 3),
+ svmla_lane_za64_vg4x2 (w11 + 4, z0, z4, 3))
+
+/*
+** mla_lane_w8p6_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?6
+** smlall za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p6_z0_z4_4, svint16x2_t, svint16_t,
+ svmla_lane_za64_s16_vg4x2 (w8 + 6, z0, z4, 4),
+ svmla_lane_za64_vg4x2 (w8 + 6, z0, z4, 4))
+
+/*
+** mla_lane_w8p7_z0_z4_5:
+** add (w8|w9|w10|w11), w8, #?7
+** smlall za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p7_z0_z4_5, svint16x2_t, svint16_t,
+ svmla_lane_za64_s16_vg4x2 (w8 + 7, z0, z4, 5),
+ svmla_lane_za64_vg4x2 (w8 + 7, z0, z4, 5))
+
+/*
+** mla_lane_w8p8_z0_z4_6:
+** add (w8|w9|w10|w11), w8, #?8
+** smlall za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z4\.h\[6\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p8_z0_z4_6, svint16x2_t, svint16_t,
+ svmla_lane_za64_s16_vg4x2 (w8 + 8, z0, z4, 6),
+ svmla_lane_za64_vg4x2 (w8 + 8, z0, z4, 6))
+
+/*
+** mla_lane_w0m1_z0_z4_7:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlall za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0m1_z0_z4_7, svint16x2_t, svint16_t,
+ svmla_lane_za64_s16_vg4x2 (w0 - 1, z0, z4, 7),
+ svmla_lane_za64_vg4x2 (w0 - 1, z0, z4, 7))
+
+/*
+** mla_lane_w8_z4_z15_0:
+** str d15, \[sp, #?-16\]!
+** smlall za\.d\[w8, 0:3, vgx2\], {z4\.h - z5\.h}, z15\.h\[0\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mla_lane_w8_z4_z15_0, svint16x2_t, svint16_t,
+ svmla_lane_za64_s16_vg4x2 (w8, z4, z15, 0),
+ svmla_lane_za64_vg4x2 (w8, z4, z15, 0))
+
+/*
+** mla_lane_w8_z28_z16_1:
+** mov (z[0-7]).d, z16.d
+** smlall za\.d\[w8, 0:3, vgx2\], {z28\.h - z29\.h}, \1\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z16_1, svint16x2_t, svint16_t,
+ svmla_lane_za64_s16_vg4x2 (w8, z28, z16, 1),
+ svmla_lane_za64_vg4x2 (w8, z28, z16, 1))
+
+/*
+** mla_lane_w8_z17_z7_3:
+** mov [^\n]+
+** mov [^\n]+
+** smlall za\.d\[w8, 0:3, vgx2\], [^\n]+, z7\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z17_z7_3, svint16x2_t, svint16_t,
+ svmla_lane_za64_s16_vg4x2 (w8, z17, z7, 3),
+ svmla_lane_za64_vg4x2 (w8, z17, z7, 3))
+
+/*
+** mla_lane_w8_z22_z4_5:
+** smlall za\.d\[w8, 0:3, vgx2\], {z22\.h - z23\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z22_z4_5, svint16x2_t, svint16_t,
+ svmla_lane_za64_s16_vg4x2 (w8, z22, z4, 5),
+ svmla_lane_za64_vg4x2 (w8, z22, z4, 5))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_s16_vg4x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_s16_vg4x4.c
new file mode 100644
index 0000000..9a118a7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_s16_vg4x4.c
@@ -0,0 +1,130 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** smlall za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_0_z0_z4_0, svint16x4_t, svint16_t,
+ svmla_lane_za64_s16_vg4x4 (0, z0, z4, 0),
+ svmla_lane_za64_vg4x4 (0, z0, z4, 0))
+
+/*
+** mla_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** smlall za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0_z0_z7_1, svint16x4_t, svint16_t,
+ svmla_lane_za64_s16_vg4x4 (w0, z0, z7, 1),
+ svmla_lane_za64_vg4x4 (w0, z0, z7, 1))
+
+/*
+** mla_lane_w8_z28_z4_2:
+** smlall za\.d\[w8, 0:3, vgx4\], {z28\.h - z31\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z4_2, svint16x4_t, svint16_t,
+ svmla_lane_za64_s16_vg4x4 (w8, z28, z4, 2),
+ svmla_lane_za64_vg4x4 (w8, z28, z4, 2))
+
+/*
+** mla_lane_w11p4_z0_z4_3:
+** smlall za\.d\[w11, 4:7, vgx4\], {z0\.h - z3\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w11p4_z0_z4_3, svint16x4_t, svint16_t,
+ svmla_lane_za64_s16_vg4x4 (w11 + 4, z0, z4, 3),
+ svmla_lane_za64_vg4x4 (w11 + 4, z0, z4, 3))
+
+/*
+** mla_lane_w8p6_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?6
+** smlall za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p6_z0_z4_4, svint16x4_t, svint16_t,
+ svmla_lane_za64_s16_vg4x4 (w8 + 6, z0, z4, 4),
+ svmla_lane_za64_vg4x4 (w8 + 6, z0, z4, 4))
+
+/*
+** mla_lane_w8p7_z0_z4_5:
+** add (w8|w9|w10|w11), w8, #?7
+** smlall za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p7_z0_z4_5, svint16x4_t, svint16_t,
+ svmla_lane_za64_s16_vg4x4 (w8 + 7, z0, z4, 5),
+ svmla_lane_za64_vg4x4 (w8 + 7, z0, z4, 5))
+
+/*
+** mla_lane_w8p8_z0_z4_6:
+** add (w8|w9|w10|w11), w8, #?8
+** smlall za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z4\.h\[6\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p8_z0_z4_6, svint16x4_t, svint16_t,
+ svmla_lane_za64_s16_vg4x4 (w8 + 8, z0, z4, 6),
+ svmla_lane_za64_vg4x4 (w8 + 8, z0, z4, 6))
+
+/*
+** mla_lane_w0m1_z0_z4_7:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlall za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0m1_z0_z4_7, svint16x4_t, svint16_t,
+ svmla_lane_za64_s16_vg4x4 (w0 - 1, z0, z4, 7),
+ svmla_lane_za64_vg4x4 (w0 - 1, z0, z4, 7))
+
+/*
+** mla_lane_w8_z4_z15_0:
+** str d15, \[sp, #?-16\]!
+** smlall za\.d\[w8, 0:3, vgx4\], {z4\.h - z7\.h}, z15\.h\[0\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mla_lane_w8_z4_z15_0, svint16x4_t, svint16_t,
+ svmla_lane_za64_s16_vg4x4 (w8, z4, z15, 0),
+ svmla_lane_za64_vg4x4 (w8, z4, z15, 0))
+
+/*
+** mla_lane_w8_z28_z16_3:
+** mov (z[0-7])\.d, z16\.d
+** smlall za\.d\[w8, 0:3, vgx4\], {z28\.h - z31\.h}, \1\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z16_3, svint16x4_t, svint16_t,
+ svmla_lane_za64_s16_vg4x4 (w8, z28, z16, 3),
+ svmla_lane_za64_vg4x4 (w8, z28, z16, 3))
+
+/*
+** mla_lane_w8_z17_z7_4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smlall za\.d\[w8, 0:3, vgx4\], [^\n]+, z7\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z17_z7_4, svint16x4_t, svint16_t,
+ svmla_lane_za64_s16_vg4x4 (w8, z17, z7, 4),
+ svmla_lane_za64_vg4x4 (w8, z17, z7, 4))
+
+/*
+** mla_lane_w8_z22_z4_6:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smlall za\.d\[w8, 0:3, vgx4\], [^\n]+, z4\.h\[6\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z22_z4_6, svint16x4_t, svint16_t,
+ svmla_lane_za64_s16_vg4x4 (w8, z22, z4, 6),
+ svmla_lane_za64_vg4x4 (w8, z22, z4, 6))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_u16_vg4x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_u16_vg4x1.c
new file mode 100644
index 0000000..d55a9cb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_u16_vg4x1.c
@@ -0,0 +1,152 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z0_0:
+** mov (w8|w9|w10|w11), #?0
+** umlall za\.d\[\1, 0:3\], z0\.h, z0\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_0_z0_z0_0, svuint16_t,
+ svmla_lane_za64_u16_vg4x1 (0, z0, z0, 0),
+ svmla_lane_za64_vg4x1 (0, z0, z0, 0))
+
+/*
+** mla_lane_w0_z0_z3_1:
+** mov (w8|w9|w10|w11), w0
+** umlall za\.d\[\1, 0:3\], z0\.h, z3\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w0_z0_z3_1, svuint16_t,
+ svmla_lane_za64_u16_vg4x1 (w0, z0, z3, 1),
+ svmla_lane_za64_vg4x1 (w0, z0, z3, 1))
+
+/*
+** mla_lane_w7_z0_z3_2:
+** mov (w8|w9|w10|w11), w7
+** umlall za\.d\[\1, 0:3\], z0\.h, z3\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w7_z0_z3_2, svuint16_t,
+ svmla_lane_za64_u16_vg4x1 (w7, z0, z3, 2),
+ svmla_lane_za64_vg4x1 (w7, z0, z3, 2))
+
+/*
+** mla_lane_w8_z7_z3_3:
+** umlall za\.d\[w8, 0:3\], z7\.h, z3\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8_z7_z3_3, svuint16_t,
+ svmla_lane_za64_u16_vg4x1 (w8, z7, z3, 3),
+ svmla_lane_za64_vg4x1 (w8, z7, z3, 3))
+
+/*
+** mla_lane_w8_z31_z16_4:
+** mov (z[0-7])\.d, z16\.d
+** umlall za\.d\[w8, 0:3\], z31\.h, \1\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8_z31_z16_4, svuint16_t,
+ svmla_lane_za64_u16_vg4x1 (w8, z31, z16, 4),
+ svmla_lane_za64_vg4x1 (w8, z31, z16, 4))
+
+/*
+** mla_lane_w8p1_z0_z0_5:
+** add (w8|w9|w10|w11), w8, #?1
+** umlall za\.d\[\1, 0:3\], z0\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p1_z0_z0_5, svuint16_t,
+ svmla_lane_za64_u16_vg4x1 (w8 + 1, z0, z0, 5),
+ svmla_lane_za64_vg4x1 (w8 + 1, z0, z0, 5))
+
+/*
+** mla_lane_w8p2_z23_z0_6:
+** add (w8|w9|w10|w11), w8, #?2
+** umlall za\.d\[\1, 0:3\], z23\.h, z0\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p2_z23_z0_6, svuint16_t,
+ svmla_lane_za64_u16_vg4x1 (w8 + 2, z23, z0, 6),
+ svmla_lane_za64_vg4x1 (w8 + 2, z23, z0, 6))
+
+/*
+** mla_lane_w11p4_z23_z0_7:
+** umlall za\.d\[w11, 4:7\], z23\.h, z0\.h\[7\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w11p4_z23_z0_7, svuint16_t,
+ svmla_lane_za64_u16_vg4x1 (w11 + 4, z23, z0, 7),
+ svmla_lane_za64_vg4x1 (w11 + 4, z23, z0, 7))
+
+/*
+** mla_lane_w8p7_z7_z7_0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlall za\.d\[\1, 0:3\], z7\.h, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p7_z7_z7_0, svuint16_t,
+ svmla_lane_za64_u16_vg4x1 (w8 + 7, z7, z7, 0),
+ svmla_lane_za64_vg4x1 (w8 + 7, z7, z7, 0))
+
+/*
+** mla_lane_w11p12_z23_z0_1:
+** umlall za\.d\[w11, 12:15\], z23\.h, z0\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w11p12_z23_z0_1, svuint16_t,
+ svmla_lane_za64_u16_vg4x1 (w11 + 12, z23, z0, 1),
+ svmla_lane_za64_vg4x1 (w11 + 12, z23, z0, 1))
+
+/*
+** mla_lane_w8p14_z23_z0_2:
+** add (w8|w9|w10|w11), w8, #?14
+** umlall za\.d\[\1, 0:3\], z23\.h, z0\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p14_z23_z0_2, svuint16_t,
+ svmla_lane_za64_u16_vg4x1 (w8 + 14, z23, z0, 2),
+ svmla_lane_za64_vg4x1 (w8 + 14, z23, z0, 2))
+
+/*
+** mla_lane_w8p15_z7_z7_3:
+** add (w8|w9|w10|w11), w8, #?15
+** umlall za\.d\[\1, 0:3\], z7\.h, z7\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p15_z7_z7_3, svuint16_t,
+ svmla_lane_za64_u16_vg4x1 (w8 + 15, z7, z7, 3),
+ svmla_lane_za64_vg4x1 (w8 + 15, z7, z7, 3))
+
+/*
+** mla_lane_w8p16_z7_z7_4:
+** add (w8|w9|w10|w11), w8, #?16
+** umlall za\.d\[\1, 0:3\], z7\.h, z7\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8p16_z7_z7_4, svuint16_t,
+ svmla_lane_za64_u16_vg4x1 (w8 + 16, z7, z7, 4),
+ svmla_lane_za64_vg4x1 (w8 + 16, z7, z7, 4))
+
+/*
+** mla_lane_w8m1_z16_z0_5:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlall za\.d\[\1, 0:3\], z16\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w8m1_z16_z0_5, svuint16_t,
+ svmla_lane_za64_u16_vg4x1 (w8 - 1, z16, z0, 5),
+ svmla_lane_za64_vg4x1 (w8 - 1, z16, z0, 5))
+
+/*
+** mla_lane_w12_z0_z3_6:
+** mov (w8|w9|w10|w11), w12
+** umlall za\.d\[\1, 0:3\], z0\.h, z3\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mla_lane_w12_z0_z3_6, svuint16_t,
+ svmla_lane_za64_u16_vg4x1 (w12, z0, z3, 6),
+ svmla_lane_za64_vg4x1 (w12, z0, z3, 6))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_u16_vg4x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_u16_vg4x2.c
new file mode 100644
index 0000000..8bfb05a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_u16_vg4x2.c
@@ -0,0 +1,124 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** umlall za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_0_z0_z4_0, svuint16x2_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x2 (0, z0, z4, 0),
+ svmla_lane_za64_vg4x2 (0, z0, z4, 0))
+
+/*
+** mla_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** umlall za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0_z0_z7_1, svuint16x2_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x2 (w0, z0, z7, 1),
+ svmla_lane_za64_vg4x2 (w0, z0, z7, 1))
+
+/*
+** mla_lane_w8_z28_z4_2:
+** umlall za\.d\[w8, 0:3, vgx2\], {z28\.h - z29\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z4_2, svuint16x2_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x2 (w8, z28, z4, 2),
+ svmla_lane_za64_vg4x2 (w8, z28, z4, 2))
+
+/*
+** mla_lane_w11p4_z0_z4_3:
+** umlall za\.d\[w11, 4:7, vgx2\], {z0\.h - z1\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w11p4_z0_z4_3, svuint16x2_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x2 (w11 + 4, z0, z4, 3),
+ svmla_lane_za64_vg4x2 (w11 + 4, z0, z4, 3))
+
+/*
+** mla_lane_w8p6_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?6
+** umlall za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p6_z0_z4_4, svuint16x2_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x2 (w8 + 6, z0, z4, 4),
+ svmla_lane_za64_vg4x2 (w8 + 6, z0, z4, 4))
+
+/*
+** mla_lane_w8p7_z0_z4_5:
+** add (w8|w9|w10|w11), w8, #?7
+** umlall za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p7_z0_z4_5, svuint16x2_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x2 (w8 + 7, z0, z4, 5),
+ svmla_lane_za64_vg4x2 (w8 + 7, z0, z4, 5))
+
+/*
+** mla_lane_w8p8_z0_z4_6:
+** add (w8|w9|w10|w11), w8, #?8
+** umlall za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z4\.h\[6\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p8_z0_z4_6, svuint16x2_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x2 (w8 + 8, z0, z4, 6),
+ svmla_lane_za64_vg4x2 (w8 + 8, z0, z4, 6))
+
+/*
+** mla_lane_w0m1_z0_z4_7:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlall za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0m1_z0_z4_7, svuint16x2_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x2 (w0 - 1, z0, z4, 7),
+ svmla_lane_za64_vg4x2 (w0 - 1, z0, z4, 7))
+
+/*
+** mla_lane_w8_z4_z15_0:
+** str d15, \[sp, #?-16\]!
+** umlall za\.d\[w8, 0:3, vgx2\], {z4\.h - z5\.h}, z15\.h\[0\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mla_lane_w8_z4_z15_0, svuint16x2_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x2 (w8, z4, z15, 0),
+ svmla_lane_za64_vg4x2 (w8, z4, z15, 0))
+
+/*
+** mla_lane_w8_z28_z16_1:
+** mov (z[0-7])\.d, z16\.d
+** umlall za\.d\[w8, 0:3, vgx2\], {z28\.h - z29\.h}, \1\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z16_1, svuint16x2_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x2 (w8, z28, z16, 1),
+ svmla_lane_za64_vg4x2 (w8, z28, z16, 1))
+
+/*
+** mla_lane_w8_z17_z7_3:
+** mov [^\n]+
+** mov [^\n]+
+** umlall za\.d\[w8, 0:3, vgx2\], [^\n]+, z7\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z17_z7_3, svuint16x2_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x2 (w8, z17, z7, 3),
+ svmla_lane_za64_vg4x2 (w8, z17, z7, 3))
+
+/*
+** mla_lane_w8_z22_z4_5:
+** umlall za\.d\[w8, 0:3, vgx2\], {z22\.h - z23\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z22_z4_5, svuint16x2_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x2 (w8, z22, z4, 5),
+ svmla_lane_za64_vg4x2 (w8, z22, z4, 5))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_u16_vg4x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_u16_vg4x4.c
new file mode 100644
index 0000000..e9a3e47
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_lane_za64_u16_vg4x4.c
@@ -0,0 +1,130 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** umlall za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_0_z0_z4_0, svuint16x4_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x4 (0, z0, z4, 0),
+ svmla_lane_za64_vg4x4 (0, z0, z4, 0))
+
+/*
+** mla_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** umlall za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0_z0_z7_1, svuint16x4_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x4 (w0, z0, z7, 1),
+ svmla_lane_za64_vg4x4 (w0, z0, z7, 1))
+
+/*
+** mla_lane_w8_z28_z4_2:
+** umlall za\.d\[w8, 0:3, vgx4\], {z28\.h - z31\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z4_2, svuint16x4_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x4 (w8, z28, z4, 2),
+ svmla_lane_za64_vg4x4 (w8, z28, z4, 2))
+
+/*
+** mla_lane_w11p4_z0_z4_3:
+** umlall za\.d\[w11, 4:7, vgx4\], {z0\.h - z3\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w11p4_z0_z4_3, svuint16x4_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x4 (w11 + 4, z0, z4, 3),
+ svmla_lane_za64_vg4x4 (w11 + 4, z0, z4, 3))
+
+/*
+** mla_lane_w8p6_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?6
+** umlall za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p6_z0_z4_4, svuint16x4_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x4 (w8 + 6, z0, z4, 4),
+ svmla_lane_za64_vg4x4 (w8 + 6, z0, z4, 4))
+
+/*
+** mla_lane_w8p7_z0_z4_5:
+** add (w8|w9|w10|w11), w8, #?7
+** umlall za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p7_z0_z4_5, svuint16x4_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x4 (w8 + 7, z0, z4, 5),
+ svmla_lane_za64_vg4x4 (w8 + 7, z0, z4, 5))
+
+/*
+** mla_lane_w8p8_z0_z4_6:
+** add (w8|w9|w10|w11), w8, #?8
+** umlall za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z4\.h\[6\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8p8_z0_z4_6, svuint16x4_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x4 (w8 + 8, z0, z4, 6),
+ svmla_lane_za64_vg4x4 (w8 + 8, z0, z4, 6))
+
+/*
+** mla_lane_w0m1_z0_z4_7:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlall za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w0m1_z0_z4_7, svuint16x4_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x4 (w0 - 1, z0, z4, 7),
+ svmla_lane_za64_vg4x4 (w0 - 1, z0, z4, 7))
+
+/*
+** mla_lane_w8_z4_z15_0:
+** str d15, \[sp, #?-16\]!
+** umlall za\.d\[w8, 0:3, vgx4\], {z4\.h - z7\.h}, z15\.h\[0\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mla_lane_w8_z4_z15_0, svuint16x4_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x4 (w8, z4, z15, 0),
+ svmla_lane_za64_vg4x4 (w8, z4, z15, 0))
+
+/*
+** mla_lane_w8_z28_z16_3:
+** mov (z[0-7])\.d, z16\.d
+** umlall za\.d\[w8, 0:3, vgx4\], {z28\.h - z31\.h}, \1\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z28_z16_3, svuint16x4_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x4 (w8, z28, z16, 3),
+ svmla_lane_za64_vg4x4 (w8, z28, z16, 3))
+
+/*
+** mla_lane_w8_z17_z7_4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umlall za\.d\[w8, 0:3, vgx4\], [^\n]+, z7\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z17_z7_4, svuint16x4_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x4 (w8, z17, z7, 4),
+ svmla_lane_za64_vg4x4 (w8, z17, z7, 4))
+
+/*
+** mla_lane_w8_z22_z4_6:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umlall za\.d\[w8, 0:3, vgx4\], [^\n]+, z4\.h\[6\]
+** ret
+*/
+TEST_ZA_LANE (mla_lane_w8_z22_z4_6, svuint16x4_t, svuint16_t,
+ svmla_lane_za64_u16_vg4x4 (w8, z22, z4, 6),
+ svmla_lane_za64_vg4x4 (w8, z22, z4, 6))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_bf16_vg2x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_bf16_vg2x1.c
new file mode 100644
index 0000000..ffa6736
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_bf16_vg2x1.c
@@ -0,0 +1,148 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** bfmlal za\.s\[\1, 0:1\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_0_z0_z0, svbfloat16_t,
+ svmla_za32_bf16_vg2x1 (0, z0, z0),
+ svmla_za32_vg2x1 (0, z0, z0))
+
+/*
+** mla_w0_z0_z3:
+** mov (w8|w9|w10|w11), w0
+** bfmlal za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w0_z0_z3, svbfloat16_t,
+ svmla_za32_bf16_vg2x1 (w0, z0, z3),
+ svmla_za32_vg2x1 (w0, z0, z3))
+
+/*
+** mla_w7_z0_z3:
+** mov (w8|w9|w10|w11), w7
+** bfmlal za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w7_z0_z3, svbfloat16_t,
+ svmla_za32_bf16_vg2x1 (w7, z0, z3),
+ svmla_za32_vg2x1 (w7, z0, z3))
+
+/*
+** mla_w8_z7_z3:
+** bfmlal za\.s\[w8, 0:1\], z7\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8_z7_z3, svbfloat16_t,
+ svmla_za32_bf16_vg2x1 (w8, z7, z3),
+ svmla_za32_vg2x1 (w8, z7, z3))
+
+/*
+** mla_w8_z31_z16:
+** mov (z[0-7])\.d, z16\.d
+** bfmlal za\.s\[w8, 0:1\], z31\.h, \1\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8_z31_z16, svbfloat16_t,
+ svmla_za32_bf16_vg2x1 (w8, z31, z16),
+ svmla_za32_vg2x1 (w8, z31, z16))
+
+/*
+** mla_w8p1_z0_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** bfmlal za\.s\[\1, 0:1\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p1_z0_z0, svbfloat16_t,
+ svmla_za32_bf16_vg2x1 (w8 + 1, z0, z0),
+ svmla_za32_vg2x1 (w8 + 1, z0, z0))
+
+/*
+** mla_w8p2_z23_z0:
+** bfmlal za\.s\[w8, 2:3\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p2_z23_z0, svbfloat16_t,
+ svmla_za32_bf16_vg2x1 (w8 + 2, z23, z0),
+ svmla_za32_vg2x1 (w8 + 2, z23, z0))
+
+/*
+** mla_w11p6_z23_z0:
+** bfmlal za\.s\[w11, 6:7\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w11p6_z23_z0, svbfloat16_t,
+ svmla_za32_bf16_vg2x1 (w11 + 6, z23, z0),
+ svmla_za32_vg2x1 (w11 + 6, z23, z0))
+
+/*
+** mla_w8p7_z7_z7:
+** add (w8|w9|w10|w11), w8, #?7
+** bfmlal za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p7_z7_z7, svbfloat16_t,
+ svmla_za32_bf16_vg2x1 (w8 + 7, z7, z7),
+ svmla_za32_vg2x1 (w8 + 7, z7, z7))
+
+/*
+** mla_w11p10_z23_z0:
+** bfmlal za\.s\[w11, 10:11\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w11p10_z23_z0, svbfloat16_t,
+ svmla_za32_bf16_vg2x1 (w11 + 10, z23, z0),
+ svmla_za32_vg2x1 (w11 + 10, z23, z0))
+
+/*
+** mla_w8p14_z23_z0:
+** bfmlal za\.s\[w8, 14:15\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p14_z23_z0, svbfloat16_t,
+ svmla_za32_bf16_vg2x1 (w8 + 14, z23, z0),
+ svmla_za32_vg2x1 (w8 + 14, z23, z0))
+
+/*
+** mla_w8p15_z7_z7:
+** add (w8|w9|w10|w11), w8, #?15
+** bfmlal za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p15_z7_z7, svbfloat16_t,
+ svmla_za32_bf16_vg2x1 (w8 + 15, z7, z7),
+ svmla_za32_vg2x1 (w8 + 15, z7, z7))
+
+/*
+** mla_w8p16_z7_z7:
+** add (w8|w9|w10|w11), w8, #?16
+** bfmlal za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p16_z7_z7, svbfloat16_t,
+ svmla_za32_bf16_vg2x1 (w8 + 16, z7, z7),
+ svmla_za32_vg2x1 (w8 + 16, z7, z7))
+
+/*
+** mla_w8m1_z16_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** bfmlal za\.s\[\1, 0:1\], z16\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8m1_z16_z0, svbfloat16_t,
+ svmla_za32_bf16_vg2x1 (w8 - 1, z16, z0),
+ svmla_za32_vg2x1 (w8 - 1, z16, z0))
+
+/*
+** mla_w12_z0_z3:
+** mov (w8|w9|w10|w11), w12
+** bfmlal za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w12_z0_z3, svbfloat16_t,
+ svmla_za32_bf16_vg2x1 (w12, z0, z3),
+ svmla_za32_vg2x1 (w12, z0, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_bf16_vg2x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_bf16_vg2x2.c
new file mode 100644
index 0000000..db432e9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_bf16_vg2x2.c
@@ -0,0 +1,247 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** bfmlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_0_z0_z0, svbfloat16x2_t,
+ svmla_za32_bf16_vg2x2 (0, z0, z0),
+ svmla_za32_vg2x2 (0, z0, z0))
+
+/*
+** mla_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** bfmlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w0_z0_z0, svbfloat16x2_t,
+ svmla_za32_bf16_vg2x2 (w0, z0, z0),
+ svmla_za32_vg2x2 (w0, z0, z0))
+
+/*
+** mla_w8_z0_z4:
+** bfmlal za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z4, svbfloat16x2_t,
+ svmla_za32_bf16_vg2x2 (w8, z0, z4),
+ svmla_za32_vg2x2 (w8, z0, z4))
+
+/*
+** mla_w8_z4_z18:
+** bfmlal za\.s\[w8, 0:1, vgx2\], {z4\.h - z5\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z4_z18, svbfloat16x2_t,
+ svmla_za32_bf16_vg2x2 (w8, z4, z18),
+ svmla_za32_vg2x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mla_w8_z0_z23:
+** ...
+** bfmlal za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z23, svbfloat16x2_t,
+ svmla_za32_bf16_vg2x2 (w8, z0, z23),
+ svmla_za32_vg2x2 (w8, z0, z23))
+
+/*
+** mla_w8_z23_z0:
+** ...
+** bfmlal za\.s\[w8, 0:1, vgx2\], [^\n]+, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z23_z0, svbfloat16x2_t,
+ svmla_za32_bf16_vg2x2 (w8, z23, z0),
+ svmla_za32_vg2x2 (w8, z23, z0))
+
+/*
+** mla_w8_z18_z28:
+** bfmlal za\.s\[w8, 0:1, vgx2\], {z18\.h - z19\.h}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z18_z28, svbfloat16x2_t,
+ svmla_za32_bf16_vg2x2 (w8, z18, z28),
+ svmla_za32_vg2x2 (w8, z18, z28))
+
+/*
+** mla_w8_z28_z4:
+** bfmlal za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z28_z4, svbfloat16x2_t,
+ svmla_za32_bf16_vg2x2 (w8, z28, z4),
+ svmla_za32_vg2x2 (w8, z28, z4))
+
+/*
+** mla_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** bfmlal za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p1_z4_z0, svbfloat16x2_t,
+ svmla_za32_bf16_vg2x2 (w8 + 1, z4, z0),
+ svmla_za32_vg2x2 (w8 + 1, z4, z0))
+
+/*
+** mla_w8p2_z4_z0:
+** bfmlal za\.s\[w8, 2:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p2_z4_z0, svbfloat16x2_t,
+ svmla_za32_bf16_vg2x2 (w8 + 2, z4, z0),
+ svmla_za32_vg2x2 (w8 + 2, z4, z0))
+
+/*
+** mla_w8p6_z4_z0:
+** bfmlal za\.s\[w8, 6:7, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p6_z4_z0, svbfloat16x2_t,
+ svmla_za32_bf16_vg2x2 (w8 + 6, z4, z0),
+ svmla_za32_vg2x2 (w8 + 6, z4, z0))
+
+/*
+** mla_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** bfmlal za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p7_z4_z0, svbfloat16x2_t,
+ svmla_za32_bf16_vg2x2 (w8 + 7, z4, z0),
+ svmla_za32_vg2x2 (w8 + 7, z4, z0))
+
+/*
+** mla_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** bfmlal za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p8_z4_z4, svbfloat16x2_t,
+ svmla_za32_bf16_vg2x2 (w8 + 8, z4, z4),
+ svmla_za32_vg2x2 (w8 + 8, z4, z4))
+
+/*
+** mla_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** bfmlal za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8m1_z4_z0, svbfloat16x2_t,
+ svmla_za32_bf16_vg2x2 (w8 - 1, z4, z0),
+ svmla_za32_vg2x2 (w8 - 1, z4, z0))
+
+/*
+** mla_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** bfmlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_0_z1_z0, svbfloat16x2_t, svbfloat16_t,
+ svmla_single_za32_bf16_vg2x2 (0, z1, z0),
+ svmla_za32_vg2x2 (0, z1, z0))
+
+/*
+** mla_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** bfmlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0_z1_z0, svbfloat16x2_t, svbfloat16_t,
+ svmla_single_za32_bf16_vg2x2 (w0, z1, z0),
+ svmla_za32_vg2x2 (w0, z1, z0))
+
+/*
+** mla_single_w8_z1_z0:
+** bfmlal za\.s\[w8, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z1_z0, svbfloat16x2_t, svbfloat16_t,
+ svmla_single_za32_bf16_vg2x2 (w8, z1, z0),
+ svmla_za32_vg2x2 (w8, z1, z0))
+
+/*
+** mla_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** bfmlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p1_z1_z0, svbfloat16x2_t, svbfloat16_t,
+ svmla_single_za32_bf16_vg2x2 (w8 + 1, z1, z0),
+ svmla_za32_vg2x2 (w8 + 1, z1, z0))
+
+/*
+** mla_single_w8p4_z20_z0:
+** bfmlal za\.s\[w8, 4:5, vgx2\], {z20\.h - z21\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p4_z20_z0, svbfloat16x2_t, svbfloat16_t,
+ svmla_single_za32_bf16_vg2x2 (w8 + 4, z20, z0),
+ svmla_za32_vg2x2 (w8 + 4, z20, z0))
+
+/*
+** mla_single_w8p6_z27_z0:
+** bfmlal za\.s\[w8, 6:7, vgx2\], {z27\.h - z28\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p6_z27_z0, svbfloat16x2_t, svbfloat16_t,
+ svmla_single_za32_bf16_vg2x2 (w8 + 6, z27, z0),
+ svmla_za32_vg2x2 (w8 + 6, z27, z0))
+
+/*
+** mla_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** bfmlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p7_z1_z0, svbfloat16x2_t, svbfloat16_t,
+ svmla_single_za32_bf16_vg2x2 (w8 + 7, z1, z0),
+ svmla_za32_vg2x2 (w8 + 7, z1, z0))
+
+/*
+** mla_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** bfmlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p8_z1_z0, svbfloat16x2_t, svbfloat16_t,
+ svmla_single_za32_bf16_vg2x2 (w8 + 8, z1, z0),
+ svmla_za32_vg2x2 (w8 + 8, z1, z0))
+
+/*
+** mla_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** bfmlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0m1_z1_z0, svbfloat16x2_t, svbfloat16_t,
+ svmla_single_za32_bf16_vg2x2 (w0 - 1, z1, z0),
+ svmla_za32_vg2x2 (w0 - 1, z1, z0))
+
+/*
+** mla_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** bfmlal za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mla_single_w8_z0_z15, svbfloat16x2_t, svbfloat16_t,
+ svmla_single_za32_bf16_vg2x2 (w8, z0, z15),
+ svmla_za32_vg2x2 (w8, z0, z15))
+
+/*
+** mla_single_w8_z20_z16:
+** mov (z[0-7])\.d, z16\.d
+** bfmlal za\.s\[w8, 0:1, vgx2\], {z20\.h - z21\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z20_z16, svbfloat16x2_t, svbfloat16_t,
+ svmla_single_za32_bf16_vg2x2 (w8, z20, z16),
+ svmla_za32_vg2x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_bf16_vg2x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_bf16_vg2x4.c
new file mode 100644
index 0000000..7428278
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_bf16_vg2x4.c
@@ -0,0 +1,258 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** bfmlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_0_z0_z0, svbfloat16x4_t,
+ svmla_za32_bf16_vg2x4 (0, z0, z0),
+ svmla_za32_vg2x4 (0, z0, z0))
+
+/*
+** mla_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** bfmlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w0_z0_z0, svbfloat16x4_t,
+ svmla_za32_bf16_vg2x4 (w0, z0, z0),
+ svmla_za32_vg2x4 (w0, z0, z0))
+
+/*
+** mla_w8_z0_z4:
+** bfmlal za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z4, svbfloat16x4_t,
+ svmla_za32_bf16_vg2x4 (w8, z0, z4),
+ svmla_za32_vg2x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mla_w8_z0_z18:
+** ...
+** bfmlal za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z18, svbfloat16x4_t,
+ svmla_za32_bf16_vg2x4 (w8, z0, z18),
+ svmla_za32_vg2x4 (w8, z0, z18))
+
+/*
+** mla_w8_z18_z0:
+** ...
+** bfmlal za\.s\[w8, 0:1, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z18_z0, svbfloat16x4_t,
+ svmla_za32_bf16_vg2x4 (w8, z18, z0),
+ svmla_za32_vg2x4 (w8, z18, z0))
+
+/*
+** mla_w8_z0_z23:
+** ...
+** bfmlal za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z23, svbfloat16x4_t,
+ svmla_za32_bf16_vg2x4 (w8, z0, z23),
+ svmla_za32_vg2x4 (w8, z0, z23))
+
+/*
+** mla_w8_z23_z0:
+** ...
+** bfmlal za\.s\[w8, 0:1, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z23_z0, svbfloat16x4_t,
+ svmla_za32_bf16_vg2x4 (w8, z23, z0),
+ svmla_za32_vg2x4 (w8, z23, z0))
+
+/*
+** mla_w8_z4_z28:
+** bfmlal za\.s\[w8, 0:1, vgx4\], {z4\.h - z7\.h}, {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z4_z28, svbfloat16x4_t,
+ svmla_za32_bf16_vg2x4 (w8, z4, z28),
+ svmla_za32_vg2x4 (w8, z4, z28))
+
+/*
+** mla_w8_z28_z0:
+** bfmlal za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z28_z0, svbfloat16x4_t,
+ svmla_za32_bf16_vg2x4 (w8, z28, z0),
+ svmla_za32_vg2x4 (w8, z28, z0))
+
+/*
+** mla_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** bfmlal za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p1_z4_z0, svbfloat16x4_t,
+ svmla_za32_bf16_vg2x4 (w8 + 1, z4, z0),
+ svmla_za32_vg2x4 (w8 + 1, z4, z0))
+
+/*
+** mla_w8p2_z4_z0:
+** bfmlal za\.s\[w8, 2:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p2_z4_z0, svbfloat16x4_t,
+ svmla_za32_bf16_vg2x4 (w8 + 2, z4, z0),
+ svmla_za32_vg2x4 (w8 + 2, z4, z0))
+
+/*
+** mla_w8p6_z4_z0:
+** bfmlal za\.s\[w8, 6:7, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p6_z4_z0, svbfloat16x4_t,
+ svmla_za32_bf16_vg2x4 (w8 + 6, z4, z0),
+ svmla_za32_vg2x4 (w8 + 6, z4, z0))
+
+/*
+** mla_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** bfmlal za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p7_z4_z0, svbfloat16x4_t,
+ svmla_za32_bf16_vg2x4 (w8 + 7, z4, z0),
+ svmla_za32_vg2x4 (w8 + 7, z4, z0))
+
+/*
+** mla_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** bfmlal za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p8_z4_z4, svbfloat16x4_t,
+ svmla_za32_bf16_vg2x4 (w8 + 8, z4, z4),
+ svmla_za32_vg2x4 (w8 + 8, z4, z4))
+
+/*
+** mla_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** bfmlal za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8m1_z4_z0, svbfloat16x4_t,
+ svmla_za32_bf16_vg2x4 (w8 - 1, z4, z0),
+ svmla_za32_vg2x4 (w8 - 1, z4, z0))
+
+/*
+** mla_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** bfmlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_0_z1_z0, svbfloat16x4_t, svbfloat16_t,
+ svmla_single_za32_bf16_vg2x4 (0, z1, z0),
+ svmla_za32_vg2x4 (0, z1, z0))
+
+/*
+** mla_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** bfmlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0_z1_z0, svbfloat16x4_t, svbfloat16_t,
+ svmla_single_za32_bf16_vg2x4 (w0, z1, z0),
+ svmla_za32_vg2x4 (w0, z1, z0))
+
+/*
+** mla_single_w8_z1_z0:
+** bfmlal za\.s\[w8, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z1_z0, svbfloat16x4_t, svbfloat16_t,
+ svmla_single_za32_bf16_vg2x4 (w8, z1, z0),
+ svmla_za32_vg2x4 (w8, z1, z0))
+
+/*
+** mla_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** bfmlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p1_z1_z0, svbfloat16x4_t, svbfloat16_t,
+ svmla_single_za32_bf16_vg2x4 (w8 + 1, z1, z0),
+ svmla_za32_vg2x4 (w8 + 1, z1, z0))
+
+/*
+** mla_single_w8p4_z20_z0:
+** bfmlal za\.s\[w8, 4:5, vgx4\], {z20\.h - z23\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p4_z20_z0, svbfloat16x4_t, svbfloat16_t,
+ svmla_single_za32_bf16_vg2x4 (w8 + 4, z20, z0),
+ svmla_za32_vg2x4 (w8 + 4, z20, z0))
+
+/*
+** mla_single_w8p6_z27_z0:
+** bfmlal za\.s\[w8, 6:7, vgx4\], {z27\.h - z30\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p6_z27_z0, svbfloat16x4_t, svbfloat16_t,
+ svmla_single_za32_bf16_vg2x4 (w8 + 6, z27, z0),
+ svmla_za32_vg2x4 (w8 + 6, z27, z0))
+
+/*
+** mla_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** bfmlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p7_z1_z0, svbfloat16x4_t, svbfloat16_t,
+ svmla_single_za32_bf16_vg2x4 (w8 + 7, z1, z0),
+ svmla_za32_vg2x4 (w8 + 7, z1, z0))
+
+/*
+** mla_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** bfmlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p8_z1_z0, svbfloat16x4_t, svbfloat16_t,
+ svmla_single_za32_bf16_vg2x4 (w8 + 8, z1, z0),
+ svmla_za32_vg2x4 (w8 + 8, z1, z0))
+
+/*
+** mla_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** bfmlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0m1_z1_z0, svbfloat16x4_t, svbfloat16_t,
+ svmla_single_za32_bf16_vg2x4 (w0 - 1, z1, z0),
+ svmla_za32_vg2x4 (w0 - 1, z1, z0))
+
+/*
+** mla_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** bfmlal za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mla_single_w8_z0_z15, svbfloat16x4_t, svbfloat16_t,
+ svmla_single_za32_bf16_vg2x4 (w8, z0, z15),
+ svmla_za32_vg2x4 (w8, z0, z15))
+
+/*
+** mla_single_w8_z20_z16:
+** mov (z[0-7])\.d, z16\.d
+** bfmlal za\.s\[w8, 0:1, vgx4\], {z20\.h - z23\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z20_z16, svbfloat16x4_t, svbfloat16_t,
+ svmla_single_za32_bf16_vg2x4 (w8, z20, z16),
+ svmla_za32_vg2x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_f16_vg2x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_f16_vg2x1.c
new file mode 100644
index 0000000..7b74179
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_f16_vg2x1.c
@@ -0,0 +1,148 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmlal za\.s\[\1, 0:1\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_0_z0_z0, svfloat16_t,
+ svmla_za32_f16_vg2x1 (0, z0, z0),
+ svmla_za32_vg2x1 (0, z0, z0))
+
+/*
+** mla_w0_z0_z3:
+** mov (w8|w9|w10|w11), w0
+** fmlal za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w0_z0_z3, svfloat16_t,
+ svmla_za32_f16_vg2x1 (w0, z0, z3),
+ svmla_za32_vg2x1 (w0, z0, z3))
+
+/*
+** mla_w7_z0_z3:
+** mov (w8|w9|w10|w11), w7
+** fmlal za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w7_z0_z3, svfloat16_t,
+ svmla_za32_f16_vg2x1 (w7, z0, z3),
+ svmla_za32_vg2x1 (w7, z0, z3))
+
+/*
+** mla_w8_z7_z3:
+** fmlal za\.s\[w8, 0:1\], z7\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8_z7_z3, svfloat16_t,
+ svmla_za32_f16_vg2x1 (w8, z7, z3),
+ svmla_za32_vg2x1 (w8, z7, z3))
+
+/*
+** mla_w8_z31_z16:
+** mov (z[0-7])\.d, z16\.d
+**	fmlal	za\.s\[w8, 0:1\], z31\.h, \1\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8_z31_z16, svfloat16_t,
+ svmla_za32_f16_vg2x1 (w8, z31, z16),
+ svmla_za32_vg2x1 (w8, z31, z16))
+
+/*
+** mla_w8p1_z0_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** fmlal za\.s\[\1, 0:1\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p1_z0_z0, svfloat16_t,
+ svmla_za32_f16_vg2x1 (w8 + 1, z0, z0),
+ svmla_za32_vg2x1 (w8 + 1, z0, z0))
+
+/*
+** mla_w8p2_z23_z0:
+** fmlal za\.s\[w8, 2:3\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p2_z23_z0, svfloat16_t,
+ svmla_za32_f16_vg2x1 (w8 + 2, z23, z0),
+ svmla_za32_vg2x1 (w8 + 2, z23, z0))
+
+/*
+** mla_w11p6_z23_z0:
+** fmlal za\.s\[w11, 6:7\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w11p6_z23_z0, svfloat16_t,
+ svmla_za32_f16_vg2x1 (w11 + 6, z23, z0),
+ svmla_za32_vg2x1 (w11 + 6, z23, z0))
+
+/*
+** mla_w8p7_z7_z7:
+** add (w8|w9|w10|w11), w8, #?7
+** fmlal za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p7_z7_z7, svfloat16_t,
+ svmla_za32_f16_vg2x1 (w8 + 7, z7, z7),
+ svmla_za32_vg2x1 (w8 + 7, z7, z7))
+
+/*
+** mla_w11p10_z23_z0:
+** fmlal za\.s\[w11, 10:11\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w11p10_z23_z0, svfloat16_t,
+ svmla_za32_f16_vg2x1 (w11 + 10, z23, z0),
+ svmla_za32_vg2x1 (w11 + 10, z23, z0))
+
+/*
+** mla_w8p14_z23_z0:
+** fmlal za\.s\[w8, 14:15\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p14_z23_z0, svfloat16_t,
+ svmla_za32_f16_vg2x1 (w8 + 14, z23, z0),
+ svmla_za32_vg2x1 (w8 + 14, z23, z0))
+
+/*
+** mla_w8p15_z7_z7:
+** add (w8|w9|w10|w11), w8, #?15
+** fmlal za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p15_z7_z7, svfloat16_t,
+ svmla_za32_f16_vg2x1 (w8 + 15, z7, z7),
+ svmla_za32_vg2x1 (w8 + 15, z7, z7))
+
+/*
+** mla_w8p16_z7_z7:
+** add (w8|w9|w10|w11), w8, #?16
+** fmlal za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p16_z7_z7, svfloat16_t,
+ svmla_za32_f16_vg2x1 (w8 + 16, z7, z7),
+ svmla_za32_vg2x1 (w8 + 16, z7, z7))
+
+/*
+** mla_w8m1_z16_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fmlal za\.s\[\1, 0:1\], z16\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8m1_z16_z0, svfloat16_t,
+ svmla_za32_f16_vg2x1 (w8 - 1, z16, z0),
+ svmla_za32_vg2x1 (w8 - 1, z16, z0))
+
+/*
+** mla_w12_z0_z3:
+** mov (w8|w9|w10|w11), w12
+** fmlal za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w12_z0_z3, svfloat16_t,
+ svmla_za32_f16_vg2x1 (w12, z0, z3),
+ svmla_za32_vg2x1 (w12, z0, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_f16_vg2x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_f16_vg2x2.c
new file mode 100644
index 0000000..fcc4b05
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_f16_vg2x2.c
@@ -0,0 +1,247 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_0_z0_z0, svfloat16x2_t,
+ svmla_za32_f16_vg2x2 (0, z0, z0),
+ svmla_za32_vg2x2 (0, z0, z0))
+
+/*
+** mla_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** fmlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w0_z0_z0, svfloat16x2_t,
+ svmla_za32_f16_vg2x2 (w0, z0, z0),
+ svmla_za32_vg2x2 (w0, z0, z0))
+
+/*
+** mla_w8_z0_z4:
+** fmlal za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z4, svfloat16x2_t,
+ svmla_za32_f16_vg2x2 (w8, z0, z4),
+ svmla_za32_vg2x2 (w8, z0, z4))
+
+/*
+** mla_w8_z4_z18:
+** fmlal za\.s\[w8, 0:1, vgx2\], {z4\.h - z5\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z4_z18, svfloat16x2_t,
+ svmla_za32_f16_vg2x2 (w8, z4, z18),
+ svmla_za32_vg2x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mla_w8_z0_z23:
+** ...
+** fmlal za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z23, svfloat16x2_t,
+ svmla_za32_f16_vg2x2 (w8, z0, z23),
+ svmla_za32_vg2x2 (w8, z0, z23))
+
+/*
+** mla_w8_z23_z0:
+** ...
+** fmlal za\.s\[w8, 0:1, vgx2\], [^\n]+, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z23_z0, svfloat16x2_t,
+ svmla_za32_f16_vg2x2 (w8, z23, z0),
+ svmla_za32_vg2x2 (w8, z23, z0))
+
+/*
+** mla_w8_z18_z28:
+** fmlal za\.s\[w8, 0:1, vgx2\], {z18\.h - z19\.h}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z18_z28, svfloat16x2_t,
+ svmla_za32_f16_vg2x2 (w8, z18, z28),
+ svmla_za32_vg2x2 (w8, z18, z28))
+
+/*
+** mla_w8_z28_z4:
+** fmlal za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z28_z4, svfloat16x2_t,
+ svmla_za32_f16_vg2x2 (w8, z28, z4),
+ svmla_za32_vg2x2 (w8, z28, z4))
+
+/*
+** mla_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** fmlal za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p1_z4_z0, svfloat16x2_t,
+ svmla_za32_f16_vg2x2 (w8 + 1, z4, z0),
+ svmla_za32_vg2x2 (w8 + 1, z4, z0))
+
+/*
+** mla_w8p2_z4_z0:
+** fmlal za\.s\[w8, 2:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p2_z4_z0, svfloat16x2_t,
+ svmla_za32_f16_vg2x2 (w8 + 2, z4, z0),
+ svmla_za32_vg2x2 (w8 + 2, z4, z0))
+
+/*
+** mla_w8p6_z4_z0:
+** fmlal za\.s\[w8, 6:7, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p6_z4_z0, svfloat16x2_t,
+ svmla_za32_f16_vg2x2 (w8 + 6, z4, z0),
+ svmla_za32_vg2x2 (w8 + 6, z4, z0))
+
+/*
+** mla_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** fmlal za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p7_z4_z0, svfloat16x2_t,
+ svmla_za32_f16_vg2x2 (w8 + 7, z4, z0),
+ svmla_za32_vg2x2 (w8 + 7, z4, z0))
+
+/*
+** mla_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** fmlal za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p8_z4_z4, svfloat16x2_t,
+ svmla_za32_f16_vg2x2 (w8 + 8, z4, z4),
+ svmla_za32_vg2x2 (w8 + 8, z4, z4))
+
+/*
+** mla_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fmlal za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8m1_z4_z0, svfloat16x2_t,
+ svmla_za32_f16_vg2x2 (w8 - 1, z4, z0),
+ svmla_za32_vg2x2 (w8 - 1, z4, z0))
+
+/*
+** mla_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_0_z1_z0, svfloat16x2_t, svfloat16_t,
+ svmla_single_za32_f16_vg2x2 (0, z1, z0),
+ svmla_za32_vg2x2 (0, z1, z0))
+
+/*
+** mla_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** fmlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0_z1_z0, svfloat16x2_t, svfloat16_t,
+ svmla_single_za32_f16_vg2x2 (w0, z1, z0),
+ svmla_za32_vg2x2 (w0, z1, z0))
+
+/*
+** mla_single_w8_z1_z0:
+** fmlal za\.s\[w8, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z1_z0, svfloat16x2_t, svfloat16_t,
+ svmla_single_za32_f16_vg2x2 (w8, z1, z0),
+ svmla_za32_vg2x2 (w8, z1, z0))
+
+/*
+** mla_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** fmlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p1_z1_z0, svfloat16x2_t, svfloat16_t,
+ svmla_single_za32_f16_vg2x2 (w8 + 1, z1, z0),
+ svmla_za32_vg2x2 (w8 + 1, z1, z0))
+
+/*
+** mla_single_w8p4_z20_z0:
+** fmlal za\.s\[w8, 4:5, vgx2\], {z20\.h - z21\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p4_z20_z0, svfloat16x2_t, svfloat16_t,
+ svmla_single_za32_f16_vg2x2 (w8 + 4, z20, z0),
+ svmla_za32_vg2x2 (w8 + 4, z20, z0))
+
+/*
+** mla_single_w8p6_z27_z0:
+** fmlal za\.s\[w8, 6:7, vgx2\], {z27\.h - z28\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p6_z27_z0, svfloat16x2_t, svfloat16_t,
+ svmla_single_za32_f16_vg2x2 (w8 + 6, z27, z0),
+ svmla_za32_vg2x2 (w8 + 6, z27, z0))
+
+/*
+** mla_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** fmlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p7_z1_z0, svfloat16x2_t, svfloat16_t,
+ svmla_single_za32_f16_vg2x2 (w8 + 7, z1, z0),
+ svmla_za32_vg2x2 (w8 + 7, z1, z0))
+
+/*
+** mla_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** fmlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p8_z1_z0, svfloat16x2_t, svfloat16_t,
+ svmla_single_za32_f16_vg2x2 (w8 + 8, z1, z0),
+ svmla_za32_vg2x2 (w8 + 8, z1, z0))
+
+/*
+** mla_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0m1_z1_z0, svfloat16x2_t, svfloat16_t,
+ svmla_single_za32_f16_vg2x2 (w0 - 1, z1, z0),
+ svmla_za32_vg2x2 (w0 - 1, z1, z0))
+
+/*
+** mla_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** fmlal za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mla_single_w8_z0_z15, svfloat16x2_t, svfloat16_t,
+ svmla_single_za32_f16_vg2x2 (w8, z0, z15),
+ svmla_za32_vg2x2 (w8, z0, z15))
+
+/*
+** mla_single_w8_z20_z16:
+**	mov	(z[0-7])\.d, z16\.d
+** fmlal za\.s\[w8, 0:1, vgx2\], {z20\.h - z21\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z20_z16, svfloat16x2_t, svfloat16_t,
+ svmla_single_za32_f16_vg2x2 (w8, z20, z16),
+ svmla_za32_vg2x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_f16_vg2x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_f16_vg2x4.c
new file mode 100644
index 0000000..f875528
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_f16_vg2x4.c
@@ -0,0 +1,258 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_0_z0_z0, svfloat16x4_t,
+ svmla_za32_f16_vg2x4 (0, z0, z0),
+ svmla_za32_vg2x4 (0, z0, z0))
+
+/*
+** mla_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** fmlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w0_z0_z0, svfloat16x4_t,
+ svmla_za32_f16_vg2x4 (w0, z0, z0),
+ svmla_za32_vg2x4 (w0, z0, z0))
+
+/*
+** mla_w8_z0_z4:
+** fmlal za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z4, svfloat16x4_t,
+ svmla_za32_f16_vg2x4 (w8, z0, z4),
+ svmla_za32_vg2x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mla_w8_z0_z18:
+** ...
+** fmlal za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z18, svfloat16x4_t,
+ svmla_za32_f16_vg2x4 (w8, z0, z18),
+ svmla_za32_vg2x4 (w8, z0, z18))
+
+/*
+** mla_w8_z18_z0:
+** ...
+** fmlal za\.s\[w8, 0:1, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z18_z0, svfloat16x4_t,
+ svmla_za32_f16_vg2x4 (w8, z18, z0),
+ svmla_za32_vg2x4 (w8, z18, z0))
+
+/*
+** mla_w8_z0_z23:
+** ...
+** fmlal za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z23, svfloat16x4_t,
+ svmla_za32_f16_vg2x4 (w8, z0, z23),
+ svmla_za32_vg2x4 (w8, z0, z23))
+
+/*
+** mla_w8_z23_z0:
+** ...
+** fmlal za\.s\[w8, 0:1, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z23_z0, svfloat16x4_t,
+ svmla_za32_f16_vg2x4 (w8, z23, z0),
+ svmla_za32_vg2x4 (w8, z23, z0))
+
+/*
+** mla_w8_z4_z28:
+** fmlal za\.s\[w8, 0:1, vgx4\], {z4\.h - z7\.h}, {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z4_z28, svfloat16x4_t,
+ svmla_za32_f16_vg2x4 (w8, z4, z28),
+ svmla_za32_vg2x4 (w8, z4, z28))
+
+/*
+** mla_w8_z28_z0:
+** fmlal za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z28_z0, svfloat16x4_t,
+ svmla_za32_f16_vg2x4 (w8, z28, z0),
+ svmla_za32_vg2x4 (w8, z28, z0))
+
+/*
+** mla_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** fmlal za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p1_z4_z0, svfloat16x4_t,
+ svmla_za32_f16_vg2x4 (w8 + 1, z4, z0),
+ svmla_za32_vg2x4 (w8 + 1, z4, z0))
+
+/*
+** mla_w8p2_z4_z0:
+** fmlal za\.s\[w8, 2:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p2_z4_z0, svfloat16x4_t,
+ svmla_za32_f16_vg2x4 (w8 + 2, z4, z0),
+ svmla_za32_vg2x4 (w8 + 2, z4, z0))
+
+/*
+** mla_w8p6_z4_z0:
+** fmlal za\.s\[w8, 6:7, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p6_z4_z0, svfloat16x4_t,
+ svmla_za32_f16_vg2x4 (w8 + 6, z4, z0),
+ svmla_za32_vg2x4 (w8 + 6, z4, z0))
+
+/*
+** mla_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** fmlal za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p7_z4_z0, svfloat16x4_t,
+ svmla_za32_f16_vg2x4 (w8 + 7, z4, z0),
+ svmla_za32_vg2x4 (w8 + 7, z4, z0))
+
+/*
+** mla_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** fmlal za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p8_z4_z4, svfloat16x4_t,
+ svmla_za32_f16_vg2x4 (w8 + 8, z4, z4),
+ svmla_za32_vg2x4 (w8 + 8, z4, z4))
+
+/*
+** mla_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fmlal za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8m1_z4_z0, svfloat16x4_t,
+ svmla_za32_f16_vg2x4 (w8 - 1, z4, z0),
+ svmla_za32_vg2x4 (w8 - 1, z4, z0))
+
+/*
+** mla_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_0_z1_z0, svfloat16x4_t, svfloat16_t,
+ svmla_single_za32_f16_vg2x4 (0, z1, z0),
+ svmla_za32_vg2x4 (0, z1, z0))
+
+/*
+** mla_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** fmlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0_z1_z0, svfloat16x4_t, svfloat16_t,
+ svmla_single_za32_f16_vg2x4 (w0, z1, z0),
+ svmla_za32_vg2x4 (w0, z1, z0))
+
+/*
+** mla_single_w8_z1_z0:
+** fmlal za\.s\[w8, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z1_z0, svfloat16x4_t, svfloat16_t,
+ svmla_single_za32_f16_vg2x4 (w8, z1, z0),
+ svmla_za32_vg2x4 (w8, z1, z0))
+
+/*
+** mla_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** fmlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p1_z1_z0, svfloat16x4_t, svfloat16_t,
+ svmla_single_za32_f16_vg2x4 (w8 + 1, z1, z0),
+ svmla_za32_vg2x4 (w8 + 1, z1, z0))
+
+/*
+** mla_single_w8p4_z20_z0:
+** fmlal za\.s\[w8, 4:5, vgx4\], {z20\.h - z23\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p4_z20_z0, svfloat16x4_t, svfloat16_t,
+ svmla_single_za32_f16_vg2x4 (w8 + 4, z20, z0),
+ svmla_za32_vg2x4 (w8 + 4, z20, z0))
+
+/*
+** mla_single_w8p6_z27_z0:
+** fmlal za\.s\[w8, 6:7, vgx4\], {z27\.h - z30\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p6_z27_z0, svfloat16x4_t, svfloat16_t,
+ svmla_single_za32_f16_vg2x4 (w8 + 6, z27, z0),
+ svmla_za32_vg2x4 (w8 + 6, z27, z0))
+
+/*
+** mla_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** fmlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p7_z1_z0, svfloat16x4_t, svfloat16_t,
+ svmla_single_za32_f16_vg2x4 (w8 + 7, z1, z0),
+ svmla_za32_vg2x4 (w8 + 7, z1, z0))
+
+/*
+** mla_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** fmlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p8_z1_z0, svfloat16x4_t, svfloat16_t,
+ svmla_single_za32_f16_vg2x4 (w8 + 8, z1, z0),
+ svmla_za32_vg2x4 (w8 + 8, z1, z0))
+
+/*
+** mla_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0m1_z1_z0, svfloat16x4_t, svfloat16_t,
+ svmla_single_za32_f16_vg2x4 (w0 - 1, z1, z0),
+ svmla_za32_vg2x4 (w0 - 1, z1, z0))
+
+/*
+** mla_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** fmlal za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mla_single_w8_z0_z15, svfloat16x4_t, svfloat16_t,
+ svmla_single_za32_f16_vg2x4 (w8, z0, z15),
+ svmla_za32_vg2x4 (w8, z0, z15))
+
+/*
+** mla_single_w8_z20_z16:
+**	mov	(z[0-7])\.d, z16\.d
+** fmlal za\.s\[w8, 0:1, vgx4\], {z20\.h - z23\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z20_z16, svfloat16x4_t, svfloat16_t,
+ svmla_single_za32_f16_vg2x4 (w8, z20, z16),
+ svmla_za32_vg2x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_f32_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_f32_vg1x2.c
new file mode 100644
index 0000000..9f0ca68
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_f32_vg1x2.c
@@ -0,0 +1,180 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmla za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (mla_0_z0_z0, svfloat32x2_t,
+ svmla_za32_f32_vg1x2 (0, z0, z0),
+ svmla_za32_vg1x2 (0, z0, z0))
+
+/*
+** mla_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** fmla za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (mla_w0_z0_z0, svfloat32x2_t,
+ svmla_za32_f32_vg1x2 (w0, z0, z0),
+ svmla_za32_vg1x2 (w0, z0, z0))
+
+/*
+** mla_w8_z0_z4:
+** fmla za\.s\[w8, 0, vgx2\], {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z4, svfloat32x2_t,
+ svmla_za32_f32_vg1x2 (w8, z0, z4),
+ svmla_za32_vg1x2 (w8, z0, z4))
+
+/*
+** mla_w8_z4_z18:
+** fmla za\.s\[w8, 0, vgx2\], {z4\.s - z5\.s}, {z18\.s - z19\.s}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z4_z18, svfloat32x2_t,
+ svmla_za32_f32_vg1x2 (w8, z4, z18),
+ svmla_za32_vg1x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mla_w8_z23_z0:
+** ...
+** fmla za\.s\[w8, 0, vgx2\], [^\n]+, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z23_z0, svfloat32x2_t,
+ svmla_za32_f32_vg1x2 (w8, z23, z0),
+ svmla_za32_vg1x2 (w8, z23, z0))
+
+/*
+** mla_w8_z18_z23:
+** ...
+** fmla za\.s\[w8, 0, vgx2\], {z18\.s - z19\.s}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z18_z23, svfloat32x2_t,
+ svmla_za32_f32_vg1x2 (w8, z18, z23),
+ svmla_za32_vg1x2 (w8, z18, z23))
+
+/*
+** mla_w8_z4_z28:
+** fmla za\.s\[w8, 0, vgx2\], {z4\.s - z5\.s}, {z28\.s - z29\.s}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z4_z28, svfloat32x2_t,
+ svmla_za32_f32_vg1x2 (w8, z4, z28),
+ svmla_za32_vg1x2 (w8, z4, z28))
+
+/*
+** mla_w8p7_z4_z0:
+** fmla za\.s\[w8, 7, vgx2\], {z4\.s - z5\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (mla_w8p7_z4_z0, svfloat32x2_t,
+ svmla_za32_f32_vg1x2 (w8 + 7, z4, z0),
+ svmla_za32_vg1x2 (w8 + 7, z4, z0))
+
+/*
+** mla_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** fmla za\.s\[\1, 0, vgx2\], {z4\.s - z5\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_ZA_XN (mla_w8p8_z4_z4, svfloat32x2_t,
+ svmla_za32_f32_vg1x2 (w8 + 8, z4, z4),
+ svmla_za32_vg1x2 (w8 + 8, z4, z4))
+
+/*
+** mla_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fmla za\.s\[\1, 0, vgx2\], {z4\.s - z5\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (mla_w8m1_z4_z0, svfloat32x2_t,
+ svmla_za32_f32_vg1x2 (w8 - 1, z4, z0),
+ svmla_za32_vg1x2 (w8 - 1, z4, z0))
+
+/*
+** mla_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmla za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_0_z1_z0, svfloat32x2_t, svfloat32_t,
+ svmla_single_za32_f32_vg1x2 (0, z1, z0),
+ svmla_za32_vg1x2 (0, z1, z0))
+
+/*
+** mla_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** fmla za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0_z1_z0, svfloat32x2_t, svfloat32_t,
+ svmla_single_za32_f32_vg1x2 (w0, z1, z0),
+ svmla_za32_vg1x2 (w0, z1, z0))
+
+/*
+** mla_single_w8_z1_z0:
+** fmla za\.s\[w8, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z1_z0, svfloat32x2_t, svfloat32_t,
+ svmla_single_za32_f32_vg1x2 (w8, z1, z0),
+ svmla_za32_vg1x2 (w8, z1, z0))
+
+/*
+** mla_single_w8p7_z1_z0:
+** fmla za\.s\[w8, 7, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p7_z1_z0, svfloat32x2_t, svfloat32_t,
+ svmla_single_za32_f32_vg1x2 (w8 + 7, z1, z0),
+ svmla_za32_vg1x2 (w8 + 7, z1, z0))
+
+/*
+** mla_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** fmla za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p8_z1_z0, svfloat32x2_t, svfloat32_t,
+ svmla_single_za32_f32_vg1x2 (w8 + 8, z1, z0),
+ svmla_za32_vg1x2 (w8 + 8, z1, z0))
+
+/*
+** mla_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmla za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0m1_z1_z0, svfloat32x2_t, svfloat32_t,
+ svmla_single_za32_f32_vg1x2 (w0 - 1, z1, z0),
+ svmla_za32_vg1x2 (w0 - 1, z1, z0))
+
+/*
+** mla_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** fmla za\.s\[w8, 0, vgx2\], {z0\.s - z1\.s}, z15\.s
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mla_single_w8_z0_z15, svfloat32x2_t, svfloat32_t,
+ svmla_single_za32_f32_vg1x2 (w8, z0, z15),
+ svmla_za32_vg1x2 (w8, z0, z15))
+
+/*
+** mla_single_w8_z20_z16:
+**	mov	(z[0-7])\.d, z16\.d
+** fmla za\.s\[w8, 0, vgx2\], {z20\.s - z21\.s}, \1\.s
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z20_z16, svfloat32x2_t, svfloat32_t,
+ svmla_single_za32_f32_vg1x2 (w8, z20, z16),
+ svmla_za32_vg1x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_f32_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_f32_vg1x4.c
new file mode 100644
index 0000000..6afc9cd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_f32_vg1x4.c
@@ -0,0 +1,172 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmla za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (mla_0_z0_z0, svfloat32x4_t,
+ svmla_za32_f32_vg1x4 (0, z0, z0),
+ svmla_za32_vg1x4 (0, z0, z0))
+
+/*
+** mla_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** fmla za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (mla_w0_z0_z0, svfloat32x4_t,
+ svmla_za32_f32_vg1x4 (w0, z0, z0),
+ svmla_za32_vg1x4 (w0, z0, z0))
+
+/*
+** mla_w8_z0_z4:
+** fmla za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z4, svfloat32x4_t,
+ svmla_za32_f32_vg1x4 (w8, z0, z4),
+ svmla_za32_vg1x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mla_w8_z0_z18:
+** ...
+** fmla za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z18, svfloat32x4_t,
+ svmla_za32_f32_vg1x4 (w8, z0, z18),
+ svmla_za32_vg1x4 (w8, z0, z18))
+
+/*
+** mla_w8_z18_z28:
+** ...
+** fmla za\.s\[w8, 0, vgx4\], [^\n]+, {z28\.s - z31\.s}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z18_z28, svfloat32x4_t,
+ svmla_za32_f32_vg1x4 (w8, z18, z28),
+ svmla_za32_vg1x4 (w8, z18, z28))
+
+/*
+** mla_w8_z28_z23:
+** ...
+** fmla za\.s\[w8, 0, vgx4\], {z28\.s - z31\.s}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z28_z23, svfloat32x4_t,
+ svmla_za32_f32_vg1x4 (w8, z28, z23),
+ svmla_za32_vg1x4 (w8, z28, z23))
+
+/*
+** mla_w8p7_z4_z0:
+** fmla za\.s\[w8, 7, vgx4\], {z4\.s - z7\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (mla_w8p7_z4_z0, svfloat32x4_t,
+ svmla_za32_f32_vg1x4 (w8 + 7, z4, z0),
+ svmla_za32_vg1x4 (w8 + 7, z4, z0))
+
+/*
+** mla_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** fmla za\.s\[\1, 0, vgx4\], {z4\.s - z7\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (mla_w8p8_z4_z4, svfloat32x4_t,
+ svmla_za32_f32_vg1x4 (w8 + 8, z4, z4),
+ svmla_za32_vg1x4 (w8 + 8, z4, z4))
+
+/*
+** mla_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fmla za\.s\[\1, 0, vgx4\], {z4\.s - z7\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (mla_w8m1_z4_z0, svfloat32x4_t,
+ svmla_za32_f32_vg1x4 (w8 - 1, z4, z0),
+ svmla_za32_vg1x4 (w8 - 1, z4, z0))
+
+/*
+** mla_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmla za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_0_z1_z0, svfloat32x4_t, svfloat32_t,
+ svmla_single_za32_f32_vg1x4 (0, z1, z0),
+ svmla_za32_vg1x4 (0, z1, z0))
+
+/*
+** mla_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** fmla za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0_z1_z0, svfloat32x4_t, svfloat32_t,
+ svmla_single_za32_f32_vg1x4 (w0, z1, z0),
+ svmla_za32_vg1x4 (w0, z1, z0))
+
+/*
+** mla_single_w8_z1_z0:
+** fmla za\.s\[w8, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z1_z0, svfloat32x4_t, svfloat32_t,
+ svmla_single_za32_f32_vg1x4 (w8, z1, z0),
+ svmla_za32_vg1x4 (w8, z1, z0))
+
+/*
+** mla_single_w8p7_z1_z0:
+** fmla za\.s\[w8, 7, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p7_z1_z0, svfloat32x4_t, svfloat32_t,
+ svmla_single_za32_f32_vg1x4 (w8 + 7, z1, z0),
+ svmla_za32_vg1x4 (w8 + 7, z1, z0))
+
+/*
+** mla_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** fmla za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p8_z1_z0, svfloat32x4_t, svfloat32_t,
+ svmla_single_za32_f32_vg1x4 (w8 + 8, z1, z0),
+ svmla_za32_vg1x4 (w8 + 8, z1, z0))
+
+/*
+** mla_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmla za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0m1_z1_z0, svfloat32x4_t, svfloat32_t,
+ svmla_single_za32_f32_vg1x4 (w0 - 1, z1, z0),
+ svmla_za32_vg1x4 (w0 - 1, z1, z0))
+
+/*
+** mla_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** fmla za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}, z15\.s
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mla_single_w8_z0_z15, svfloat32x4_t, svfloat32_t,
+ svmla_single_za32_f32_vg1x4 (w8, z0, z15),
+ svmla_za32_vg1x4 (w8, z0, z15))
+
+/*
+** mla_single_w8_z20_z16:
+**	mov	(z[0-7])\.d, z16\.d
+** fmla za\.s\[w8, 0, vgx4\], {z20\.s - z23\.s}, \1\.s
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z20_z16, svfloat32x4_t, svfloat32_t,
+ svmla_single_za32_f32_vg1x4 (w8, z20, z16),
+ svmla_za32_vg1x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s16_vg2x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s16_vg2x1.c
new file mode 100644
index 0000000..cb78420
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s16_vg2x1.c
@@ -0,0 +1,148 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlal za\.s\[\1, 0:1\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_0_z0_z0, svint16_t,
+ svmla_za32_s16_vg2x1 (0, z0, z0),
+ svmla_za32_vg2x1 (0, z0, z0))
+
+/*
+** mla_w0_z0_z3:
+** mov (w8|w9|w10|w11), w0
+** smlal za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w0_z0_z3, svint16_t,
+ svmla_za32_s16_vg2x1 (w0, z0, z3),
+ svmla_za32_vg2x1 (w0, z0, z3))
+
+/*
+** mla_w7_z0_z3:
+** mov (w8|w9|w10|w11), w7
+** smlal za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w7_z0_z3, svint16_t,
+ svmla_za32_s16_vg2x1 (w7, z0, z3),
+ svmla_za32_vg2x1 (w7, z0, z3))
+
+/*
+** mla_w8_z7_z3:
+** smlal za\.s\[w8, 0:1\], z7\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8_z7_z3, svint16_t,
+ svmla_za32_s16_vg2x1 (w8, z7, z3),
+ svmla_za32_vg2x1 (w8, z7, z3))
+
+/*
+** mla_w8_z31_z16:
+** mov (z[0-7])\.d, z16\.d
+**	smlal	za\.s\[w8, 0:1\], z31\.h, \1\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8_z31_z16, svint16_t,
+ svmla_za32_s16_vg2x1 (w8, z31, z16),
+ svmla_za32_vg2x1 (w8, z31, z16))
+
+/*
+** mla_w8p1_z0_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlal za\.s\[\1, 0:1\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p1_z0_z0, svint16_t,
+ svmla_za32_s16_vg2x1 (w8 + 1, z0, z0),
+ svmla_za32_vg2x1 (w8 + 1, z0, z0))
+
+/*
+** mla_w8p2_z23_z0:
+** smlal za\.s\[w8, 2:3\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p2_z23_z0, svint16_t,
+ svmla_za32_s16_vg2x1 (w8 + 2, z23, z0),
+ svmla_za32_vg2x1 (w8 + 2, z23, z0))
+
+/*
+** mla_w11p6_z23_z0:
+** smlal za\.s\[w11, 6:7\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w11p6_z23_z0, svint16_t,
+ svmla_za32_s16_vg2x1 (w11 + 6, z23, z0),
+ svmla_za32_vg2x1 (w11 + 6, z23, z0))
+
+/*
+** mla_w8p7_z7_z7:
+** add (w8|w9|w10|w11), w8, #?7
+** smlal za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p7_z7_z7, svint16_t,
+ svmla_za32_s16_vg2x1 (w8 + 7, z7, z7),
+ svmla_za32_vg2x1 (w8 + 7, z7, z7))
+
+/*
+** mla_w11p10_z23_z0:
+** smlal za\.s\[w11, 10:11\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w11p10_z23_z0, svint16_t,
+ svmla_za32_s16_vg2x1 (w11 + 10, z23, z0),
+ svmla_za32_vg2x1 (w11 + 10, z23, z0))
+
+/*
+** mla_w8p14_z23_z0:
+** smlal za\.s\[w8, 14:15\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p14_z23_z0, svint16_t,
+ svmla_za32_s16_vg2x1 (w8 + 14, z23, z0),
+ svmla_za32_vg2x1 (w8 + 14, z23, z0))
+
+/*
+** mla_w8p15_z7_z7:
+** add (w8|w9|w10|w11), w8, #?15
+** smlal za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p15_z7_z7, svint16_t,
+ svmla_za32_s16_vg2x1 (w8 + 15, z7, z7),
+ svmla_za32_vg2x1 (w8 + 15, z7, z7))
+
+/*
+** mla_w8p16_z7_z7:
+** add (w8|w9|w10|w11), w8, #?16
+** smlal za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p16_z7_z7, svint16_t,
+ svmla_za32_s16_vg2x1 (w8 + 16, z7, z7),
+ svmla_za32_vg2x1 (w8 + 16, z7, z7))
+
+/*
+** mla_w8m1_z16_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlal za\.s\[\1, 0:1\], z16\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8m1_z16_z0, svint16_t,
+ svmla_za32_s16_vg2x1 (w8 - 1, z16, z0),
+ svmla_za32_vg2x1 (w8 - 1, z16, z0))
+
+/*
+** mla_w12_z0_z3:
+** mov (w8|w9|w10|w11), w12
+** smlal za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w12_z0_z3, svint16_t,
+ svmla_za32_s16_vg2x1 (w12, z0, z3),
+ svmla_za32_vg2x1 (w12, z0, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s16_vg2x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s16_vg2x2.c
new file mode 100644
index 0000000..0832374
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s16_vg2x2.c
@@ -0,0 +1,247 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_0_z0_z0, svint16x2_t,
+ svmla_za32_s16_vg2x2 (0, z0, z0),
+ svmla_za32_vg2x2 (0, z0, z0))
+
+/*
+** mla_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** smlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w0_z0_z0, svint16x2_t,
+ svmla_za32_s16_vg2x2 (w0, z0, z0),
+ svmla_za32_vg2x2 (w0, z0, z0))
+
+/*
+** mla_w8_z0_z4:
+** smlal za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z4, svint16x2_t,
+ svmla_za32_s16_vg2x2 (w8, z0, z4),
+ svmla_za32_vg2x2 (w8, z0, z4))
+
+/*
+** mla_w8_z4_z18:
+** smlal za\.s\[w8, 0:1, vgx2\], {z4\.h - z5\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z4_z18, svint16x2_t,
+ svmla_za32_s16_vg2x2 (w8, z4, z18),
+ svmla_za32_vg2x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mla_w8_z0_z23:
+** ...
+** smlal za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z23, svint16x2_t,
+ svmla_za32_s16_vg2x2 (w8, z0, z23),
+ svmla_za32_vg2x2 (w8, z0, z23))
+
+/*
+** mla_w8_z23_z0:
+** ...
+** smlal za\.s\[w8, 0:1, vgx2\], [^\n]+, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z23_z0, svint16x2_t,
+ svmla_za32_s16_vg2x2 (w8, z23, z0),
+ svmla_za32_vg2x2 (w8, z23, z0))
+
+/*
+** mla_w8_z18_z28:
+** smlal za\.s\[w8, 0:1, vgx2\], {z18\.h - z19\.h}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z18_z28, svint16x2_t,
+ svmla_za32_s16_vg2x2 (w8, z18, z28),
+ svmla_za32_vg2x2 (w8, z18, z28))
+
+/*
+** mla_w8_z28_z4:
+** smlal za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z28_z4, svint16x2_t,
+ svmla_za32_s16_vg2x2 (w8, z28, z4),
+ svmla_za32_vg2x2 (w8, z28, z4))
+
+/*
+** mla_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlal za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p1_z4_z0, svint16x2_t,
+ svmla_za32_s16_vg2x2 (w8 + 1, z4, z0),
+ svmla_za32_vg2x2 (w8 + 1, z4, z0))
+
+/*
+** mla_w8p2_z4_z0:
+** smlal za\.s\[w8, 2:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p2_z4_z0, svint16x2_t,
+ svmla_za32_s16_vg2x2 (w8 + 2, z4, z0),
+ svmla_za32_vg2x2 (w8 + 2, z4, z0))
+
+/*
+** mla_w8p6_z4_z0:
+** smlal za\.s\[w8, 6:7, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p6_z4_z0, svint16x2_t,
+ svmla_za32_s16_vg2x2 (w8 + 6, z4, z0),
+ svmla_za32_vg2x2 (w8 + 6, z4, z0))
+
+/*
+** mla_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlal za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p7_z4_z0, svint16x2_t,
+ svmla_za32_s16_vg2x2 (w8 + 7, z4, z0),
+ svmla_za32_vg2x2 (w8 + 7, z4, z0))
+
+/*
+** mla_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** smlal za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p8_z4_z4, svint16x2_t,
+ svmla_za32_s16_vg2x2 (w8 + 8, z4, z4),
+ svmla_za32_vg2x2 (w8 + 8, z4, z4))
+
+/*
+** mla_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlal za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8m1_z4_z0, svint16x2_t,
+ svmla_za32_s16_vg2x2 (w8 - 1, z4, z0),
+ svmla_za32_vg2x2 (w8 - 1, z4, z0))
+
+/*
+** mla_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_0_z1_z0, svint16x2_t, svint16_t,
+ svmla_single_za32_s16_vg2x2 (0, z1, z0),
+ svmla_za32_vg2x2 (0, z1, z0))
+
+/*
+** mla_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** smlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0_z1_z0, svint16x2_t, svint16_t,
+ svmla_single_za32_s16_vg2x2 (w0, z1, z0),
+ svmla_za32_vg2x2 (w0, z1, z0))
+
+/*
+** mla_single_w8_z1_z0:
+** smlal za\.s\[w8, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z1_z0, svint16x2_t, svint16_t,
+ svmla_single_za32_s16_vg2x2 (w8, z1, z0),
+ svmla_za32_vg2x2 (w8, z1, z0))
+
+/*
+** mla_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p1_z1_z0, svint16x2_t, svint16_t,
+ svmla_single_za32_s16_vg2x2 (w8 + 1, z1, z0),
+ svmla_za32_vg2x2 (w8 + 1, z1, z0))
+
+/*
+** mla_single_w8p4_z20_z0:
+** smlal za\.s\[w8, 4:5, vgx2\], {z20\.h - z21\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p4_z20_z0, svint16x2_t, svint16_t,
+ svmla_single_za32_s16_vg2x2 (w8 + 4, z20, z0),
+ svmla_za32_vg2x2 (w8 + 4, z20, z0))
+
+/*
+** mla_single_w8p6_z27_z0:
+** smlal za\.s\[w8, 6:7, vgx2\], {z27\.h - z28\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p6_z27_z0, svint16x2_t, svint16_t,
+ svmla_single_za32_s16_vg2x2 (w8 + 6, z27, z0),
+ svmla_za32_vg2x2 (w8 + 6, z27, z0))
+
+/*
+** mla_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p7_z1_z0, svint16x2_t, svint16_t,
+ svmla_single_za32_s16_vg2x2 (w8 + 7, z1, z0),
+ svmla_za32_vg2x2 (w8 + 7, z1, z0))
+
+/*
+** mla_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** smlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p8_z1_z0, svint16x2_t, svint16_t,
+ svmla_single_za32_s16_vg2x2 (w8 + 8, z1, z0),
+ svmla_za32_vg2x2 (w8 + 8, z1, z0))
+
+/*
+** mla_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0m1_z1_z0, svint16x2_t, svint16_t,
+ svmla_single_za32_s16_vg2x2 (w0 - 1, z1, z0),
+ svmla_za32_vg2x2 (w0 - 1, z1, z0))
+
+/*
+** mla_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** smlal za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mla_single_w8_z0_z15, svint16x2_t, svint16_t,
+ svmla_single_za32_s16_vg2x2 (w8, z0, z15),
+ svmla_za32_vg2x2 (w8, z0, z15))
+
+/*
+** mla_single_w8_z20_z16:
+**	mov	(z[0-7])\.d, z16\.d
+** smlal za\.s\[w8, 0:1, vgx2\], {z20\.h - z21\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z20_z16, svint16x2_t, svint16_t,
+ svmla_single_za32_s16_vg2x2 (w8, z20, z16),
+ svmla_za32_vg2x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s16_vg2x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s16_vg2x4.c
new file mode 100644
index 0000000..049cf0a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s16_vg2x4.c
@@ -0,0 +1,258 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_0_z0_z0, svint16x4_t,
+ svmla_za32_s16_vg2x4 (0, z0, z0),
+ svmla_za32_vg2x4 (0, z0, z0))
+
+/*
+** mla_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** smlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w0_z0_z0, svint16x4_t,
+ svmla_za32_s16_vg2x4 (w0, z0, z0),
+ svmla_za32_vg2x4 (w0, z0, z0))
+
+/*
+** mla_w8_z0_z4:
+** smlal za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z4, svint16x4_t,
+ svmla_za32_s16_vg2x4 (w8, z0, z4),
+ svmla_za32_vg2x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mla_w8_z0_z18:
+** ...
+** smlal za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z18, svint16x4_t,
+ svmla_za32_s16_vg2x4 (w8, z0, z18),
+ svmla_za32_vg2x4 (w8, z0, z18))
+
+/*
+** mla_w8_z18_z0:
+** ...
+** smlal za\.s\[w8, 0:1, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z18_z0, svint16x4_t,
+ svmla_za32_s16_vg2x4 (w8, z18, z0),
+ svmla_za32_vg2x4 (w8, z18, z0))
+
+/*
+** mla_w8_z0_z23:
+** ...
+** smlal za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z23, svint16x4_t,
+ svmla_za32_s16_vg2x4 (w8, z0, z23),
+ svmla_za32_vg2x4 (w8, z0, z23))
+
+/*
+** mla_w8_z23_z0:
+** ...
+** smlal za\.s\[w8, 0:1, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z23_z0, svint16x4_t,
+ svmla_za32_s16_vg2x4 (w8, z23, z0),
+ svmla_za32_vg2x4 (w8, z23, z0))
+
+/*
+** mla_w8_z4_z28:
+** smlal za\.s\[w8, 0:1, vgx4\], {z4\.h - z7\.h}, {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z4_z28, svint16x4_t,
+ svmla_za32_s16_vg2x4 (w8, z4, z28),
+ svmla_za32_vg2x4 (w8, z4, z28))
+
+/*
+** mla_w8_z28_z0:
+** smlal za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z28_z0, svint16x4_t,
+ svmla_za32_s16_vg2x4 (w8, z28, z0),
+ svmla_za32_vg2x4 (w8, z28, z0))
+
+/*
+** mla_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlal za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p1_z4_z0, svint16x4_t,
+ svmla_za32_s16_vg2x4 (w8 + 1, z4, z0),
+ svmla_za32_vg2x4 (w8 + 1, z4, z0))
+
+/*
+** mla_w8p2_z4_z0:
+** smlal za\.s\[w8, 2:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p2_z4_z0, svint16x4_t,
+ svmla_za32_s16_vg2x4 (w8 + 2, z4, z0),
+ svmla_za32_vg2x4 (w8 + 2, z4, z0))
+
+/*
+** mla_w8p6_z4_z0:
+** smlal za\.s\[w8, 6:7, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p6_z4_z0, svint16x4_t,
+ svmla_za32_s16_vg2x4 (w8 + 6, z4, z0),
+ svmla_za32_vg2x4 (w8 + 6, z4, z0))
+
+/*
+** mla_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlal za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p7_z4_z0, svint16x4_t,
+ svmla_za32_s16_vg2x4 (w8 + 7, z4, z0),
+ svmla_za32_vg2x4 (w8 + 7, z4, z0))
+
+/*
+** mla_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** smlal za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p8_z4_z4, svint16x4_t,
+ svmla_za32_s16_vg2x4 (w8 + 8, z4, z4),
+ svmla_za32_vg2x4 (w8 + 8, z4, z4))
+
+/*
+** mla_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlal za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8m1_z4_z0, svint16x4_t,
+ svmla_za32_s16_vg2x4 (w8 - 1, z4, z0),
+ svmla_za32_vg2x4 (w8 - 1, z4, z0))
+
+/*
+** mla_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_0_z1_z0, svint16x4_t, svint16_t,
+ svmla_single_za32_s16_vg2x4 (0, z1, z0),
+ svmla_za32_vg2x4 (0, z1, z0))
+
+/*
+** mla_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** smlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0_z1_z0, svint16x4_t, svint16_t,
+ svmla_single_za32_s16_vg2x4 (w0, z1, z0),
+ svmla_za32_vg2x4 (w0, z1, z0))
+
+/*
+** mla_single_w8_z1_z0:
+** smlal za\.s\[w8, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z1_z0, svint16x4_t, svint16_t,
+ svmla_single_za32_s16_vg2x4 (w8, z1, z0),
+ svmla_za32_vg2x4 (w8, z1, z0))
+
+/*
+** mla_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p1_z1_z0, svint16x4_t, svint16_t,
+ svmla_single_za32_s16_vg2x4 (w8 + 1, z1, z0),
+ svmla_za32_vg2x4 (w8 + 1, z1, z0))
+
+/*
+** mla_single_w8p4_z20_z0:
+** smlal za\.s\[w8, 4:5, vgx4\], {z20\.h - z23\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p4_z20_z0, svint16x4_t, svint16_t,
+ svmla_single_za32_s16_vg2x4 (w8 + 4, z20, z0),
+ svmla_za32_vg2x4 (w8 + 4, z20, z0))
+
+/*
+** mla_single_w8p6_z27_z0:
+** smlal za\.s\[w8, 6:7, vgx4\], {z27\.h - z30\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p6_z27_z0, svint16x4_t, svint16_t,
+ svmla_single_za32_s16_vg2x4 (w8 + 6, z27, z0),
+ svmla_za32_vg2x4 (w8 + 6, z27, z0))
+
+/*
+** mla_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p7_z1_z0, svint16x4_t, svint16_t,
+ svmla_single_za32_s16_vg2x4 (w8 + 7, z1, z0),
+ svmla_za32_vg2x4 (w8 + 7, z1, z0))
+
+/*
+** mla_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** smlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p8_z1_z0, svint16x4_t, svint16_t,
+ svmla_single_za32_s16_vg2x4 (w8 + 8, z1, z0),
+ svmla_za32_vg2x4 (w8 + 8, z1, z0))
+
+/*
+** mla_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0m1_z1_z0, svint16x4_t, svint16_t,
+ svmla_single_za32_s16_vg2x4 (w0 - 1, z1, z0),
+ svmla_za32_vg2x4 (w0 - 1, z1, z0))
+
+/*
+** mla_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** smlal za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mla_single_w8_z0_z15, svint16x4_t, svint16_t,
+ svmla_single_za32_s16_vg2x4 (w8, z0, z15),
+ svmla_za32_vg2x4 (w8, z0, z15))
+
+/*
+** mla_single_w8_z20_z16:
+**	mov	(z[0-7])\.d, z16\.d
+** smlal za\.s\[w8, 0:1, vgx4\], {z20\.h - z23\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z20_z16, svint16x4_t, svint16_t,
+ svmla_single_za32_s16_vg2x4 (w8, z20, z16),
+ svmla_za32_vg2x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s8_vg4x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s8_vg4x1.c
new file mode 100644
index 0000000..da9d04e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s8_vg4x1.c
@@ -0,0 +1,149 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlall za\.s\[\1, 0:3\], z0\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mla_0_z0_z0, svint8_t,
+ svmla_za32_s8_vg4x1 (0, z0, z0),
+ svmla_za32_vg4x1 (0, z0, z0))
+
+/*
+** mla_w0_z0_z3:
+** mov (w8|w9|w10|w11), w0
+** smlall za\.s\[\1, 0:3\], z0\.b, z3\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w0_z0_z3, svint8_t,
+ svmla_za32_s8_vg4x1 (w0, z0, z3),
+ svmla_za32_vg4x1 (w0, z0, z3))
+
+/*
+** mla_w7_z0_z3:
+** mov (w8|w9|w10|w11), w7
+** smlall za\.s\[\1, 0:3\], z0\.b, z3\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w7_z0_z3, svint8_t,
+ svmla_za32_s8_vg4x1 (w7, z0, z3),
+ svmla_za32_vg4x1 (w7, z0, z3))
+
+/*
+** mla_w8_z7_z3:
+** smlall za\.s\[w8, 0:3\], z7\.b, z3\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w8_z7_z3, svint8_t,
+ svmla_za32_s8_vg4x1 (w8, z7, z3),
+ svmla_za32_vg4x1 (w8, z7, z3))
+
+/*
+** mla_w8_z31_z16:
+** mov (z[0-7])\.d, z16\.d
+**	smlall	za\.s\[w8, 0:3\], z31\.b, \1\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w8_z31_z16, svint8_t,
+ svmla_za32_s8_vg4x1 (w8, z31, z16),
+ svmla_za32_vg4x1 (w8, z31, z16))
+
+/*
+** mla_w8p1_z0_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlall za\.s\[\1, 0:3\], z0\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w8p1_z0_z0, svint8_t,
+ svmla_za32_s8_vg4x1 (w8 + 1, z0, z0),
+ svmla_za32_vg4x1 (w8 + 1, z0, z0))
+
+/*
+** mla_w10p4_z23_z0:
+** smlall za\.s\[w10, 4:7\], z23\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w10p4_z23_z0, svint8_t,
+ svmla_za32_s8_vg4x1 (w10 + 4, z23, z0),
+ svmla_za32_vg4x1 (w10 + 4, z23, z0))
+
+/*
+** mla_w11p6_z23_z0:
+** add (w8|w9|w10|w11), w11, #?6
+** smlall za\.s\[\1, 0:3\], z23\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w11p6_z23_z0, svint8_t,
+ svmla_za32_s8_vg4x1 (w11 + 6, z23, z0),
+ svmla_za32_vg4x1 (w11 + 6, z23, z0))
+
+/*
+** mla_w9p8_z7_z7:
+** smlall za\.s\[w9, 8:11\], z7\.b, z7\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w9p8_z7_z7, svint8_t,
+ svmla_za32_s8_vg4x1 (w9 + 8, z7, z7),
+ svmla_za32_vg4x1 (w9 + 8, z7, z7))
+
+/*
+** mla_w11p12_z23_z0:
+** smlall za\.s\[w11, 12:15\], z23\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w11p12_z23_z0, svint8_t,
+ svmla_za32_s8_vg4x1 (w11 + 12, z23, z0),
+ svmla_za32_vg4x1 (w11 + 12, z23, z0))
+
+/*
+** mla_w8p14_z23_z0:
+** add (w8|w9|w10|w11), w8, #?14
+** smlall za\.s\[\1, 0:3\], z23\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w8p14_z23_z0, svint8_t,
+ svmla_za32_s8_vg4x1 (w8 + 14, z23, z0),
+ svmla_za32_vg4x1 (w8 + 14, z23, z0))
+
+/*
+** mla_w8p15_z7_z7:
+** add (w8|w9|w10|w11), w8, #?15
+** smlall za\.s\[\1, 0:3\], z7\.b, z7\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w8p15_z7_z7, svint8_t,
+ svmla_za32_s8_vg4x1 (w8 + 15, z7, z7),
+ svmla_za32_vg4x1 (w8 + 15, z7, z7))
+
+/*
+** mla_w8p16_z7_z7:
+** add (w8|w9|w10|w11), w8, #?16
+** smlall za\.s\[\1, 0:3\], z7\.b, z7\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w8p16_z7_z7, svint8_t,
+ svmla_za32_s8_vg4x1 (w8 + 16, z7, z7),
+ svmla_za32_vg4x1 (w8 + 16, z7, z7))
+
+/*
+** mla_w8m1_z16_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlall za\.s\[\1, 0:3\], z16\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w8m1_z16_z0, svint8_t,
+ svmla_za32_s8_vg4x1 (w8 - 1, z16, z0),
+ svmla_za32_vg4x1 (w8 - 1, z16, z0))
+
+/*
+** mla_w12_z0_z3:
+** mov (w8|w9|w10|w11), w12
+** smlall za\.s\[\1, 0:3\], z0\.b, z3\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w12_z0_z3, svint8_t,
+ svmla_za32_s8_vg4x1 (w12, z0, z3),
+ svmla_za32_vg4x1 (w12, z0, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s8_vg4x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s8_vg4x2.c
new file mode 100644
index 0000000..67dd252
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s8_vg4x2.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlall za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mla_0_z0_z0, svint8x2_t,
+ svmla_za32_s8_vg4x2 (0, z0, z0),
+ svmla_za32_vg4x2 (0, z0, z0))
+
+/*
+** mla_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** smlall za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w0_z0_z0, svint8x2_t,
+ svmla_za32_s8_vg4x2 (w0, z0, z0),
+ svmla_za32_vg4x2 (w0, z0, z0))
+
+/*
+** mla_w8_z0_z4:
+** smlall za\.s\[w8, 0:3, vgx2\], {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z4, svint8x2_t,
+ svmla_za32_s8_vg4x2 (w8, z0, z4),
+ svmla_za32_vg4x2 (w8, z0, z4))
+
+/*
+** mla_w8_z4_z18:
+** smlall za\.s\[w8, 0:3, vgx2\], {z4\.b - z5\.b}, {z18\.b - z19\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z4_z18, svint8x2_t,
+ svmla_za32_s8_vg4x2 (w8, z4, z18),
+ svmla_za32_vg4x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mla_w8_z0_z23:
+** ...
+** smlall za\.s\[w8, 0:3, vgx2\], {z0\.b - z1\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z23, svint8x2_t,
+ svmla_za32_s8_vg4x2 (w8, z0, z23),
+ svmla_za32_vg4x2 (w8, z0, z23))
+
+/*
+** mla_w8_z23_z0:
+** ...
+** smlall za\.s\[w8, 0:3, vgx2\], [^\n]+, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z23_z0, svint8x2_t,
+ svmla_za32_s8_vg4x2 (w8, z23, z0),
+ svmla_za32_vg4x2 (w8, z23, z0))
+
+/*
+** mla_w8_z18_z28:
+** smlall za\.s\[w8, 0:3, vgx2\], {z18\.b - z19\.b}, {z28\.b - z29\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z18_z28, svint8x2_t,
+ svmla_za32_s8_vg4x2 (w8, z18, z28),
+ svmla_za32_vg4x2 (w8, z18, z28))
+
+/*
+** mla_w8_z28_z4:
+** smlall za\.s\[w8, 0:3, vgx2\], {z28\.b - z29\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z28_z4, svint8x2_t,
+ svmla_za32_s8_vg4x2 (w8, z28, z4),
+ svmla_za32_vg4x2 (w8, z28, z4))
+
+/*
+** mla_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlall za\.s\[\1, 0:3, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8p1_z4_z0, svint8x2_t,
+ svmla_za32_s8_vg4x2 (w8 + 1, z4, z0),
+ svmla_za32_vg4x2 (w8 + 1, z4, z0))
+
+/*
+** mla_w8p2_z4_z0:
+** add (w8|w9|w10|w11), w8, #?2
+**	smlall	za\.s\[\1, 0:3, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8p2_z4_z0, svint8x2_t,
+ svmla_za32_s8_vg4x2 (w8 + 2, z4, z0),
+ svmla_za32_vg4x2 (w8 + 2, z4, z0))
+
+/*
+** mla_w11p4_z4_z0:
+** smlall za\.s\[w11, 4:7, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w11p4_z4_z0, svint8x2_t,
+ svmla_za32_s8_vg4x2 (w11 + 4, z4, z0),
+ svmla_za32_vg4x2 (w11 + 4, z4, z0))
+
+/*
+** mla_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlall za\.s\[\1, 0:3, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8p7_z4_z0, svint8x2_t,
+ svmla_za32_s8_vg4x2 (w8 + 7, z4, z0),
+ svmla_za32_vg4x2 (w8 + 7, z4, z0))
+
+/*
+** mla_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** smlall za\.s\[\1, 0:3, vgx2\], {z4\.b - z5\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8p8_z4_z4, svint8x2_t,
+ svmla_za32_s8_vg4x2 (w8 + 8, z4, z4),
+ svmla_za32_vg4x2 (w8 + 8, z4, z4))
+
+/*
+** mla_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlall za\.s\[\1, 0:3, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8m1_z4_z0, svint8x2_t,
+ svmla_za32_s8_vg4x2 (w8 - 1, z4, z0),
+ svmla_za32_vg4x2 (w8 - 1, z4, z0))
+
+/*
+** mla_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlall za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_0_z1_z0, svint8x2_t, svint8_t,
+ svmla_single_za32_s8_vg4x2 (0, z1, z0),
+ svmla_za32_vg4x2 (0, z1, z0))
+
+/*
+** mla_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** smlall za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0_z1_z0, svint8x2_t, svint8_t,
+ svmla_single_za32_s8_vg4x2 (w0, z1, z0),
+ svmla_za32_vg4x2 (w0, z1, z0))
+
+/*
+** mla_single_w8_z1_z0:
+** smlall za\.s\[w8, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z1_z0, svint8x2_t, svint8_t,
+ svmla_single_za32_s8_vg4x2 (w8, z1, z0),
+ svmla_za32_vg4x2 (w8, z1, z0))
+
+/*
+** mla_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlall za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p1_z1_z0, svint8x2_t, svint8_t,
+ svmla_single_za32_s8_vg4x2 (w8 + 1, z1, z0),
+ svmla_za32_vg4x2 (w8 + 1, z1, z0))
+
+/*
+** mla_single_w8p2_z20_z0:
+** add (w8|w9|w10|w11), w8, #?2
+** smlall za\.s\[\1, 0:3, vgx2\], {z20\.b - z21\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p2_z20_z0, svint8x2_t, svint8_t,
+ svmla_single_za32_s8_vg4x2 (w8 + 2, z20, z0),
+ svmla_za32_vg4x2 (w8 + 2, z20, z0))
+
+/*
+** mla_single_w11p4_z27_z0:
+** smlall za\.s\[w11, 4:7, vgx2\], {z27\.b - z28\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w11p4_z27_z0, svint8x2_t, svint8_t,
+ svmla_single_za32_s8_vg4x2 (w11 + 4, z27, z0),
+ svmla_za32_vg4x2 (w11 + 4, z27, z0))
+
+/*
+** mla_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlall za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p7_z1_z0, svint8x2_t, svint8_t,
+ svmla_single_za32_s8_vg4x2 (w8 + 7, z1, z0),
+ svmla_za32_vg4x2 (w8 + 7, z1, z0))
+
+/*
+** mla_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** smlall za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p8_z1_z0, svint8x2_t, svint8_t,
+ svmla_single_za32_s8_vg4x2 (w8 + 8, z1, z0),
+ svmla_za32_vg4x2 (w8 + 8, z1, z0))
+
+/*
+** mla_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlall za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0m1_z1_z0, svint8x2_t, svint8_t,
+ svmla_single_za32_s8_vg4x2 (w0 - 1, z1, z0),
+ svmla_za32_vg4x2 (w0 - 1, z1, z0))
+
+/*
+** mla_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** smlall za\.s\[w8, 0:3, vgx2\], {z0\.b - z1\.b}, z15\.b
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mla_single_w8_z0_z15, svint8x2_t, svint8_t,
+ svmla_single_za32_s8_vg4x2 (w8, z0, z15),
+ svmla_za32_vg4x2 (w8, z0, z15))
+
+/*
+** mla_single_w8_z20_z16:
+**	mov	(z[0-7])\.d, z16\.d
+** smlall za\.s\[w8, 0:3, vgx2\], {z20\.b - z21\.b}, \1\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z20_z16, svint8x2_t, svint8_t,
+ svmla_single_za32_s8_vg4x2 (w8, z20, z16),
+ svmla_za32_vg4x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s8_vg4x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s8_vg4x4.c
new file mode 100644
index 0000000..eb28600
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_s8_vg4x4.c
@@ -0,0 +1,260 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlall za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mla_0_z0_z0, svint8x4_t,
+ svmla_za32_s8_vg4x4 (0, z0, z0),
+ svmla_za32_vg4x4 (0, z0, z0))
+
+/*
+** mla_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** smlall za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w0_z0_z0, svint8x4_t,
+ svmla_za32_s8_vg4x4 (w0, z0, z0),
+ svmla_za32_vg4x4 (w0, z0, z0))
+
+/*
+** mla_w8_z0_z4:
+** smlall za\.s\[w8, 0:3, vgx4\], {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z4, svint8x4_t,
+ svmla_za32_s8_vg4x4 (w8, z0, z4),
+ svmla_za32_vg4x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mla_w8_z0_z18:
+** ...
+** smlall za\.s\[w8, 0:3, vgx4\], {z0\.b - z3\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z18, svint8x4_t,
+ svmla_za32_s8_vg4x4 (w8, z0, z18),
+ svmla_za32_vg4x4 (w8, z0, z18))
+
+/*
+** mla_w8_z18_z0:
+** ...
+** smlall za\.s\[w8, 0:3, vgx4\], [^\n]+, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z18_z0, svint8x4_t,
+ svmla_za32_s8_vg4x4 (w8, z18, z0),
+ svmla_za32_vg4x4 (w8, z18, z0))
+
+/*
+** mla_w8_z0_z23:
+** ...
+** smlall za\.s\[w8, 0:3, vgx4\], {z0\.b - z3\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z23, svint8x4_t,
+ svmla_za32_s8_vg4x4 (w8, z0, z23),
+ svmla_za32_vg4x4 (w8, z0, z23))
+
+/*
+** mla_w8_z23_z0:
+** ...
+** smlall za\.s\[w8, 0:3, vgx4\], [^\n]+, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z23_z0, svint8x4_t,
+ svmla_za32_s8_vg4x4 (w8, z23, z0),
+ svmla_za32_vg4x4 (w8, z23, z0))
+
+/*
+** mla_w8_z4_z28:
+** smlall za\.s\[w8, 0:3, vgx4\], {z4\.b - z7\.b}, {z28\.b - z31\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z4_z28, svint8x4_t,
+ svmla_za32_s8_vg4x4 (w8, z4, z28),
+ svmla_za32_vg4x4 (w8, z4, z28))
+
+/*
+** mla_w8_z28_z0:
+** smlall za\.s\[w8, 0:3, vgx4\], {z28\.b - z31\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z28_z0, svint8x4_t,
+ svmla_za32_s8_vg4x4 (w8, z28, z0),
+ svmla_za32_vg4x4 (w8, z28, z0))
+
+/*
+** mla_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlall za\.s\[\1, 0:3, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8p1_z4_z0, svint8x4_t,
+ svmla_za32_s8_vg4x4 (w8 + 1, z4, z0),
+ svmla_za32_vg4x4 (w8 + 1, z4, z0))
+
+/*
+** mla_w8p2_z4_z0:
+** add (w8|w9|w10|w11), w8, #?2
+** smlall za\.s\[\1, 0:3, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8p2_z4_z0, svint8x4_t,
+ svmla_za32_s8_vg4x4 (w8 + 2, z4, z0),
+ svmla_za32_vg4x4 (w8 + 2, z4, z0))
+
+/*
+** mla_w11p4_z4_z0:
+** smlall za\.s\[w11, 4:7, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w11p4_z4_z0, svint8x4_t,
+ svmla_za32_s8_vg4x4 (w11 + 4, z4, z0),
+ svmla_za32_vg4x4 (w11 + 4, z4, z0))
+
+/*
+** mla_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlall za\.s\[\1, 0:3, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8p7_z4_z0, svint8x4_t,
+ svmla_za32_s8_vg4x4 (w8 + 7, z4, z0),
+ svmla_za32_vg4x4 (w8 + 7, z4, z0))
+
+/*
+** mla_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** smlall za\.s\[\1, 0:3, vgx4\], {z4\.b - z7\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8p8_z4_z4, svint8x4_t,
+ svmla_za32_s8_vg4x4 (w8 + 8, z4, z4),
+ svmla_za32_vg4x4 (w8 + 8, z4, z4))
+
+/*
+** mla_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlall za\.s\[\1, 0:3, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8m1_z4_z0, svint8x4_t,
+ svmla_za32_s8_vg4x4 (w8 - 1, z4, z0),
+ svmla_za32_vg4x4 (w8 - 1, z4, z0))
+
+/*
+** mla_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlall za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_0_z1_z0, svint8x4_t, svint8_t,
+ svmla_single_za32_s8_vg4x4 (0, z1, z0),
+ svmla_za32_vg4x4 (0, z1, z0))
+
+/*
+** mla_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** smlall za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0_z1_z0, svint8x4_t, svint8_t,
+ svmla_single_za32_s8_vg4x4 (w0, z1, z0),
+ svmla_za32_vg4x4 (w0, z1, z0))
+
+/*
+** mla_single_w8_z1_z0:
+** smlall za\.s\[w8, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z1_z0, svint8x4_t, svint8_t,
+ svmla_single_za32_s8_vg4x4 (w8, z1, z0),
+ svmla_za32_vg4x4 (w8, z1, z0))
+
+/*
+** mla_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlall za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p1_z1_z0, svint8x4_t, svint8_t,
+ svmla_single_za32_s8_vg4x4 (w8 + 1, z1, z0),
+ svmla_za32_vg4x4 (w8 + 1, z1, z0))
+
+/*
+** mla_single_w8p4_z20_z0:
+** smlall za\.s\[w8, 4:7, vgx4\], {z20\.b - z23\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p4_z20_z0, svint8x4_t, svint8_t,
+ svmla_single_za32_s8_vg4x4 (w8 + 4, z20, z0),
+ svmla_za32_vg4x4 (w8 + 4, z20, z0))
+
+/*
+** mla_single_w8p6_z27_z0:
+** add (w8|w9|w10|w11), w8, #?6
+** smlall za\.s\[\1, 0:3, vgx4\], {z27\.b - z30\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p6_z27_z0, svint8x4_t, svint8_t,
+ svmla_single_za32_s8_vg4x4 (w8 + 6, z27, z0),
+ svmla_za32_vg4x4 (w8 + 6, z27, z0))
+
+/*
+** mla_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlall za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p7_z1_z0, svint8x4_t, svint8_t,
+ svmla_single_za32_s8_vg4x4 (w8 + 7, z1, z0),
+ svmla_za32_vg4x4 (w8 + 7, z1, z0))
+
+/*
+** mla_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** smlall za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p8_z1_z0, svint8x4_t, svint8_t,
+ svmla_single_za32_s8_vg4x4 (w8 + 8, z1, z0),
+ svmla_za32_vg4x4 (w8 + 8, z1, z0))
+
+/*
+** mla_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlall za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0m1_z1_z0, svint8x4_t, svint8_t,
+ svmla_single_za32_s8_vg4x4 (w0 - 1, z1, z0),
+ svmla_za32_vg4x4 (w0 - 1, z1, z0))
+
+/*
+** mla_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** smlall za\.s\[w8, 0:3, vgx4\], {z0\.b - z3\.b}, z15\.b
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mla_single_w8_z0_z15, svint8x4_t, svint8_t,
+ svmla_single_za32_s8_vg4x4 (w8, z0, z15),
+ svmla_za32_vg4x4 (w8, z0, z15))
+
+/*
+** mla_single_w8_z20_z16:
+**	mov	(z[0-7])\.d, z16\.d
+** smlall za\.s\[w8, 0:3, vgx4\], {z20\.b - z23\.b}, \1\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z20_z16, svint8x4_t, svint8_t,
+ svmla_single_za32_s8_vg4x4 (w8, z20, z16),
+ svmla_za32_vg4x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u16_vg2x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u16_vg2x1.c
new file mode 100644
index 0000000..9072787
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u16_vg2x1.c
@@ -0,0 +1,148 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlal za\.s\[\1, 0:1\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_0_z0_z0, svuint16_t,
+ svmla_za32_u16_vg2x1 (0, z0, z0),
+ svmla_za32_vg2x1 (0, z0, z0))
+
+/*
+** mla_w0_z0_z3:
+** mov (w8|w9|w10|w11), w0
+** umlal za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w0_z0_z3, svuint16_t,
+ svmla_za32_u16_vg2x1 (w0, z0, z3),
+ svmla_za32_vg2x1 (w0, z0, z3))
+
+/*
+** mla_w7_z0_z3:
+** mov (w8|w9|w10|w11), w7
+** umlal za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w7_z0_z3, svuint16_t,
+ svmla_za32_u16_vg2x1 (w7, z0, z3),
+ svmla_za32_vg2x1 (w7, z0, z3))
+
+/*
+** mla_w8_z7_z3:
+** umlal za\.s\[w8, 0:1\], z7\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8_z7_z3, svuint16_t,
+ svmla_za32_u16_vg2x1 (w8, z7, z3),
+ svmla_za32_vg2x1 (w8, z7, z3))
+
+/*
+** mla_w8_z31_z16:
+** mov (z[0-7])\.d, z16\.d
+** umlal za\.s\[w8, 0:1\], z31\.h, \1\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8_z31_z16, svuint16_t,
+ svmla_za32_u16_vg2x1 (w8, z31, z16),
+ svmla_za32_vg2x1 (w8, z31, z16))
+
+/*
+** mla_w8p1_z0_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlal za\.s\[\1, 0:1\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p1_z0_z0, svuint16_t,
+ svmla_za32_u16_vg2x1 (w8 + 1, z0, z0),
+ svmla_za32_vg2x1 (w8 + 1, z0, z0))
+
+/*
+** mla_w8p2_z23_z0:
+** umlal za\.s\[w8, 2:3\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p2_z23_z0, svuint16_t,
+ svmla_za32_u16_vg2x1 (w8 + 2, z23, z0),
+ svmla_za32_vg2x1 (w8 + 2, z23, z0))
+
+/*
+** mla_w11p6_z23_z0:
+** umlal za\.s\[w11, 6:7\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w11p6_z23_z0, svuint16_t,
+ svmla_za32_u16_vg2x1 (w11 + 6, z23, z0),
+ svmla_za32_vg2x1 (w11 + 6, z23, z0))
+
+/*
+** mla_w8p7_z7_z7:
+** add (w8|w9|w10|w11), w8, #?7
+** umlal za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p7_z7_z7, svuint16_t,
+ svmla_za32_u16_vg2x1 (w8 + 7, z7, z7),
+ svmla_za32_vg2x1 (w8 + 7, z7, z7))
+
+/*
+** mla_w11p10_z23_z0:
+** umlal za\.s\[w11, 10:11\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w11p10_z23_z0, svuint16_t,
+ svmla_za32_u16_vg2x1 (w11 + 10, z23, z0),
+ svmla_za32_vg2x1 (w11 + 10, z23, z0))
+
+/*
+** mla_w8p14_z23_z0:
+** umlal za\.s\[w8, 14:15\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p14_z23_z0, svuint16_t,
+ svmla_za32_u16_vg2x1 (w8 + 14, z23, z0),
+ svmla_za32_vg2x1 (w8 + 14, z23, z0))
+
+/*
+** mla_w8p15_z7_z7:
+** add (w8|w9|w10|w11), w8, #?15
+** umlal za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p15_z7_z7, svuint16_t,
+ svmla_za32_u16_vg2x1 (w8 + 15, z7, z7),
+ svmla_za32_vg2x1 (w8 + 15, z7, z7))
+
+/*
+** mla_w8p16_z7_z7:
+** add (w8|w9|w10|w11), w8, #?16
+** umlal za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p16_z7_z7, svuint16_t,
+ svmla_za32_u16_vg2x1 (w8 + 16, z7, z7),
+ svmla_za32_vg2x1 (w8 + 16, z7, z7))
+
+/*
+** mla_w8m1_z16_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlal za\.s\[\1, 0:1\], z16\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8m1_z16_z0, svuint16_t,
+ svmla_za32_u16_vg2x1 (w8 - 1, z16, z0),
+ svmla_za32_vg2x1 (w8 - 1, z16, z0))
+
+/*
+** mla_w12_z0_z3:
+** mov (w8|w9|w10|w11), w12
+** umlal za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w12_z0_z3, svuint16_t,
+ svmla_za32_u16_vg2x1 (w12, z0, z3),
+ svmla_za32_vg2x1 (w12, z0, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u16_vg2x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u16_vg2x2.c
new file mode 100644
index 0000000..8f44f18
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u16_vg2x2.c
@@ -0,0 +1,247 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_0_z0_z0, svuint16x2_t,
+ svmla_za32_u16_vg2x2 (0, z0, z0),
+ svmla_za32_vg2x2 (0, z0, z0))
+
+/*
+** mla_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** umlal za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w0_z0_z0, svuint16x2_t,
+ svmla_za32_u16_vg2x2 (w0, z0, z0),
+ svmla_za32_vg2x2 (w0, z0, z0))
+
+/*
+** mla_w8_z0_z4:
+** umlal za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z4, svuint16x2_t,
+ svmla_za32_u16_vg2x2 (w8, z0, z4),
+ svmla_za32_vg2x2 (w8, z0, z4))
+
+/*
+** mla_w8_z4_z18:
+** umlal za\.s\[w8, 0:1, vgx2\], {z4\.h - z5\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z4_z18, svuint16x2_t,
+ svmla_za32_u16_vg2x2 (w8, z4, z18),
+ svmla_za32_vg2x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mla_w8_z0_z23:
+** ...
+** umlal za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z23, svuint16x2_t,
+ svmla_za32_u16_vg2x2 (w8, z0, z23),
+ svmla_za32_vg2x2 (w8, z0, z23))
+
+/*
+** mla_w8_z23_z0:
+** ...
+** umlal za\.s\[w8, 0:1, vgx2\], [^\n]+, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z23_z0, svuint16x2_t,
+ svmla_za32_u16_vg2x2 (w8, z23, z0),
+ svmla_za32_vg2x2 (w8, z23, z0))
+
+/*
+** mla_w8_z18_z28:
+** umlal za\.s\[w8, 0:1, vgx2\], {z18\.h - z19\.h}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z18_z28, svuint16x2_t,
+ svmla_za32_u16_vg2x2 (w8, z18, z28),
+ svmla_za32_vg2x2 (w8, z18, z28))
+
+/*
+** mla_w8_z28_z4:
+** umlal za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z28_z4, svuint16x2_t,
+ svmla_za32_u16_vg2x2 (w8, z28, z4),
+ svmla_za32_vg2x2 (w8, z28, z4))
+
+/*
+** mla_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlal za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p1_z4_z0, svuint16x2_t,
+ svmla_za32_u16_vg2x2 (w8 + 1, z4, z0),
+ svmla_za32_vg2x2 (w8 + 1, z4, z0))
+
+/*
+** mla_w8p2_z4_z0:
+** umlal za\.s\[w8, 2:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p2_z4_z0, svuint16x2_t,
+ svmla_za32_u16_vg2x2 (w8 + 2, z4, z0),
+ svmla_za32_vg2x2 (w8 + 2, z4, z0))
+
+/*
+** mla_w8p6_z4_z0:
+** umlal za\.s\[w8, 6:7, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p6_z4_z0, svuint16x2_t,
+ svmla_za32_u16_vg2x2 (w8 + 6, z4, z0),
+ svmla_za32_vg2x2 (w8 + 6, z4, z0))
+
+/*
+** mla_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlal za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p7_z4_z0, svuint16x2_t,
+ svmla_za32_u16_vg2x2 (w8 + 7, z4, z0),
+ svmla_za32_vg2x2 (w8 + 7, z4, z0))
+
+/*
+** mla_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** umlal za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p8_z4_z4, svuint16x2_t,
+ svmla_za32_u16_vg2x2 (w8 + 8, z4, z4),
+ svmla_za32_vg2x2 (w8 + 8, z4, z4))
+
+/*
+** mla_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlal za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8m1_z4_z0, svuint16x2_t,
+ svmla_za32_u16_vg2x2 (w8 - 1, z4, z0),
+ svmla_za32_vg2x2 (w8 - 1, z4, z0))
+
+/*
+** mla_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_0_z1_z0, svuint16x2_t, svuint16_t,
+ svmla_single_za32_u16_vg2x2 (0, z1, z0),
+ svmla_za32_vg2x2 (0, z1, z0))
+
+/*
+** mla_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** umlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0_z1_z0, svuint16x2_t, svuint16_t,
+ svmla_single_za32_u16_vg2x2 (w0, z1, z0),
+ svmla_za32_vg2x2 (w0, z1, z0))
+
+/*
+** mla_single_w8_z1_z0:
+** umlal za\.s\[w8, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z1_z0, svuint16x2_t, svuint16_t,
+ svmla_single_za32_u16_vg2x2 (w8, z1, z0),
+ svmla_za32_vg2x2 (w8, z1, z0))
+
+/*
+** mla_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p1_z1_z0, svuint16x2_t, svuint16_t,
+ svmla_single_za32_u16_vg2x2 (w8 + 1, z1, z0),
+ svmla_za32_vg2x2 (w8 + 1, z1, z0))
+
+/*
+** mla_single_w8p4_z20_z0:
+** umlal za\.s\[w8, 4:5, vgx2\], {z20\.h - z21\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p4_z20_z0, svuint16x2_t, svuint16_t,
+ svmla_single_za32_u16_vg2x2 (w8 + 4, z20, z0),
+ svmla_za32_vg2x2 (w8 + 4, z20, z0))
+
+/*
+** mla_single_w8p6_z27_z0:
+** umlal za\.s\[w8, 6:7, vgx2\], {z27\.h - z28\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p6_z27_z0, svuint16x2_t, svuint16_t,
+ svmla_single_za32_u16_vg2x2 (w8 + 6, z27, z0),
+ svmla_za32_vg2x2 (w8 + 6, z27, z0))
+
+/*
+** mla_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p7_z1_z0, svuint16x2_t, svuint16_t,
+ svmla_single_za32_u16_vg2x2 (w8 + 7, z1, z0),
+ svmla_za32_vg2x2 (w8 + 7, z1, z0))
+
+/*
+** mla_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** umlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p8_z1_z0, svuint16x2_t, svuint16_t,
+ svmla_single_za32_u16_vg2x2 (w8 + 8, z1, z0),
+ svmla_za32_vg2x2 (w8 + 8, z1, z0))
+
+/*
+** mla_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlal za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0m1_z1_z0, svuint16x2_t, svuint16_t,
+ svmla_single_za32_u16_vg2x2 (w0 - 1, z1, z0),
+ svmla_za32_vg2x2 (w0 - 1, z1, z0))
+
+/*
+** mla_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** umlal za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mla_single_w8_z0_z15, svuint16x2_t, svuint16_t,
+ svmla_single_za32_u16_vg2x2 (w8, z0, z15),
+ svmla_za32_vg2x2 (w8, z0, z15))
+
+/*
+** mla_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** umlal za\.s\[w8, 0:1, vgx2\], {z20\.h - z21\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z20_z16, svuint16x2_t, svuint16_t,
+ svmla_single_za32_u16_vg2x2 (w8, z20, z16),
+ svmla_za32_vg2x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u16_vg2x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u16_vg2x4.c
new file mode 100644
index 0000000..b87569a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u16_vg2x4.c
@@ -0,0 +1,258 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_0_z0_z0, svuint16x4_t,
+ svmla_za32_u16_vg2x4 (0, z0, z0),
+ svmla_za32_vg2x4 (0, z0, z0))
+
+/*
+** mla_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** umlal za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w0_z0_z0, svuint16x4_t,
+ svmla_za32_u16_vg2x4 (w0, z0, z0),
+ svmla_za32_vg2x4 (w0, z0, z0))
+
+/*
+** mla_w8_z0_z4:
+** umlal za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z4, svuint16x4_t,
+ svmla_za32_u16_vg2x4 (w8, z0, z4),
+ svmla_za32_vg2x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mla_w8_z0_z18:
+** ...
+** umlal za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z18, svuint16x4_t,
+ svmla_za32_u16_vg2x4 (w8, z0, z18),
+ svmla_za32_vg2x4 (w8, z0, z18))
+
+/*
+** mla_w8_z18_z0:
+** ...
+** umlal za\.s\[w8, 0:1, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z18_z0, svuint16x4_t,
+ svmla_za32_u16_vg2x4 (w8, z18, z0),
+ svmla_za32_vg2x4 (w8, z18, z0))
+
+/*
+** mla_w8_z0_z23:
+** ...
+** umlal za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z23, svuint16x4_t,
+ svmla_za32_u16_vg2x4 (w8, z0, z23),
+ svmla_za32_vg2x4 (w8, z0, z23))
+
+/*
+** mla_w8_z23_z0:
+** ...
+** umlal za\.s\[w8, 0:1, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z23_z0, svuint16x4_t,
+ svmla_za32_u16_vg2x4 (w8, z23, z0),
+ svmla_za32_vg2x4 (w8, z23, z0))
+
+/*
+** mla_w8_z4_z28:
+** umlal za\.s\[w8, 0:1, vgx4\], {z4\.h - z7\.h}, {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z4_z28, svuint16x4_t,
+ svmla_za32_u16_vg2x4 (w8, z4, z28),
+ svmla_za32_vg2x4 (w8, z4, z28))
+
+/*
+** mla_w8_z28_z0:
+** umlal za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z28_z0, svuint16x4_t,
+ svmla_za32_u16_vg2x4 (w8, z28, z0),
+ svmla_za32_vg2x4 (w8, z28, z0))
+
+/*
+** mla_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlal za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p1_z4_z0, svuint16x4_t,
+ svmla_za32_u16_vg2x4 (w8 + 1, z4, z0),
+ svmla_za32_vg2x4 (w8 + 1, z4, z0))
+
+/*
+** mla_w8p2_z4_z0:
+** umlal za\.s\[w8, 2:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p2_z4_z0, svuint16x4_t,
+ svmla_za32_u16_vg2x4 (w8 + 2, z4, z0),
+ svmla_za32_vg2x4 (w8 + 2, z4, z0))
+
+/*
+** mla_w8p6_z4_z0:
+** umlal za\.s\[w8, 6:7, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p6_z4_z0, svuint16x4_t,
+ svmla_za32_u16_vg2x4 (w8 + 6, z4, z0),
+ svmla_za32_vg2x4 (w8 + 6, z4, z0))
+
+/*
+** mla_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlal za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p7_z4_z0, svuint16x4_t,
+ svmla_za32_u16_vg2x4 (w8 + 7, z4, z0),
+ svmla_za32_vg2x4 (w8 + 7, z4, z0))
+
+/*
+** mla_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** umlal za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p8_z4_z4, svuint16x4_t,
+ svmla_za32_u16_vg2x4 (w8 + 8, z4, z4),
+ svmla_za32_vg2x4 (w8 + 8, z4, z4))
+
+/*
+** mla_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlal za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8m1_z4_z0, svuint16x4_t,
+ svmla_za32_u16_vg2x4 (w8 - 1, z4, z0),
+ svmla_za32_vg2x4 (w8 - 1, z4, z0))
+
+/*
+** mla_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_0_z1_z0, svuint16x4_t, svuint16_t,
+ svmla_single_za32_u16_vg2x4 (0, z1, z0),
+ svmla_za32_vg2x4 (0, z1, z0))
+
+/*
+** mla_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** umlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0_z1_z0, svuint16x4_t, svuint16_t,
+ svmla_single_za32_u16_vg2x4 (w0, z1, z0),
+ svmla_za32_vg2x4 (w0, z1, z0))
+
+/*
+** mla_single_w8_z1_z0:
+** umlal za\.s\[w8, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z1_z0, svuint16x4_t, svuint16_t,
+ svmla_single_za32_u16_vg2x4 (w8, z1, z0),
+ svmla_za32_vg2x4 (w8, z1, z0))
+
+/*
+** mla_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p1_z1_z0, svuint16x4_t, svuint16_t,
+ svmla_single_za32_u16_vg2x4 (w8 + 1, z1, z0),
+ svmla_za32_vg2x4 (w8 + 1, z1, z0))
+
+/*
+** mla_single_w8p4_z20_z0:
+** umlal za\.s\[w8, 4:5, vgx4\], {z20\.h - z23\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p4_z20_z0, svuint16x4_t, svuint16_t,
+ svmla_single_za32_u16_vg2x4 (w8 + 4, z20, z0),
+ svmla_za32_vg2x4 (w8 + 4, z20, z0))
+
+/*
+** mla_single_w8p6_z27_z0:
+** umlal za\.s\[w8, 6:7, vgx4\], {z27\.h - z30\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p6_z27_z0, svuint16x4_t, svuint16_t,
+ svmla_single_za32_u16_vg2x4 (w8 + 6, z27, z0),
+ svmla_za32_vg2x4 (w8 + 6, z27, z0))
+
+/*
+** mla_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p7_z1_z0, svuint16x4_t, svuint16_t,
+ svmla_single_za32_u16_vg2x4 (w8 + 7, z1, z0),
+ svmla_za32_vg2x4 (w8 + 7, z1, z0))
+
+/*
+** mla_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** umlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p8_z1_z0, svuint16x4_t, svuint16_t,
+ svmla_single_za32_u16_vg2x4 (w8 + 8, z1, z0),
+ svmla_za32_vg2x4 (w8 + 8, z1, z0))
+
+/*
+** mla_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlal za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0m1_z1_z0, svuint16x4_t, svuint16_t,
+ svmla_single_za32_u16_vg2x4 (w0 - 1, z1, z0),
+ svmla_za32_vg2x4 (w0 - 1, z1, z0))
+
+/*
+** mla_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** umlal za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mla_single_w8_z0_z15, svuint16x4_t, svuint16_t,
+ svmla_single_za32_u16_vg2x4 (w8, z0, z15),
+ svmla_za32_vg2x4 (w8, z0, z15))
+
+/*
+** mla_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** umlal za\.s\[w8, 0:1, vgx4\], {z20\.h - z23\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z20_z16, svuint16x4_t, svuint16_t,
+ svmla_single_za32_u16_vg2x4 (w8, z20, z16),
+ svmla_za32_vg2x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u8_vg4x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u8_vg4x1.c
new file mode 100644
index 0000000..99d75c9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u8_vg4x1.c
@@ -0,0 +1,149 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlall za\.s\[\1, 0:3\], z0\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mla_0_z0_z0, svuint8_t,
+ svmla_za32_u8_vg4x1 (0, z0, z0),
+ svmla_za32_vg4x1 (0, z0, z0))
+
+/*
+** mla_w0_z0_z3:
+** mov (w8|w9|w10|w11), w0
+** umlall za\.s\[\1, 0:3\], z0\.b, z3\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w0_z0_z3, svuint8_t,
+ svmla_za32_u8_vg4x1 (w0, z0, z3),
+ svmla_za32_vg4x1 (w0, z0, z3))
+
+/*
+** mla_w7_z0_z3:
+** mov (w8|w9|w10|w11), w7
+** umlall za\.s\[\1, 0:3\], z0\.b, z3\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w7_z0_z3, svuint8_t,
+ svmla_za32_u8_vg4x1 (w7, z0, z3),
+ svmla_za32_vg4x1 (w7, z0, z3))
+
+/*
+** mla_w8_z7_z3:
+** umlall za\.s\[w8, 0:3\], z7\.b, z3\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w8_z7_z3, svuint8_t,
+ svmla_za32_u8_vg4x1 (w8, z7, z3),
+ svmla_za32_vg4x1 (w8, z7, z3))
+
+/*
+** mla_w8_z31_z16:
+** mov (z[0-7])\.d, z16\.d
+** umlall za\.s\[w8, 0:3\], z31\.b, \1\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w8_z31_z16, svuint8_t,
+ svmla_za32_u8_vg4x1 (w8, z31, z16),
+ svmla_za32_vg4x1 (w8, z31, z16))
+
+/*
+** mla_w8p1_z0_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlall za\.s\[\1, 0:3\], z0\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w8p1_z0_z0, svuint8_t,
+ svmla_za32_u8_vg4x1 (w8 + 1, z0, z0),
+ svmla_za32_vg4x1 (w8 + 1, z0, z0))
+
+/*
+** mla_w10p4_z23_z0:
+** umlall za\.s\[w10, 4:7\], z23\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w10p4_z23_z0, svuint8_t,
+ svmla_za32_u8_vg4x1 (w10 + 4, z23, z0),
+ svmla_za32_vg4x1 (w10 + 4, z23, z0))
+
+/*
+** mla_w11p6_z23_z0:
+** add (w8|w9|w10|w11), w11, #?6
+** umlall za\.s\[\1, 0:3\], z23\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w11p6_z23_z0, svuint8_t,
+ svmla_za32_u8_vg4x1 (w11 + 6, z23, z0),
+ svmla_za32_vg4x1 (w11 + 6, z23, z0))
+
+/*
+** mla_w9p8_z7_z7:
+** umlall za\.s\[w9, 8:11\], z7\.b, z7\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w9p8_z7_z7, svuint8_t,
+ svmla_za32_u8_vg4x1 (w9 + 8, z7, z7),
+ svmla_za32_vg4x1 (w9 + 8, z7, z7))
+
+/*
+** mla_w11p12_z23_z0:
+** umlall za\.s\[w11, 12:15\], z23\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w11p12_z23_z0, svuint8_t,
+ svmla_za32_u8_vg4x1 (w11 + 12, z23, z0),
+ svmla_za32_vg4x1 (w11 + 12, z23, z0))
+
+/*
+** mla_w8p14_z23_z0:
+** add (w8|w9|w10|w11), w8, #?14
+** umlall za\.s\[\1, 0:3\], z23\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w8p14_z23_z0, svuint8_t,
+ svmla_za32_u8_vg4x1 (w8 + 14, z23, z0),
+ svmla_za32_vg4x1 (w8 + 14, z23, z0))
+
+/*
+** mla_w8p15_z7_z7:
+** add (w8|w9|w10|w11), w8, #?15
+** umlall za\.s\[\1, 0:3\], z7\.b, z7\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w8p15_z7_z7, svuint8_t,
+ svmla_za32_u8_vg4x1 (w8 + 15, z7, z7),
+ svmla_za32_vg4x1 (w8 + 15, z7, z7))
+
+/*
+** mla_w8p16_z7_z7:
+** add (w8|w9|w10|w11), w8, #?16
+** umlall za\.s\[\1, 0:3\], z7\.b, z7\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w8p16_z7_z7, svuint8_t,
+ svmla_za32_u8_vg4x1 (w8 + 16, z7, z7),
+ svmla_za32_vg4x1 (w8 + 16, z7, z7))
+
+/*
+** mla_w8m1_z16_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlall za\.s\[\1, 0:3\], z16\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w8m1_z16_z0, svuint8_t,
+ svmla_za32_u8_vg4x1 (w8 - 1, z16, z0),
+ svmla_za32_vg4x1 (w8 - 1, z16, z0))
+
+/*
+** mla_w12_z0_z3:
+** mov (w8|w9|w10|w11), w12
+** umlall za\.s\[\1, 0:3\], z0\.b, z3\.b
+** ret
+*/
+TEST_ZA_X1 (mla_w12_z0_z3, svuint8_t,
+ svmla_za32_u8_vg4x1 (w12, z0, z3),
+ svmla_za32_vg4x1 (w12, z0, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u8_vg4x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u8_vg4x2.c
new file mode 100644
index 0000000..ae91dfb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u8_vg4x2.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlall za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mla_0_z0_z0, svuint8x2_t,
+ svmla_za32_u8_vg4x2 (0, z0, z0),
+ svmla_za32_vg4x2 (0, z0, z0))
+
+/*
+** mla_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** umlall za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w0_z0_z0, svuint8x2_t,
+ svmla_za32_u8_vg4x2 (w0, z0, z0),
+ svmla_za32_vg4x2 (w0, z0, z0))
+
+/*
+** mla_w8_z0_z4:
+** umlall za\.s\[w8, 0:3, vgx2\], {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z4, svuint8x2_t,
+ svmla_za32_u8_vg4x2 (w8, z0, z4),
+ svmla_za32_vg4x2 (w8, z0, z4))
+
+/*
+** mla_w8_z4_z18:
+** umlall za\.s\[w8, 0:3, vgx2\], {z4\.b - z5\.b}, {z18\.b - z19\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z4_z18, svuint8x2_t,
+ svmla_za32_u8_vg4x2 (w8, z4, z18),
+ svmla_za32_vg4x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mla_w8_z0_z23:
+** ...
+** umlall za\.s\[w8, 0:3, vgx2\], {z0\.b - z1\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z23, svuint8x2_t,
+ svmla_za32_u8_vg4x2 (w8, z0, z23),
+ svmla_za32_vg4x2 (w8, z0, z23))
+
+/*
+** mla_w8_z23_z0:
+** ...
+** umlall za\.s\[w8, 0:3, vgx2\], [^\n]+, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z23_z0, svuint8x2_t,
+ svmla_za32_u8_vg4x2 (w8, z23, z0),
+ svmla_za32_vg4x2 (w8, z23, z0))
+
+/*
+** mla_w8_z18_z28:
+** umlall za\.s\[w8, 0:3, vgx2\], {z18\.b - z19\.b}, {z28\.b - z29\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z18_z28, svuint8x2_t,
+ svmla_za32_u8_vg4x2 (w8, z18, z28),
+ svmla_za32_vg4x2 (w8, z18, z28))
+
+/*
+** mla_w8_z28_z4:
+** umlall za\.s\[w8, 0:3, vgx2\], {z28\.b - z29\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z28_z4, svuint8x2_t,
+ svmla_za32_u8_vg4x2 (w8, z28, z4),
+ svmla_za32_vg4x2 (w8, z28, z4))
+
+/*
+** mla_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlall za\.s\[\1, 0:3, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8p1_z4_z0, svuint8x2_t,
+ svmla_za32_u8_vg4x2 (w8 + 1, z4, z0),
+ svmla_za32_vg4x2 (w8 + 1, z4, z0))
+
+/*
+** mla_w8p2_z4_z0:
+** add (w8|w9|w10|w11), w8, #?2
+** umlall za\.s\[\1, 0:3, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8p2_z4_z0, svuint8x2_t,
+ svmla_za32_u8_vg4x2 (w8 + 2, z4, z0),
+ svmla_za32_vg4x2 (w8 + 2, z4, z0))
+
+/*
+** mla_w11p4_z4_z0:
+** umlall za\.s\[w11, 4:7, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w11p4_z4_z0, svuint8x2_t,
+ svmla_za32_u8_vg4x2 (w11 + 4, z4, z0),
+ svmla_za32_vg4x2 (w11 + 4, z4, z0))
+
+/*
+** mla_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlall za\.s\[\1, 0:3, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8p7_z4_z0, svuint8x2_t,
+ svmla_za32_u8_vg4x2 (w8 + 7, z4, z0),
+ svmla_za32_vg4x2 (w8 + 7, z4, z0))
+
+/*
+** mla_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** umlall za\.s\[\1, 0:3, vgx2\], {z4\.b - z5\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8p8_z4_z4, svuint8x2_t,
+ svmla_za32_u8_vg4x2 (w8 + 8, z4, z4),
+ svmla_za32_vg4x2 (w8 + 8, z4, z4))
+
+/*
+** mla_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlall za\.s\[\1, 0:3, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8m1_z4_z0, svuint8x2_t,
+ svmla_za32_u8_vg4x2 (w8 - 1, z4, z0),
+ svmla_za32_vg4x2 (w8 - 1, z4, z0))
+
+/*
+** mla_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlall za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_0_z1_z0, svuint8x2_t, svuint8_t,
+ svmla_single_za32_u8_vg4x2 (0, z1, z0),
+ svmla_za32_vg4x2 (0, z1, z0))
+
+/*
+** mla_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** umlall za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0_z1_z0, svuint8x2_t, svuint8_t,
+ svmla_single_za32_u8_vg4x2 (w0, z1, z0),
+ svmla_za32_vg4x2 (w0, z1, z0))
+
+/*
+** mla_single_w8_z1_z0:
+** umlall za\.s\[w8, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z1_z0, svuint8x2_t, svuint8_t,
+ svmla_single_za32_u8_vg4x2 (w8, z1, z0),
+ svmla_za32_vg4x2 (w8, z1, z0))
+
+/*
+** mla_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlall za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p1_z1_z0, svuint8x2_t, svuint8_t,
+ svmla_single_za32_u8_vg4x2 (w8 + 1, z1, z0),
+ svmla_za32_vg4x2 (w8 + 1, z1, z0))
+
+/*
+** mla_single_w8p2_z20_z0:
+** add (w8|w9|w10|w11), w8, #?2
+** umlall za\.s\[\1, 0:3, vgx2\], {z20\.b - z21\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p2_z20_z0, svuint8x2_t, svuint8_t,
+ svmla_single_za32_u8_vg4x2 (w8 + 2, z20, z0),
+ svmla_za32_vg4x2 (w8 + 2, z20, z0))
+
+/*
+** mla_single_w11p4_z27_z0:
+** umlall za\.s\[w11, 4:7, vgx2\], {z27\.b - z28\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w11p4_z27_z0, svuint8x2_t, svuint8_t,
+ svmla_single_za32_u8_vg4x2 (w11 + 4, z27, z0),
+ svmla_za32_vg4x2 (w11 + 4, z27, z0))
+
+/*
+** mla_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlall za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p7_z1_z0, svuint8x2_t, svuint8_t,
+ svmla_single_za32_u8_vg4x2 (w8 + 7, z1, z0),
+ svmla_za32_vg4x2 (w8 + 7, z1, z0))
+
+/*
+** mla_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** umlall za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p8_z1_z0, svuint8x2_t, svuint8_t,
+ svmla_single_za32_u8_vg4x2 (w8 + 8, z1, z0),
+ svmla_za32_vg4x2 (w8 + 8, z1, z0))
+
+/*
+** mla_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlall za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0m1_z1_z0, svuint8x2_t, svuint8_t,
+ svmla_single_za32_u8_vg4x2 (w0 - 1, z1, z0),
+ svmla_za32_vg4x2 (w0 - 1, z1, z0))
+
+/*
+** mla_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** umlall za\.s\[w8, 0:3, vgx2\], {z0\.b - z1\.b}, z15\.b
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mla_single_w8_z0_z15, svuint8x2_t, svuint8_t,
+ svmla_single_za32_u8_vg4x2 (w8, z0, z15),
+ svmla_za32_vg4x2 (w8, z0, z15))
+
+/*
+** mla_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** umlall za\.s\[w8, 0:3, vgx2\], {z20\.b - z21\.b}, \1\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z20_z16, svuint8x2_t, svuint8_t,
+ svmla_single_za32_u8_vg4x2 (w8, z20, z16),
+ svmla_za32_vg4x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u8_vg4x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u8_vg4x4.c
new file mode 100644
index 0000000..1dde7e5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za32_u8_vg4x4.c
@@ -0,0 +1,260 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlall za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mla_0_z0_z0, svuint8x4_t,
+ svmla_za32_u8_vg4x4 (0, z0, z0),
+ svmla_za32_vg4x4 (0, z0, z0))
+
+/*
+** mla_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** umlall za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w0_z0_z0, svuint8x4_t,
+ svmla_za32_u8_vg4x4 (w0, z0, z0),
+ svmla_za32_vg4x4 (w0, z0, z0))
+
+/*
+** mla_w8_z0_z4:
+** umlall za\.s\[w8, 0:3, vgx4\], {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z4, svuint8x4_t,
+ svmla_za32_u8_vg4x4 (w8, z0, z4),
+ svmla_za32_vg4x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mla_w8_z0_z18:
+** ...
+** umlall za\.s\[w8, 0:3, vgx4\], {z0\.b - z3\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z18, svuint8x4_t,
+ svmla_za32_u8_vg4x4 (w8, z0, z18),
+ svmla_za32_vg4x4 (w8, z0, z18))
+
+/*
+** mla_w8_z18_z0:
+** ...
+** umlall za\.s\[w8, 0:3, vgx4\], [^\n]+, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z18_z0, svuint8x4_t,
+ svmla_za32_u8_vg4x4 (w8, z18, z0),
+ svmla_za32_vg4x4 (w8, z18, z0))
+
+/*
+** mla_w8_z0_z23:
+** ...
+** umlall za\.s\[w8, 0:3, vgx4\], {z0\.b - z3\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z23, svuint8x4_t,
+ svmla_za32_u8_vg4x4 (w8, z0, z23),
+ svmla_za32_vg4x4 (w8, z0, z23))
+
+/*
+** mla_w8_z23_z0:
+** ...
+** umlall za\.s\[w8, 0:3, vgx4\], [^\n]+, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z23_z0, svuint8x4_t,
+ svmla_za32_u8_vg4x4 (w8, z23, z0),
+ svmla_za32_vg4x4 (w8, z23, z0))
+
+/*
+** mla_w8_z4_z28:
+** umlall za\.s\[w8, 0:3, vgx4\], {z4\.b - z7\.b}, {z28\.b - z31\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z4_z28, svuint8x4_t,
+ svmla_za32_u8_vg4x4 (w8, z4, z28),
+ svmla_za32_vg4x4 (w8, z4, z28))
+
+/*
+** mla_w8_z28_z0:
+** umlall za\.s\[w8, 0:3, vgx4\], {z28\.b - z31\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z28_z0, svuint8x4_t,
+ svmla_za32_u8_vg4x4 (w8, z28, z0),
+ svmla_za32_vg4x4 (w8, z28, z0))
+
+/*
+** mla_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlall za\.s\[\1, 0:3, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8p1_z4_z0, svuint8x4_t,
+ svmla_za32_u8_vg4x4 (w8 + 1, z4, z0),
+ svmla_za32_vg4x4 (w8 + 1, z4, z0))
+
+/*
+** mla_w8p2_z4_z0:
+** add (w8|w9|w10|w11), w8, #?2
+** umlall za\.s\[\1, 0:3, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8p2_z4_z0, svuint8x4_t,
+ svmla_za32_u8_vg4x4 (w8 + 2, z4, z0),
+ svmla_za32_vg4x4 (w8 + 2, z4, z0))
+
+/*
+** mla_w11p4_z4_z0:
+** umlall za\.s\[w11, 4:7, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w11p4_z4_z0, svuint8x4_t,
+ svmla_za32_u8_vg4x4 (w11 + 4, z4, z0),
+ svmla_za32_vg4x4 (w11 + 4, z4, z0))
+
+/*
+** mla_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlall za\.s\[\1, 0:3, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8p7_z4_z0, svuint8x4_t,
+ svmla_za32_u8_vg4x4 (w8 + 7, z4, z0),
+ svmla_za32_vg4x4 (w8 + 7, z4, z0))
+
+/*
+** mla_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** umlall za\.s\[\1, 0:3, vgx4\], {z4\.b - z7\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8p8_z4_z4, svuint8x4_t,
+ svmla_za32_u8_vg4x4 (w8 + 8, z4, z4),
+ svmla_za32_vg4x4 (w8 + 8, z4, z4))
+
+/*
+** mla_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlall za\.s\[\1, 0:3, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mla_w8m1_z4_z0, svuint8x4_t,
+ svmla_za32_u8_vg4x4 (w8 - 1, z4, z0),
+ svmla_za32_vg4x4 (w8 - 1, z4, z0))
+
+/*
+** mla_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlall za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_0_z1_z0, svuint8x4_t, svuint8_t,
+ svmla_single_za32_u8_vg4x4 (0, z1, z0),
+ svmla_za32_vg4x4 (0, z1, z0))
+
+/*
+** mla_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** umlall za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0_z1_z0, svuint8x4_t, svuint8_t,
+ svmla_single_za32_u8_vg4x4 (w0, z1, z0),
+ svmla_za32_vg4x4 (w0, z1, z0))
+
+/*
+** mla_single_w8_z1_z0:
+** umlall za\.s\[w8, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z1_z0, svuint8x4_t, svuint8_t,
+ svmla_single_za32_u8_vg4x4 (w8, z1, z0),
+ svmla_za32_vg4x4 (w8, z1, z0))
+
+/*
+** mla_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlall za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p1_z1_z0, svuint8x4_t, svuint8_t,
+ svmla_single_za32_u8_vg4x4 (w8 + 1, z1, z0),
+ svmla_za32_vg4x4 (w8 + 1, z1, z0))
+
+/*
+** mla_single_w8p4_z20_z0:
+** umlall za\.s\[w8, 4:7, vgx4\], {z20\.b - z23\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p4_z20_z0, svuint8x4_t, svuint8_t,
+ svmla_single_za32_u8_vg4x4 (w8 + 4, z20, z0),
+ svmla_za32_vg4x4 (w8 + 4, z20, z0))
+
+/*
+** mla_single_w8p6_z27_z0:
+** add (w8|w9|w10|w11), w8, #?6
+** umlall za\.s\[\1, 0:3, vgx4\], {z27\.b - z30\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p6_z27_z0, svuint8x4_t, svuint8_t,
+ svmla_single_za32_u8_vg4x4 (w8 + 6, z27, z0),
+ svmla_za32_vg4x4 (w8 + 6, z27, z0))
+
+/*
+** mla_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlall za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p7_z1_z0, svuint8x4_t, svuint8_t,
+ svmla_single_za32_u8_vg4x4 (w8 + 7, z1, z0),
+ svmla_za32_vg4x4 (w8 + 7, z1, z0))
+
+/*
+** mla_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** umlall za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p8_z1_z0, svuint8x4_t, svuint8_t,
+ svmla_single_za32_u8_vg4x4 (w8 + 8, z1, z0),
+ svmla_za32_vg4x4 (w8 + 8, z1, z0))
+
+/*
+** mla_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlall za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0m1_z1_z0, svuint8x4_t, svuint8_t,
+ svmla_single_za32_u8_vg4x4 (w0 - 1, z1, z0),
+ svmla_za32_vg4x4 (w0 - 1, z1, z0))
+
+/*
+** mla_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** umlall za\.s\[w8, 0:3, vgx4\], {z0\.b - z3\.b}, z15\.b
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mla_single_w8_z0_z15, svuint8x4_t, svuint8_t,
+ svmla_single_za32_u8_vg4x4 (w8, z0, z15),
+ svmla_za32_vg4x4 (w8, z0, z15))
+
+/*
+** mla_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** umlall za\.s\[w8, 0:3, vgx4\], {z20\.b - z23\.b}, \1\.b
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z20_z16, svuint8x4_t, svuint8_t,
+ svmla_single_za32_u8_vg4x4 (w8, z20, z16),
+ svmla_za32_vg4x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_f64_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_f64_vg1x2.c
new file mode 100644
index 0000000..be8b0a7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_f64_vg1x2.c
@@ -0,0 +1,182 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-f64f64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmla za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (mla_0_z0_z0, svfloat64x2_t,
+ svmla_za64_f64_vg1x2 (0, z0, z0),
+ svmla_za64_vg1x2 (0, z0, z0))
+
+/*
+** mla_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** fmla za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (mla_w0_z0_z0, svfloat64x2_t,
+ svmla_za64_f64_vg1x2 (w0, z0, z0),
+ svmla_za64_vg1x2 (w0, z0, z0))
+
+/*
+** mla_w8_z0_z4:
+** fmla za\.d\[w8, 0, vgx2\], {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z4, svfloat64x2_t,
+ svmla_za64_f64_vg1x2 (w8, z0, z4),
+ svmla_za64_vg1x2 (w8, z0, z4))
+
+/*
+** mla_w8_z4_z18:
+** fmla za\.d\[w8, 0, vgx2\], {z4\.d - z5\.d}, {z18\.d - z19\.d}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z4_z18, svfloat64x2_t,
+ svmla_za64_f64_vg1x2 (w8, z4, z18),
+ svmla_za64_vg1x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mla_w8_z23_z0:
+** ...
+** fmla za\.d\[w8, 0, vgx2\], [^\n]+, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z23_z0, svfloat64x2_t,
+ svmla_za64_f64_vg1x2 (w8, z23, z0),
+ svmla_za64_vg1x2 (w8, z23, z0))
+
+/*
+** mla_w8_z18_z23:
+** ...
+** fmla za\.d\[w8, 0, vgx2\], {z18\.d - z19\.d}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z18_z23, svfloat64x2_t,
+ svmla_za64_f64_vg1x2 (w8, z18, z23),
+ svmla_za64_vg1x2 (w8, z18, z23))
+
+/*
+** mla_w8_z4_z28:
+** fmla za\.d\[w8, 0, vgx2\], {z4\.d - z5\.d}, {z28\.d - z29\.d}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z4_z28, svfloat64x2_t,
+ svmla_za64_f64_vg1x2 (w8, z4, z28),
+ svmla_za64_vg1x2 (w8, z4, z28))
+
+/*
+** mla_w8p7_z4_z0:
+** fmla za\.d\[w8, 7, vgx2\], {z4\.d - z5\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (mla_w8p7_z4_z0, svfloat64x2_t,
+ svmla_za64_f64_vg1x2 (w8 + 7, z4, z0),
+ svmla_za64_vg1x2 (w8 + 7, z4, z0))
+
+/*
+** mla_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** fmla za\.d\[\1, 0, vgx2\], {z4\.d - z5\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_ZA_XN (mla_w8p8_z4_z4, svfloat64x2_t,
+ svmla_za64_f64_vg1x2 (w8 + 8, z4, z4),
+ svmla_za64_vg1x2 (w8 + 8, z4, z4))
+
+/*
+** mla_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fmla za\.d\[\1, 0, vgx2\], {z4\.d - z5\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (mla_w8m1_z4_z0, svfloat64x2_t,
+ svmla_za64_f64_vg1x2 (w8 - 1, z4, z0),
+ svmla_za64_vg1x2 (w8 - 1, z4, z0))
+
+/*
+** mla_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmla za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_0_z1_z0, svfloat64x2_t, svfloat64_t,
+ svmla_single_za64_f64_vg1x2 (0, z1, z0),
+ svmla_za64_vg1x2 (0, z1, z0))
+
+/*
+** mla_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** fmla za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0_z1_z0, svfloat64x2_t, svfloat64_t,
+ svmla_single_za64_f64_vg1x2 (w0, z1, z0),
+ svmla_za64_vg1x2 (w0, z1, z0))
+
+/*
+** mla_single_w8_z1_z0:
+** fmla za\.d\[w8, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z1_z0, svfloat64x2_t, svfloat64_t,
+ svmla_single_za64_f64_vg1x2 (w8, z1, z0),
+ svmla_za64_vg1x2 (w8, z1, z0))
+
+/*
+** mla_single_w8p7_z1_z0:
+** fmla za\.d\[w8, 7, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p7_z1_z0, svfloat64x2_t, svfloat64_t,
+ svmla_single_za64_f64_vg1x2 (w8 + 7, z1, z0),
+ svmla_za64_vg1x2 (w8 + 7, z1, z0))
+
+/*
+** mla_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** fmla za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p8_z1_z0, svfloat64x2_t, svfloat64_t,
+ svmla_single_za64_f64_vg1x2 (w8 + 8, z1, z0),
+ svmla_za64_vg1x2 (w8 + 8, z1, z0))
+
+/*
+** mla_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmla za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0m1_z1_z0, svfloat64x2_t, svfloat64_t,
+ svmla_single_za64_f64_vg1x2 (w0 - 1, z1, z0),
+ svmla_za64_vg1x2 (w0 - 1, z1, z0))
+
+/*
+** mla_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** fmla za\.d\[w8, 0, vgx2\], {z0\.d - z1\.d}, z15\.d
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mla_single_w8_z0_z15, svfloat64x2_t, svfloat64_t,
+ svmla_single_za64_f64_vg1x2 (w8, z0, z15),
+ svmla_za64_vg1x2 (w8, z0, z15))
+
+/*
+** mla_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** fmla za\.d\[w8, 0, vgx2\], {z20\.d - z21\.d}, \1\.d
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z20_z16, svfloat64x2_t, svfloat64_t,
+ svmla_single_za64_f64_vg1x2 (w8, z20, z16),
+ svmla_za64_vg1x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_f64_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_f64_vg1x4.c
new file mode 100644
index 0000000..49621e6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_f64_vg1x4.c
@@ -0,0 +1,174 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-f64f64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmla za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (mla_0_z0_z0, svfloat64x4_t,
+ svmla_za64_f64_vg1x4 (0, z0, z0),
+ svmla_za64_vg1x4 (0, z0, z0))
+
+/*
+** mla_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** fmla za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (mla_w0_z0_z0, svfloat64x4_t,
+ svmla_za64_f64_vg1x4 (w0, z0, z0),
+ svmla_za64_vg1x4 (w0, z0, z0))
+
+/*
+** mla_w8_z0_z4:
+** fmla za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z4, svfloat64x4_t,
+ svmla_za64_f64_vg1x4 (w8, z0, z4),
+ svmla_za64_vg1x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mla_w8_z0_z18:
+** ...
+** fmla za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z18, svfloat64x4_t,
+ svmla_za64_f64_vg1x4 (w8, z0, z18),
+ svmla_za64_vg1x4 (w8, z0, z18))
+
+/*
+** mla_w8_z18_z28:
+** ...
+** fmla za\.d\[w8, 0, vgx4\], [^\n]+, {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z18_z28, svfloat64x4_t,
+ svmla_za64_f64_vg1x4 (w8, z18, z28),
+ svmla_za64_vg1x4 (w8, z18, z28))
+
+/*
+** mla_w8_z28_z23:
+** ...
+** fmla za\.d\[w8, 0, vgx4\], {z28\.d - z31\.d}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z28_z23, svfloat64x4_t,
+ svmla_za64_f64_vg1x4 (w8, z28, z23),
+ svmla_za64_vg1x4 (w8, z28, z23))
+
+/*
+** mla_w8p7_z4_z0:
+** fmla za\.d\[w8, 7, vgx4\], {z4\.d - z7\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (mla_w8p7_z4_z0, svfloat64x4_t,
+ svmla_za64_f64_vg1x4 (w8 + 7, z4, z0),
+ svmla_za64_vg1x4 (w8 + 7, z4, z0))
+
+/*
+** mla_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** fmla za\.d\[\1, 0, vgx4\], {z4\.d - z7\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (mla_w8p8_z4_z4, svfloat64x4_t,
+ svmla_za64_f64_vg1x4 (w8 + 8, z4, z4),
+ svmla_za64_vg1x4 (w8 + 8, z4, z4))
+
+/*
+** mla_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fmla za\.d\[\1, 0, vgx4\], {z4\.d - z7\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (mla_w8m1_z4_z0, svfloat64x4_t,
+ svmla_za64_f64_vg1x4 (w8 - 1, z4, z0),
+ svmla_za64_vg1x4 (w8 - 1, z4, z0))
+
+/*
+** mla_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmla za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_0_z1_z0, svfloat64x4_t, svfloat64_t,
+ svmla_single_za64_f64_vg1x4 (0, z1, z0),
+ svmla_za64_vg1x4 (0, z1, z0))
+
+/*
+** mla_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** fmla za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0_z1_z0, svfloat64x4_t, svfloat64_t,
+ svmla_single_za64_f64_vg1x4 (w0, z1, z0),
+ svmla_za64_vg1x4 (w0, z1, z0))
+
+/*
+** mla_single_w8_z1_z0:
+** fmla za\.d\[w8, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z1_z0, svfloat64x4_t, svfloat64_t,
+ svmla_single_za64_f64_vg1x4 (w8, z1, z0),
+ svmla_za64_vg1x4 (w8, z1, z0))
+
+/*
+** mla_single_w8p7_z1_z0:
+** fmla za\.d\[w8, 7, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p7_z1_z0, svfloat64x4_t, svfloat64_t,
+ svmla_single_za64_f64_vg1x4 (w8 + 7, z1, z0),
+ svmla_za64_vg1x4 (w8 + 7, z1, z0))
+
+/*
+** mla_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** fmla za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p8_z1_z0, svfloat64x4_t, svfloat64_t,
+ svmla_single_za64_f64_vg1x4 (w8 + 8, z1, z0),
+ svmla_za64_vg1x4 (w8 + 8, z1, z0))
+
+/*
+** mla_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmla za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0m1_z1_z0, svfloat64x4_t, svfloat64_t,
+ svmla_single_za64_f64_vg1x4 (w0 - 1, z1, z0),
+ svmla_za64_vg1x4 (w0 - 1, z1, z0))
+
+/*
+** mla_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** fmla za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}, z15\.d
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mla_single_w8_z0_z15, svfloat64x4_t, svfloat64_t,
+ svmla_single_za64_f64_vg1x4 (w8, z0, z15),
+ svmla_za64_vg1x4 (w8, z0, z15))
+
+/*
+** mla_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** fmla za\.d\[w8, 0, vgx4\], {z20\.d - z23\.d}, \1\.d
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z20_z16, svfloat64x4_t, svfloat64_t,
+ svmla_single_za64_f64_vg1x4 (w8, z20, z16),
+ svmla_za64_vg1x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_s16_vg4x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_s16_vg4x1.c
new file mode 100644
index 0000000..92cad0c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_s16_vg4x1.c
@@ -0,0 +1,151 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlall za\.d\[\1, 0:3\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_0_z0_z0, svint16_t,
+ svmla_za64_s16_vg4x1 (0, z0, z0),
+ svmla_za64_vg4x1 (0, z0, z0))
+
+/*
+** mla_w0_z0_z3:
+** mov (w8|w9|w10|w11), w0
+** smlall za\.d\[\1, 0:3\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w0_z0_z3, svint16_t,
+ svmla_za64_s16_vg4x1 (w0, z0, z3),
+ svmla_za64_vg4x1 (w0, z0, z3))
+
+/*
+** mla_w7_z0_z3:
+** mov (w8|w9|w10|w11), w7
+** smlall za\.d\[\1, 0:3\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w7_z0_z3, svint16_t,
+ svmla_za64_s16_vg4x1 (w7, z0, z3),
+ svmla_za64_vg4x1 (w7, z0, z3))
+
+/*
+** mla_w8_z7_z3:
+** smlall za\.d\[w8, 0:3\], z7\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8_z7_z3, svint16_t,
+ svmla_za64_s16_vg4x1 (w8, z7, z3),
+ svmla_za64_vg4x1 (w8, z7, z3))
+
+/*
+** mla_w8_z31_z16:
+** mov (z[0-7])\.d, z16\.d
+**	smlall	za\.d\[w8, 0:3\], z31\.h, \1\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8_z31_z16, svint16_t,
+ svmla_za64_s16_vg4x1 (w8, z31, z16),
+ svmla_za64_vg4x1 (w8, z31, z16))
+
+/*
+** mla_w8p1_z0_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlall za\.d\[\1, 0:3\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p1_z0_z0, svint16_t,
+ svmla_za64_s16_vg4x1 (w8 + 1, z0, z0),
+ svmla_za64_vg4x1 (w8 + 1, z0, z0))
+
+/*
+** mla_w10p4_z23_z0:
+** smlall za\.d\[w10, 4:7\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w10p4_z23_z0, svint16_t,
+ svmla_za64_s16_vg4x1 (w10 + 4, z23, z0),
+ svmla_za64_vg4x1 (w10 + 4, z23, z0))
+
+/*
+** mla_w11p6_z23_z0:
+** add (w8|w9|w10|w11), w11, #?6
+** smlall za\.d\[\1, 0:3\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w11p6_z23_z0, svint16_t,
+ svmla_za64_s16_vg4x1 (w11 + 6, z23, z0),
+ svmla_za64_vg4x1 (w11 + 6, z23, z0))
+
+/*
+** mla_w9p8_z7_z7:
+** smlall za\.d\[w9, 8:11\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w9p8_z7_z7, svint16_t,
+ svmla_za64_s16_vg4x1 (w9 + 8, z7, z7),
+ svmla_za64_vg4x1 (w9 + 8, z7, z7))
+
+/*
+** mla_w11p12_z23_z0:
+** smlall za\.d\[w11, 12:15\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w11p12_z23_z0, svint16_t,
+ svmla_za64_s16_vg4x1 (w11 + 12, z23, z0),
+ svmla_za64_vg4x1 (w11 + 12, z23, z0))
+
+/*
+** mla_w8p14_z23_z0:
+** add (w8|w9|w10|w11), w8, #?14
+** smlall za\.d\[\1, 0:3\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p14_z23_z0, svint16_t,
+ svmla_za64_s16_vg4x1 (w8 + 14, z23, z0),
+ svmla_za64_vg4x1 (w8 + 14, z23, z0))
+
+/*
+** mla_w8p15_z7_z7:
+** add (w8|w9|w10|w11), w8, #?15
+** smlall za\.d\[\1, 0:3\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p15_z7_z7, svint16_t,
+ svmla_za64_s16_vg4x1 (w8 + 15, z7, z7),
+ svmla_za64_vg4x1 (w8 + 15, z7, z7))
+
+/*
+** mla_w8p16_z7_z7:
+** add (w8|w9|w10|w11), w8, #?16
+** smlall za\.d\[\1, 0:3\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p16_z7_z7, svint16_t,
+ svmla_za64_s16_vg4x1 (w8 + 16, z7, z7),
+ svmla_za64_vg4x1 (w8 + 16, z7, z7))
+
+/*
+** mla_w8m1_z16_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlall za\.d\[\1, 0:3\], z16\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8m1_z16_z0, svint16_t,
+ svmla_za64_s16_vg4x1 (w8 - 1, z16, z0),
+ svmla_za64_vg4x1 (w8 - 1, z16, z0))
+
+/*
+** mla_w12_z0_z3:
+** mov (w8|w9|w10|w11), w12
+** smlall za\.d\[\1, 0:3\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w12_z0_z3, svint16_t,
+ svmla_za64_s16_vg4x1 (w12, z0, z3),
+ svmla_za64_vg4x1 (w12, z0, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_s16_vg4x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_s16_vg4x2.c
new file mode 100644
index 0000000..2299cf8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_s16_vg4x2.c
@@ -0,0 +1,251 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlall za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_0_z0_z0, svint16x2_t,
+ svmla_za64_s16_vg4x2 (0, z0, z0),
+ svmla_za64_vg4x2 (0, z0, z0))
+
+/*
+** mla_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** smlall za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w0_z0_z0, svint16x2_t,
+ svmla_za64_s16_vg4x2 (w0, z0, z0),
+ svmla_za64_vg4x2 (w0, z0, z0))
+
+/*
+** mla_w8_z0_z4:
+** smlall za\.d\[w8, 0:3, vgx2\], {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z4, svint16x2_t,
+ svmla_za64_s16_vg4x2 (w8, z0, z4),
+ svmla_za64_vg4x2 (w8, z0, z4))
+
+/*
+** mla_w8_z4_z18:
+** smlall za\.d\[w8, 0:3, vgx2\], {z4\.h - z5\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z4_z18, svint16x2_t,
+ svmla_za64_s16_vg4x2 (w8, z4, z18),
+ svmla_za64_vg4x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mla_w8_z0_z23:
+** ...
+** smlall za\.d\[w8, 0:3, vgx2\], {z0\.h - z1\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z23, svint16x2_t,
+ svmla_za64_s16_vg4x2 (w8, z0, z23),
+ svmla_za64_vg4x2 (w8, z0, z23))
+
+/*
+** mla_w8_z23_z0:
+** ...
+** smlall za\.d\[w8, 0:3, vgx2\], [^\n]+, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z23_z0, svint16x2_t,
+ svmla_za64_s16_vg4x2 (w8, z23, z0),
+ svmla_za64_vg4x2 (w8, z23, z0))
+
+/*
+** mla_w8_z18_z28:
+** smlall za\.d\[w8, 0:3, vgx2\], {z18\.h - z19\.h}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z18_z28, svint16x2_t,
+ svmla_za64_s16_vg4x2 (w8, z18, z28),
+ svmla_za64_vg4x2 (w8, z18, z28))
+
+/*
+** mla_w8_z28_z4:
+** smlall za\.d\[w8, 0:3, vgx2\], {z28\.h - z29\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z28_z4, svint16x2_t,
+ svmla_za64_s16_vg4x2 (w8, z28, z4),
+ svmla_za64_vg4x2 (w8, z28, z4))
+
+/*
+** mla_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlall za\.d\[\1, 0:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p1_z4_z0, svint16x2_t,
+ svmla_za64_s16_vg4x2 (w8 + 1, z4, z0),
+ svmla_za64_vg4x2 (w8 + 1, z4, z0))
+
+/*
+** mla_w8p2_z4_z0:
+** add (w8|w9|w10|w11), w8, #?2
+**	smlall	za\.d\[\1, 0:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p2_z4_z0, svint16x2_t,
+ svmla_za64_s16_vg4x2 (w8 + 2, z4, z0),
+ svmla_za64_vg4x2 (w8 + 2, z4, z0))
+
+/*
+** mla_w11p4_z4_z0:
+** smlall za\.d\[w11, 4:7, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w11p4_z4_z0, svint16x2_t,
+ svmla_za64_s16_vg4x2 (w11 + 4, z4, z0),
+ svmla_za64_vg4x2 (w11 + 4, z4, z0))
+
+/*
+** mla_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlall za\.d\[\1, 0:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p7_z4_z0, svint16x2_t,
+ svmla_za64_s16_vg4x2 (w8 + 7, z4, z0),
+ svmla_za64_vg4x2 (w8 + 7, z4, z0))
+
+/*
+** mla_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** smlall za\.d\[\1, 0:3, vgx2\], {z4\.h - z5\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p8_z4_z4, svint16x2_t,
+ svmla_za64_s16_vg4x2 (w8 + 8, z4, z4),
+ svmla_za64_vg4x2 (w8 + 8, z4, z4))
+
+/*
+** mla_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlall za\.d\[\1, 0:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8m1_z4_z0, svint16x2_t,
+ svmla_za64_s16_vg4x2 (w8 - 1, z4, z0),
+ svmla_za64_vg4x2 (w8 - 1, z4, z0))
+
+/*
+** mla_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlall za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_0_z1_z0, svint16x2_t, svint16_t,
+ svmla_single_za64_s16_vg4x2 (0, z1, z0),
+ svmla_za64_vg4x2 (0, z1, z0))
+
+/*
+** mla_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** smlall za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0_z1_z0, svint16x2_t, svint16_t,
+ svmla_single_za64_s16_vg4x2 (w0, z1, z0),
+ svmla_za64_vg4x2 (w0, z1, z0))
+
+/*
+** mla_single_w8_z1_z0:
+** smlall za\.d\[w8, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z1_z0, svint16x2_t, svint16_t,
+ svmla_single_za64_s16_vg4x2 (w8, z1, z0),
+ svmla_za64_vg4x2 (w8, z1, z0))
+
+/*
+** mla_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlall za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p1_z1_z0, svint16x2_t, svint16_t,
+ svmla_single_za64_s16_vg4x2 (w8 + 1, z1, z0),
+ svmla_za64_vg4x2 (w8 + 1, z1, z0))
+
+/*
+** mla_single_w8p2_z20_z0:
+** add (w8|w9|w10|w11), w8, #?2
+** smlall za\.d\[\1, 0:3, vgx2\], {z20\.h - z21\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p2_z20_z0, svint16x2_t, svint16_t,
+ svmla_single_za64_s16_vg4x2 (w8 + 2, z20, z0),
+ svmla_za64_vg4x2 (w8 + 2, z20, z0))
+
+/*
+** mla_single_w11p4_z27_z0:
+** smlall za\.d\[w11, 4:7, vgx2\], {z27\.h - z28\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w11p4_z27_z0, svint16x2_t, svint16_t,
+ svmla_single_za64_s16_vg4x2 (w11 + 4, z27, z0),
+ svmla_za64_vg4x2 (w11 + 4, z27, z0))
+
+/*
+** mla_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlall za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p7_z1_z0, svint16x2_t, svint16_t,
+ svmla_single_za64_s16_vg4x2 (w8 + 7, z1, z0),
+ svmla_za64_vg4x2 (w8 + 7, z1, z0))
+
+/*
+** mla_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** smlall za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p8_z1_z0, svint16x2_t, svint16_t,
+ svmla_single_za64_s16_vg4x2 (w8 + 8, z1, z0),
+ svmla_za64_vg4x2 (w8 + 8, z1, z0))
+
+/*
+** mla_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlall za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0m1_z1_z0, svint16x2_t, svint16_t,
+ svmla_single_za64_s16_vg4x2 (w0 - 1, z1, z0),
+ svmla_za64_vg4x2 (w0 - 1, z1, z0))
+
+/*
+** mla_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** smlall za\.d\[w8, 0:3, vgx2\], {z0\.h - z1\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mla_single_w8_z0_z15, svint16x2_t, svint16_t,
+ svmla_single_za64_s16_vg4x2 (w8, z0, z15),
+ svmla_za64_vg4x2 (w8, z0, z15))
+
+/*
+** mla_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** smlall za\.d\[w8, 0:3, vgx2\], {z20\.h - z21\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z20_z16, svint16x2_t, svint16_t,
+ svmla_single_za64_s16_vg4x2 (w8, z20, z16),
+ svmla_za64_vg4x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_s16_vg4x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_s16_vg4x4.c
new file mode 100644
index 0000000..3a1780a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_s16_vg4x4.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlall za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_0_z0_z0, svint16x4_t,
+ svmla_za64_s16_vg4x4 (0, z0, z0),
+ svmla_za64_vg4x4 (0, z0, z0))
+
+/*
+** mla_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** smlall za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w0_z0_z0, svint16x4_t,
+ svmla_za64_s16_vg4x4 (w0, z0, z0),
+ svmla_za64_vg4x4 (w0, z0, z0))
+
+/*
+** mla_w8_z0_z4:
+** smlall za\.d\[w8, 0:3, vgx4\], {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z4, svint16x4_t,
+ svmla_za64_s16_vg4x4 (w8, z0, z4),
+ svmla_za64_vg4x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mla_w8_z0_z18:
+** ...
+** smlall za\.d\[w8, 0:3, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z18, svint16x4_t,
+ svmla_za64_s16_vg4x4 (w8, z0, z18),
+ svmla_za64_vg4x4 (w8, z0, z18))
+
+/*
+** mla_w8_z18_z0:
+** ...
+** smlall za\.d\[w8, 0:3, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z18_z0, svint16x4_t,
+ svmla_za64_s16_vg4x4 (w8, z18, z0),
+ svmla_za64_vg4x4 (w8, z18, z0))
+
+/*
+** mla_w8_z0_z23:
+** ...
+** smlall za\.d\[w8, 0:3, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z23, svint16x4_t,
+ svmla_za64_s16_vg4x4 (w8, z0, z23),
+ svmla_za64_vg4x4 (w8, z0, z23))
+
+/*
+** mla_w8_z23_z0:
+** ...
+** smlall za\.d\[w8, 0:3, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z23_z0, svint16x4_t,
+ svmla_za64_s16_vg4x4 (w8, z23, z0),
+ svmla_za64_vg4x4 (w8, z23, z0))
+
+/*
+** mla_w8_z4_z28:
+** smlall za\.d\[w8, 0:3, vgx4\], {z4\.h - z7\.h}, {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z4_z28, svint16x4_t,
+ svmla_za64_s16_vg4x4 (w8, z4, z28),
+ svmla_za64_vg4x4 (w8, z4, z28))
+
+/*
+** mla_w8_z28_z0:
+** smlall za\.d\[w8, 0:3, vgx4\], {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z28_z0, svint16x4_t,
+ svmla_za64_s16_vg4x4 (w8, z28, z0),
+ svmla_za64_vg4x4 (w8, z28, z0))
+
+/*
+** mla_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlall za\.d\[\1, 0:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p1_z4_z0, svint16x4_t,
+ svmla_za64_s16_vg4x4 (w8 + 1, z4, z0),
+ svmla_za64_vg4x4 (w8 + 1, z4, z0))
+
+/*
+** mla_w8p2_z4_z0:
+** add (w8|w9|w10|w11), w8, #?2
+** smlall za\.d\[\1, 0:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p2_z4_z0, svint16x4_t,
+ svmla_za64_s16_vg4x4 (w8 + 2, z4, z0),
+ svmla_za64_vg4x4 (w8 + 2, z4, z0))
+
+/*
+** mla_w11p4_z4_z0:
+** smlall za\.d\[w11, 4:7, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w11p4_z4_z0, svint16x4_t,
+ svmla_za64_s16_vg4x4 (w11 + 4, z4, z0),
+ svmla_za64_vg4x4 (w11 + 4, z4, z0))
+
+/*
+** mla_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlall za\.d\[\1, 0:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p7_z4_z0, svint16x4_t,
+ svmla_za64_s16_vg4x4 (w8 + 7, z4, z0),
+ svmla_za64_vg4x4 (w8 + 7, z4, z0))
+
+/*
+** mla_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** smlall za\.d\[\1, 0:3, vgx4\], {z4\.h - z7\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p8_z4_z4, svint16x4_t,
+ svmla_za64_s16_vg4x4 (w8 + 8, z4, z4),
+ svmla_za64_vg4x4 (w8 + 8, z4, z4))
+
+/*
+** mla_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlall za\.d\[\1, 0:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8m1_z4_z0, svint16x4_t,
+ svmla_za64_s16_vg4x4 (w8 - 1, z4, z0),
+ svmla_za64_vg4x4 (w8 - 1, z4, z0))
+
+/*
+** mla_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlall za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_0_z1_z0, svint16x4_t, svint16_t,
+ svmla_single_za64_s16_vg4x4 (0, z1, z0),
+ svmla_za64_vg4x4 (0, z1, z0))
+
+/*
+** mla_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** smlall za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0_z1_z0, svint16x4_t, svint16_t,
+ svmla_single_za64_s16_vg4x4 (w0, z1, z0),
+ svmla_za64_vg4x4 (w0, z1, z0))
+
+/*
+** mla_single_w8_z1_z0:
+** smlall za\.d\[w8, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z1_z0, svint16x4_t, svint16_t,
+ svmla_single_za64_s16_vg4x4 (w8, z1, z0),
+ svmla_za64_vg4x4 (w8, z1, z0))
+
+/*
+** mla_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlall za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p1_z1_z0, svint16x4_t, svint16_t,
+ svmla_single_za64_s16_vg4x4 (w8 + 1, z1, z0),
+ svmla_za64_vg4x4 (w8 + 1, z1, z0))
+
+/*
+** mla_single_w8p4_z20_z0:
+** smlall za\.d\[w8, 4:7, vgx4\], {z20\.h - z23\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p4_z20_z0, svint16x4_t, svint16_t,
+ svmla_single_za64_s16_vg4x4 (w8 + 4, z20, z0),
+ svmla_za64_vg4x4 (w8 + 4, z20, z0))
+
+/*
+** mla_single_w8p6_z27_z0:
+** add (w8|w9|w10|w11), w8, #?6
+** smlall za\.d\[\1, 0:3, vgx4\], {z27\.h - z30\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p6_z27_z0, svint16x4_t, svint16_t,
+ svmla_single_za64_s16_vg4x4 (w8 + 6, z27, z0),
+ svmla_za64_vg4x4 (w8 + 6, z27, z0))
+
+/*
+** mla_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlall za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p7_z1_z0, svint16x4_t, svint16_t,
+ svmla_single_za64_s16_vg4x4 (w8 + 7, z1, z0),
+ svmla_za64_vg4x4 (w8 + 7, z1, z0))
+
+/*
+** mla_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** smlall za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p8_z1_z0, svint16x4_t, svint16_t,
+ svmla_single_za64_s16_vg4x4 (w8 + 8, z1, z0),
+ svmla_za64_vg4x4 (w8 + 8, z1, z0))
+
+/*
+** mla_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlall za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0m1_z1_z0, svint16x4_t, svint16_t,
+ svmla_single_za64_s16_vg4x4 (w0 - 1, z1, z0),
+ svmla_za64_vg4x4 (w0 - 1, z1, z0))
+
+/*
+** mla_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** smlall za\.d\[w8, 0:3, vgx4\], {z0\.h - z3\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mla_single_w8_z0_z15, svint16x4_t, svint16_t,
+ svmla_single_za64_s16_vg4x4 (w8, z0, z15),
+ svmla_za64_vg4x4 (w8, z0, z15))
+
+/*
+** mla_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** smlall za\.d\[w8, 0:3, vgx4\], {z20\.h - z23\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z20_z16, svint16x4_t, svint16_t,
+ svmla_single_za64_s16_vg4x4 (w8, z20, z16),
+ svmla_za64_vg4x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_u16_vg4x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_u16_vg4x1.c
new file mode 100644
index 0000000..cd50db4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_u16_vg4x1.c
@@ -0,0 +1,151 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlall za\.d\[\1, 0:3\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_0_z0_z0, svuint16_t,
+ svmla_za64_u16_vg4x1 (0, z0, z0),
+ svmla_za64_vg4x1 (0, z0, z0))
+
+/*
+** mla_w0_z0_z3:
+** mov (w8|w9|w10|w11), w0
+** umlall za\.d\[\1, 0:3\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w0_z0_z3, svuint16_t,
+ svmla_za64_u16_vg4x1 (w0, z0, z3),
+ svmla_za64_vg4x1 (w0, z0, z3))
+
+/*
+** mla_w7_z0_z3:
+** mov (w8|w9|w10|w11), w7
+** umlall za\.d\[\1, 0:3\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w7_z0_z3, svuint16_t,
+ svmla_za64_u16_vg4x1 (w7, z0, z3),
+ svmla_za64_vg4x1 (w7, z0, z3))
+
+/*
+** mla_w8_z7_z3:
+** umlall za\.d\[w8, 0:3\], z7\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8_z7_z3, svuint16_t,
+ svmla_za64_u16_vg4x1 (w8, z7, z3),
+ svmla_za64_vg4x1 (w8, z7, z3))
+
+/*
+** mla_w8_z31_z16:
+** mov (z[0-7])\.d, z16\.d
+**	umlall	za\.d\[w8, 0:3\], z31\.h, \1\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8_z31_z16, svuint16_t,
+ svmla_za64_u16_vg4x1 (w8, z31, z16),
+ svmla_za64_vg4x1 (w8, z31, z16))
+
+/*
+** mla_w8p1_z0_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlall za\.d\[\1, 0:3\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p1_z0_z0, svuint16_t,
+ svmla_za64_u16_vg4x1 (w8 + 1, z0, z0),
+ svmla_za64_vg4x1 (w8 + 1, z0, z0))
+
+/*
+** mla_w10p4_z23_z0:
+** umlall za\.d\[w10, 4:7\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w10p4_z23_z0, svuint16_t,
+ svmla_za64_u16_vg4x1 (w10 + 4, z23, z0),
+ svmla_za64_vg4x1 (w10 + 4, z23, z0))
+
+/*
+** mla_w11p6_z23_z0:
+** add (w8|w9|w10|w11), w11, #?6
+** umlall za\.d\[\1, 0:3\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w11p6_z23_z0, svuint16_t,
+ svmla_za64_u16_vg4x1 (w11 + 6, z23, z0),
+ svmla_za64_vg4x1 (w11 + 6, z23, z0))
+
+/*
+** mla_w9p8_z7_z7:
+** umlall za\.d\[w9, 8:11\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w9p8_z7_z7, svuint16_t,
+ svmla_za64_u16_vg4x1 (w9 + 8, z7, z7),
+ svmla_za64_vg4x1 (w9 + 8, z7, z7))
+
+/*
+** mla_w11p12_z23_z0:
+** umlall za\.d\[w11, 12:15\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w11p12_z23_z0, svuint16_t,
+ svmla_za64_u16_vg4x1 (w11 + 12, z23, z0),
+ svmla_za64_vg4x1 (w11 + 12, z23, z0))
+
+/*
+** mla_w8p14_z23_z0:
+** add (w8|w9|w10|w11), w8, #?14
+** umlall za\.d\[\1, 0:3\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p14_z23_z0, svuint16_t,
+ svmla_za64_u16_vg4x1 (w8 + 14, z23, z0),
+ svmla_za64_vg4x1 (w8 + 14, z23, z0))
+
+/*
+** mla_w8p15_z7_z7:
+** add (w8|w9|w10|w11), w8, #?15
+** umlall za\.d\[\1, 0:3\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p15_z7_z7, svuint16_t,
+ svmla_za64_u16_vg4x1 (w8 + 15, z7, z7),
+ svmla_za64_vg4x1 (w8 + 15, z7, z7))
+
+/*
+** mla_w8p16_z7_z7:
+** add (w8|w9|w10|w11), w8, #?16
+** umlall za\.d\[\1, 0:3\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8p16_z7_z7, svuint16_t,
+ svmla_za64_u16_vg4x1 (w8 + 16, z7, z7),
+ svmla_za64_vg4x1 (w8 + 16, z7, z7))
+
+/*
+** mla_w8m1_z16_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlall za\.d\[\1, 0:3\], z16\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w8m1_z16_z0, svuint16_t,
+ svmla_za64_u16_vg4x1 (w8 - 1, z16, z0),
+ svmla_za64_vg4x1 (w8 - 1, z16, z0))
+
+/*
+** mla_w12_z0_z3:
+** mov (w8|w9|w10|w11), w12
+** umlall za\.d\[\1, 0:3\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mla_w12_z0_z3, svuint16_t,
+ svmla_za64_u16_vg4x1 (w12, z0, z3),
+ svmla_za64_vg4x1 (w12, z0, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_u16_vg4x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_u16_vg4x2.c
new file mode 100644
index 0000000..680ef56
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_u16_vg4x2.c
@@ -0,0 +1,251 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlall za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_0_z0_z0, svuint16x2_t,
+ svmla_za64_u16_vg4x2 (0, z0, z0),
+ svmla_za64_vg4x2 (0, z0, z0))
+
+/*
+** mla_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** umlall za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w0_z0_z0, svuint16x2_t,
+ svmla_za64_u16_vg4x2 (w0, z0, z0),
+ svmla_za64_vg4x2 (w0, z0, z0))
+
+/*
+** mla_w8_z0_z4:
+** umlall za\.d\[w8, 0:3, vgx2\], {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z4, svuint16x2_t,
+ svmla_za64_u16_vg4x2 (w8, z0, z4),
+ svmla_za64_vg4x2 (w8, z0, z4))
+
+/*
+** mla_w8_z4_z18:
+** umlall za\.d\[w8, 0:3, vgx2\], {z4\.h - z5\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z4_z18, svuint16x2_t,
+ svmla_za64_u16_vg4x2 (w8, z4, z18),
+ svmla_za64_vg4x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mla_w8_z0_z23:
+** ...
+** umlall za\.d\[w8, 0:3, vgx2\], {z0\.h - z1\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z23, svuint16x2_t,
+ svmla_za64_u16_vg4x2 (w8, z0, z23),
+ svmla_za64_vg4x2 (w8, z0, z23))
+
+/*
+** mla_w8_z23_z0:
+** ...
+** umlall za\.d\[w8, 0:3, vgx2\], [^\n]+, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z23_z0, svuint16x2_t,
+ svmla_za64_u16_vg4x2 (w8, z23, z0),
+ svmla_za64_vg4x2 (w8, z23, z0))
+
+/*
+** mla_w8_z18_z28:
+** umlall za\.d\[w8, 0:3, vgx2\], {z18\.h - z19\.h}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z18_z28, svuint16x2_t,
+ svmla_za64_u16_vg4x2 (w8, z18, z28),
+ svmla_za64_vg4x2 (w8, z18, z28))
+
+/*
+** mla_w8_z28_z4:
+** umlall za\.d\[w8, 0:3, vgx2\], {z28\.h - z29\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z28_z4, svuint16x2_t,
+ svmla_za64_u16_vg4x2 (w8, z28, z4),
+ svmla_za64_vg4x2 (w8, z28, z4))
+
+/*
+** mla_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlall za\.d\[\1, 0:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p1_z4_z0, svuint16x2_t,
+ svmla_za64_u16_vg4x2 (w8 + 1, z4, z0),
+ svmla_za64_vg4x2 (w8 + 1, z4, z0))
+
+/*
+** mla_w8p2_z4_z0:
+** add (w8|w9|w10|w11), w8, #?2
+** umlall za\.d\[w8, 0:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p2_z4_z0, svuint16x2_t,
+ svmla_za64_u16_vg4x2 (w8 + 2, z4, z0),
+ svmla_za64_vg4x2 (w8 + 2, z4, z0))
+
+/*
+** mla_w11p4_z4_z0:
+** umlall za\.d\[w11, 4:7, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w11p4_z4_z0, svuint16x2_t,
+ svmla_za64_u16_vg4x2 (w11 + 4, z4, z0),
+ svmla_za64_vg4x2 (w11 + 4, z4, z0))
+
+/*
+** mla_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlall za\.d\[\1, 0:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p7_z4_z0, svuint16x2_t,
+ svmla_za64_u16_vg4x2 (w8 + 7, z4, z0),
+ svmla_za64_vg4x2 (w8 + 7, z4, z0))
+
+/*
+** mla_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** umlall za\.d\[\1, 0:3, vgx2\], {z4\.h - z5\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p8_z4_z4, svuint16x2_t,
+ svmla_za64_u16_vg4x2 (w8 + 8, z4, z4),
+ svmla_za64_vg4x2 (w8 + 8, z4, z4))
+
+/*
+** mla_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlall za\.d\[\1, 0:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8m1_z4_z0, svuint16x2_t,
+ svmla_za64_u16_vg4x2 (w8 - 1, z4, z0),
+ svmla_za64_vg4x2 (w8 - 1, z4, z0))
+
+/*
+** mla_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlall za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_0_z1_z0, svuint16x2_t, svuint16_t,
+ svmla_single_za64_u16_vg4x2 (0, z1, z0),
+ svmla_za64_vg4x2 (0, z1, z0))
+
+/*
+** mla_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** umlall za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0_z1_z0, svuint16x2_t, svuint16_t,
+ svmla_single_za64_u16_vg4x2 (w0, z1, z0),
+ svmla_za64_vg4x2 (w0, z1, z0))
+
+/*
+** mla_single_w8_z1_z0:
+** umlall za\.d\[w8, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z1_z0, svuint16x2_t, svuint16_t,
+ svmla_single_za64_u16_vg4x2 (w8, z1, z0),
+ svmla_za64_vg4x2 (w8, z1, z0))
+
+/*
+** mla_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlall za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p1_z1_z0, svuint16x2_t, svuint16_t,
+ svmla_single_za64_u16_vg4x2 (w8 + 1, z1, z0),
+ svmla_za64_vg4x2 (w8 + 1, z1, z0))
+
+/*
+** mla_single_w8p2_z20_z0:
+** add (w8|w9|w10|w11), w8, #?2
+** umlall za\.d\[\1, 0:3, vgx2\], {z20\.h - z21\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p2_z20_z0, svuint16x2_t, svuint16_t,
+ svmla_single_za64_u16_vg4x2 (w8 + 2, z20, z0),
+ svmla_za64_vg4x2 (w8 + 2, z20, z0))
+
+/*
+** mla_single_w11p4_z27_z0:
+** umlall za\.d\[w11, 4:7, vgx2\], {z27\.h - z28\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w11p4_z27_z0, svuint16x2_t, svuint16_t,
+ svmla_single_za64_u16_vg4x2 (w11 + 4, z27, z0),
+ svmla_za64_vg4x2 (w11 + 4, z27, z0))
+
+/*
+** mla_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlall za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p7_z1_z0, svuint16x2_t, svuint16_t,
+ svmla_single_za64_u16_vg4x2 (w8 + 7, z1, z0),
+ svmla_za64_vg4x2 (w8 + 7, z1, z0))
+
+/*
+** mla_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** umlall za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p8_z1_z0, svuint16x2_t, svuint16_t,
+ svmla_single_za64_u16_vg4x2 (w8 + 8, z1, z0),
+ svmla_za64_vg4x2 (w8 + 8, z1, z0))
+
+/*
+** mla_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlall za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0m1_z1_z0, svuint16x2_t, svuint16_t,
+ svmla_single_za64_u16_vg4x2 (w0 - 1, z1, z0),
+ svmla_za64_vg4x2 (w0 - 1, z1, z0))
+
+/*
+** mla_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** umlall za\.d\[w8, 0:3, vgx2\], {z0\.h - z1\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mla_single_w8_z0_z15, svuint16x2_t, svuint16_t,
+ svmla_single_za64_u16_vg4x2 (w8, z0, z15),
+ svmla_za64_vg4x2 (w8, z0, z15))
+
+/*
+** mla_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** umlall za\.d\[w8, 0:3, vgx2\], {z20\.h - z21\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z20_z16, svuint16x2_t, svuint16_t,
+ svmla_single_za64_u16_vg4x2 (w8, z20, z16),
+ svmla_za64_vg4x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_u16_vg4x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_u16_vg4x4.c
new file mode 100644
index 0000000..47ae89f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mla_za64_u16_vg4x4.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mla_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlall za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_0_z0_z0, svuint16x4_t,
+ svmla_za64_u16_vg4x4 (0, z0, z0),
+ svmla_za64_vg4x4 (0, z0, z0))
+
+/*
+** mla_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** umlall za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w0_z0_z0, svuint16x4_t,
+ svmla_za64_u16_vg4x4 (w0, z0, z0),
+ svmla_za64_vg4x4 (w0, z0, z0))
+
+/*
+** mla_w8_z0_z4:
+** umlall za\.d\[w8, 0:3, vgx4\], {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z4, svuint16x4_t,
+ svmla_za64_u16_vg4x4 (w8, z0, z4),
+ svmla_za64_vg4x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mla_w8_z0_z18:
+** ...
+** umlall za\.d\[w8, 0:3, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z18, svuint16x4_t,
+ svmla_za64_u16_vg4x4 (w8, z0, z18),
+ svmla_za64_vg4x4 (w8, z0, z18))
+
+/*
+** mla_w8_z18_z0:
+** ...
+** umlall za\.d\[w8, 0:3, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z18_z0, svuint16x4_t,
+ svmla_za64_u16_vg4x4 (w8, z18, z0),
+ svmla_za64_vg4x4 (w8, z18, z0))
+
+/*
+** mla_w8_z0_z23:
+** ...
+** umlall za\.d\[w8, 0:3, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mla_w8_z0_z23, svuint16x4_t,
+ svmla_za64_u16_vg4x4 (w8, z0, z23),
+ svmla_za64_vg4x4 (w8, z0, z23))
+
+/*
+** mla_w8_z23_z0:
+** ...
+** umlall za\.d\[w8, 0:3, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z23_z0, svuint16x4_t,
+ svmla_za64_u16_vg4x4 (w8, z23, z0),
+ svmla_za64_vg4x4 (w8, z23, z0))
+
+/*
+** mla_w8_z4_z28:
+** umlall za\.d\[w8, 0:3, vgx4\], {z4\.h - z7\.h}, {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z4_z28, svuint16x4_t,
+ svmla_za64_u16_vg4x4 (w8, z4, z28),
+ svmla_za64_vg4x4 (w8, z4, z28))
+
+/*
+** mla_w8_z28_z0:
+** umlall za\.d\[w8, 0:3, vgx4\], {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8_z28_z0, svuint16x4_t,
+ svmla_za64_u16_vg4x4 (w8, z28, z0),
+ svmla_za64_vg4x4 (w8, z28, z0))
+
+/*
+** mla_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlall za\.d\[\1, 0:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p1_z4_z0, svuint16x4_t,
+ svmla_za64_u16_vg4x4 (w8 + 1, z4, z0),
+ svmla_za64_vg4x4 (w8 + 1, z4, z0))
+
+/*
+** mla_w8p2_z4_z0:
+** add (w8|w9|w10|w11), w8, #?2
+** umlall za\.d\[\1, 0:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p2_z4_z0, svuint16x4_t,
+ svmla_za64_u16_vg4x4 (w8 + 2, z4, z0),
+ svmla_za64_vg4x4 (w8 + 2, z4, z0))
+
+/*
+** mla_w11p4_z4_z0:
+** umlall za\.d\[w11, 4:7, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w11p4_z4_z0, svuint16x4_t,
+ svmla_za64_u16_vg4x4 (w11 + 4, z4, z0),
+ svmla_za64_vg4x4 (w11 + 4, z4, z0))
+
+/*
+** mla_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlall za\.d\[\1, 0:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p7_z4_z0, svuint16x4_t,
+ svmla_za64_u16_vg4x4 (w8 + 7, z4, z0),
+ svmla_za64_vg4x4 (w8 + 7, z4, z0))
+
+/*
+** mla_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** umlall za\.d\[\1, 0:3, vgx4\], {z4\.h - z7\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8p8_z4_z4, svuint16x4_t,
+ svmla_za64_u16_vg4x4 (w8 + 8, z4, z4),
+ svmla_za64_vg4x4 (w8 + 8, z4, z4))
+
+/*
+** mla_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlall za\.d\[\1, 0:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mla_w8m1_z4_z0, svuint16x4_t,
+ svmla_za64_u16_vg4x4 (w8 - 1, z4, z0),
+ svmla_za64_vg4x4 (w8 - 1, z4, z0))
+
+/*
+** mla_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlall za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_0_z1_z0, svuint16x4_t, svuint16_t,
+ svmla_single_za64_u16_vg4x4 (0, z1, z0),
+ svmla_za64_vg4x4 (0, z1, z0))
+
+/*
+** mla_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** umlall za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0_z1_z0, svuint16x4_t, svuint16_t,
+ svmla_single_za64_u16_vg4x4 (w0, z1, z0),
+ svmla_za64_vg4x4 (w0, z1, z0))
+
+/*
+** mla_single_w8_z1_z0:
+** umlall za\.d\[w8, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z1_z0, svuint16x4_t, svuint16_t,
+ svmla_single_za64_u16_vg4x4 (w8, z1, z0),
+ svmla_za64_vg4x4 (w8, z1, z0))
+
+/*
+** mla_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlall za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p1_z1_z0, svuint16x4_t, svuint16_t,
+ svmla_single_za64_u16_vg4x4 (w8 + 1, z1, z0),
+ svmla_za64_vg4x4 (w8 + 1, z1, z0))
+
+/*
+** mla_single_w8p4_z20_z0:
+** umlall za\.d\[w8, 4:7, vgx4\], {z20\.h - z23\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p4_z20_z0, svuint16x4_t, svuint16_t,
+ svmla_single_za64_u16_vg4x4 (w8 + 4, z20, z0),
+ svmla_za64_vg4x4 (w8 + 4, z20, z0))
+
+/*
+** mla_single_w8p6_z27_z0:
+** add (w8|w9|w10|w11), w8, #?6
+** umlall za\.d\[\1, 0:3, vgx4\], {z27\.h - z30\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p6_z27_z0, svuint16x4_t, svuint16_t,
+ svmla_single_za64_u16_vg4x4 (w8 + 6, z27, z0),
+ svmla_za64_vg4x4 (w8 + 6, z27, z0))
+
+/*
+** mla_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlall za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p7_z1_z0, svuint16x4_t, svuint16_t,
+ svmla_single_za64_u16_vg4x4 (w8 + 7, z1, z0),
+ svmla_za64_vg4x4 (w8 + 7, z1, z0))
+
+/*
+** mla_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** umlall za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8p8_z1_z0, svuint16x4_t, svuint16_t,
+ svmla_single_za64_u16_vg4x4 (w8 + 8, z1, z0),
+ svmla_za64_vg4x4 (w8 + 8, z1, z0))
+
+/*
+** mla_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlall za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w0m1_z1_z0, svuint16x4_t, svuint16_t,
+ svmla_single_za64_u16_vg4x4 (w0 - 1, z1, z0),
+ svmla_za64_vg4x4 (w0 - 1, z1, z0))
+
+/*
+** mla_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** umlall za\.d\[w8, 0:3, vgx4\], {z0\.h - z3\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mla_single_w8_z0_z15, svuint16x4_t, svuint16_t,
+ svmla_single_za64_u16_vg4x4 (w8, z0, z15),
+ svmla_za64_vg4x4 (w8, z0, z15))
+
+/*
+** mla_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** umlall za\.d\[w8, 0:3, vgx4\], {z20\.h - z23\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mla_single_w8_z20_z16, svuint16x4_t, svuint16_t,
+ svmla_single_za64_u16_vg4x4 (w8, z20, z16),
+ svmla_za64_vg4x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_bf16_vg2x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_bf16_vg2x1.c
new file mode 100644
index 0000000..55c9620
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_bf16_vg2x1.c
@@ -0,0 +1,148 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z0_0:
+** mov (w8|w9|w10|w11), #?0
+** bfmlsl za\.s\[\1, 0:1\], z0\.h, z0\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_0_z0_z0_0, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x1 (0, z0, z0, 0),
+ svmls_lane_za32_vg2x1 (0, z0, z0, 0))
+
+/*
+** mls_lane_w0_z0_z3_1:
+** mov (w8|w9|w10|w11), w0
+** bfmlsl za\.s\[\1, 0:1\], z0\.h, z3\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w0_z0_z3_1, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x1 (w0, z0, z3, 1),
+ svmls_lane_za32_vg2x1 (w0, z0, z3, 1))
+
+/*
+** mls_lane_w7_z0_z3_2:
+** mov (w8|w9|w10|w11), w7
+** bfmlsl za\.s\[\1, 0:1\], z0\.h, z3\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w7_z0_z3_2, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x1 (w7, z0, z3, 2),
+ svmls_lane_za32_vg2x1 (w7, z0, z3, 2))
+
+/*
+** mls_lane_w8_z7_z3_3:
+** bfmlsl za\.s\[w8, 0:1\], z7\.h, z3\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8_z7_z3_3, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x1 (w8, z7, z3, 3),
+ svmls_lane_za32_vg2x1 (w8, z7, z3, 3))
+
+/*
+** mls_lane_w8_z31_z16_4:
+** mov (z[0-7])\.d, z16\.d
+** bfmlsl za\.s\[w8, 0:1\], z31\.h. \1\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8_z31_z16_4, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x1 (w8, z31, z16, 4),
+ svmls_lane_za32_vg2x1 (w8, z31, z16, 4))
+
+/*
+** mls_lane_w8p1_z0_z0_5:
+** add (w8|w9|w10|w11), w8, #?1
+** bfmlsl za\.s\[\1, 0:1\], z0\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p1_z0_z0_5, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x1 (w8 + 1, z0, z0, 5),
+ svmls_lane_za32_vg2x1 (w8 + 1, z0, z0, 5))
+
+/*
+** mls_lane_w8p2_z23_z0_6:
+** bfmlsl za\.s\[w8, 2:3\], z23\.h, z0\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p2_z23_z0_6, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x1 (w8 + 2, z23, z0, 6),
+ svmls_lane_za32_vg2x1 (w8 + 2, z23, z0, 6))
+
+/*
+** mls_lane_w11p6_z23_z0_7:
+** bfmlsl za\.s\[w11, 6:7\], z23\.h, z0\.h\[7\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w11p6_z23_z0_7, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x1 (w11 + 6, z23, z0, 7),
+ svmls_lane_za32_vg2x1 (w11 + 6, z23, z0, 7))
+
+/*
+** mls_lane_w8p7_z7_z7_0:
+** add (w8|w9|w10|w11), w8, #?7
+** bfmlsl za\.s\[\1, 0:1\], z7\.h, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p7_z7_z7_0, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x1 (w8 + 7, z7, z7, 0),
+ svmls_lane_za32_vg2x1 (w8 + 7, z7, z7, 0))
+
+/*
+** mls_lane_w11p10_z23_z0_1:
+** bfmlsl za\.s\[w11, 10:11\], z23\.h, z0\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w11p10_z23_z0_1, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x1 (w11 + 10, z23, z0, 1),
+ svmls_lane_za32_vg2x1 (w11 + 10, z23, z0, 1))
+
+/*
+** mls_lane_w8p14_z23_z0_2:
+** bfmlsl za\.s\[w8, 14:15\], z23\.h, z0\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p14_z23_z0_2, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x1 (w8 + 14, z23, z0, 2),
+ svmls_lane_za32_vg2x1 (w8 + 14, z23, z0, 2))
+
+/*
+** mls_lane_w8p15_z7_z7_3:
+** add (w8|w9|w10|w11), w8, #?15
+** bfmlsl za\.s\[\1, 0:1\], z7\.h, z7\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p15_z7_z7_3, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x1 (w8 + 15, z7, z7, 3),
+ svmls_lane_za32_vg2x1 (w8 + 15, z7, z7, 3))
+
+/*
+** mls_lane_w8p16_z7_z7_4:
+** add (w8|w9|w10|w11), w8, #?16
+** bfmlsl za\.s\[\1, 0:1\], z7\.h, z7\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p16_z7_z7_4, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x1 (w8 + 16, z7, z7, 4),
+ svmls_lane_za32_vg2x1 (w8 + 16, z7, z7, 4))
+
+/*
+** mls_lane_w8m1_z16_z0_5:
+** sub (w8|w9|w10|w11), w8, #?1
+** bfmlsl za\.s\[\1, 0:1\], z16\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8m1_z16_z0_5, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x1 (w8 - 1, z16, z0, 5),
+ svmls_lane_za32_vg2x1 (w8 - 1, z16, z0, 5))
+
+/*
+** mls_lane_w12_z0_z3_6:
+** mov (w8|w9|w10|w11), w12
+** bfmlsl za\.s\[\1, 0:1\], z0\.h, z3\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w12_z0_z3_6, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x1 (w12, z0, z3, 6),
+ svmls_lane_za32_vg2x1 (w12, z0, z3, 6))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_bf16_vg2x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_bf16_vg2x2.c
new file mode 100644
index 0000000..26b786d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_bf16_vg2x2.c
@@ -0,0 +1,112 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** bfmlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_0_z0_z4_0, svbfloat16x2_t, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x2 (0, z0, z4, 0),
+ svmls_lane_za32_vg2x2 (0, z0, z4, 0))
+
+/*
+** mls_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** bfmlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0_z0_z7_1, svbfloat16x2_t, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x2 (w0, z0, z7, 1),
+ svmls_lane_za32_vg2x2 (w0, z0, z7, 1))
+
+/*
+** mls_lane_w8_z28_z4_2:
+** bfmlsl za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z4_2, svbfloat16x2_t, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x2 (w8, z28, z4, 2),
+ svmls_lane_za32_vg2x2 (w8, z28, z4, 2))
+
+/*
+** mls_lane_w8p6_z0_z4_7:
+** bfmlsl za\.s\[w8, 6:7, vgx2\], {z0\.h - z1\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p6_z0_z4_7, svbfloat16x2_t, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x2 (w8 + 6, z0, z4, 7),
+ svmls_lane_za32_vg2x2 (w8 + 6, z0, z4, 7))
+
+/*
+** mls_lane_w8p7_z0_z4_3:
+** add (w8|w9|w10|w11), w8, #?7
+** bfmlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p7_z0_z4_3, svbfloat16x2_t, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x2 (w8 + 7, z0, z4, 3),
+ svmls_lane_za32_vg2x2 (w8 + 7, z0, z4, 3))
+
+/*
+** mls_lane_w8p8_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?8
+** bfmlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p8_z0_z4_4, svbfloat16x2_t, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x2 (w8 + 8, z0, z4, 4),
+ svmls_lane_za32_vg2x2 (w8 + 8, z0, z4, 4))
+
+/*
+** mls_lane_w0m1_z0_z4_5:
+** sub (w8|w9|w10|w11), w0, #?1
+** bfmlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0m1_z0_z4_5, svbfloat16x2_t, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x2 (w0 - 1, z0, z4, 5),
+ svmls_lane_za32_vg2x2 (w0 - 1, z0, z4, 5))
+
+/*
+** mls_lane_w8_z4_z15_6:
+** str d15, \[sp, #?-16\]!
+** bfmlsl za\.s\[w8, 0:1, vgx2\], {z4\.h - z5\.h}, z15\.h\[6\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mls_lane_w8_z4_z15_6, svbfloat16x2_t, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x2 (w8, z4, z15, 6),
+ svmls_lane_za32_vg2x2 (w8, z4, z15, 6))
+
+/*
+** mls_lane_w8_z28_z16_7:
+** mov (z[0-7]).d, z16.d
+** bfmlsl za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, \1\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z16_7, svbfloat16x2_t, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x2 (w8, z28, z16, 7),
+ svmls_lane_za32_vg2x2 (w8, z28, z16, 7))
+
+/*
+** mls_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** bfmlsl za\.s\[w8, 0:1, vgx2\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z17_z7_0, svbfloat16x2_t, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x2 (w8, z17, z7, 0),
+ svmls_lane_za32_vg2x2 (w8, z17, z7, 0))
+
+/*
+** mls_lane_w8_z22_z4_1:
+** bfmlsl za\.s\[w8, 0:1, vgx2\], {z22\.h - z23\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z22_z4_1, svbfloat16x2_t, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x2 (w8, z22, z4, 1),
+ svmls_lane_za32_vg2x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_bf16_vg2x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_bf16_vg2x4.c
new file mode 100644
index 0000000..d958899
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_bf16_vg2x4.c
@@ -0,0 +1,118 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** bfmlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_0_z0_z4_0, svbfloat16x4_t, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x4 (0, z0, z4, 0),
+ svmls_lane_za32_vg2x4 (0, z0, z4, 0))
+
+/*
+** mls_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** bfmlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0_z0_z7_1, svbfloat16x4_t, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x4 (w0, z0, z7, 1),
+ svmls_lane_za32_vg2x4 (w0, z0, z7, 1))
+
+/*
+** mls_lane_w8_z28_z4_2:
+** bfmlsl za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z4_2, svbfloat16x4_t, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x4 (w8, z28, z4, 2),
+ svmls_lane_za32_vg2x4 (w8, z28, z4, 2))
+
+/*
+** mls_lane_w8p6_z0_z4_7:
+** bfmlsl za\.s\[w8, 6:7, vgx4\], {z0\.h - z3\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p6_z0_z4_7, svbfloat16x4_t, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x4 (w8 + 6, z0, z4, 7),
+ svmls_lane_za32_vg2x4 (w8 + 6, z0, z4, 7))
+
+/*
+** mls_lane_w8p7_z0_z4_3:
+** add (w8|w9|w10|w11), w8, #?7
+** bfmlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p7_z0_z4_3, svbfloat16x4_t, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x4 (w8 + 7, z0, z4, 3),
+ svmls_lane_za32_vg2x4 (w8 + 7, z0, z4, 3))
+
+/*
+** mls_lane_w8p8_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?8
+** bfmlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p8_z0_z4_4, svbfloat16x4_t, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x4 (w8 + 8, z0, z4, 4),
+ svmls_lane_za32_vg2x4 (w8 + 8, z0, z4, 4))
+
+/*
+** mls_lane_w0m1_z0_z4_5:
+** sub (w8|w9|w10|w11), w0, #?1
+** bfmlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0m1_z0_z4_5, svbfloat16x4_t, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x4 (w0 - 1, z0, z4, 5),
+ svmls_lane_za32_vg2x4 (w0 - 1, z0, z4, 5))
+
+/*
+** mls_lane_w8_z4_z15_6:
+** str d15, \[sp, #?-16\]!
+** bfmlsl za\.s\[w8, 0:1, vgx4\], {z4\.h - z7\.h}, z15\.h\[6\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mls_lane_w8_z4_z15_6, svbfloat16x4_t, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x4 (w8, z4, z15, 6),
+ svmls_lane_za32_vg2x4 (w8, z4, z15, 6))
+
+/*
+** mls_lane_w8_z28_z16_7:
+** mov (z[0-7]).d, z16.d
+** bfmlsl za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, \1\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z16_7, svbfloat16x4_t, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x4 (w8, z28, z16, 7),
+ svmls_lane_za32_vg2x4 (w8, z28, z16, 7))
+
+/*
+** mls_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** bfmlsl za\.s\[w8, 0:1, vgx4\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z17_z7_0, svbfloat16x4_t, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x4 (w8, z17, z7, 0),
+ svmls_lane_za32_vg2x4 (w8, z17, z7, 0))
+
+/*
+** mls_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** bfmlsl za\.s\[w8, 0:1, vgx4\], [^\n]+, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z22_z4_1, svbfloat16x4_t, svbfloat16_t,
+ svmls_lane_za32_bf16_vg2x4 (w8, z22, z4, 1),
+ svmls_lane_za32_vg2x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f16_vg2x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f16_vg2x1.c
new file mode 100644
index 0000000..c211a38
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f16_vg2x1.c
@@ -0,0 +1,148 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z0_0:
+** mov (w8|w9|w10|w11), #?0
+** fmlsl za\.s\[\1, 0:1\], z0\.h, z0\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_0_z0_z0_0, svfloat16_t,
+ svmls_lane_za32_f16_vg2x1 (0, z0, z0, 0),
+ svmls_lane_za32_vg2x1 (0, z0, z0, 0))
+
+/*
+** mls_lane_w0_z0_z3_1:
+** mov (w8|w9|w10|w11), w0
+** fmlsl za\.s\[\1, 0:1\], z0\.h, z3\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w0_z0_z3_1, svfloat16_t,
+ svmls_lane_za32_f16_vg2x1 (w0, z0, z3, 1),
+ svmls_lane_za32_vg2x1 (w0, z0, z3, 1))
+
+/*
+** mls_lane_w7_z0_z3_2:
+** mov (w8|w9|w10|w11), w7
+** fmlsl za\.s\[\1, 0:1\], z0\.h, z3\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w7_z0_z3_2, svfloat16_t,
+ svmls_lane_za32_f16_vg2x1 (w7, z0, z3, 2),
+ svmls_lane_za32_vg2x1 (w7, z0, z3, 2))
+
+/*
+** mls_lane_w8_z7_z3_3:
+** fmlsl za\.s\[w8, 0:1\], z7\.h, z3\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8_z7_z3_3, svfloat16_t,
+ svmls_lane_za32_f16_vg2x1 (w8, z7, z3, 3),
+ svmls_lane_za32_vg2x1 (w8, z7, z3, 3))
+
+/*
+** mls_lane_w8_z31_z16_4:
+** mov (z[0-7])\.d, z16\.d
+** fmlsl za\.s\[w8, 0:1\], z31\.h. \1\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8_z31_z16_4, svfloat16_t,
+ svmls_lane_za32_f16_vg2x1 (w8, z31, z16, 4),
+ svmls_lane_za32_vg2x1 (w8, z31, z16, 4))
+
+/*
+** mls_lane_w8p1_z0_z0_5:
+** add (w8|w9|w10|w11), w8, #?1
+** fmlsl za\.s\[\1, 0:1\], z0\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p1_z0_z0_5, svfloat16_t,
+ svmls_lane_za32_f16_vg2x1 (w8 + 1, z0, z0, 5),
+ svmls_lane_za32_vg2x1 (w8 + 1, z0, z0, 5))
+
+/*
+** mls_lane_w8p2_z23_z0_6:
+** fmlsl za\.s\[w8, 2:3\], z23\.h, z0\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p2_z23_z0_6, svfloat16_t,
+ svmls_lane_za32_f16_vg2x1 (w8 + 2, z23, z0, 6),
+ svmls_lane_za32_vg2x1 (w8 + 2, z23, z0, 6))
+
+/*
+** mls_lane_w11p6_z23_z0_7:
+** fmlsl za\.s\[w11, 6:7\], z23\.h, z0\.h\[7\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w11p6_z23_z0_7, svfloat16_t,
+ svmls_lane_za32_f16_vg2x1 (w11 + 6, z23, z0, 7),
+ svmls_lane_za32_vg2x1 (w11 + 6, z23, z0, 7))
+
+/*
+** mls_lane_w8p7_z7_z7_0:
+** add (w8|w9|w10|w11), w8, #?7
+** fmlsl za\.s\[\1, 0:1\], z7\.h, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p7_z7_z7_0, svfloat16_t,
+ svmls_lane_za32_f16_vg2x1 (w8 + 7, z7, z7, 0),
+ svmls_lane_za32_vg2x1 (w8 + 7, z7, z7, 0))
+
+/*
+** mls_lane_w11p10_z23_z0_1:
+** fmlsl za\.s\[w11, 10:11\], z23\.h, z0\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w11p10_z23_z0_1, svfloat16_t,
+ svmls_lane_za32_f16_vg2x1 (w11 + 10, z23, z0, 1),
+ svmls_lane_za32_vg2x1 (w11 + 10, z23, z0, 1))
+
+/*
+** mls_lane_w8p14_z23_z0_2:
+** fmlsl za\.s\[w8, 14:15\], z23\.h, z0\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p14_z23_z0_2, svfloat16_t,
+ svmls_lane_za32_f16_vg2x1 (w8 + 14, z23, z0, 2),
+ svmls_lane_za32_vg2x1 (w8 + 14, z23, z0, 2))
+
+/*
+** mls_lane_w8p15_z7_z7_3:
+** add (w8|w9|w10|w11), w8, #?15
+** fmlsl za\.s\[\1, 0:1\], z7\.h, z7\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p15_z7_z7_3, svfloat16_t,
+ svmls_lane_za32_f16_vg2x1 (w8 + 15, z7, z7, 3),
+ svmls_lane_za32_vg2x1 (w8 + 15, z7, z7, 3))
+
+/*
+** mls_lane_w8p16_z7_z7_4:
+** add (w8|w9|w10|w11), w8, #?16
+** fmlsl za\.s\[\1, 0:1\], z7\.h, z7\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p16_z7_z7_4, svfloat16_t,
+ svmls_lane_za32_f16_vg2x1 (w8 + 16, z7, z7, 4),
+ svmls_lane_za32_vg2x1 (w8 + 16, z7, z7, 4))
+
+/*
+** mls_lane_w8m1_z16_z0_5:
+** sub (w8|w9|w10|w11), w8, #?1
+** fmlsl za\.s\[\1, 0:1\], z16\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8m1_z16_z0_5, svfloat16_t,
+ svmls_lane_za32_f16_vg2x1 (w8 - 1, z16, z0, 5),
+ svmls_lane_za32_vg2x1 (w8 - 1, z16, z0, 5))
+
+/*
+** mls_lane_w12_z0_z3_6:
+** mov (w8|w9|w10|w11), w12
+** fmlsl za\.s\[\1, 0:1\], z0\.h, z3\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w12_z0_z3_6, svfloat16_t,
+ svmls_lane_za32_f16_vg2x1 (w12, z0, z3, 6),
+ svmls_lane_za32_vg2x1 (w12, z0, z3, 6))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f16_vg2x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f16_vg2x2.c
new file mode 100644
index 0000000..27e1bdd54
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f16_vg2x2.c
@@ -0,0 +1,112 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** fmlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_0_z0_z4_0, svfloat16x2_t, svfloat16_t,
+ svmls_lane_za32_f16_vg2x2 (0, z0, z4, 0),
+ svmls_lane_za32_vg2x2 (0, z0, z4, 0))
+
+/*
+** mls_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** fmlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0_z0_z7_1, svfloat16x2_t, svfloat16_t,
+ svmls_lane_za32_f16_vg2x2 (w0, z0, z7, 1),
+ svmls_lane_za32_vg2x2 (w0, z0, z7, 1))
+
+/*
+** mls_lane_w8_z28_z4_2:
+** fmlsl za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z4_2, svfloat16x2_t, svfloat16_t,
+ svmls_lane_za32_f16_vg2x2 (w8, z28, z4, 2),
+ svmls_lane_za32_vg2x2 (w8, z28, z4, 2))
+
+/*
+** mls_lane_w8p6_z0_z4_7:
+** fmlsl za\.s\[w8, 6:7, vgx2\], {z0\.h - z1\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p6_z0_z4_7, svfloat16x2_t, svfloat16_t,
+ svmls_lane_za32_f16_vg2x2 (w8 + 6, z0, z4, 7),
+ svmls_lane_za32_vg2x2 (w8 + 6, z0, z4, 7))
+
+/*
+** mls_lane_w8p7_z0_z4_3:
+** add (w8|w9|w10|w11), w8, #?7
+** fmlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p7_z0_z4_3, svfloat16x2_t, svfloat16_t,
+ svmls_lane_za32_f16_vg2x2 (w8 + 7, z0, z4, 3),
+ svmls_lane_za32_vg2x2 (w8 + 7, z0, z4, 3))
+
+/*
+** mls_lane_w8p8_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?8
+** fmlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p8_z0_z4_4, svfloat16x2_t, svfloat16_t,
+ svmls_lane_za32_f16_vg2x2 (w8 + 8, z0, z4, 4),
+ svmls_lane_za32_vg2x2 (w8 + 8, z0, z4, 4))
+
+/*
+** mls_lane_w0m1_z0_z4_5:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0m1_z0_z4_5, svfloat16x2_t, svfloat16_t,
+ svmls_lane_za32_f16_vg2x2 (w0 - 1, z0, z4, 5),
+ svmls_lane_za32_vg2x2 (w0 - 1, z0, z4, 5))
+
+/*
+** mls_lane_w8_z4_z15_6:
+** str d15, \[sp, #?-16\]!
+** fmlsl za\.s\[w8, 0:1, vgx2\], {z4\.h - z5\.h}, z15\.h\[6\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mls_lane_w8_z4_z15_6, svfloat16x2_t, svfloat16_t,
+ svmls_lane_za32_f16_vg2x2 (w8, z4, z15, 6),
+ svmls_lane_za32_vg2x2 (w8, z4, z15, 6))
+
+/*
+** mls_lane_w8_z28_z16_7:
+** mov (z[0-7]).d, z16.d
+** fmlsl za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, \1\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z16_7, svfloat16x2_t, svfloat16_t,
+ svmls_lane_za32_f16_vg2x2 (w8, z28, z16, 7),
+ svmls_lane_za32_vg2x2 (w8, z28, z16, 7))
+
+/*
+** mls_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** fmlsl za\.s\[w8, 0:1, vgx2\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z17_z7_0, svfloat16x2_t, svfloat16_t,
+ svmls_lane_za32_f16_vg2x2 (w8, z17, z7, 0),
+ svmls_lane_za32_vg2x2 (w8, z17, z7, 0))
+
+/*
+** mls_lane_w8_z22_z4_1:
+** fmlsl za\.s\[w8, 0:1, vgx2\], {z22\.h - z23\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z22_z4_1, svfloat16x2_t, svfloat16_t,
+ svmls_lane_za32_f16_vg2x2 (w8, z22, z4, 1),
+ svmls_lane_za32_vg2x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f16_vg2x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f16_vg2x4.c
new file mode 100644
index 0000000..a78e710
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f16_vg2x4.c
@@ -0,0 +1,118 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** fmlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_0_z0_z4_0, svfloat16x4_t, svfloat16_t,
+ svmls_lane_za32_f16_vg2x4 (0, z0, z4, 0),
+ svmls_lane_za32_vg2x4 (0, z0, z4, 0))
+
+/*
+** mls_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** fmlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0_z0_z7_1, svfloat16x4_t, svfloat16_t,
+ svmls_lane_za32_f16_vg2x4 (w0, z0, z7, 1),
+ svmls_lane_za32_vg2x4 (w0, z0, z7, 1))
+
+/*
+** mls_lane_w8_z28_z4_2:
+** fmlsl za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z4_2, svfloat16x4_t, svfloat16_t,
+ svmls_lane_za32_f16_vg2x4 (w8, z28, z4, 2),
+ svmls_lane_za32_vg2x4 (w8, z28, z4, 2))
+
+/*
+** mls_lane_w8p6_z0_z4_7:
+** fmlsl za\.s\[w8, 6:7, vgx4\], {z0\.h - z3\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p6_z0_z4_7, svfloat16x4_t, svfloat16_t,
+ svmls_lane_za32_f16_vg2x4 (w8 + 6, z0, z4, 7),
+ svmls_lane_za32_vg2x4 (w8 + 6, z0, z4, 7))
+
+/*
+** mls_lane_w8p7_z0_z4_3:
+** add (w8|w9|w10|w11), w8, #?7
+** fmlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p7_z0_z4_3, svfloat16x4_t, svfloat16_t,
+ svmls_lane_za32_f16_vg2x4 (w8 + 7, z0, z4, 3),
+ svmls_lane_za32_vg2x4 (w8 + 7, z0, z4, 3))
+
+/*
+** mls_lane_w8p8_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?8
+** fmlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p8_z0_z4_4, svfloat16x4_t, svfloat16_t,
+ svmls_lane_za32_f16_vg2x4 (w8 + 8, z0, z4, 4),
+ svmls_lane_za32_vg2x4 (w8 + 8, z0, z4, 4))
+
+/*
+** mls_lane_w0m1_z0_z4_5:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0m1_z0_z4_5, svfloat16x4_t, svfloat16_t,
+ svmls_lane_za32_f16_vg2x4 (w0 - 1, z0, z4, 5),
+ svmls_lane_za32_vg2x4 (w0 - 1, z0, z4, 5))
+
+/*
+** mls_lane_w8_z4_z15_6:
+** str d15, \[sp, #?-16\]!
+** fmlsl za\.s\[w8, 0:1, vgx4\], {z4\.h - z7\.h}, z15\.h\[6\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mls_lane_w8_z4_z15_6, svfloat16x4_t, svfloat16_t,
+ svmls_lane_za32_f16_vg2x4 (w8, z4, z15, 6),
+ svmls_lane_za32_vg2x4 (w8, z4, z15, 6))
+
+/*
+** mls_lane_w8_z28_z16_7:
+** mov (z[0-7]).d, z16.d
+** fmlsl za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, \1\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z16_7, svfloat16x4_t, svfloat16_t,
+ svmls_lane_za32_f16_vg2x4 (w8, z28, z16, 7),
+ svmls_lane_za32_vg2x4 (w8, z28, z16, 7))
+
+/*
+** mls_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmlsl za\.s\[w8, 0:1, vgx4\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z17_z7_0, svfloat16x4_t, svfloat16_t,
+ svmls_lane_za32_f16_vg2x4 (w8, z17, z7, 0),
+ svmls_lane_za32_vg2x4 (w8, z17, z7, 0))
+
+/*
+** mls_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmlsl za\.s\[w8, 0:1, vgx4\], [^\n]+, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z22_z4_1, svfloat16x4_t, svfloat16_t,
+ svmls_lane_za32_f16_vg2x4 (w8, z22, z4, 1),
+ svmls_lane_za32_vg2x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f32_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f32_vg1x2.c
new file mode 100644
index 0000000..ce7db44
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f32_vg1x2.c
@@ -0,0 +1,102 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** fmls za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}, z4\.s\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_0_z0_z4_0, svfloat32x2_t, svfloat32_t,
+ svmls_lane_za32_f32_vg1x2 (0, z0, z4, 0),
+ svmls_lane_za32_vg1x2 (0, z0, z4, 0))
+
+/*
+** mls_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** fmls za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}, z7\.s\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0_z0_z7_1, svfloat32x2_t, svfloat32_t,
+ svmls_lane_za32_f32_vg1x2 (w0, z0, z7, 1),
+ svmls_lane_za32_vg1x2 (w0, z0, z7, 1))
+
+/*
+** mls_lane_w8_z28_z4_2:
+** fmls za\.s\[w8, 0, vgx2\], {z28\.s - z29\.s}, z4\.s\[2\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z4_2, svfloat32x2_t, svfloat32_t,
+ svmls_lane_za32_f32_vg1x2 (w8, z28, z4, 2),
+ svmls_lane_za32_vg1x2 (w8, z28, z4, 2))
+
+/*
+** mls_lane_w8p7_z0_z4_3:
+** fmls za\.s\[w8, 7, vgx2\], {z0\.s - z1\.s}, z4\.s\[3\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p7_z0_z4_3, svfloat32x2_t, svfloat32_t,
+ svmls_lane_za32_f32_vg1x2 (w8 + 7, z0, z4, 3),
+ svmls_lane_za32_vg1x2 (w8 + 7, z0, z4, 3))
+
+/*
+** mls_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** fmls za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}, z4\.s\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p8_z0_z4_0, svfloat32x2_t, svfloat32_t,
+ svmls_lane_za32_f32_vg1x2 (w8 + 8, z0, z4, 0),
+ svmls_lane_za32_vg1x2 (w8 + 8, z0, z4, 0))
+
+/*
+** mls_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmls za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}, z4\.s\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0m1_z0_z4_1, svfloat32x2_t, svfloat32_t,
+ svmls_lane_za32_f32_vg1x2 (w0 - 1, z0, z4, 1),
+ svmls_lane_za32_vg1x2 (w0 - 1, z0, z4, 1))
+
+/*
+** mls_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** fmls za\.s\[w8, 0, vgx2\], {z4\.s - z5\.s}, z15\.s\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mls_lane_w8_z4_z15_2, svfloat32x2_t, svfloat32_t,
+ svmls_lane_za32_f32_vg1x2 (w8, z4, z15, 2),
+ svmls_lane_za32_vg1x2 (w8, z4, z15, 2))
+
+/*
+** mls_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** fmls za\.s\[w8, 0, vgx2\], {z28\.s - z29\.s}, \1\.s\[3\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z16_3, svfloat32x2_t, svfloat32_t,
+ svmls_lane_za32_f32_vg1x2 (w8, z28, z16, 3),
+ svmls_lane_za32_vg1x2 (w8, z28, z16, 3))
+
+/*
+** mls_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** fmls za\.s\[w8, 0, vgx2\], [^\n]+, z7\.s\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z17_z7_0, svfloat32x2_t, svfloat32_t,
+ svmls_lane_za32_f32_vg1x2 (w8, z17, z7, 0),
+ svmls_lane_za32_vg1x2 (w8, z17, z7, 0))
+
+/*
+** mls_lane_w8_z22_z4_1:
+** fmls za\.s\[w8, 0, vgx2\], {z22\.s - z23\.s}, z4\.s\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z22_z4_1, svfloat32x2_t, svfloat32_t,
+ svmls_lane_za32_f32_vg1x2 (w8, z22, z4, 1),
+ svmls_lane_za32_vg1x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f32_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f32_vg1x4.c
new file mode 100644
index 0000000..b642c1a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_f32_vg1x4.c
@@ -0,0 +1,108 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** fmls za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}, z4\.s\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_0_z0_z4_0, svfloat32x4_t, svfloat32_t,
+ svmls_lane_za32_f32_vg1x4 (0, z0, z4, 0),
+ svmls_lane_za32_vg1x4 (0, z0, z4, 0))
+
+/*
+** mls_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** fmls za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}, z7\.s\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0_z0_z7_1, svfloat32x4_t, svfloat32_t,
+ svmls_lane_za32_f32_vg1x4 (w0, z0, z7, 1),
+ svmls_lane_za32_vg1x4 (w0, z0, z7, 1))
+
+/*
+** mls_lane_w8_z28_z4_2:
+** fmls za\.s\[w8, 0, vgx4\], {z28\.s - z31\.s}, z4\.s\[2\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z4_2, svfloat32x4_t, svfloat32_t,
+ svmls_lane_za32_f32_vg1x4 (w8, z28, z4, 2),
+ svmls_lane_za32_vg1x4 (w8, z28, z4, 2))
+
+/*
+** mls_lane_w8p7_z0_z4_3:
+** fmls za\.s\[w8, 7, vgx4\], {z0\.s - z3\.s}, z4\.s\[3\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p7_z0_z4_3, svfloat32x4_t, svfloat32_t,
+ svmls_lane_za32_f32_vg1x4 (w8 + 7, z0, z4, 3),
+ svmls_lane_za32_vg1x4 (w8 + 7, z0, z4, 3))
+
+/*
+** mls_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** fmls za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}, z4\.s\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p8_z0_z4_0, svfloat32x4_t, svfloat32_t,
+ svmls_lane_za32_f32_vg1x4 (w8 + 8, z0, z4, 0),
+ svmls_lane_za32_vg1x4 (w8 + 8, z0, z4, 0))
+
+/*
+** mls_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmls za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}, z4\.s\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0m1_z0_z4_1, svfloat32x4_t, svfloat32_t,
+ svmls_lane_za32_f32_vg1x4 (w0 - 1, z0, z4, 1),
+ svmls_lane_za32_vg1x4 (w0 - 1, z0, z4, 1))
+
+/*
+** mls_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** fmls za\.s\[w8, 0, vgx4\], {z4\.s - z7\.s}, z15\.s\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mls_lane_w8_z4_z15_2, svfloat32x4_t, svfloat32_t,
+ svmls_lane_za32_f32_vg1x4 (w8, z4, z15, 2),
+ svmls_lane_za32_vg1x4 (w8, z4, z15, 2))
+
+/*
+** mls_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** fmls za\.s\[w8, 0, vgx4\], {z28\.s - z31\.s}, \1\.s\[3\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z16_3, svfloat32x4_t, svfloat32_t,
+ svmls_lane_za32_f32_vg1x4 (w8, z28, z16, 3),
+ svmls_lane_za32_vg1x4 (w8, z28, z16, 3))
+
+/*
+** mls_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmls za\.s\[w8, 0, vgx4\], [^\n]+, z7\.s\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z17_z7_0, svfloat32x4_t, svfloat32_t,
+ svmls_lane_za32_f32_vg1x4 (w8, z17, z7, 0),
+ svmls_lane_za32_vg1x4 (w8, z17, z7, 0))
+
+/*
+** mls_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmls za\.s\[w8, 0, vgx4\], [^\n]+, z4\.s\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z22_z4_1, svfloat32x4_t, svfloat32_t,
+ svmls_lane_za32_f32_vg1x4 (w8, z22, z4, 1),
+ svmls_lane_za32_vg1x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s16_vg2x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s16_vg2x1.c
new file mode 100644
index 0000000..66a0d39
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s16_vg2x1.c
@@ -0,0 +1,148 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z0_0:
+** mov (w8|w9|w10|w11), #?0
+** smlsl za\.s\[\1, 0:1\], z0\.h, z0\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_0_z0_z0_0, svint16_t,
+ svmls_lane_za32_s16_vg2x1 (0, z0, z0, 0),
+ svmls_lane_za32_vg2x1 (0, z0, z0, 0))
+
+/*
+** mls_lane_w0_z0_z3_1:
+** mov (w8|w9|w10|w11), w0
+** smlsl za\.s\[\1, 0:1\], z0\.h, z3\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w0_z0_z3_1, svint16_t,
+ svmls_lane_za32_s16_vg2x1 (w0, z0, z3, 1),
+ svmls_lane_za32_vg2x1 (w0, z0, z3, 1))
+
+/*
+** mls_lane_w7_z0_z3_2:
+** mov (w8|w9|w10|w11), w7
+** smlsl za\.s\[\1, 0:1\], z0\.h, z3\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w7_z0_z3_2, svint16_t,
+ svmls_lane_za32_s16_vg2x1 (w7, z0, z3, 2),
+ svmls_lane_za32_vg2x1 (w7, z0, z3, 2))
+
+/*
+** mls_lane_w8_z7_z3_3:
+** smlsl za\.s\[w8, 0:1\], z7\.h, z3\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8_z7_z3_3, svint16_t,
+ svmls_lane_za32_s16_vg2x1 (w8, z7, z3, 3),
+ svmls_lane_za32_vg2x1 (w8, z7, z3, 3))
+
+/*
+** mls_lane_w8_z31_z16_4:
+** mov (z[0-7])\.d, z16\.d
+**	smlsl	za\.s\[w8, 0:1\], z31\.h, \1\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8_z31_z16_4, svint16_t,
+ svmls_lane_za32_s16_vg2x1 (w8, z31, z16, 4),
+ svmls_lane_za32_vg2x1 (w8, z31, z16, 4))
+
+/*
+** mls_lane_w8p1_z0_z0_5:
+** add (w8|w9|w10|w11), w8, #?1
+** smlsl za\.s\[\1, 0:1\], z0\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p1_z0_z0_5, svint16_t,
+ svmls_lane_za32_s16_vg2x1 (w8 + 1, z0, z0, 5),
+ svmls_lane_za32_vg2x1 (w8 + 1, z0, z0, 5))
+
+/*
+** mls_lane_w8p2_z23_z0_6:
+** smlsl za\.s\[w8, 2:3\], z23\.h, z0\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p2_z23_z0_6, svint16_t,
+ svmls_lane_za32_s16_vg2x1 (w8 + 2, z23, z0, 6),
+ svmls_lane_za32_vg2x1 (w8 + 2, z23, z0, 6))
+
+/*
+** mls_lane_w11p6_z23_z0_7:
+** smlsl za\.s\[w11, 6:7\], z23\.h, z0\.h\[7\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w11p6_z23_z0_7, svint16_t,
+ svmls_lane_za32_s16_vg2x1 (w11 + 6, z23, z0, 7),
+ svmls_lane_za32_vg2x1 (w11 + 6, z23, z0, 7))
+
+/*
+** mls_lane_w8p7_z7_z7_0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlsl za\.s\[\1, 0:1\], z7\.h, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p7_z7_z7_0, svint16_t,
+ svmls_lane_za32_s16_vg2x1 (w8 + 7, z7, z7, 0),
+ svmls_lane_za32_vg2x1 (w8 + 7, z7, z7, 0))
+
+/*
+** mls_lane_w11p10_z23_z0_1:
+** smlsl za\.s\[w11, 10:11\], z23\.h, z0\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w11p10_z23_z0_1, svint16_t,
+ svmls_lane_za32_s16_vg2x1 (w11 + 10, z23, z0, 1),
+ svmls_lane_za32_vg2x1 (w11 + 10, z23, z0, 1))
+
+/*
+** mls_lane_w8p14_z23_z0_2:
+** smlsl za\.s\[w8, 14:15\], z23\.h, z0\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p14_z23_z0_2, svint16_t,
+ svmls_lane_za32_s16_vg2x1 (w8 + 14, z23, z0, 2),
+ svmls_lane_za32_vg2x1 (w8 + 14, z23, z0, 2))
+
+/*
+** mls_lane_w8p15_z7_z7_3:
+** add (w8|w9|w10|w11), w8, #?15
+** smlsl za\.s\[\1, 0:1\], z7\.h, z7\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p15_z7_z7_3, svint16_t,
+ svmls_lane_za32_s16_vg2x1 (w8 + 15, z7, z7, 3),
+ svmls_lane_za32_vg2x1 (w8 + 15, z7, z7, 3))
+
+/*
+** mls_lane_w8p16_z7_z7_4:
+** add (w8|w9|w10|w11), w8, #?16
+** smlsl za\.s\[\1, 0:1\], z7\.h, z7\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p16_z7_z7_4, svint16_t,
+ svmls_lane_za32_s16_vg2x1 (w8 + 16, z7, z7, 4),
+ svmls_lane_za32_vg2x1 (w8 + 16, z7, z7, 4))
+
+/*
+** mls_lane_w8m1_z16_z0_5:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlsl za\.s\[\1, 0:1\], z16\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8m1_z16_z0_5, svint16_t,
+ svmls_lane_za32_s16_vg2x1 (w8 - 1, z16, z0, 5),
+ svmls_lane_za32_vg2x1 (w8 - 1, z16, z0, 5))
+
+/*
+** mls_lane_w12_z0_z3_6:
+** mov (w8|w9|w10|w11), w12
+** smlsl za\.s\[\1, 0:1\], z0\.h, z3\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w12_z0_z3_6, svint16_t,
+ svmls_lane_za32_s16_vg2x1 (w12, z0, z3, 6),
+ svmls_lane_za32_vg2x1 (w12, z0, z3, 6))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s16_vg2x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s16_vg2x2.c
new file mode 100644
index 0000000..f4a08b1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s16_vg2x2.c
@@ -0,0 +1,112 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** smlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_0_z0_z4_0, svint16x2_t, svint16_t,
+ svmls_lane_za32_s16_vg2x2 (0, z0, z4, 0),
+ svmls_lane_za32_vg2x2 (0, z0, z4, 0))
+
+/*
+** mls_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** smlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0_z0_z7_1, svint16x2_t, svint16_t,
+ svmls_lane_za32_s16_vg2x2 (w0, z0, z7, 1),
+ svmls_lane_za32_vg2x2 (w0, z0, z7, 1))
+
+/*
+** mls_lane_w8_z28_z4_2:
+** smlsl za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z4_2, svint16x2_t, svint16_t,
+ svmls_lane_za32_s16_vg2x2 (w8, z28, z4, 2),
+ svmls_lane_za32_vg2x2 (w8, z28, z4, 2))
+
+/*
+** mls_lane_w8p6_z0_z4_7:
+** smlsl za\.s\[w8, 6:7, vgx2\], {z0\.h - z1\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p6_z0_z4_7, svint16x2_t, svint16_t,
+ svmls_lane_za32_s16_vg2x2 (w8 + 6, z0, z4, 7),
+ svmls_lane_za32_vg2x2 (w8 + 6, z0, z4, 7))
+
+/*
+** mls_lane_w8p7_z0_z4_3:
+** add (w8|w9|w10|w11), w8, #?7
+** smlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p7_z0_z4_3, svint16x2_t, svint16_t,
+ svmls_lane_za32_s16_vg2x2 (w8 + 7, z0, z4, 3),
+ svmls_lane_za32_vg2x2 (w8 + 7, z0, z4, 3))
+
+/*
+** mls_lane_w8p8_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?8
+** smlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p8_z0_z4_4, svint16x2_t, svint16_t,
+ svmls_lane_za32_s16_vg2x2 (w8 + 8, z0, z4, 4),
+ svmls_lane_za32_vg2x2 (w8 + 8, z0, z4, 4))
+
+/*
+** mls_lane_w0m1_z0_z4_5:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0m1_z0_z4_5, svint16x2_t, svint16_t,
+ svmls_lane_za32_s16_vg2x2 (w0 - 1, z0, z4, 5),
+ svmls_lane_za32_vg2x2 (w0 - 1, z0, z4, 5))
+
+/*
+** mls_lane_w8_z4_z15_6:
+** str d15, \[sp, #?-16\]!
+** smlsl za\.s\[w8, 0:1, vgx2\], {z4\.h - z5\.h}, z15\.h\[6\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mls_lane_w8_z4_z15_6, svint16x2_t, svint16_t,
+ svmls_lane_za32_s16_vg2x2 (w8, z4, z15, 6),
+ svmls_lane_za32_vg2x2 (w8, z4, z15, 6))
+
+/*
+** mls_lane_w8_z28_z16_7:
+** mov (z[0-7]).d, z16.d
+** smlsl za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, \1\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z16_7, svint16x2_t, svint16_t,
+ svmls_lane_za32_s16_vg2x2 (w8, z28, z16, 7),
+ svmls_lane_za32_vg2x2 (w8, z28, z16, 7))
+
+/*
+** mls_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** smlsl za\.s\[w8, 0:1, vgx2\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z17_z7_0, svint16x2_t, svint16_t,
+ svmls_lane_za32_s16_vg2x2 (w8, z17, z7, 0),
+ svmls_lane_za32_vg2x2 (w8, z17, z7, 0))
+
+/*
+** mls_lane_w8_z22_z4_1:
+** smlsl za\.s\[w8, 0:1, vgx2\], {z22\.h - z23\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z22_z4_1, svint16x2_t, svint16_t,
+ svmls_lane_za32_s16_vg2x2 (w8, z22, z4, 1),
+ svmls_lane_za32_vg2x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s16_vg2x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s16_vg2x4.c
new file mode 100644
index 0000000..3e39ddb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s16_vg2x4.c
@@ -0,0 +1,118 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** smlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_0_z0_z4_0, svint16x4_t, svint16_t,
+ svmls_lane_za32_s16_vg2x4 (0, z0, z4, 0),
+ svmls_lane_za32_vg2x4 (0, z0, z4, 0))
+
+/*
+** mls_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** smlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0_z0_z7_1, svint16x4_t, svint16_t,
+ svmls_lane_za32_s16_vg2x4 (w0, z0, z7, 1),
+ svmls_lane_za32_vg2x4 (w0, z0, z7, 1))
+
+/*
+** mls_lane_w8_z28_z4_2:
+** smlsl za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z4_2, svint16x4_t, svint16_t,
+ svmls_lane_za32_s16_vg2x4 (w8, z28, z4, 2),
+ svmls_lane_za32_vg2x4 (w8, z28, z4, 2))
+
+/*
+** mls_lane_w8p6_z0_z4_7:
+** smlsl za\.s\[w8, 6:7, vgx4\], {z0\.h - z3\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p6_z0_z4_7, svint16x4_t, svint16_t,
+ svmls_lane_za32_s16_vg2x4 (w8 + 6, z0, z4, 7),
+ svmls_lane_za32_vg2x4 (w8 + 6, z0, z4, 7))
+
+/*
+** mls_lane_w8p7_z0_z4_3:
+** add (w8|w9|w10|w11), w8, #?7
+** smlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p7_z0_z4_3, svint16x4_t, svint16_t,
+ svmls_lane_za32_s16_vg2x4 (w8 + 7, z0, z4, 3),
+ svmls_lane_za32_vg2x4 (w8 + 7, z0, z4, 3))
+
+/*
+** mls_lane_w8p8_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?8
+** smlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p8_z0_z4_4, svint16x4_t, svint16_t,
+ svmls_lane_za32_s16_vg2x4 (w8 + 8, z0, z4, 4),
+ svmls_lane_za32_vg2x4 (w8 + 8, z0, z4, 4))
+
+/*
+** mls_lane_w0m1_z0_z4_5:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0m1_z0_z4_5, svint16x4_t, svint16_t,
+ svmls_lane_za32_s16_vg2x4 (w0 - 1, z0, z4, 5),
+ svmls_lane_za32_vg2x4 (w0 - 1, z0, z4, 5))
+
+/*
+** mls_lane_w8_z4_z15_6:
+** str d15, \[sp, #?-16\]!
+** smlsl za\.s\[w8, 0:1, vgx4\], {z4\.h - z7\.h}, z15\.h\[6\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mls_lane_w8_z4_z15_6, svint16x4_t, svint16_t,
+ svmls_lane_za32_s16_vg2x4 (w8, z4, z15, 6),
+ svmls_lane_za32_vg2x4 (w8, z4, z15, 6))
+
+/*
+** mls_lane_w8_z28_z16_7:
+** mov (z[0-7]).d, z16.d
+** smlsl za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, \1\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z16_7, svint16x4_t, svint16_t,
+ svmls_lane_za32_s16_vg2x4 (w8, z28, z16, 7),
+ svmls_lane_za32_vg2x4 (w8, z28, z16, 7))
+
+/*
+** mls_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smlsl za\.s\[w8, 0:1, vgx4\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z17_z7_0, svint16x4_t, svint16_t,
+ svmls_lane_za32_s16_vg2x4 (w8, z17, z7, 0),
+ svmls_lane_za32_vg2x4 (w8, z17, z7, 0))
+
+/*
+** mls_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smlsl za\.s\[w8, 0:1, vgx4\], [^\n]+, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z22_z4_1, svint16x4_t, svint16_t,
+ svmls_lane_za32_s16_vg2x4 (w8, z22, z4, 1),
+ svmls_lane_za32_vg2x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s8_vg4x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s8_vg4x1.c
new file mode 100644
index 0000000..ad6e418
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s8_vg4x1.c
@@ -0,0 +1,150 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z0_0:
+** mov (w8|w9|w10|w11), #?0
+** smlsll za\.s\[\1, 0:3\], z0\.b, z0\.b\[0\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_0_z0_z0_0, svint8_t,
+ svmls_lane_za32_s8_vg4x1 (0, z0, z0, 0),
+ svmls_lane_za32_vg4x1 (0, z0, z0, 0))
+
+/*
+** mls_lane_w0_z0_z3_1:
+** mov (w8|w9|w10|w11), w0
+** smlsll za\.s\[\1, 0:3\], z0\.b, z3\.b\[1\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w0_z0_z3_1, svint8_t,
+ svmls_lane_za32_s8_vg4x1 (w0, z0, z3, 1),
+ svmls_lane_za32_vg4x1 (w0, z0, z3, 1))
+
+/*
+** mls_lane_w7_z0_z3_2:
+** mov (w8|w9|w10|w11), w7
+** smlsll za\.s\[\1, 0:3\], z0\.b, z3\.b\[2\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w7_z0_z3_2, svint8_t,
+ svmls_lane_za32_s8_vg4x1 (w7, z0, z3, 2),
+ svmls_lane_za32_vg4x1 (w7, z0, z3, 2))
+
+/*
+** mls_lane_w8_z7_z3_3:
+** smlsll za\.s\[w8, 0:3\], z7\.b, z3\.b\[3\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8_z7_z3_3, svint8_t,
+ svmls_lane_za32_s8_vg4x1 (w8, z7, z3, 3),
+ svmls_lane_za32_vg4x1 (w8, z7, z3, 3))
+
+/*
+** mls_lane_w8_z31_z16_4:
+** mov (z[0-7])\.d, z16\.d
+**	smlsll	za\.s\[w8, 0:3\], z31\.b, \1\.b\[4\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8_z31_z16_4, svint8_t,
+ svmls_lane_za32_s8_vg4x1 (w8, z31, z16, 4),
+ svmls_lane_za32_vg4x1 (w8, z31, z16, 4))
+
+/*
+** mls_lane_w8p1_z0_z0_5:
+** add (w8|w9|w10|w11), w8, #?1
+** smlsll za\.s\[\1, 0:3\], z0\.b, z0\.b\[5\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p1_z0_z0_5, svint8_t,
+ svmls_lane_za32_s8_vg4x1 (w8 + 1, z0, z0, 5),
+ svmls_lane_za32_vg4x1 (w8 + 1, z0, z0, 5))
+
+/*
+** mls_lane_w8p2_z23_z0_6:
+** add (w8|w9|w10|w11), w8, #?2
+** smlsll za\.s\[\1, 0:3\], z23\.b, z0\.b\[6\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p2_z23_z0_6, svint8_t,
+ svmls_lane_za32_s8_vg4x1 (w8 + 2, z23, z0, 6),
+ svmls_lane_za32_vg4x1 (w8 + 2, z23, z0, 6))
+
+/*
+** mls_lane_w11p4_z23_z0_7:
+** smlsll za\.s\[w11, 4:7\], z23\.b, z0\.b\[7\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w11p4_z23_z0_7, svint8_t,
+ svmls_lane_za32_s8_vg4x1 (w11 + 4, z23, z0, 7),
+ svmls_lane_za32_vg4x1 (w11 + 4, z23, z0, 7))
+
+/*
+** mls_lane_w8p7_z7_z7_8:
+** add (w8|w9|w10|w11), w8, #?7
+** smlsll za\.s\[\1, 0:3\], z7\.b, z7\.b\[8\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p7_z7_z7_8, svint8_t,
+ svmls_lane_za32_s8_vg4x1 (w8 + 7, z7, z7, 8),
+ svmls_lane_za32_vg4x1 (w8 + 7, z7, z7, 8))
+
+/*
+** mls_lane_w11p12_z23_z0_9:
+** smlsll za\.s\[w11, 12:15\], z23\.b, z0\.b\[9\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w11p12_z23_z0_9, svint8_t,
+ svmls_lane_za32_s8_vg4x1 (w11 + 12, z23, z0, 9),
+ svmls_lane_za32_vg4x1 (w11 + 12, z23, z0, 9))
+
+/*
+** mls_lane_w8p14_z23_z0_10:
+** add (w8|w9|w10|w11), w8, #?14
+**	smlsll	za\.s\[\1, 0:3\], z23\.b, z0\.b\[10\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p14_z23_z0_10, svint8_t,
+ svmls_lane_za32_s8_vg4x1 (w8 + 14, z23, z0, 10),
+ svmls_lane_za32_vg4x1 (w8 + 14, z23, z0, 10))
+
+/*
+** mls_lane_w8p15_z7_z7_11:
+** add (w8|w9|w10|w11), w8, #?15
+** smlsll za\.s\[\1, 0:3\], z7\.b, z7\.b\[11\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p15_z7_z7_11, svint8_t,
+ svmls_lane_za32_s8_vg4x1 (w8 + 15, z7, z7, 11),
+ svmls_lane_za32_vg4x1 (w8 + 15, z7, z7, 11))
+
+/*
+** mls_lane_w8p16_z7_z7_12:
+** add (w8|w9|w10|w11), w8, #?16
+** smlsll za\.s\[\1, 0:3\], z7\.b, z7\.b\[12\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p16_z7_z7_12, svint8_t,
+ svmls_lane_za32_s8_vg4x1 (w8 + 16, z7, z7, 12),
+ svmls_lane_za32_vg4x1 (w8 + 16, z7, z7, 12))
+
+/*
+** mls_lane_w8m1_z16_z0_13:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlsll za\.s\[\1, 0:3\], z16\.b, z0\.b\[13\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8m1_z16_z0_13, svint8_t,
+ svmls_lane_za32_s8_vg4x1 (w8 - 1, z16, z0, 13),
+ svmls_lane_za32_vg4x1 (w8 - 1, z16, z0, 13))
+
+/*
+** mls_lane_w12_z0_z3_15:
+** mov (w8|w9|w10|w11), w12
+** smlsll za\.s\[\1, 0:3\], z0\.b, z3\.b\[15\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w12_z0_z3_15, svint8_t,
+ svmls_lane_za32_s8_vg4x1 (w12, z0, z3, 15),
+ svmls_lane_za32_vg4x1 (w12, z0, z3, 15))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s8_vg4x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s8_vg4x2.c
new file mode 100644
index 0000000..a00e0de
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s8_vg4x2.c
@@ -0,0 +1,122 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** smlsll za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_0_z0_z4_0, svint8x2_t, svint8_t,
+ svmls_lane_za32_s8_vg4x2 (0, z0, z4, 0),
+ svmls_lane_za32_vg4x2 (0, z0, z4, 0))
+
+/*
+** mls_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** smlsll za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z7\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0_z0_z7_1, svint8x2_t, svint8_t,
+ svmls_lane_za32_s8_vg4x2 (w0, z0, z7, 1),
+ svmls_lane_za32_vg4x2 (w0, z0, z7, 1))
+
+/*
+** mls_lane_w8_z28_z4_2:
+** smlsll za\.s\[w8, 0:3, vgx2\], {z28\.b - z29\.b}, z4\.b\[2\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z4_2, svint8x2_t, svint8_t,
+ svmls_lane_za32_s8_vg4x2 (w8, z28, z4, 2),
+ svmls_lane_za32_vg4x2 (w8, z28, z4, 2))
+
+/*
+** mls_lane_w11p4_z0_z4_3:
+** smlsll za\.s\[w11, 4:7, vgx2\], {z0\.b - z1\.b}, z4\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w11p4_z0_z4_3, svint8x2_t, svint8_t,
+ svmls_lane_za32_s8_vg4x2 (w11 + 4, z0, z4, 3),
+ svmls_lane_za32_vg4x2 (w11 + 4, z0, z4, 3))
+
+/*
+** mls_lane_w8p6_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?6
+** smlsll za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z4\.b\[4\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p6_z0_z4_4, svint8x2_t, svint8_t,
+ svmls_lane_za32_s8_vg4x2 (w8 + 6, z0, z4, 4),
+ svmls_lane_za32_vg4x2 (w8 + 6, z0, z4, 4))
+
+/*
+** mls_lane_w8p7_z0_z4_5:
+** add (w8|w9|w10|w11), w8, #?7
+** smlsll za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z4\.b\[5\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p7_z0_z4_5, svint8x2_t, svint8_t,
+ svmls_lane_za32_s8_vg4x2 (w8 + 7, z0, z4, 5),
+ svmls_lane_za32_vg4x2 (w8 + 7, z0, z4, 5))
+
+/*
+** mls_lane_w8p8_z0_z4_7:
+** add (w8|w9|w10|w11), w8, #?8
+** smlsll za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z4\.b\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p8_z0_z4_7, svint8x2_t, svint8_t,
+ svmls_lane_za32_s8_vg4x2 (w8 + 8, z0, z4, 7),
+ svmls_lane_za32_vg4x2 (w8 + 8, z0, z4, 7))
+
+/*
+** mls_lane_w0m1_z0_z4_9:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlsll za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z4\.b\[9\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0m1_z0_z4_9, svint8x2_t, svint8_t,
+ svmls_lane_za32_s8_vg4x2 (w0 - 1, z0, z4, 9),
+ svmls_lane_za32_vg4x2 (w0 - 1, z0, z4, 9))
+
+/*
+** mls_lane_w8_z4_z15_10:
+** str d15, \[sp, #?-16\]!
+** smlsll za\.s\[w8, 0:3, vgx2\], {z4\.b - z5\.b}, z15\.b\[10\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mls_lane_w8_z4_z15_10, svint8x2_t, svint8_t,
+ svmls_lane_za32_s8_vg4x2 (w8, z4, z15, 10),
+ svmls_lane_za32_vg4x2 (w8, z4, z15, 10))
+
+/*
+** mls_lane_w8_z28_z16_11:
+** mov (z[0-7]).d, z16.d
+** smlsll za\.s\[w8, 0:3, vgx2\], {z28\.b - z29\.b}, \1\.b\[11\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z16_11, svint8x2_t, svint8_t,
+ svmls_lane_za32_s8_vg4x2 (w8, z28, z16, 11),
+ svmls_lane_za32_vg4x2 (w8, z28, z16, 11))
+
+/*
+** mls_lane_w8_z17_z7_13:
+** mov [^\n]+
+** mov [^\n]+
+** smlsll za\.s\[w8, 0:3, vgx2\], [^\n]+, z7\.b\[13\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z17_z7_13, svint8x2_t, svint8_t,
+ svmls_lane_za32_s8_vg4x2 (w8, z17, z7, 13),
+ svmls_lane_za32_vg4x2 (w8, z17, z7, 13))
+
+/*
+** mls_lane_w8_z22_z4_15:
+** smlsll za\.s\[w8, 0:3, vgx2\], {z22\.b - z23\.b}, z4\.b\[15\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z22_z4_15, svint8x2_t, svint8_t,
+ svmls_lane_za32_s8_vg4x2 (w8, z22, z4, 15),
+ svmls_lane_za32_vg4x2 (w8, z22, z4, 15))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s8_vg4x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s8_vg4x4.c
new file mode 100644
index 0000000..53e9b34
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_s8_vg4x4.c
@@ -0,0 +1,128 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** smlsll za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_0_z0_z4_0, svint8x4_t, svint8_t,
+ svmls_lane_za32_s8_vg4x4 (0, z0, z4, 0),
+ svmls_lane_za32_vg4x4 (0, z0, z4, 0))
+
+/*
+** mls_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** smlsll za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z7\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0_z0_z7_1, svint8x4_t, svint8_t,
+ svmls_lane_za32_s8_vg4x4 (w0, z0, z7, 1),
+ svmls_lane_za32_vg4x4 (w0, z0, z7, 1))
+
+/*
+** mls_lane_w8_z28_z4_2:
+** smlsll za\.s\[w8, 0:3, vgx4\], {z28\.b - z31\.b}, z4\.b\[2\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z4_2, svint8x4_t, svint8_t,
+ svmls_lane_za32_s8_vg4x4 (w8, z28, z4, 2),
+ svmls_lane_za32_vg4x4 (w8, z28, z4, 2))
+
+/*
+** mls_lane_w11p4_z0_z4_7:
+** smlsll za\.s\[w11, 4:7, vgx4\], {z0\.b - z3\.b}, z4\.b\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w11p4_z0_z4_7, svint8x4_t, svint8_t,
+ svmls_lane_za32_s8_vg4x4 (w11 + 4, z0, z4, 7),
+ svmls_lane_za32_vg4x4 (w11 + 4, z0, z4, 7))
+
+/*
+** mls_lane_w8p6_z0_z4_8:
+** add (w8|w9|w10|w11), w8, #?6
+** smlsll za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z4\.b\[8\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p6_z0_z4_8, svint8x4_t, svint8_t,
+ svmls_lane_za32_s8_vg4x4 (w8 + 6, z0, z4, 8),
+ svmls_lane_za32_vg4x4 (w8 + 6, z0, z4, 8))
+
+/*
+** mls_lane_w8p7_z0_z4_9:
+** add (w8|w9|w10|w11), w8, #?7
+** smlsll za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z4\.b\[9\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p7_z0_z4_9, svint8x4_t, svint8_t,
+ svmls_lane_za32_s8_vg4x4 (w8 + 7, z0, z4, 9),
+ svmls_lane_za32_vg4x4 (w8 + 7, z0, z4, 9))
+
+/*
+** mls_lane_w8p8_z0_z4_10:
+** add (w8|w9|w10|w11), w8, #?8
+** smlsll za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z4\.b\[10\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p8_z0_z4_10, svint8x4_t, svint8_t,
+ svmls_lane_za32_s8_vg4x4 (w8 + 8, z0, z4, 10),
+ svmls_lane_za32_vg4x4 (w8 + 8, z0, z4, 10))
+
+/*
+** mls_lane_w0m1_z0_z4_11:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlsll za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z4\.b\[11\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0m1_z0_z4_11, svint8x4_t, svint8_t,
+ svmls_lane_za32_s8_vg4x4 (w0 - 1, z0, z4, 11),
+ svmls_lane_za32_vg4x4 (w0 - 1, z0, z4, 11))
+
+/*
+** mls_lane_w8_z4_z15_12:
+** str d15, \[sp, #?-16\]!
+** smlsll za\.s\[w8, 0:3, vgx4\], {z4\.b - z7\.b}, z15\.b\[12\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mls_lane_w8_z4_z15_12, svint8x4_t, svint8_t,
+ svmls_lane_za32_s8_vg4x4 (w8, z4, z15, 12),
+ svmls_lane_za32_vg4x4 (w8, z4, z15, 12))
+
+/*
+** mls_lane_w8_z28_z16_13:
+** mov (z[0-7]).d, z16.d
+** smlsll za\.s\[w8, 0:3, vgx4\], {z28\.b - z31\.b}, \1\.b\[13\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z16_13, svint8x4_t, svint8_t,
+ svmls_lane_za32_s8_vg4x4 (w8, z28, z16, 13),
+ svmls_lane_za32_vg4x4 (w8, z28, z16, 13))
+
+/*
+** mls_lane_w8_z17_z7_14:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smlsll za\.s\[w8, 0:3, vgx4\], [^\n]+, z7\.b\[14\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z17_z7_14, svint8x4_t, svint8_t,
+ svmls_lane_za32_s8_vg4x4 (w8, z17, z7, 14),
+ svmls_lane_za32_vg4x4 (w8, z17, z7, 14))
+
+/*
+** mls_lane_w8_z22_z4_15:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smlsll za\.s\[w8, 0:3, vgx4\], [^\n]+, z4\.b\[15\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z22_z4_15, svint8x4_t, svint8_t,
+ svmls_lane_za32_s8_vg4x4 (w8, z22, z4, 15),
+ svmls_lane_za32_vg4x4 (w8, z22, z4, 15))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u16_vg2x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u16_vg2x1.c
new file mode 100644
index 0000000..83fa732
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u16_vg2x1.c
@@ -0,0 +1,148 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z0_0:
+** mov (w8|w9|w10|w11), #?0
+** umlsl za\.s\[\1, 0:1\], z0\.h, z0\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_0_z0_z0_0, svuint16_t,
+ svmls_lane_za32_u16_vg2x1 (0, z0, z0, 0),
+ svmls_lane_za32_vg2x1 (0, z0, z0, 0))
+
+/*
+** mls_lane_w0_z0_z3_1:
+** mov (w8|w9|w10|w11), w0
+** umlsl za\.s\[\1, 0:1\], z0\.h, z3\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w0_z0_z3_1, svuint16_t,
+ svmls_lane_za32_u16_vg2x1 (w0, z0, z3, 1),
+ svmls_lane_za32_vg2x1 (w0, z0, z3, 1))
+
+/*
+** mls_lane_w7_z0_z3_2:
+** mov (w8|w9|w10|w11), w7
+** umlsl za\.s\[\1, 0:1\], z0\.h, z3\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w7_z0_z3_2, svuint16_t,
+ svmls_lane_za32_u16_vg2x1 (w7, z0, z3, 2),
+ svmls_lane_za32_vg2x1 (w7, z0, z3, 2))
+
+/*
+** mls_lane_w8_z7_z3_3:
+** umlsl za\.s\[w8, 0:1\], z7\.h, z3\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8_z7_z3_3, svuint16_t,
+ svmls_lane_za32_u16_vg2x1 (w8, z7, z3, 3),
+ svmls_lane_za32_vg2x1 (w8, z7, z3, 3))
+
+/*
+** mls_lane_w8_z31_z16_4:
+** mov (z[0-7])\.d, z16\.d
+** umlsl za\.s\[w8, 0:1\], z31\.h. \1\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8_z31_z16_4, svuint16_t,
+ svmls_lane_za32_u16_vg2x1 (w8, z31, z16, 4),
+ svmls_lane_za32_vg2x1 (w8, z31, z16, 4))
+
+/*
+** mls_lane_w8p1_z0_z0_5:
+** add (w8|w9|w10|w11), w8, #?1
+** umlsl za\.s\[\1, 0:1\], z0\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p1_z0_z0_5, svuint16_t,
+ svmls_lane_za32_u16_vg2x1 (w8 + 1, z0, z0, 5),
+ svmls_lane_za32_vg2x1 (w8 + 1, z0, z0, 5))
+
+/*
+** mls_lane_w8p2_z23_z0_6:
+** umlsl za\.s\[w8, 2:3\], z23\.h, z0\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p2_z23_z0_6, svuint16_t,
+ svmls_lane_za32_u16_vg2x1 (w8 + 2, z23, z0, 6),
+ svmls_lane_za32_vg2x1 (w8 + 2, z23, z0, 6))
+
+/*
+** mls_lane_w11p6_z23_z0_7:
+** umlsl za\.s\[w11, 6:7\], z23\.h, z0\.h\[7\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w11p6_z23_z0_7, svuint16_t,
+ svmls_lane_za32_u16_vg2x1 (w11 + 6, z23, z0, 7),
+ svmls_lane_za32_vg2x1 (w11 + 6, z23, z0, 7))
+
+/*
+** mls_lane_w8p7_z7_z7_0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlsl za\.s\[\1, 0:1\], z7\.h, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p7_z7_z7_0, svuint16_t,
+ svmls_lane_za32_u16_vg2x1 (w8 + 7, z7, z7, 0),
+ svmls_lane_za32_vg2x1 (w8 + 7, z7, z7, 0))
+
+/*
+** mls_lane_w11p10_z23_z0_1:
+** umlsl za\.s\[w11, 10:11\], z23\.h, z0\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w11p10_z23_z0_1, svuint16_t,
+ svmls_lane_za32_u16_vg2x1 (w11 + 10, z23, z0, 1),
+ svmls_lane_za32_vg2x1 (w11 + 10, z23, z0, 1))
+
+/*
+** mls_lane_w8p14_z23_z0_2:
+** umlsl za\.s\[w8, 14:15\], z23\.h, z0\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p14_z23_z0_2, svuint16_t,
+ svmls_lane_za32_u16_vg2x1 (w8 + 14, z23, z0, 2),
+ svmls_lane_za32_vg2x1 (w8 + 14, z23, z0, 2))
+
+/*
+** mls_lane_w8p15_z7_z7_3:
+** add (w8|w9|w10|w11), w8, #?15
+** umlsl za\.s\[\1, 0:1\], z7\.h, z7\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p15_z7_z7_3, svuint16_t,
+ svmls_lane_za32_u16_vg2x1 (w8 + 15, z7, z7, 3),
+ svmls_lane_za32_vg2x1 (w8 + 15, z7, z7, 3))
+
+/*
+** mls_lane_w8p16_z7_z7_4:
+** add (w8|w9|w10|w11), w8, #?16
+** umlsl za\.s\[\1, 0:1\], z7\.h, z7\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p16_z7_z7_4, svuint16_t,
+ svmls_lane_za32_u16_vg2x1 (w8 + 16, z7, z7, 4),
+ svmls_lane_za32_vg2x1 (w8 + 16, z7, z7, 4))
+
+/*
+** mls_lane_w8m1_z16_z0_5:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlsl za\.s\[\1, 0:1\], z16\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8m1_z16_z0_5, svuint16_t,
+ svmls_lane_za32_u16_vg2x1 (w8 - 1, z16, z0, 5),
+ svmls_lane_za32_vg2x1 (w8 - 1, z16, z0, 5))
+
+/*
+** mls_lane_w12_z0_z3_6:
+** mov (w8|w9|w10|w11), w12
+** umlsl za\.s\[\1, 0:1\], z0\.h, z3\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w12_z0_z3_6, svuint16_t,
+ svmls_lane_za32_u16_vg2x1 (w12, z0, z3, 6),
+ svmls_lane_za32_vg2x1 (w12, z0, z3, 6))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u16_vg2x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u16_vg2x2.c
new file mode 100644
index 0000000..efbcd92
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u16_vg2x2.c
@@ -0,0 +1,112 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** umlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_0_z0_z4_0, svuint16x2_t, svuint16_t,
+ svmls_lane_za32_u16_vg2x2 (0, z0, z4, 0),
+ svmls_lane_za32_vg2x2 (0, z0, z4, 0))
+
+/*
+** mls_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** umlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0_z0_z7_1, svuint16x2_t, svuint16_t,
+ svmls_lane_za32_u16_vg2x2 (w0, z0, z7, 1),
+ svmls_lane_za32_vg2x2 (w0, z0, z7, 1))
+
+/*
+** mls_lane_w8_z28_z4_2:
+** umlsl za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z4_2, svuint16x2_t, svuint16_t,
+ svmls_lane_za32_u16_vg2x2 (w8, z28, z4, 2),
+ svmls_lane_za32_vg2x2 (w8, z28, z4, 2))
+
+/*
+** mls_lane_w8p6_z0_z4_7:
+** umlsl za\.s\[w8, 6:7, vgx2\], {z0\.h - z1\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p6_z0_z4_7, svuint16x2_t, svuint16_t,
+ svmls_lane_za32_u16_vg2x2 (w8 + 6, z0, z4, 7),
+ svmls_lane_za32_vg2x2 (w8 + 6, z0, z4, 7))
+
+/*
+** mls_lane_w8p7_z0_z4_3:
+** add (w8|w9|w10|w11), w8, #?7
+** umlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p7_z0_z4_3, svuint16x2_t, svuint16_t,
+ svmls_lane_za32_u16_vg2x2 (w8 + 7, z0, z4, 3),
+ svmls_lane_za32_vg2x2 (w8 + 7, z0, z4, 3))
+
+/*
+** mls_lane_w8p8_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?8
+** umlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p8_z0_z4_4, svuint16x2_t, svuint16_t,
+ svmls_lane_za32_u16_vg2x2 (w8 + 8, z0, z4, 4),
+ svmls_lane_za32_vg2x2 (w8 + 8, z0, z4, 4))
+
+/*
+** mls_lane_w0m1_z0_z4_5:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0m1_z0_z4_5, svuint16x2_t, svuint16_t,
+ svmls_lane_za32_u16_vg2x2 (w0 - 1, z0, z4, 5),
+ svmls_lane_za32_vg2x2 (w0 - 1, z0, z4, 5))
+
+/*
+** mls_lane_w8_z4_z15_6:
+** str d15, \[sp, #?-16\]!
+** umlsl za\.s\[w8, 0:1, vgx2\], {z4\.h - z5\.h}, z15\.h\[6\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mls_lane_w8_z4_z15_6, svuint16x2_t, svuint16_t,
+ svmls_lane_za32_u16_vg2x2 (w8, z4, z15, 6),
+ svmls_lane_za32_vg2x2 (w8, z4, z15, 6))
+
+/*
+** mls_lane_w8_z28_z16_7:
+** mov (z[0-7]).d, z16.d
+** umlsl za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, \1\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z16_7, svuint16x2_t, svuint16_t,
+ svmls_lane_za32_u16_vg2x2 (w8, z28, z16, 7),
+ svmls_lane_za32_vg2x2 (w8, z28, z16, 7))
+
+/*
+** mls_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** umlsl za\.s\[w8, 0:1, vgx2\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z17_z7_0, svuint16x2_t, svuint16_t,
+ svmls_lane_za32_u16_vg2x2 (w8, z17, z7, 0),
+ svmls_lane_za32_vg2x2 (w8, z17, z7, 0))
+
+/*
+** mls_lane_w8_z22_z4_1:
+** umlsl za\.s\[w8, 0:1, vgx2\], {z22\.h - z23\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z22_z4_1, svuint16x2_t, svuint16_t,
+ svmls_lane_za32_u16_vg2x2 (w8, z22, z4, 1),
+ svmls_lane_za32_vg2x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u16_vg2x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u16_vg2x4.c
new file mode 100644
index 0000000..76ac966
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u16_vg2x4.c
@@ -0,0 +1,118 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** umlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_0_z0_z4_0, svuint16x4_t, svuint16_t,
+ svmls_lane_za32_u16_vg2x4 (0, z0, z4, 0),
+ svmls_lane_za32_vg2x4 (0, z0, z4, 0))
+
+/*
+** mls_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** umlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0_z0_z7_1, svuint16x4_t, svuint16_t,
+ svmls_lane_za32_u16_vg2x4 (w0, z0, z7, 1),
+ svmls_lane_za32_vg2x4 (w0, z0, z7, 1))
+
+/*
+** mls_lane_w8_z28_z4_2:
+** umlsl za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z4_2, svuint16x4_t, svuint16_t,
+ svmls_lane_za32_u16_vg2x4 (w8, z28, z4, 2),
+ svmls_lane_za32_vg2x4 (w8, z28, z4, 2))
+
+/*
+** mls_lane_w8p6_z0_z4_7:
+** umlsl za\.s\[w8, 6:7, vgx4\], {z0\.h - z3\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p6_z0_z4_7, svuint16x4_t, svuint16_t,
+ svmls_lane_za32_u16_vg2x4 (w8 + 6, z0, z4, 7),
+ svmls_lane_za32_vg2x4 (w8 + 6, z0, z4, 7))
+
+/*
+** mls_lane_w8p7_z0_z4_3:
+** add (w8|w9|w10|w11), w8, #?7
+** umlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p7_z0_z4_3, svuint16x4_t, svuint16_t,
+ svmls_lane_za32_u16_vg2x4 (w8 + 7, z0, z4, 3),
+ svmls_lane_za32_vg2x4 (w8 + 7, z0, z4, 3))
+
+/*
+** mls_lane_w8p8_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?8
+** umlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p8_z0_z4_4, svuint16x4_t, svuint16_t,
+ svmls_lane_za32_u16_vg2x4 (w8 + 8, z0, z4, 4),
+ svmls_lane_za32_vg2x4 (w8 + 8, z0, z4, 4))
+
+/*
+** mls_lane_w0m1_z0_z4_5:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0m1_z0_z4_5, svuint16x4_t, svuint16_t,
+ svmls_lane_za32_u16_vg2x4 (w0 - 1, z0, z4, 5),
+ svmls_lane_za32_vg2x4 (w0 - 1, z0, z4, 5))
+
+/*
+** mls_lane_w8_z4_z15_6:
+** str d15, \[sp, #?-16\]!
+** umlsl za\.s\[w8, 0:1, vgx4\], {z4\.h - z7\.h}, z15\.h\[6\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mls_lane_w8_z4_z15_6, svuint16x4_t, svuint16_t,
+ svmls_lane_za32_u16_vg2x4 (w8, z4, z15, 6),
+ svmls_lane_za32_vg2x4 (w8, z4, z15, 6))
+
+/*
+** mls_lane_w8_z28_z16_7:
+** mov (z[0-7]).d, z16.d
+** umlsl za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, \1\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z16_7, svuint16x4_t, svuint16_t,
+ svmls_lane_za32_u16_vg2x4 (w8, z28, z16, 7),
+ svmls_lane_za32_vg2x4 (w8, z28, z16, 7))
+
+/*
+** mls_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umlsl za\.s\[w8, 0:1, vgx4\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z17_z7_0, svuint16x4_t, svuint16_t,
+ svmls_lane_za32_u16_vg2x4 (w8, z17, z7, 0),
+ svmls_lane_za32_vg2x4 (w8, z17, z7, 0))
+
+/*
+** mls_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umlsl za\.s\[w8, 0:1, vgx4\], [^\n]+, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z22_z4_1, svuint16x4_t, svuint16_t,
+ svmls_lane_za32_u16_vg2x4 (w8, z22, z4, 1),
+ svmls_lane_za32_vg2x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u8_vg4x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u8_vg4x1.c
new file mode 100644
index 0000000..0b70449
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u8_vg4x1.c
@@ -0,0 +1,150 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z0_0:
+** mov (w8|w9|w10|w11), #?0
+** umlsll za\.s\[\1, 0:3\], z0\.b, z0\.b\[0\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_0_z0_z0_0, svuint8_t,
+ svmls_lane_za32_u8_vg4x1 (0, z0, z0, 0),
+ svmls_lane_za32_vg4x1 (0, z0, z0, 0))
+
+/*
+** mls_lane_w0_z0_z3_1:
+** mov (w8|w9|w10|w11), w0
+** umlsll za\.s\[\1, 0:3\], z0\.b, z3\.b\[1\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w0_z0_z3_1, svuint8_t,
+ svmls_lane_za32_u8_vg4x1 (w0, z0, z3, 1),
+ svmls_lane_za32_vg4x1 (w0, z0, z3, 1))
+
+/*
+** mls_lane_w7_z0_z3_2:
+** mov (w8|w9|w10|w11), w7
+** umlsll za\.s\[\1, 0:3\], z0\.b, z3\.b\[2\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w7_z0_z3_2, svuint8_t,
+ svmls_lane_za32_u8_vg4x1 (w7, z0, z3, 2),
+ svmls_lane_za32_vg4x1 (w7, z0, z3, 2))
+
+/*
+** mls_lane_w8_z7_z3_3:
+** umlsll za\.s\[w8, 0:3\], z7\.b, z3\.b\[3\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8_z7_z3_3, svuint8_t,
+ svmls_lane_za32_u8_vg4x1 (w8, z7, z3, 3),
+ svmls_lane_za32_vg4x1 (w8, z7, z3, 3))
+
+/*
+** mls_lane_w8_z31_z16_4:
+** mov (z[0-7])\.d, z16\.d
+** umlsll za\.s\[w8, 0:3\], z31\.b. \1\.b\[4\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8_z31_z16_4, svuint8_t,
+ svmls_lane_za32_u8_vg4x1 (w8, z31, z16, 4),
+ svmls_lane_za32_vg4x1 (w8, z31, z16, 4))
+
+/*
+** mls_lane_w8p1_z0_z0_5:
+** add (w8|w9|w10|w11), w8, #?1
+** umlsll za\.s\[\1, 0:3\], z0\.b, z0\.b\[5\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p1_z0_z0_5, svuint8_t,
+ svmls_lane_za32_u8_vg4x1 (w8 + 1, z0, z0, 5),
+ svmls_lane_za32_vg4x1 (w8 + 1, z0, z0, 5))
+
+/*
+** mls_lane_w8p2_z23_z0_6:
+** add (w8|w9|w10|w11), w8, #?2
+** umlsll za\.s\[\1, 0:3\], z23\.b, z0\.b\[6\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p2_z23_z0_6, svuint8_t,
+ svmls_lane_za32_u8_vg4x1 (w8 + 2, z23, z0, 6),
+ svmls_lane_za32_vg4x1 (w8 + 2, z23, z0, 6))
+
+/*
+** mls_lane_w11p4_z23_z0_7:
+** umlsll za\.s\[w11, 4:7\], z23\.b, z0\.b\[7\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w11p4_z23_z0_7, svuint8_t,
+ svmls_lane_za32_u8_vg4x1 (w11 + 4, z23, z0, 7),
+ svmls_lane_za32_vg4x1 (w11 + 4, z23, z0, 7))
+
+/*
+** mls_lane_w8p7_z7_z7_8:
+** add (w8|w9|w10|w11), w8, #?7
+** umlsll za\.s\[\1, 0:3\], z7\.b, z7\.b\[8\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p7_z7_z7_8, svuint8_t,
+ svmls_lane_za32_u8_vg4x1 (w8 + 7, z7, z7, 8),
+ svmls_lane_za32_vg4x1 (w8 + 7, z7, z7, 8))
+
+/*
+** mls_lane_w11p12_z23_z0_9:
+** umlsll za\.s\[w11, 12:15\], z23\.b, z0\.b\[9\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w11p12_z23_z0_9, svuint8_t,
+ svmls_lane_za32_u8_vg4x1 (w11 + 12, z23, z0, 9),
+ svmls_lane_za32_vg4x1 (w11 + 12, z23, z0, 9))
+
+/*
+** mls_lane_w8p14_z23_z0_10:
+** add (w8|w9|w10|w11), w8, #?14
+** umlsll za\.s\[w8, 0:3\], z23\.b, z0\.b\[10\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p14_z23_z0_10, svuint8_t,
+ svmls_lane_za32_u8_vg4x1 (w8 + 14, z23, z0, 10),
+ svmls_lane_za32_vg4x1 (w8 + 14, z23, z0, 10))
+
+/*
+** mls_lane_w8p15_z7_z7_11:
+** add (w8|w9|w10|w11), w8, #?15
+** umlsll za\.s\[\1, 0:3\], z7\.b, z7\.b\[11\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p15_z7_z7_11, svuint8_t,
+ svmls_lane_za32_u8_vg4x1 (w8 + 15, z7, z7, 11),
+ svmls_lane_za32_vg4x1 (w8 + 15, z7, z7, 11))
+
+/*
+** mls_lane_w8p16_z7_z7_12:
+** add (w8|w9|w10|w11), w8, #?16
+** umlsll za\.s\[\1, 0:3\], z7\.b, z7\.b\[12\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p16_z7_z7_12, svuint8_t,
+ svmls_lane_za32_u8_vg4x1 (w8 + 16, z7, z7, 12),
+ svmls_lane_za32_vg4x1 (w8 + 16, z7, z7, 12))
+
+/*
+** mls_lane_w8m1_z16_z0_13:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlsll za\.s\[\1, 0:3\], z16\.b, z0\.b\[13\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8m1_z16_z0_13, svuint8_t,
+ svmls_lane_za32_u8_vg4x1 (w8 - 1, z16, z0, 13),
+ svmls_lane_za32_vg4x1 (w8 - 1, z16, z0, 13))
+
+/*
+** mls_lane_w12_z0_z3_15:
+** mov (w8|w9|w10|w11), w12
+** umlsll za\.s\[\1, 0:3\], z0\.b, z3\.b\[15\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w12_z0_z3_15, svuint8_t,
+ svmls_lane_za32_u8_vg4x1 (w12, z0, z3, 15),
+ svmls_lane_za32_vg4x1 (w12, z0, z3, 15))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u8_vg4x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u8_vg4x2.c
new file mode 100644
index 0000000..4937c0c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u8_vg4x2.c
@@ -0,0 +1,122 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** umlsll za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_0_z0_z4_0, svuint8x2_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x2 (0, z0, z4, 0),
+ svmls_lane_za32_vg4x2 (0, z0, z4, 0))
+
+/*
+** mls_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** umlsll za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z7\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0_z0_z7_1, svuint8x2_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x2 (w0, z0, z7, 1),
+ svmls_lane_za32_vg4x2 (w0, z0, z7, 1))
+
+/*
+** mls_lane_w8_z28_z4_2:
+** umlsll za\.s\[w8, 0:3, vgx2\], {z28\.b - z29\.b}, z4\.b\[2\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z4_2, svuint8x2_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x2 (w8, z28, z4, 2),
+ svmls_lane_za32_vg4x2 (w8, z28, z4, 2))
+
+/*
+** mls_lane_w11p4_z0_z4_3:
+** umlsll za\.s\[w11, 4:7, vgx2\], {z0\.b - z1\.b}, z4\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w11p4_z0_z4_3, svuint8x2_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x2 (w11 + 4, z0, z4, 3),
+ svmls_lane_za32_vg4x2 (w11 + 4, z0, z4, 3))
+
+/*
+** mls_lane_w8p6_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?6
+** umlsll za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z4\.b\[4\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p6_z0_z4_4, svuint8x2_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x2 (w8 + 6, z0, z4, 4),
+ svmls_lane_za32_vg4x2 (w8 + 6, z0, z4, 4))
+
+/*
+** mls_lane_w8p7_z0_z4_5:
+** add (w8|w9|w10|w11), w8, #?7
+** umlsll za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z4\.b\[5\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p7_z0_z4_5, svuint8x2_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x2 (w8 + 7, z0, z4, 5),
+ svmls_lane_za32_vg4x2 (w8 + 7, z0, z4, 5))
+
+/*
+** mls_lane_w8p8_z0_z4_7:
+** add (w8|w9|w10|w11), w8, #?8
+** umlsll za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z4\.b\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p8_z0_z4_7, svuint8x2_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x2 (w8 + 8, z0, z4, 7),
+ svmls_lane_za32_vg4x2 (w8 + 8, z0, z4, 7))
+
+/*
+** mls_lane_w0m1_z0_z4_9:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlsll za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, z4\.b\[9\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0m1_z0_z4_9, svuint8x2_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x2 (w0 - 1, z0, z4, 9),
+ svmls_lane_za32_vg4x2 (w0 - 1, z0, z4, 9))
+
+/*
+** mls_lane_w8_z4_z15_10:
+** str d15, \[sp, #?-16\]!
+** umlsll za\.s\[w8, 0:3, vgx2\], {z4\.b - z5\.b}, z15\.b\[10\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mls_lane_w8_z4_z15_10, svuint8x2_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x2 (w8, z4, z15, 10),
+ svmls_lane_za32_vg4x2 (w8, z4, z15, 10))
+
+/*
+** mls_lane_w8_z28_z16_11:
+** mov (z[0-7]).d, z16.d
+** umlsll za\.s\[w8, 0:3, vgx2\], {z28\.b - z29\.b}, \1\.b\[11\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z16_11, svuint8x2_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x2 (w8, z28, z16, 11),
+ svmls_lane_za32_vg4x2 (w8, z28, z16, 11))
+
+/*
+** mls_lane_w8_z17_z7_13:
+** mov [^\n]+
+** mov [^\n]+
+** umlsll za\.s\[w8, 0:3, vgx2\], [^\n]+, z7\.b\[13\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z17_z7_13, svuint8x2_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x2 (w8, z17, z7, 13),
+ svmls_lane_za32_vg4x2 (w8, z17, z7, 13))
+
+/*
+** mls_lane_w8_z22_z4_15:
+** umlsll za\.s\[w8, 0:3, vgx2\], {z22\.b - z23\.b}, z4\.b\[15\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z22_z4_15, svuint8x2_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x2 (w8, z22, z4, 15),
+ svmls_lane_za32_vg4x2 (w8, z22, z4, 15))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u8_vg4x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u8_vg4x4.c
new file mode 100644
index 0000000..05b3615
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za32_u8_vg4x4.c
@@ -0,0 +1,128 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** umlsll za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_0_z0_z4_0, svuint8x4_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x4 (0, z0, z4, 0),
+ svmls_lane_za32_vg4x4 (0, z0, z4, 0))
+
+/*
+** mls_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** umlsll za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z7\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0_z0_z7_1, svuint8x4_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x4 (w0, z0, z7, 1),
+ svmls_lane_za32_vg4x4 (w0, z0, z7, 1))
+
+/*
+** mls_lane_w8_z28_z4_2:
+** umlsll za\.s\[w8, 0:3, vgx4\], {z28\.b - z31\.b}, z4\.b\[2\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z4_2, svuint8x4_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x4 (w8, z28, z4, 2),
+ svmls_lane_za32_vg4x4 (w8, z28, z4, 2))
+
+/*
+** mls_lane_w11p4_z0_z4_7:
+** umlsll za\.s\[w11, 4:7, vgx4\], {z0\.b - z3\.b}, z4\.b\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w11p4_z0_z4_7, svuint8x4_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x4 (w11 + 4, z0, z4, 7),
+ svmls_lane_za32_vg4x4 (w11 + 4, z0, z4, 7))
+
+/*
+** mls_lane_w8p6_z0_z4_8:
+** add (w8|w9|w10|w11), w8, #?6
+** umlsll za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z4\.b\[8\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p6_z0_z4_8, svuint8x4_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x4 (w8 + 6, z0, z4, 8),
+ svmls_lane_za32_vg4x4 (w8 + 6, z0, z4, 8))
+
+/*
+** mls_lane_w8p7_z0_z4_9:
+** add (w8|w9|w10|w11), w8, #?7
+** umlsll za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z4\.b\[9\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p7_z0_z4_9, svuint8x4_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x4 (w8 + 7, z0, z4, 9),
+ svmls_lane_za32_vg4x4 (w8 + 7, z0, z4, 9))
+
+/*
+** mls_lane_w8p8_z0_z4_10:
+** add (w8|w9|w10|w11), w8, #?8
+** umlsll za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z4\.b\[10\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p8_z0_z4_10, svuint8x4_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x4 (w8 + 8, z0, z4, 10),
+ svmls_lane_za32_vg4x4 (w8 + 8, z0, z4, 10))
+
+/*
+** mls_lane_w0m1_z0_z4_11:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlsll za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, z4\.b\[11\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0m1_z0_z4_11, svuint8x4_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x4 (w0 - 1, z0, z4, 11),
+ svmls_lane_za32_vg4x4 (w0 - 1, z0, z4, 11))
+
+/*
+** mls_lane_w8_z4_z15_12:
+** str d15, \[sp, #?-16\]!
+** umlsll za\.s\[w8, 0:3, vgx4\], {z4\.b - z7\.b}, z15\.b\[12\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mls_lane_w8_z4_z15_12, svuint8x4_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x4 (w8, z4, z15, 12),
+ svmls_lane_za32_vg4x4 (w8, z4, z15, 12))
+
+/*
+** mls_lane_w8_z28_z16_13:
+** mov (z[0-7]).d, z16.d
+** umlsll za\.s\[w8, 0:3, vgx4\], {z28\.b - z31\.b}, \1\.b\[13\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z16_13, svuint8x4_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x4 (w8, z28, z16, 13),
+ svmls_lane_za32_vg4x4 (w8, z28, z16, 13))
+
+/*
+** mls_lane_w8_z17_z7_14:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umlsll za\.s\[w8, 0:3, vgx4\], [^\n]+, z7\.b\[14\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z17_z7_14, svuint8x4_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x4 (w8, z17, z7, 14),
+ svmls_lane_za32_vg4x4 (w8, z17, z7, 14))
+
+/*
+** mls_lane_w8_z22_z4_15:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umlsll za\.s\[w8, 0:3, vgx4\], [^\n]+, z4\.b\[15\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z22_z4_15, svuint8x4_t, svuint8_t,
+ svmls_lane_za32_u8_vg4x4 (w8, z22, z4, 15),
+ svmls_lane_za32_vg4x4 (w8, z22, z4, 15))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_f64_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_f64_vg1x2.c
new file mode 100644
index 0000000..72ea604
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_f64_vg1x2.c
@@ -0,0 +1,104 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-f64f64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** fmls za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}, z4\.d\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_0_z0_z4_0, svfloat64x2_t, svfloat64_t,
+ svmls_lane_za64_f64_vg1x2 (0, z0, z4, 0),
+ svmls_lane_za64_vg1x2 (0, z0, z4, 0))
+
+/*
+** mls_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** fmls za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}, z7\.d\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0_z0_z7_1, svfloat64x2_t, svfloat64_t,
+ svmls_lane_za64_f64_vg1x2 (w0, z0, z7, 1),
+ svmls_lane_za64_vg1x2 (w0, z0, z7, 1))
+
+/*
+** mls_lane_w8_z28_z4_2:
+** fmls za\.d\[w8, 0, vgx2\], {z28\.d - z29\.d}, z4\.d\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z4_2, svfloat64x2_t, svfloat64_t,
+ svmls_lane_za64_f64_vg1x2 (w8, z28, z4, 0),
+ svmls_lane_za64_vg1x2 (w8, z28, z4, 0))
+
+/*
+** mls_lane_w8p7_z0_z4_3:
+** fmls za\.d\[w8, 7, vgx2\], {z0\.d - z1\.d}, z4\.d\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p7_z0_z4_3, svfloat64x2_t, svfloat64_t,
+ svmls_lane_za64_f64_vg1x2 (w8 + 7, z0, z4, 1),
+ svmls_lane_za64_vg1x2 (w8 + 7, z0, z4, 1))
+
+/*
+** mls_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** fmls za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}, z4\.d\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p8_z0_z4_0, svfloat64x2_t, svfloat64_t,
+ svmls_lane_za64_f64_vg1x2 (w8 + 8, z0, z4, 0),
+ svmls_lane_za64_vg1x2 (w8 + 8, z0, z4, 0))
+
+/*
+** mls_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmls za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}, z4\.d\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0m1_z0_z4_1, svfloat64x2_t, svfloat64_t,
+ svmls_lane_za64_f64_vg1x2 (w0 - 1, z0, z4, 1),
+ svmls_lane_za64_vg1x2 (w0 - 1, z0, z4, 1))
+
+/*
+** mls_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** fmls za\.d\[w8, 0, vgx2\], {z4\.d - z5\.d}, z15\.d\[0\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mls_lane_w8_z4_z15_2, svfloat64x2_t, svfloat64_t,
+ svmls_lane_za64_f64_vg1x2 (w8, z4, z15, 0),
+ svmls_lane_za64_vg1x2 (w8, z4, z15, 0))
+
+/*
+** mls_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** fmls za\.d\[w8, 0, vgx2\], {z28\.d - z29\.d}, \1\.d\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z16_3, svfloat64x2_t, svfloat64_t,
+ svmls_lane_za64_f64_vg1x2 (w8, z28, z16, 1),
+ svmls_lane_za64_vg1x2 (w8, z28, z16, 1))
+
+/*
+** mls_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** fmls za\.d\[w8, 0, vgx2\], [^\n]+, z7\.d\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z17_z7_0, svfloat64x2_t, svfloat64_t,
+ svmls_lane_za64_f64_vg1x2 (w8, z17, z7, 0),
+ svmls_lane_za64_vg1x2 (w8, z17, z7, 0))
+
+/*
+** mls_lane_w8_z22_z4_1:
+** fmls za\.d\[w8, 0, vgx2\], {z22\.d - z23\.d}, z4\.d\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z22_z4_1, svfloat64x2_t, svfloat64_t,
+ svmls_lane_za64_f64_vg1x2 (w8, z22, z4, 1),
+ svmls_lane_za64_vg1x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_f64_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_f64_vg1x4.c
new file mode 100644
index 0000000..ce1de1d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_f64_vg1x4.c
@@ -0,0 +1,110 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-f64f64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** fmls za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}, z4\.d\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_0_z0_z4_0, svfloat64x4_t, svfloat64_t,
+ svmls_lane_za64_f64_vg1x4 (0, z0, z4, 0),
+ svmls_lane_za64_vg1x4 (0, z0, z4, 0))
+
+/*
+** mls_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** fmls za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}, z7\.d\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0_z0_z7_1, svfloat64x4_t, svfloat64_t,
+ svmls_lane_za64_f64_vg1x4 (w0, z0, z7, 1),
+ svmls_lane_za64_vg1x4 (w0, z0, z7, 1))
+
+/*
+** mls_lane_w8_z28_z4_2:
+** fmls za\.d\[w8, 0, vgx4\], {z28\.d - z31\.d}, z4\.d\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z4_2, svfloat64x4_t, svfloat64_t,
+ svmls_lane_za64_f64_vg1x4 (w8, z28, z4, 0),
+ svmls_lane_za64_vg1x4 (w8, z28, z4, 0))
+
+/*
+** mls_lane_w8p7_z0_z4_3:
+** fmls za\.d\[w8, 7, vgx4\], {z0\.d - z3\.d}, z4\.d\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p7_z0_z4_3, svfloat64x4_t, svfloat64_t,
+ svmls_lane_za64_f64_vg1x4 (w8 + 7, z0, z4, 1),
+ svmls_lane_za64_vg1x4 (w8 + 7, z0, z4, 1))
+
+/*
+** mls_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** fmls za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}, z4\.d\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p8_z0_z4_0, svfloat64x4_t, svfloat64_t,
+ svmls_lane_za64_f64_vg1x4 (w8 + 8, z0, z4, 0),
+ svmls_lane_za64_vg1x4 (w8 + 8, z0, z4, 0))
+
+/*
+** mls_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmls za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}, z4\.d\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0m1_z0_z4_1, svfloat64x4_t, svfloat64_t,
+ svmls_lane_za64_f64_vg1x4 (w0 - 1, z0, z4, 1),
+ svmls_lane_za64_vg1x4 (w0 - 1, z0, z4, 1))
+
+/*
+** mls_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** fmls za\.d\[w8, 0, vgx4\], {z4\.d - z7\.d}, z15\.d\[0\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mls_lane_w8_z4_z15_2, svfloat64x4_t, svfloat64_t,
+ svmls_lane_za64_f64_vg1x4 (w8, z4, z15, 0),
+ svmls_lane_za64_vg1x4 (w8, z4, z15, 0))
+
+/*
+** mls_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** fmls za\.d\[w8, 0, vgx4\], {z28\.d - z31\.d}, \1\.d\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z16_3, svfloat64x4_t, svfloat64_t,
+ svmls_lane_za64_f64_vg1x4 (w8, z28, z16, 1),
+ svmls_lane_za64_vg1x4 (w8, z28, z16, 1))
+
+/*
+** mls_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmls za\.d\[w8, 0, vgx4\], [^\n]+, z7\.d\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z17_z7_0, svfloat64x4_t, svfloat64_t,
+ svmls_lane_za64_f64_vg1x4 (w8, z17, z7, 0),
+ svmls_lane_za64_vg1x4 (w8, z17, z7, 0))
+
+/*
+** mls_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fmls za\.d\[w8, 0, vgx4\], [^\n]+, z4\.d\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z22_z4_1, svfloat64x4_t, svfloat64_t,
+ svmls_lane_za64_f64_vg1x4 (w8, z22, z4, 1),
+ svmls_lane_za64_vg1x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_s16_vg4x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_s16_vg4x1.c
new file mode 100644
index 0000000..c136438
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_s16_vg4x1.c
@@ -0,0 +1,152 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z0_0:
+** mov (w8|w9|w10|w11), #?0
+** smlsll za\.d\[\1, 0:3\], z0\.h, z0\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_0_z0_z0_0, svint16_t,
+ svmls_lane_za64_s16_vg4x1 (0, z0, z0, 0),
+ svmls_lane_za64_vg4x1 (0, z0, z0, 0))
+
+/*
+** mls_lane_w0_z0_z3_1:
+** mov (w8|w9|w10|w11), w0
+** smlsll za\.d\[\1, 0:3\], z0\.h, z3\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w0_z0_z3_1, svint16_t,
+ svmls_lane_za64_s16_vg4x1 (w0, z0, z3, 1),
+ svmls_lane_za64_vg4x1 (w0, z0, z3, 1))
+
+/*
+** mls_lane_w7_z0_z3_2:
+** mov (w8|w9|w10|w11), w7
+** smlsll za\.d\[\1, 0:3\], z0\.h, z3\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w7_z0_z3_2, svint16_t,
+ svmls_lane_za64_s16_vg4x1 (w7, z0, z3, 2),
+ svmls_lane_za64_vg4x1 (w7, z0, z3, 2))
+
+/*
+** mls_lane_w8_z7_z3_3:
+** smlsll za\.d\[w8, 0:3\], z7\.h, z3\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8_z7_z3_3, svint16_t,
+ svmls_lane_za64_s16_vg4x1 (w8, z7, z3, 3),
+ svmls_lane_za64_vg4x1 (w8, z7, z3, 3))
+
+/*
+** mls_lane_w8_z31_z16_4:
+** mov (z[0-7])\.d, z16\.d
+** smlsll za\.d\[w8, 0:3\], z31\.h. \1\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8_z31_z16_4, svint16_t,
+ svmls_lane_za64_s16_vg4x1 (w8, z31, z16, 4),
+ svmls_lane_za64_vg4x1 (w8, z31, z16, 4))
+
+/*
+** mls_lane_w8p1_z0_z0_5:
+** add (w8|w9|w10|w11), w8, #?1
+** smlsll za\.d\[\1, 0:3\], z0\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p1_z0_z0_5, svint16_t,
+ svmls_lane_za64_s16_vg4x1 (w8 + 1, z0, z0, 5),
+ svmls_lane_za64_vg4x1 (w8 + 1, z0, z0, 5))
+
+/*
+** mls_lane_w8p2_z23_z0_6:
+** add (w8|w9|w10|w11), w8, #?2
+** smlsll za\.d\[\1, 0:3\], z23\.h, z0\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p2_z23_z0_6, svint16_t,
+ svmls_lane_za64_s16_vg4x1 (w8 + 2, z23, z0, 6),
+ svmls_lane_za64_vg4x1 (w8 + 2, z23, z0, 6))
+
+/*
+** mls_lane_w11p4_z23_z0_7:
+** smlsll za\.d\[w11, 4:7\], z23\.h, z0\.h\[7\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w11p4_z23_z0_7, svint16_t,
+ svmls_lane_za64_s16_vg4x1 (w11 + 4, z23, z0, 7),
+ svmls_lane_za64_vg4x1 (w11 + 4, z23, z0, 7))
+
+/*
+** mls_lane_w8p7_z7_z7_0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlsll za\.d\[\1, 0:3\], z7\.h, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p7_z7_z7_0, svint16_t,
+ svmls_lane_za64_s16_vg4x1 (w8 + 7, z7, z7, 0),
+ svmls_lane_za64_vg4x1 (w8 + 7, z7, z7, 0))
+
+/*
+** mls_lane_w11p12_z23_z0_1:
+** smlsll za\.d\[w11, 12:15\], z23\.h, z0\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w11p12_z23_z0_1, svint16_t,
+ svmls_lane_za64_s16_vg4x1 (w11 + 12, z23, z0, 1),
+ svmls_lane_za64_vg4x1 (w11 + 12, z23, z0, 1))
+
+/*
+** mls_lane_w8p14_z23_z0_2:
+** add (w8|w9|w10|w11), w8, #?14
+** smlsll za\.d\[w8, 0:3\], z23\.h, z0\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p14_z23_z0_2, svint16_t,
+ svmls_lane_za64_s16_vg4x1 (w8 + 14, z23, z0, 2),
+ svmls_lane_za64_vg4x1 (w8 + 14, z23, z0, 2))
+
+/*
+** mls_lane_w8p15_z7_z7_3:
+** add (w8|w9|w10|w11), w8, #?15
+** smlsll za\.d\[\1, 0:3\], z7\.h, z7\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p15_z7_z7_3, svint16_t,
+ svmls_lane_za64_s16_vg4x1 (w8 + 15, z7, z7, 3),
+ svmls_lane_za64_vg4x1 (w8 + 15, z7, z7, 3))
+
+/*
+** mls_lane_w8p16_z7_z7_4:
+** add (w8|w9|w10|w11), w8, #?16
+** smlsll za\.d\[\1, 0:3\], z7\.h, z7\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p16_z7_z7_4, svint16_t,
+ svmls_lane_za64_s16_vg4x1 (w8 + 16, z7, z7, 4),
+ svmls_lane_za64_vg4x1 (w8 + 16, z7, z7, 4))
+
+/*
+** mls_lane_w8m1_z16_z0_5:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlsll za\.d\[\1, 0:3\], z16\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8m1_z16_z0_5, svint16_t,
+ svmls_lane_za64_s16_vg4x1 (w8 - 1, z16, z0, 5),
+ svmls_lane_za64_vg4x1 (w8 - 1, z16, z0, 5))
+
+/*
+** mls_lane_w12_z0_z3_6:
+** mov (w8|w9|w10|w11), w12
+** smlsll za\.d\[\1, 0:3\], z0\.h, z3\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w12_z0_z3_6, svint16_t,
+ svmls_lane_za64_s16_vg4x1 (w12, z0, z3, 6),
+ svmls_lane_za64_vg4x1 (w12, z0, z3, 6))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_s16_vg4x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_s16_vg4x2.c
new file mode 100644
index 0000000..ffc50d7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_s16_vg4x2.c
@@ -0,0 +1,124 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** smlsll za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_0_z0_z4_0, svint16x2_t, svint16_t,
+ svmls_lane_za64_s16_vg4x2 (0, z0, z4, 0),
+ svmls_lane_za64_vg4x2 (0, z0, z4, 0))
+
+/*
+** mls_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** smlsll za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0_z0_z7_1, svint16x2_t, svint16_t,
+ svmls_lane_za64_s16_vg4x2 (w0, z0, z7, 1),
+ svmls_lane_za64_vg4x2 (w0, z0, z7, 1))
+
+/*
+** mls_lane_w8_z28_z4_2:
+** smlsll za\.d\[w8, 0:3, vgx2\], {z28\.h - z29\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z4_2, svint16x2_t, svint16_t,
+ svmls_lane_za64_s16_vg4x2 (w8, z28, z4, 2),
+ svmls_lane_za64_vg4x2 (w8, z28, z4, 2))
+
+/*
+** mls_lane_w11p4_z0_z4_3:
+** smlsll za\.d\[w11, 4:7, vgx2\], {z0\.h - z1\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w11p4_z0_z4_3, svint16x2_t, svint16_t,
+ svmls_lane_za64_s16_vg4x2 (w11 + 4, z0, z4, 3),
+ svmls_lane_za64_vg4x2 (w11 + 4, z0, z4, 3))
+
+/*
+** mls_lane_w8p6_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?6
+** smlsll za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p6_z0_z4_4, svint16x2_t, svint16_t,
+ svmls_lane_za64_s16_vg4x2 (w8 + 6, z0, z4, 4),
+ svmls_lane_za64_vg4x2 (w8 + 6, z0, z4, 4))
+
+/*
+** mls_lane_w8p7_z0_z4_5:
+** add (w8|w9|w10|w11), w8, #?7
+** smlsll za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p7_z0_z4_5, svint16x2_t, svint16_t,
+ svmls_lane_za64_s16_vg4x2 (w8 + 7, z0, z4, 5),
+ svmls_lane_za64_vg4x2 (w8 + 7, z0, z4, 5))
+
+/*
+** mls_lane_w8p8_z0_z4_6:
+** add (w8|w9|w10|w11), w8, #?8
+** smlsll za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z4\.h\[6\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p8_z0_z4_6, svint16x2_t, svint16_t,
+ svmls_lane_za64_s16_vg4x2 (w8 + 8, z0, z4, 6),
+ svmls_lane_za64_vg4x2 (w8 + 8, z0, z4, 6))
+
+/*
+** mls_lane_w0m1_z0_z4_7:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlsll za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0m1_z0_z4_7, svint16x2_t, svint16_t,
+ svmls_lane_za64_s16_vg4x2 (w0 - 1, z0, z4, 7),
+ svmls_lane_za64_vg4x2 (w0 - 1, z0, z4, 7))
+
+/*
+** mls_lane_w8_z4_z15_0:
+** str d15, \[sp, #?-16\]!
+** smlsll za\.d\[w8, 0:3, vgx2\], {z4\.h - z5\.h}, z15\.h\[0\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mls_lane_w8_z4_z15_0, svint16x2_t, svint16_t,
+ svmls_lane_za64_s16_vg4x2 (w8, z4, z15, 0),
+ svmls_lane_za64_vg4x2 (w8, z4, z15, 0))
+
+/*
+** mls_lane_w8_z28_z16_1:
+** mov (z[0-7]).d, z16.d
+** smlsll za\.d\[w8, 0:3, vgx2\], {z28\.h - z29\.h}, \1\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z16_1, svint16x2_t, svint16_t,
+ svmls_lane_za64_s16_vg4x2 (w8, z28, z16, 1),
+ svmls_lane_za64_vg4x2 (w8, z28, z16, 1))
+
+/*
+** mls_lane_w8_z17_z7_3:
+** mov [^\n]+
+** mov [^\n]+
+** smlsll za\.d\[w8, 0:3, vgx2\], [^\n]+, z7\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z17_z7_3, svint16x2_t, svint16_t,
+ svmls_lane_za64_s16_vg4x2 (w8, z17, z7, 3),
+ svmls_lane_za64_vg4x2 (w8, z17, z7, 3))
+
+/*
+** mls_lane_w8_z22_z4_5:
+** smlsll za\.d\[w8, 0:3, vgx2\], {z22\.h - z23\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z22_z4_5, svint16x2_t, svint16_t,
+ svmls_lane_za64_s16_vg4x2 (w8, z22, z4, 5),
+ svmls_lane_za64_vg4x2 (w8, z22, z4, 5))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_s16_vg4x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_s16_vg4x4.c
new file mode 100644
index 0000000..89aa97b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_s16_vg4x4.c
@@ -0,0 +1,130 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** smlsll za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_0_z0_z4_0, svint16x4_t, svint16_t,
+ svmls_lane_za64_s16_vg4x4 (0, z0, z4, 0),
+ svmls_lane_za64_vg4x4 (0, z0, z4, 0))
+
+/*
+** mls_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** smlsll za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0_z0_z7_1, svint16x4_t, svint16_t,
+ svmls_lane_za64_s16_vg4x4 (w0, z0, z7, 1),
+ svmls_lane_za64_vg4x4 (w0, z0, z7, 1))
+
+/*
+** mls_lane_w8_z28_z4_2:
+** smlsll za\.d\[w8, 0:3, vgx4\], {z28\.h - z31\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z4_2, svint16x4_t, svint16_t,
+ svmls_lane_za64_s16_vg4x4 (w8, z28, z4, 2),
+ svmls_lane_za64_vg4x4 (w8, z28, z4, 2))
+
+/*
+** mls_lane_w11p4_z0_z4_3:
+** smlsll za\.d\[w11, 4:7, vgx4\], {z0\.h - z3\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w11p4_z0_z4_3, svint16x4_t, svint16_t,
+ svmls_lane_za64_s16_vg4x4 (w11 + 4, z0, z4, 3),
+ svmls_lane_za64_vg4x4 (w11 + 4, z0, z4, 3))
+
+/*
+** mls_lane_w8p6_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?6
+** smlsll za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p6_z0_z4_4, svint16x4_t, svint16_t,
+ svmls_lane_za64_s16_vg4x4 (w8 + 6, z0, z4, 4),
+ svmls_lane_za64_vg4x4 (w8 + 6, z0, z4, 4))
+
+/*
+** mls_lane_w8p7_z0_z4_5:
+** add (w8|w9|w10|w11), w8, #?7
+** smlsll za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p7_z0_z4_5, svint16x4_t, svint16_t,
+ svmls_lane_za64_s16_vg4x4 (w8 + 7, z0, z4, 5),
+ svmls_lane_za64_vg4x4 (w8 + 7, z0, z4, 5))
+
+/*
+** mls_lane_w8p8_z0_z4_6:
+** add (w8|w9|w10|w11), w8, #?8
+** smlsll za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z4\.h\[6\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p8_z0_z4_6, svint16x4_t, svint16_t,
+ svmls_lane_za64_s16_vg4x4 (w8 + 8, z0, z4, 6),
+ svmls_lane_za64_vg4x4 (w8 + 8, z0, z4, 6))
+
+/*
+** mls_lane_w0m1_z0_z4_7:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlsll za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0m1_z0_z4_7, svint16x4_t, svint16_t,
+ svmls_lane_za64_s16_vg4x4 (w0 - 1, z0, z4, 7),
+ svmls_lane_za64_vg4x4 (w0 - 1, z0, z4, 7))
+
+/*
+** mls_lane_w8_z4_z15_0:
+** str d15, \[sp, #?-16\]!
+** smlsll za\.d\[w8, 0:3, vgx4\], {z4\.h - z7\.h}, z15\.h\[0\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mls_lane_w8_z4_z15_0, svint16x4_t, svint16_t,
+ svmls_lane_za64_s16_vg4x4 (w8, z4, z15, 0),
+ svmls_lane_za64_vg4x4 (w8, z4, z15, 0))
+
+/*
+** mls_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** smlsll za\.d\[w8, 0:3, vgx4\], {z28\.h - z31\.h}, \1\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z16_3, svint16x4_t, svint16_t,
+ svmls_lane_za64_s16_vg4x4 (w8, z28, z16, 3),
+ svmls_lane_za64_vg4x4 (w8, z28, z16, 3))
+
+/*
+** mls_lane_w8_z17_z7_4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smlsll za\.d\[w8, 0:3, vgx4\], [^\n]+, z7\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z17_z7_4, svint16x4_t, svint16_t,
+ svmls_lane_za64_s16_vg4x4 (w8, z17, z7, 4),
+ svmls_lane_za64_vg4x4 (w8, z17, z7, 4))
+
+/*
+** mls_lane_w8_z22_z4_6:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** smlsll za\.d\[w8, 0:3, vgx4\], [^\n]+, z4\.h\[6\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z22_z4_6, svint16x4_t, svint16_t,
+ svmls_lane_za64_s16_vg4x4 (w8, z22, z4, 6),
+ svmls_lane_za64_vg4x4 (w8, z22, z4, 6))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_u16_vg4x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_u16_vg4x1.c
new file mode 100644
index 0000000..7140756
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_u16_vg4x1.c
@@ -0,0 +1,152 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z0_0:
+** mov (w8|w9|w10|w11), #?0
+** umlsll za\.d\[\1, 0:3\], z0\.h, z0\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_0_z0_z0_0, svuint16_t,
+ svmls_lane_za64_u16_vg4x1 (0, z0, z0, 0),
+ svmls_lane_za64_vg4x1 (0, z0, z0, 0))
+
+/*
+** mls_lane_w0_z0_z3_1:
+** mov (w8|w9|w10|w11), w0
+** umlsll za\.d\[\1, 0:3\], z0\.h, z3\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w0_z0_z3_1, svuint16_t,
+ svmls_lane_za64_u16_vg4x1 (w0, z0, z3, 1),
+ svmls_lane_za64_vg4x1 (w0, z0, z3, 1))
+
+/*
+** mls_lane_w7_z0_z3_2:
+** mov (w8|w9|w10|w11), w7
+** umlsll za\.d\[\1, 0:3\], z0\.h, z3\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w7_z0_z3_2, svuint16_t,
+ svmls_lane_za64_u16_vg4x1 (w7, z0, z3, 2),
+ svmls_lane_za64_vg4x1 (w7, z0, z3, 2))
+
+/*
+** mls_lane_w8_z7_z3_3:
+** umlsll za\.d\[w8, 0:3\], z7\.h, z3\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8_z7_z3_3, svuint16_t,
+ svmls_lane_za64_u16_vg4x1 (w8, z7, z3, 3),
+ svmls_lane_za64_vg4x1 (w8, z7, z3, 3))
+
+/*
+** mls_lane_w8_z31_z16_4:
+** mov (z[0-7])\.d, z16\.d
+**	umlsll za\.d\[w8, 0:3\], z31\.h, \1\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8_z31_z16_4, svuint16_t,
+ svmls_lane_za64_u16_vg4x1 (w8, z31, z16, 4),
+ svmls_lane_za64_vg4x1 (w8, z31, z16, 4))
+
+/*
+** mls_lane_w8p1_z0_z0_5:
+** add (w8|w9|w10|w11), w8, #?1
+** umlsll za\.d\[\1, 0:3\], z0\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p1_z0_z0_5, svuint16_t,
+ svmls_lane_za64_u16_vg4x1 (w8 + 1, z0, z0, 5),
+ svmls_lane_za64_vg4x1 (w8 + 1, z0, z0, 5))
+
+/*
+** mls_lane_w8p2_z23_z0_6:
+** add (w8|w9|w10|w11), w8, #?2
+** umlsll za\.d\[\1, 0:3\], z23\.h, z0\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p2_z23_z0_6, svuint16_t,
+ svmls_lane_za64_u16_vg4x1 (w8 + 2, z23, z0, 6),
+ svmls_lane_za64_vg4x1 (w8 + 2, z23, z0, 6))
+
+/*
+** mls_lane_w11p4_z23_z0_7:
+** umlsll za\.d\[w11, 4:7\], z23\.h, z0\.h\[7\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w11p4_z23_z0_7, svuint16_t,
+ svmls_lane_za64_u16_vg4x1 (w11 + 4, z23, z0, 7),
+ svmls_lane_za64_vg4x1 (w11 + 4, z23, z0, 7))
+
+/*
+** mls_lane_w8p7_z7_z7_0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlsll za\.d\[\1, 0:3\], z7\.h, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p7_z7_z7_0, svuint16_t,
+ svmls_lane_za64_u16_vg4x1 (w8 + 7, z7, z7, 0),
+ svmls_lane_za64_vg4x1 (w8 + 7, z7, z7, 0))
+
+/*
+** mls_lane_w11p12_z23_z0_1:
+** umlsll za\.d\[w11, 12:15\], z23\.h, z0\.h\[1\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w11p12_z23_z0_1, svuint16_t,
+ svmls_lane_za64_u16_vg4x1 (w11 + 12, z23, z0, 1),
+ svmls_lane_za64_vg4x1 (w11 + 12, z23, z0, 1))
+
+/*
+** mls_lane_w8p14_z23_z0_2:
+** add (w8|w9|w10|w11), w8, #?14
+**	umlsll za\.d\[\1, 0:3\], z23\.h, z0\.h\[2\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p14_z23_z0_2, svuint16_t,
+ svmls_lane_za64_u16_vg4x1 (w8 + 14, z23, z0, 2),
+ svmls_lane_za64_vg4x1 (w8 + 14, z23, z0, 2))
+
+/*
+** mls_lane_w8p15_z7_z7_3:
+** add (w8|w9|w10|w11), w8, #?15
+** umlsll za\.d\[\1, 0:3\], z7\.h, z7\.h\[3\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p15_z7_z7_3, svuint16_t,
+ svmls_lane_za64_u16_vg4x1 (w8 + 15, z7, z7, 3),
+ svmls_lane_za64_vg4x1 (w8 + 15, z7, z7, 3))
+
+/*
+** mls_lane_w8p16_z7_z7_4:
+** add (w8|w9|w10|w11), w8, #?16
+** umlsll za\.d\[\1, 0:3\], z7\.h, z7\.h\[4\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8p16_z7_z7_4, svuint16_t,
+ svmls_lane_za64_u16_vg4x1 (w8 + 16, z7, z7, 4),
+ svmls_lane_za64_vg4x1 (w8 + 16, z7, z7, 4))
+
+/*
+** mls_lane_w8m1_z16_z0_5:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlsll za\.d\[\1, 0:3\], z16\.h, z0\.h\[5\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w8m1_z16_z0_5, svuint16_t,
+ svmls_lane_za64_u16_vg4x1 (w8 - 1, z16, z0, 5),
+ svmls_lane_za64_vg4x1 (w8 - 1, z16, z0, 5))
+
+/*
+** mls_lane_w12_z0_z3_6:
+** mov (w8|w9|w10|w11), w12
+** umlsll za\.d\[\1, 0:3\], z0\.h, z3\.h\[6\]
+** ret
+*/
+TEST_ZA_X1 (mls_lane_w12_z0_z3_6, svuint16_t,
+ svmls_lane_za64_u16_vg4x1 (w12, z0, z3, 6),
+ svmls_lane_za64_vg4x1 (w12, z0, z3, 6))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_u16_vg4x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_u16_vg4x2.c
new file mode 100644
index 0000000..62e2351
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_u16_vg4x2.c
@@ -0,0 +1,124 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** umlsll za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_0_z0_z4_0, svuint16x2_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x2 (0, z0, z4, 0),
+ svmls_lane_za64_vg4x2 (0, z0, z4, 0))
+
+/*
+** mls_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** umlsll za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0_z0_z7_1, svuint16x2_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x2 (w0, z0, z7, 1),
+ svmls_lane_za64_vg4x2 (w0, z0, z7, 1))
+
+/*
+** mls_lane_w8_z28_z4_2:
+** umlsll za\.d\[w8, 0:3, vgx2\], {z28\.h - z29\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z4_2, svuint16x2_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x2 (w8, z28, z4, 2),
+ svmls_lane_za64_vg4x2 (w8, z28, z4, 2))
+
+/*
+** mls_lane_w11p4_z0_z4_3:
+** umlsll za\.d\[w11, 4:7, vgx2\], {z0\.h - z1\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w11p4_z0_z4_3, svuint16x2_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x2 (w11 + 4, z0, z4, 3),
+ svmls_lane_za64_vg4x2 (w11 + 4, z0, z4, 3))
+
+/*
+** mls_lane_w8p6_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?6
+** umlsll za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p6_z0_z4_4, svuint16x2_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x2 (w8 + 6, z0, z4, 4),
+ svmls_lane_za64_vg4x2 (w8 + 6, z0, z4, 4))
+
+/*
+** mls_lane_w8p7_z0_z4_5:
+** add (w8|w9|w10|w11), w8, #?7
+** umlsll za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p7_z0_z4_5, svuint16x2_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x2 (w8 + 7, z0, z4, 5),
+ svmls_lane_za64_vg4x2 (w8 + 7, z0, z4, 5))
+
+/*
+** mls_lane_w8p8_z0_z4_6:
+** add (w8|w9|w10|w11), w8, #?8
+** umlsll za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z4\.h\[6\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p8_z0_z4_6, svuint16x2_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x2 (w8 + 8, z0, z4, 6),
+ svmls_lane_za64_vg4x2 (w8 + 8, z0, z4, 6))
+
+/*
+** mls_lane_w0m1_z0_z4_7:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlsll za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0m1_z0_z4_7, svuint16x2_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x2 (w0 - 1, z0, z4, 7),
+ svmls_lane_za64_vg4x2 (w0 - 1, z0, z4, 7))
+
+/*
+** mls_lane_w8_z4_z15_0:
+** str d15, \[sp, #?-16\]!
+** umlsll za\.d\[w8, 0:3, vgx2\], {z4\.h - z5\.h}, z15\.h\[0\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mls_lane_w8_z4_z15_0, svuint16x2_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x2 (w8, z4, z15, 0),
+ svmls_lane_za64_vg4x2 (w8, z4, z15, 0))
+
+/*
+** mls_lane_w8_z28_z16_1:
+** mov (z[0-7]).d, z16.d
+** umlsll za\.d\[w8, 0:3, vgx2\], {z28\.h - z29\.h}, \1\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z16_1, svuint16x2_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x2 (w8, z28, z16, 1),
+ svmls_lane_za64_vg4x2 (w8, z28, z16, 1))
+
+/*
+** mls_lane_w8_z17_z7_3:
+** mov [^\n]+
+** mov [^\n]+
+** umlsll za\.d\[w8, 0:3, vgx2\], [^\n]+, z7\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z17_z7_3, svuint16x2_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x2 (w8, z17, z7, 3),
+ svmls_lane_za64_vg4x2 (w8, z17, z7, 3))
+
+/*
+** mls_lane_w8_z22_z4_5:
+** umlsll za\.d\[w8, 0:3, vgx2\], {z22\.h - z23\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z22_z4_5, svuint16x2_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x2 (w8, z22, z4, 5),
+ svmls_lane_za64_vg4x2 (w8, z22, z4, 5))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_u16_vg4x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_u16_vg4x4.c
new file mode 100644
index 0000000..f095908
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_lane_za64_u16_vg4x4.c
@@ -0,0 +1,130 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** umlsll za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_0_z0_z4_0, svuint16x4_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x4 (0, z0, z4, 0),
+ svmls_lane_za64_vg4x4 (0, z0, z4, 0))
+
+/*
+** mls_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** umlsll za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0_z0_z7_1, svuint16x4_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x4 (w0, z0, z7, 1),
+ svmls_lane_za64_vg4x4 (w0, z0, z7, 1))
+
+/*
+** mls_lane_w8_z28_z4_2:
+** umlsll za\.d\[w8, 0:3, vgx4\], {z28\.h - z31\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z4_2, svuint16x4_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x4 (w8, z28, z4, 2),
+ svmls_lane_za64_vg4x4 (w8, z28, z4, 2))
+
+/*
+** mls_lane_w11p4_z0_z4_3:
+** umlsll za\.d\[w11, 4:7, vgx4\], {z0\.h - z3\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w11p4_z0_z4_3, svuint16x4_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x4 (w11 + 4, z0, z4, 3),
+ svmls_lane_za64_vg4x4 (w11 + 4, z0, z4, 3))
+
+/*
+** mls_lane_w8p6_z0_z4_4:
+** add (w8|w9|w10|w11), w8, #?6
+** umlsll za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z4\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p6_z0_z4_4, svuint16x4_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x4 (w8 + 6, z0, z4, 4),
+ svmls_lane_za64_vg4x4 (w8 + 6, z0, z4, 4))
+
+/*
+** mls_lane_w8p7_z0_z4_5:
+** add (w8|w9|w10|w11), w8, #?7
+** umlsll za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z4\.h\[5\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p7_z0_z4_5, svuint16x4_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x4 (w8 + 7, z0, z4, 5),
+ svmls_lane_za64_vg4x4 (w8 + 7, z0, z4, 5))
+
+/*
+** mls_lane_w8p8_z0_z4_6:
+** add (w8|w9|w10|w11), w8, #?8
+** umlsll za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z4\.h\[6\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8p8_z0_z4_6, svuint16x4_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x4 (w8 + 8, z0, z4, 6),
+ svmls_lane_za64_vg4x4 (w8 + 8, z0, z4, 6))
+
+/*
+** mls_lane_w0m1_z0_z4_7:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlsll za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, z4\.h\[7\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w0m1_z0_z4_7, svuint16x4_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x4 (w0 - 1, z0, z4, 7),
+ svmls_lane_za64_vg4x4 (w0 - 1, z0, z4, 7))
+
+/*
+** mls_lane_w8_z4_z15_0:
+** str d15, \[sp, #?-16\]!
+** umlsll za\.d\[w8, 0:3, vgx4\], {z4\.h - z7\.h}, z15\.h\[0\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (mls_lane_w8_z4_z15_0, svuint16x4_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x4 (w8, z4, z15, 0),
+ svmls_lane_za64_vg4x4 (w8, z4, z15, 0))
+
+/*
+** mls_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** umlsll za\.d\[w8, 0:3, vgx4\], {z28\.h - z31\.h}, \1\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z28_z16_3, svuint16x4_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x4 (w8, z28, z16, 3),
+ svmls_lane_za64_vg4x4 (w8, z28, z16, 3))
+
+/*
+** mls_lane_w8_z17_z7_4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umlsll za\.d\[w8, 0:3, vgx4\], [^\n]+, z7\.h\[4\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z17_z7_4, svuint16x4_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x4 (w8, z17, z7, 4),
+ svmls_lane_za64_vg4x4 (w8, z17, z7, 4))
+
+/*
+** mls_lane_w8_z22_z4_6:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** umlsll za\.d\[w8, 0:3, vgx4\], [^\n]+, z4\.h\[6\]
+** ret
+*/
+TEST_ZA_LANE (mls_lane_w8_z22_z4_6, svuint16x4_t, svuint16_t,
+ svmls_lane_za64_u16_vg4x4 (w8, z22, z4, 6),
+ svmls_lane_za64_vg4x4 (w8, z22, z4, 6))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_bf16_vg2x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_bf16_vg2x1.c
new file mode 100644
index 0000000..0e04e33
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_bf16_vg2x1.c
@@ -0,0 +1,148 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** bfmlsl za\.s\[\1, 0:1\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_0_z0_z0, svbfloat16_t,
+ svmls_za32_bf16_vg2x1 (0, z0, z0),
+ svmls_za32_vg2x1 (0, z0, z0))
+
+/*
+** mls_w0_z0_z3:
+** mov (w8|w9|w10|w11), w0
+** bfmlsl za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w0_z0_z3, svbfloat16_t,
+ svmls_za32_bf16_vg2x1 (w0, z0, z3),
+ svmls_za32_vg2x1 (w0, z0, z3))
+
+/*
+** mls_w7_z0_z3:
+** mov (w8|w9|w10|w11), w7
+** bfmlsl za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w7_z0_z3, svbfloat16_t,
+ svmls_za32_bf16_vg2x1 (w7, z0, z3),
+ svmls_za32_vg2x1 (w7, z0, z3))
+
+/*
+** mls_w8_z7_z3:
+** bfmlsl za\.s\[w8, 0:1\], z7\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8_z7_z3, svbfloat16_t,
+ svmls_za32_bf16_vg2x1 (w8, z7, z3),
+ svmls_za32_vg2x1 (w8, z7, z3))
+
+/*
+** mls_w8_z31_z16:
+** mov (z[0-7])\.d, z16\.d
+**	bfmlsl za\.s\[w8, 0:1\], z31\.h, \1\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8_z31_z16, svbfloat16_t,
+ svmls_za32_bf16_vg2x1 (w8, z31, z16),
+ svmls_za32_vg2x1 (w8, z31, z16))
+
+/*
+** mls_w8p1_z0_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** bfmlsl za\.s\[\1, 0:1\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p1_z0_z0, svbfloat16_t,
+ svmls_za32_bf16_vg2x1 (w8 + 1, z0, z0),
+ svmls_za32_vg2x1 (w8 + 1, z0, z0))
+
+/*
+** mls_w8p2_z23_z0:
+** bfmlsl za\.s\[w8, 2:3\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p2_z23_z0, svbfloat16_t,
+ svmls_za32_bf16_vg2x1 (w8 + 2, z23, z0),
+ svmls_za32_vg2x1 (w8 + 2, z23, z0))
+
+/*
+** mls_w11p6_z23_z0:
+** bfmlsl za\.s\[w11, 6:7\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w11p6_z23_z0, svbfloat16_t,
+ svmls_za32_bf16_vg2x1 (w11 + 6, z23, z0),
+ svmls_za32_vg2x1 (w11 + 6, z23, z0))
+
+/*
+** mls_w8p7_z7_z7:
+** add (w8|w9|w10|w11), w8, #?7
+** bfmlsl za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p7_z7_z7, svbfloat16_t,
+ svmls_za32_bf16_vg2x1 (w8 + 7, z7, z7),
+ svmls_za32_vg2x1 (w8 + 7, z7, z7))
+
+/*
+** mls_w11p10_z23_z0:
+** bfmlsl za\.s\[w11, 10:11\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w11p10_z23_z0, svbfloat16_t,
+ svmls_za32_bf16_vg2x1 (w11 + 10, z23, z0),
+ svmls_za32_vg2x1 (w11 + 10, z23, z0))
+
+/*
+** mls_w8p14_z23_z0:
+** bfmlsl za\.s\[w8, 14:15\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p14_z23_z0, svbfloat16_t,
+ svmls_za32_bf16_vg2x1 (w8 + 14, z23, z0),
+ svmls_za32_vg2x1 (w8 + 14, z23, z0))
+
+/*
+** mls_w8p15_z7_z7:
+** add (w8|w9|w10|w11), w8, #?15
+** bfmlsl za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p15_z7_z7, svbfloat16_t,
+ svmls_za32_bf16_vg2x1 (w8 + 15, z7, z7),
+ svmls_za32_vg2x1 (w8 + 15, z7, z7))
+
+/*
+** mls_w8p16_z7_z7:
+** add (w8|w9|w10|w11), w8, #?16
+** bfmlsl za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p16_z7_z7, svbfloat16_t,
+ svmls_za32_bf16_vg2x1 (w8 + 16, z7, z7),
+ svmls_za32_vg2x1 (w8 + 16, z7, z7))
+
+/*
+** mls_w8m1_z16_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** bfmlsl za\.s\[\1, 0:1\], z16\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8m1_z16_z0, svbfloat16_t,
+ svmls_za32_bf16_vg2x1 (w8 - 1, z16, z0),
+ svmls_za32_vg2x1 (w8 - 1, z16, z0))
+
+/*
+** mls_w12_z0_z3:
+** mov (w8|w9|w10|w11), w12
+** bfmlsl za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w12_z0_z3, svbfloat16_t,
+ svmls_za32_bf16_vg2x1 (w12, z0, z3),
+ svmls_za32_vg2x1 (w12, z0, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_bf16_vg2x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_bf16_vg2x2.c
new file mode 100644
index 0000000..a923ba6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_bf16_vg2x2.c
@@ -0,0 +1,247 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** bfmlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_0_z0_z0, svbfloat16x2_t,
+ svmls_za32_bf16_vg2x2 (0, z0, z0),
+ svmls_za32_vg2x2 (0, z0, z0))
+
+/*
+** mls_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** bfmlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w0_z0_z0, svbfloat16x2_t,
+ svmls_za32_bf16_vg2x2 (w0, z0, z0),
+ svmls_za32_vg2x2 (w0, z0, z0))
+
+/*
+** mls_w8_z0_z4:
+** bfmlsl za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z4, svbfloat16x2_t,
+ svmls_za32_bf16_vg2x2 (w8, z0, z4),
+ svmls_za32_vg2x2 (w8, z0, z4))
+
+/*
+** mls_w8_z4_z18:
+** bfmlsl za\.s\[w8, 0:1, vgx2\], {z4\.h - z5\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z4_z18, svbfloat16x2_t,
+ svmls_za32_bf16_vg2x2 (w8, z4, z18),
+ svmls_za32_vg2x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mls_w8_z0_z23:
+** ...
+** bfmlsl za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z23, svbfloat16x2_t,
+ svmls_za32_bf16_vg2x2 (w8, z0, z23),
+ svmls_za32_vg2x2 (w8, z0, z23))
+
+/*
+** mls_w8_z23_z0:
+** ...
+** bfmlsl za\.s\[w8, 0:1, vgx2\], [^\n]+, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z23_z0, svbfloat16x2_t,
+ svmls_za32_bf16_vg2x2 (w8, z23, z0),
+ svmls_za32_vg2x2 (w8, z23, z0))
+
+/*
+** mls_w8_z18_z28:
+** bfmlsl za\.s\[w8, 0:1, vgx2\], {z18\.h - z19\.h}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z18_z28, svbfloat16x2_t,
+ svmls_za32_bf16_vg2x2 (w8, z18, z28),
+ svmls_za32_vg2x2 (w8, z18, z28))
+
+/*
+** mls_w8_z28_z4:
+** bfmlsl za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z28_z4, svbfloat16x2_t,
+ svmls_za32_bf16_vg2x2 (w8, z28, z4),
+ svmls_za32_vg2x2 (w8, z28, z4))
+
+/*
+** mls_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** bfmlsl za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p1_z4_z0, svbfloat16x2_t,
+ svmls_za32_bf16_vg2x2 (w8 + 1, z4, z0),
+ svmls_za32_vg2x2 (w8 + 1, z4, z0))
+
+/*
+** mls_w8p2_z4_z0:
+** bfmlsl za\.s\[w8, 2:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p2_z4_z0, svbfloat16x2_t,
+ svmls_za32_bf16_vg2x2 (w8 + 2, z4, z0),
+ svmls_za32_vg2x2 (w8 + 2, z4, z0))
+
+/*
+** mls_w8p6_z4_z0:
+** bfmlsl za\.s\[w8, 6:7, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p6_z4_z0, svbfloat16x2_t,
+ svmls_za32_bf16_vg2x2 (w8 + 6, z4, z0),
+ svmls_za32_vg2x2 (w8 + 6, z4, z0))
+
+/*
+** mls_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** bfmlsl za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p7_z4_z0, svbfloat16x2_t,
+ svmls_za32_bf16_vg2x2 (w8 + 7, z4, z0),
+ svmls_za32_vg2x2 (w8 + 7, z4, z0))
+
+/*
+** mls_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** bfmlsl za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p8_z4_z4, svbfloat16x2_t,
+ svmls_za32_bf16_vg2x2 (w8 + 8, z4, z4),
+ svmls_za32_vg2x2 (w8 + 8, z4, z4))
+
+/*
+** mls_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** bfmlsl za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8m1_z4_z0, svbfloat16x2_t,
+ svmls_za32_bf16_vg2x2 (w8 - 1, z4, z0),
+ svmls_za32_vg2x2 (w8 - 1, z4, z0))
+
+/*
+** mls_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** bfmlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_0_z1_z0, svbfloat16x2_t, svbfloat16_t,
+ svmls_single_za32_bf16_vg2x2 (0, z1, z0),
+ svmls_za32_vg2x2 (0, z1, z0))
+
+/*
+** mls_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** bfmlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0_z1_z0, svbfloat16x2_t, svbfloat16_t,
+ svmls_single_za32_bf16_vg2x2 (w0, z1, z0),
+ svmls_za32_vg2x2 (w0, z1, z0))
+
+/*
+** mls_single_w8_z1_z0:
+** bfmlsl za\.s\[w8, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z1_z0, svbfloat16x2_t, svbfloat16_t,
+ svmls_single_za32_bf16_vg2x2 (w8, z1, z0),
+ svmls_za32_vg2x2 (w8, z1, z0))
+
+/*
+** mls_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** bfmlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p1_z1_z0, svbfloat16x2_t, svbfloat16_t,
+ svmls_single_za32_bf16_vg2x2 (w8 + 1, z1, z0),
+ svmls_za32_vg2x2 (w8 + 1, z1, z0))
+
+/*
+** mls_single_w8p4_z20_z0:
+** bfmlsl za\.s\[w8, 4:5, vgx2\], {z20\.h - z21\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p4_z20_z0, svbfloat16x2_t, svbfloat16_t,
+ svmls_single_za32_bf16_vg2x2 (w8 + 4, z20, z0),
+ svmls_za32_vg2x2 (w8 + 4, z20, z0))
+
+/*
+** mls_single_w8p6_z27_z0:
+** bfmlsl za\.s\[w8, 6:7, vgx2\], {z27\.h - z28\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p6_z27_z0, svbfloat16x2_t, svbfloat16_t,
+ svmls_single_za32_bf16_vg2x2 (w8 + 6, z27, z0),
+ svmls_za32_vg2x2 (w8 + 6, z27, z0))
+
+/*
+** mls_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** bfmlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p7_z1_z0, svbfloat16x2_t, svbfloat16_t,
+ svmls_single_za32_bf16_vg2x2 (w8 + 7, z1, z0),
+ svmls_za32_vg2x2 (w8 + 7, z1, z0))
+
+/*
+** mls_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** bfmlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p8_z1_z0, svbfloat16x2_t, svbfloat16_t,
+ svmls_single_za32_bf16_vg2x2 (w8 + 8, z1, z0),
+ svmls_za32_vg2x2 (w8 + 8, z1, z0))
+
+/*
+** mls_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** bfmlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0m1_z1_z0, svbfloat16x2_t, svbfloat16_t,
+ svmls_single_za32_bf16_vg2x2 (w0 - 1, z1, z0),
+ svmls_za32_vg2x2 (w0 - 1, z1, z0))
+
+/*
+** mls_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** bfmlsl za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mls_single_w8_z0_z15, svbfloat16x2_t, svbfloat16_t,
+ svmls_single_za32_bf16_vg2x2 (w8, z0, z15),
+ svmls_za32_vg2x2 (w8, z0, z15))
+
+/*
+** mls_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** bfmlsl za\.s\[w8, 0:1, vgx2\], {z20\.h - z21\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z20_z16, svbfloat16x2_t, svbfloat16_t,
+ svmls_single_za32_bf16_vg2x2 (w8, z20, z16),
+ svmls_za32_vg2x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_bf16_vg2x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_bf16_vg2x4.c
new file mode 100644
index 0000000..498eb10
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_bf16_vg2x4.c
@@ -0,0 +1,258 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** bfmlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_0_z0_z0, svbfloat16x4_t,
+ svmls_za32_bf16_vg2x4 (0, z0, z0),
+ svmls_za32_vg2x4 (0, z0, z0))
+
+/*
+** mls_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** bfmlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w0_z0_z0, svbfloat16x4_t,
+ svmls_za32_bf16_vg2x4 (w0, z0, z0),
+ svmls_za32_vg2x4 (w0, z0, z0))
+
+/*
+** mls_w8_z0_z4:
+** bfmlsl za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z4, svbfloat16x4_t,
+ svmls_za32_bf16_vg2x4 (w8, z0, z4),
+ svmls_za32_vg2x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mls_w8_z0_z18:
+** ...
+** bfmlsl za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z18, svbfloat16x4_t,
+ svmls_za32_bf16_vg2x4 (w8, z0, z18),
+ svmls_za32_vg2x4 (w8, z0, z18))
+
+/*
+** mls_w8_z18_z0:
+** ...
+** bfmlsl za\.s\[w8, 0:1, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z18_z0, svbfloat16x4_t,
+ svmls_za32_bf16_vg2x4 (w8, z18, z0),
+ svmls_za32_vg2x4 (w8, z18, z0))
+
+/*
+** mls_w8_z0_z23:
+** ...
+** bfmlsl za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z23, svbfloat16x4_t,
+ svmls_za32_bf16_vg2x4 (w8, z0, z23),
+ svmls_za32_vg2x4 (w8, z0, z23))
+
+/*
+** mls_w8_z23_z0:
+** ...
+** bfmlsl za\.s\[w8, 0:1, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z23_z0, svbfloat16x4_t,
+ svmls_za32_bf16_vg2x4 (w8, z23, z0),
+ svmls_za32_vg2x4 (w8, z23, z0))
+
+/*
+** mls_w8_z4_z28:
+** bfmlsl za\.s\[w8, 0:1, vgx4\], {z4\.h - z7\.h}, {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z4_z28, svbfloat16x4_t,
+ svmls_za32_bf16_vg2x4 (w8, z4, z28),
+ svmls_za32_vg2x4 (w8, z4, z28))
+
+/*
+** mls_w8_z28_z0:
+** bfmlsl za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z28_z0, svbfloat16x4_t,
+ svmls_za32_bf16_vg2x4 (w8, z28, z0),
+ svmls_za32_vg2x4 (w8, z28, z0))
+
+/*
+** mls_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** bfmlsl za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p1_z4_z0, svbfloat16x4_t,
+ svmls_za32_bf16_vg2x4 (w8 + 1, z4, z0),
+ svmls_za32_vg2x4 (w8 + 1, z4, z0))
+
+/*
+** mls_w8p2_z4_z0:
+** bfmlsl za\.s\[w8, 2:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p2_z4_z0, svbfloat16x4_t,
+ svmls_za32_bf16_vg2x4 (w8 + 2, z4, z0),
+ svmls_za32_vg2x4 (w8 + 2, z4, z0))
+
+/*
+** mls_w8p6_z4_z0:
+** bfmlsl za\.s\[w8, 6:7, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p6_z4_z0, svbfloat16x4_t,
+ svmls_za32_bf16_vg2x4 (w8 + 6, z4, z0),
+ svmls_za32_vg2x4 (w8 + 6, z4, z0))
+
+/*
+** mls_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** bfmlsl za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p7_z4_z0, svbfloat16x4_t,
+ svmls_za32_bf16_vg2x4 (w8 + 7, z4, z0),
+ svmls_za32_vg2x4 (w8 + 7, z4, z0))
+
+/*
+** mls_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** bfmlsl za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p8_z4_z4, svbfloat16x4_t,
+ svmls_za32_bf16_vg2x4 (w8 + 8, z4, z4),
+ svmls_za32_vg2x4 (w8 + 8, z4, z4))
+
+/*
+** mls_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** bfmlsl za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8m1_z4_z0, svbfloat16x4_t,
+ svmls_za32_bf16_vg2x4 (w8 - 1, z4, z0),
+ svmls_za32_vg2x4 (w8 - 1, z4, z0))
+
+/*
+** mls_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** bfmlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_0_z1_z0, svbfloat16x4_t, svbfloat16_t,
+ svmls_single_za32_bf16_vg2x4 (0, z1, z0),
+ svmls_za32_vg2x4 (0, z1, z0))
+
+/*
+** mls_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** bfmlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0_z1_z0, svbfloat16x4_t, svbfloat16_t,
+ svmls_single_za32_bf16_vg2x4 (w0, z1, z0),
+ svmls_za32_vg2x4 (w0, z1, z0))
+
+/*
+** mls_single_w8_z1_z0:
+** bfmlsl za\.s\[w8, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z1_z0, svbfloat16x4_t, svbfloat16_t,
+ svmls_single_za32_bf16_vg2x4 (w8, z1, z0),
+ svmls_za32_vg2x4 (w8, z1, z0))
+
+/*
+** mls_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** bfmlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p1_z1_z0, svbfloat16x4_t, svbfloat16_t,
+ svmls_single_za32_bf16_vg2x4 (w8 + 1, z1, z0),
+ svmls_za32_vg2x4 (w8 + 1, z1, z0))
+
+/*
+** mls_single_w8p4_z20_z0:
+** bfmlsl za\.s\[w8, 4:5, vgx4\], {z20\.h - z23\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p4_z20_z0, svbfloat16x4_t, svbfloat16_t,
+ svmls_single_za32_bf16_vg2x4 (w8 + 4, z20, z0),
+ svmls_za32_vg2x4 (w8 + 4, z20, z0))
+
+/*
+** mls_single_w8p6_z27_z0:
+** bfmlsl za\.s\[w8, 6:7, vgx4\], {z27\.h - z30\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p6_z27_z0, svbfloat16x4_t, svbfloat16_t,
+ svmls_single_za32_bf16_vg2x4 (w8 + 6, z27, z0),
+ svmls_za32_vg2x4 (w8 + 6, z27, z0))
+
+/*
+** mls_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** bfmlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p7_z1_z0, svbfloat16x4_t, svbfloat16_t,
+ svmls_single_za32_bf16_vg2x4 (w8 + 7, z1, z0),
+ svmls_za32_vg2x4 (w8 + 7, z1, z0))
+
+/*
+** mls_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** bfmlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p8_z1_z0, svbfloat16x4_t, svbfloat16_t,
+ svmls_single_za32_bf16_vg2x4 (w8 + 8, z1, z0),
+ svmls_za32_vg2x4 (w8 + 8, z1, z0))
+
+/*
+** mls_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** bfmlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0m1_z1_z0, svbfloat16x4_t, svbfloat16_t,
+ svmls_single_za32_bf16_vg2x4 (w0 - 1, z1, z0),
+ svmls_za32_vg2x4 (w0 - 1, z1, z0))
+
+/*
+** mls_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** bfmlsl za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mls_single_w8_z0_z15, svbfloat16x4_t, svbfloat16_t,
+ svmls_single_za32_bf16_vg2x4 (w8, z0, z15),
+ svmls_za32_vg2x4 (w8, z0, z15))
+
+/*
+** mls_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** bfmlsl za\.s\[w8, 0:1, vgx4\], {z20\.h - z23\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z20_z16, svbfloat16x4_t, svbfloat16_t,
+ svmls_single_za32_bf16_vg2x4 (w8, z20, z16),
+ svmls_za32_vg2x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_f16_vg2x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_f16_vg2x1.c
new file mode 100644
index 0000000..8d53b3d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_f16_vg2x1.c
@@ -0,0 +1,148 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmlsl za\.s\[\1, 0:1\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_0_z0_z0, svfloat16_t,
+ svmls_za32_f16_vg2x1 (0, z0, z0),
+ svmls_za32_vg2x1 (0, z0, z0))
+
+/*
+** mls_w0_z0_z3:
+** mov (w8|w9|w10|w11), w0
+** fmlsl za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w0_z0_z3, svfloat16_t,
+ svmls_za32_f16_vg2x1 (w0, z0, z3),
+ svmls_za32_vg2x1 (w0, z0, z3))
+
+/*
+** mls_w7_z0_z3:
+** mov (w8|w9|w10|w11), w7
+** fmlsl za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w7_z0_z3, svfloat16_t,
+ svmls_za32_f16_vg2x1 (w7, z0, z3),
+ svmls_za32_vg2x1 (w7, z0, z3))
+
+/*
+** mls_w8_z7_z3:
+** fmlsl za\.s\[w8, 0:1\], z7\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8_z7_z3, svfloat16_t,
+ svmls_za32_f16_vg2x1 (w8, z7, z3),
+ svmls_za32_vg2x1 (w8, z7, z3))
+
+/*
+** mls_w8_z31_z16:
+** mov (z[0-7])\.d, z16\.d
+**	fmlsl	za\.s\[w8, 0:1\], z31\.h, \1\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8_z31_z16, svfloat16_t,
+ svmls_za32_f16_vg2x1 (w8, z31, z16),
+ svmls_za32_vg2x1 (w8, z31, z16))
+
+/*
+** mls_w8p1_z0_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** fmlsl za\.s\[\1, 0:1\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p1_z0_z0, svfloat16_t,
+ svmls_za32_f16_vg2x1 (w8 + 1, z0, z0),
+ svmls_za32_vg2x1 (w8 + 1, z0, z0))
+
+/*
+** mls_w8p2_z23_z0:
+** fmlsl za\.s\[w8, 2:3\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p2_z23_z0, svfloat16_t,
+ svmls_za32_f16_vg2x1 (w8 + 2, z23, z0),
+ svmls_za32_vg2x1 (w8 + 2, z23, z0))
+
+/*
+** mls_w11p6_z23_z0:
+** fmlsl za\.s\[w11, 6:7\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w11p6_z23_z0, svfloat16_t,
+ svmls_za32_f16_vg2x1 (w11 + 6, z23, z0),
+ svmls_za32_vg2x1 (w11 + 6, z23, z0))
+
+/*
+** mls_w8p7_z7_z7:
+** add (w8|w9|w10|w11), w8, #?7
+** fmlsl za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p7_z7_z7, svfloat16_t,
+ svmls_za32_f16_vg2x1 (w8 + 7, z7, z7),
+ svmls_za32_vg2x1 (w8 + 7, z7, z7))
+
+/*
+** mls_w11p10_z23_z0:
+** fmlsl za\.s\[w11, 10:11\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w11p10_z23_z0, svfloat16_t,
+ svmls_za32_f16_vg2x1 (w11 + 10, z23, z0),
+ svmls_za32_vg2x1 (w11 + 10, z23, z0))
+
+/*
+** mls_w8p14_z23_z0:
+** fmlsl za\.s\[w8, 14:15\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p14_z23_z0, svfloat16_t,
+ svmls_za32_f16_vg2x1 (w8 + 14, z23, z0),
+ svmls_za32_vg2x1 (w8 + 14, z23, z0))
+
+/*
+** mls_w8p15_z7_z7:
+** add (w8|w9|w10|w11), w8, #?15
+** fmlsl za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p15_z7_z7, svfloat16_t,
+ svmls_za32_f16_vg2x1 (w8 + 15, z7, z7),
+ svmls_za32_vg2x1 (w8 + 15, z7, z7))
+
+/*
+** mls_w8p16_z7_z7:
+** add (w8|w9|w10|w11), w8, #?16
+** fmlsl za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p16_z7_z7, svfloat16_t,
+ svmls_za32_f16_vg2x1 (w8 + 16, z7, z7),
+ svmls_za32_vg2x1 (w8 + 16, z7, z7))
+
+/*
+** mls_w8m1_z16_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fmlsl za\.s\[\1, 0:1\], z16\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8m1_z16_z0, svfloat16_t,
+ svmls_za32_f16_vg2x1 (w8 - 1, z16, z0),
+ svmls_za32_vg2x1 (w8 - 1, z16, z0))
+
+/*
+** mls_w12_z0_z3:
+** mov (w8|w9|w10|w11), w12
+** fmlsl za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w12_z0_z3, svfloat16_t,
+ svmls_za32_f16_vg2x1 (w12, z0, z3),
+ svmls_za32_vg2x1 (w12, z0, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_f16_vg2x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_f16_vg2x2.c
new file mode 100644
index 0000000..dafa9f0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_f16_vg2x2.c
@@ -0,0 +1,247 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_0_z0_z0, svfloat16x2_t,
+ svmls_za32_f16_vg2x2 (0, z0, z0),
+ svmls_za32_vg2x2 (0, z0, z0))
+
+/*
+** mls_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** fmlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w0_z0_z0, svfloat16x2_t,
+ svmls_za32_f16_vg2x2 (w0, z0, z0),
+ svmls_za32_vg2x2 (w0, z0, z0))
+
+/*
+** mls_w8_z0_z4:
+** fmlsl za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z4, svfloat16x2_t,
+ svmls_za32_f16_vg2x2 (w8, z0, z4),
+ svmls_za32_vg2x2 (w8, z0, z4))
+
+/*
+** mls_w8_z4_z18:
+** fmlsl za\.s\[w8, 0:1, vgx2\], {z4\.h - z5\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z4_z18, svfloat16x2_t,
+ svmls_za32_f16_vg2x2 (w8, z4, z18),
+ svmls_za32_vg2x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mls_w8_z0_z23:
+** ...
+** fmlsl za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z23, svfloat16x2_t,
+ svmls_za32_f16_vg2x2 (w8, z0, z23),
+ svmls_za32_vg2x2 (w8, z0, z23))
+
+/*
+** mls_w8_z23_z0:
+** ...
+** fmlsl za\.s\[w8, 0:1, vgx2\], [^\n]+, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z23_z0, svfloat16x2_t,
+ svmls_za32_f16_vg2x2 (w8, z23, z0),
+ svmls_za32_vg2x2 (w8, z23, z0))
+
+/*
+** mls_w8_z18_z28:
+** fmlsl za\.s\[w8, 0:1, vgx2\], {z18\.h - z19\.h}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z18_z28, svfloat16x2_t,
+ svmls_za32_f16_vg2x2 (w8, z18, z28),
+ svmls_za32_vg2x2 (w8, z18, z28))
+
+/*
+** mls_w8_z28_z4:
+** fmlsl za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z28_z4, svfloat16x2_t,
+ svmls_za32_f16_vg2x2 (w8, z28, z4),
+ svmls_za32_vg2x2 (w8, z28, z4))
+
+/*
+** mls_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** fmlsl za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p1_z4_z0, svfloat16x2_t,
+ svmls_za32_f16_vg2x2 (w8 + 1, z4, z0),
+ svmls_za32_vg2x2 (w8 + 1, z4, z0))
+
+/*
+** mls_w8p2_z4_z0:
+** fmlsl za\.s\[w8, 2:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p2_z4_z0, svfloat16x2_t,
+ svmls_za32_f16_vg2x2 (w8 + 2, z4, z0),
+ svmls_za32_vg2x2 (w8 + 2, z4, z0))
+
+/*
+** mls_w8p6_z4_z0:
+** fmlsl za\.s\[w8, 6:7, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p6_z4_z0, svfloat16x2_t,
+ svmls_za32_f16_vg2x2 (w8 + 6, z4, z0),
+ svmls_za32_vg2x2 (w8 + 6, z4, z0))
+
+/*
+** mls_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** fmlsl za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p7_z4_z0, svfloat16x2_t,
+ svmls_za32_f16_vg2x2 (w8 + 7, z4, z0),
+ svmls_za32_vg2x2 (w8 + 7, z4, z0))
+
+/*
+** mls_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** fmlsl za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p8_z4_z4, svfloat16x2_t,
+ svmls_za32_f16_vg2x2 (w8 + 8, z4, z4),
+ svmls_za32_vg2x2 (w8 + 8, z4, z4))
+
+/*
+** mls_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fmlsl za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8m1_z4_z0, svfloat16x2_t,
+ svmls_za32_f16_vg2x2 (w8 - 1, z4, z0),
+ svmls_za32_vg2x2 (w8 - 1, z4, z0))
+
+/*
+** mls_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_0_z1_z0, svfloat16x2_t, svfloat16_t,
+ svmls_single_za32_f16_vg2x2 (0, z1, z0),
+ svmls_za32_vg2x2 (0, z1, z0))
+
+/*
+** mls_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** fmlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0_z1_z0, svfloat16x2_t, svfloat16_t,
+ svmls_single_za32_f16_vg2x2 (w0, z1, z0),
+ svmls_za32_vg2x2 (w0, z1, z0))
+
+/*
+** mls_single_w8_z1_z0:
+** fmlsl za\.s\[w8, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z1_z0, svfloat16x2_t, svfloat16_t,
+ svmls_single_za32_f16_vg2x2 (w8, z1, z0),
+ svmls_za32_vg2x2 (w8, z1, z0))
+
+/*
+** mls_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** fmlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p1_z1_z0, svfloat16x2_t, svfloat16_t,
+ svmls_single_za32_f16_vg2x2 (w8 + 1, z1, z0),
+ svmls_za32_vg2x2 (w8 + 1, z1, z0))
+
+/*
+** mls_single_w8p4_z20_z0:
+** fmlsl za\.s\[w8, 4:5, vgx2\], {z20\.h - z21\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p4_z20_z0, svfloat16x2_t, svfloat16_t,
+ svmls_single_za32_f16_vg2x2 (w8 + 4, z20, z0),
+ svmls_za32_vg2x2 (w8 + 4, z20, z0))
+
+/*
+** mls_single_w8p6_z27_z0:
+** fmlsl za\.s\[w8, 6:7, vgx2\], {z27\.h - z28\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p6_z27_z0, svfloat16x2_t, svfloat16_t,
+ svmls_single_za32_f16_vg2x2 (w8 + 6, z27, z0),
+ svmls_za32_vg2x2 (w8 + 6, z27, z0))
+
+/*
+** mls_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** fmlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p7_z1_z0, svfloat16x2_t, svfloat16_t,
+ svmls_single_za32_f16_vg2x2 (w8 + 7, z1, z0),
+ svmls_za32_vg2x2 (w8 + 7, z1, z0))
+
+/*
+** mls_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** fmlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p8_z1_z0, svfloat16x2_t, svfloat16_t,
+ svmls_single_za32_f16_vg2x2 (w8 + 8, z1, z0),
+ svmls_za32_vg2x2 (w8 + 8, z1, z0))
+
+/*
+** mls_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0m1_z1_z0, svfloat16x2_t, svfloat16_t,
+ svmls_single_za32_f16_vg2x2 (w0 - 1, z1, z0),
+ svmls_za32_vg2x2 (w0 - 1, z1, z0))
+
+/*
+** mls_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** fmlsl za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mls_single_w8_z0_z15, svfloat16x2_t, svfloat16_t,
+ svmls_single_za32_f16_vg2x2 (w8, z0, z15),
+ svmls_za32_vg2x2 (w8, z0, z15))
+
+/*
+** mls_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** fmlsl za\.s\[w8, 0:1, vgx2\], {z20\.h - z21\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z20_z16, svfloat16x2_t, svfloat16_t,
+ svmls_single_za32_f16_vg2x2 (w8, z20, z16),
+ svmls_za32_vg2x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_f16_vg2x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_f16_vg2x4.c
new file mode 100644
index 0000000..c0f31bf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_f16_vg2x4.c
@@ -0,0 +1,258 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_0_z0_z0, svfloat16x4_t,
+ svmls_za32_f16_vg2x4 (0, z0, z0),
+ svmls_za32_vg2x4 (0, z0, z0))
+
+/*
+** mls_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** fmlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w0_z0_z0, svfloat16x4_t,
+ svmls_za32_f16_vg2x4 (w0, z0, z0),
+ svmls_za32_vg2x4 (w0, z0, z0))
+
+/*
+** mls_w8_z0_z4:
+** fmlsl za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z4, svfloat16x4_t,
+ svmls_za32_f16_vg2x4 (w8, z0, z4),
+ svmls_za32_vg2x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mls_w8_z0_z18:
+** ...
+** fmlsl za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z18, svfloat16x4_t,
+ svmls_za32_f16_vg2x4 (w8, z0, z18),
+ svmls_za32_vg2x4 (w8, z0, z18))
+
+/*
+** mls_w8_z18_z0:
+** ...
+** fmlsl za\.s\[w8, 0:1, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z18_z0, svfloat16x4_t,
+ svmls_za32_f16_vg2x4 (w8, z18, z0),
+ svmls_za32_vg2x4 (w8, z18, z0))
+
+/*
+** mls_w8_z0_z23:
+** ...
+** fmlsl za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z23, svfloat16x4_t,
+ svmls_za32_f16_vg2x4 (w8, z0, z23),
+ svmls_za32_vg2x4 (w8, z0, z23))
+
+/*
+** mls_w8_z23_z0:
+** ...
+** fmlsl za\.s\[w8, 0:1, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z23_z0, svfloat16x4_t,
+ svmls_za32_f16_vg2x4 (w8, z23, z0),
+ svmls_za32_vg2x4 (w8, z23, z0))
+
+/*
+** mls_w8_z4_z28:
+** fmlsl za\.s\[w8, 0:1, vgx4\], {z4\.h - z7\.h}, {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z4_z28, svfloat16x4_t,
+ svmls_za32_f16_vg2x4 (w8, z4, z28),
+ svmls_za32_vg2x4 (w8, z4, z28))
+
+/*
+** mls_w8_z28_z0:
+** fmlsl za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z28_z0, svfloat16x4_t,
+ svmls_za32_f16_vg2x4 (w8, z28, z0),
+ svmls_za32_vg2x4 (w8, z28, z0))
+
+/*
+** mls_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** fmlsl za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p1_z4_z0, svfloat16x4_t,
+ svmls_za32_f16_vg2x4 (w8 + 1, z4, z0),
+ svmls_za32_vg2x4 (w8 + 1, z4, z0))
+
+/*
+** mls_w8p2_z4_z0:
+** fmlsl za\.s\[w8, 2:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p2_z4_z0, svfloat16x4_t,
+ svmls_za32_f16_vg2x4 (w8 + 2, z4, z0),
+ svmls_za32_vg2x4 (w8 + 2, z4, z0))
+
+/*
+** mls_w8p6_z4_z0:
+** fmlsl za\.s\[w8, 6:7, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p6_z4_z0, svfloat16x4_t,
+ svmls_za32_f16_vg2x4 (w8 + 6, z4, z0),
+ svmls_za32_vg2x4 (w8 + 6, z4, z0))
+
+/*
+** mls_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** fmlsl za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p7_z4_z0, svfloat16x4_t,
+ svmls_za32_f16_vg2x4 (w8 + 7, z4, z0),
+ svmls_za32_vg2x4 (w8 + 7, z4, z0))
+
+/*
+** mls_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** fmlsl za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p8_z4_z4, svfloat16x4_t,
+ svmls_za32_f16_vg2x4 (w8 + 8, z4, z4),
+ svmls_za32_vg2x4 (w8 + 8, z4, z4))
+
+/*
+** mls_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fmlsl za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8m1_z4_z0, svfloat16x4_t,
+ svmls_za32_f16_vg2x4 (w8 - 1, z4, z0),
+ svmls_za32_vg2x4 (w8 - 1, z4, z0))
+
+/*
+** mls_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_0_z1_z0, svfloat16x4_t, svfloat16_t,
+ svmls_single_za32_f16_vg2x4 (0, z1, z0),
+ svmls_za32_vg2x4 (0, z1, z0))
+
+/*
+** mls_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** fmlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0_z1_z0, svfloat16x4_t, svfloat16_t,
+ svmls_single_za32_f16_vg2x4 (w0, z1, z0),
+ svmls_za32_vg2x4 (w0, z1, z0))
+
+/*
+** mls_single_w8_z1_z0:
+** fmlsl za\.s\[w8, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z1_z0, svfloat16x4_t, svfloat16_t,
+ svmls_single_za32_f16_vg2x4 (w8, z1, z0),
+ svmls_za32_vg2x4 (w8, z1, z0))
+
+/*
+** mls_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** fmlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p1_z1_z0, svfloat16x4_t, svfloat16_t,
+ svmls_single_za32_f16_vg2x4 (w8 + 1, z1, z0),
+ svmls_za32_vg2x4 (w8 + 1, z1, z0))
+
+/*
+** mls_single_w8p4_z20_z0:
+** fmlsl za\.s\[w8, 4:5, vgx4\], {z20\.h - z23\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p4_z20_z0, svfloat16x4_t, svfloat16_t,
+ svmls_single_za32_f16_vg2x4 (w8 + 4, z20, z0),
+ svmls_za32_vg2x4 (w8 + 4, z20, z0))
+
+/*
+** mls_single_w8p6_z27_z0:
+** fmlsl za\.s\[w8, 6:7, vgx4\], {z27\.h - z30\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p6_z27_z0, svfloat16x4_t, svfloat16_t,
+ svmls_single_za32_f16_vg2x4 (w8 + 6, z27, z0),
+ svmls_za32_vg2x4 (w8 + 6, z27, z0))
+
+/*
+** mls_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** fmlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p7_z1_z0, svfloat16x4_t, svfloat16_t,
+ svmls_single_za32_f16_vg2x4 (w8 + 7, z1, z0),
+ svmls_za32_vg2x4 (w8 + 7, z1, z0))
+
+/*
+** mls_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** fmlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p8_z1_z0, svfloat16x4_t, svfloat16_t,
+ svmls_single_za32_f16_vg2x4 (w8 + 8, z1, z0),
+ svmls_za32_vg2x4 (w8 + 8, z1, z0))
+
+/*
+** mls_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0m1_z1_z0, svfloat16x4_t, svfloat16_t,
+ svmls_single_za32_f16_vg2x4 (w0 - 1, z1, z0),
+ svmls_za32_vg2x4 (w0 - 1, z1, z0))
+
+/*
+** mls_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** fmlsl za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mls_single_w8_z0_z15, svfloat16x4_t, svfloat16_t,
+ svmls_single_za32_f16_vg2x4 (w8, z0, z15),
+ svmls_za32_vg2x4 (w8, z0, z15))
+
+/*
+** mls_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** fmlsl za\.s\[w8, 0:1, vgx4\], {z20\.h - z23\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z20_z16, svfloat16x4_t, svfloat16_t,
+ svmls_single_za32_f16_vg2x4 (w8, z20, z16),
+ svmls_za32_vg2x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_f32_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_f32_vg1x2.c
new file mode 100644
index 0000000..2319741
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_f32_vg1x2.c
@@ -0,0 +1,180 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmls za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (mls_0_z0_z0, svfloat32x2_t,
+ svmls_za32_f32_vg1x2 (0, z0, z0),
+ svmls_za32_vg1x2 (0, z0, z0))
+
+/*
+** mls_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** fmls za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (mls_w0_z0_z0, svfloat32x2_t,
+ svmls_za32_f32_vg1x2 (w0, z0, z0),
+ svmls_za32_vg1x2 (w0, z0, z0))
+
+/*
+** mls_w8_z0_z4:
+** fmls za\.s\[w8, 0, vgx2\], {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z4, svfloat32x2_t,
+ svmls_za32_f32_vg1x2 (w8, z0, z4),
+ svmls_za32_vg1x2 (w8, z0, z4))
+
+/*
+** mls_w8_z4_z18:
+** fmls za\.s\[w8, 0, vgx2\], {z4\.s - z5\.s}, {z18\.s - z19\.s}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z4_z18, svfloat32x2_t,
+ svmls_za32_f32_vg1x2 (w8, z4, z18),
+ svmls_za32_vg1x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mls_w8_z23_z0:
+** ...
+** fmls za\.s\[w8, 0, vgx2\], [^\n]+, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z23_z0, svfloat32x2_t,
+ svmls_za32_f32_vg1x2 (w8, z23, z0),
+ svmls_za32_vg1x2 (w8, z23, z0))
+
+/*
+** mls_w8_z18_z23:
+** ...
+** fmls za\.s\[w8, 0, vgx2\], {z18\.s - z19\.s}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z18_z23, svfloat32x2_t,
+ svmls_za32_f32_vg1x2 (w8, z18, z23),
+ svmls_za32_vg1x2 (w8, z18, z23))
+
+/*
+** mls_w8_z4_z28:
+** fmls za\.s\[w8, 0, vgx2\], {z4\.s - z5\.s}, {z28\.s - z29\.s}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z4_z28, svfloat32x2_t,
+ svmls_za32_f32_vg1x2 (w8, z4, z28),
+ svmls_za32_vg1x2 (w8, z4, z28))
+
+/*
+** mls_w8p7_z4_z0:
+** fmls za\.s\[w8, 7, vgx2\], {z4\.s - z5\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (mls_w8p7_z4_z0, svfloat32x2_t,
+ svmls_za32_f32_vg1x2 (w8 + 7, z4, z0),
+ svmls_za32_vg1x2 (w8 + 7, z4, z0))
+
+/*
+** mls_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** fmls za\.s\[\1, 0, vgx2\], {z4\.s - z5\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_ZA_XN (mls_w8p8_z4_z4, svfloat32x2_t,
+ svmls_za32_f32_vg1x2 (w8 + 8, z4, z4),
+ svmls_za32_vg1x2 (w8 + 8, z4, z4))
+
+/*
+** mls_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fmls za\.s\[\1, 0, vgx2\], {z4\.s - z5\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (mls_w8m1_z4_z0, svfloat32x2_t,
+ svmls_za32_f32_vg1x2 (w8 - 1, z4, z0),
+ svmls_za32_vg1x2 (w8 - 1, z4, z0))
+
+/*
+** mls_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmls za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_0_z1_z0, svfloat32x2_t, svfloat32_t,
+ svmls_single_za32_f32_vg1x2 (0, z1, z0),
+ svmls_za32_vg1x2 (0, z1, z0))
+
+/*
+** mls_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** fmls za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0_z1_z0, svfloat32x2_t, svfloat32_t,
+ svmls_single_za32_f32_vg1x2 (w0, z1, z0),
+ svmls_za32_vg1x2 (w0, z1, z0))
+
+/*
+** mls_single_w8_z1_z0:
+** fmls za\.s\[w8, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z1_z0, svfloat32x2_t, svfloat32_t,
+ svmls_single_za32_f32_vg1x2 (w8, z1, z0),
+ svmls_za32_vg1x2 (w8, z1, z0))
+
+/*
+** mls_single_w8p7_z1_z0:
+** fmls za\.s\[w8, 7, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p7_z1_z0, svfloat32x2_t, svfloat32_t,
+ svmls_single_za32_f32_vg1x2 (w8 + 7, z1, z0),
+ svmls_za32_vg1x2 (w8 + 7, z1, z0))
+
+/*
+** mls_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** fmls za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p8_z1_z0, svfloat32x2_t, svfloat32_t,
+ svmls_single_za32_f32_vg1x2 (w8 + 8, z1, z0),
+ svmls_za32_vg1x2 (w8 + 8, z1, z0))
+
+/*
+** mls_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmls za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0m1_z1_z0, svfloat32x2_t, svfloat32_t,
+ svmls_single_za32_f32_vg1x2 (w0 - 1, z1, z0),
+ svmls_za32_vg1x2 (w0 - 1, z1, z0))
+
+/*
+** mls_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** fmls za\.s\[w8, 0, vgx2\], {z0\.s - z1\.s}, z15\.s
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mls_single_w8_z0_z15, svfloat32x2_t, svfloat32_t,
+ svmls_single_za32_f32_vg1x2 (w8, z0, z15),
+ svmls_za32_vg1x2 (w8, z0, z15))
+
+/*
+** mls_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** fmls za\.s\[w8, 0, vgx2\], {z20\.s - z21\.s}, \1\.s
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z20_z16, svfloat32x2_t, svfloat32_t,
+ svmls_single_za32_f32_vg1x2 (w8, z20, z16),
+ svmls_za32_vg1x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_f32_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_f32_vg1x4.c
new file mode 100644
index 0000000..9c358bb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_f32_vg1x4.c
@@ -0,0 +1,172 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmls za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (mls_0_z0_z0, svfloat32x4_t,
+ svmls_za32_f32_vg1x4 (0, z0, z0),
+ svmls_za32_vg1x4 (0, z0, z0))
+
+/*
+** mls_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** fmls za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (mls_w0_z0_z0, svfloat32x4_t,
+ svmls_za32_f32_vg1x4 (w0, z0, z0),
+ svmls_za32_vg1x4 (w0, z0, z0))
+
+/*
+** mls_w8_z0_z4:
+** fmls za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z4, svfloat32x4_t,
+ svmls_za32_f32_vg1x4 (w8, z0, z4),
+ svmls_za32_vg1x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mls_w8_z0_z18:
+** ...
+** fmls za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z18, svfloat32x4_t,
+ svmls_za32_f32_vg1x4 (w8, z0, z18),
+ svmls_za32_vg1x4 (w8, z0, z18))
+
+/*
+** mls_w8_z18_z28:
+** ...
+** fmls za\.s\[w8, 0, vgx4\], [^\n]+, {z28\.s - z31\.s}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z18_z28, svfloat32x4_t,
+ svmls_za32_f32_vg1x4 (w8, z18, z28),
+ svmls_za32_vg1x4 (w8, z18, z28))
+
+/*
+** mls_w8_z28_z23:
+** ...
+** fmls za\.s\[w8, 0, vgx4\], {z28\.s - z31\.s}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z28_z23, svfloat32x4_t,
+ svmls_za32_f32_vg1x4 (w8, z28, z23),
+ svmls_za32_vg1x4 (w8, z28, z23))
+
+/*
+** mls_w8p7_z4_z0:
+** fmls za\.s\[w8, 7, vgx4\], {z4\.s - z7\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (mls_w8p7_z4_z0, svfloat32x4_t,
+ svmls_za32_f32_vg1x4 (w8 + 7, z4, z0),
+ svmls_za32_vg1x4 (w8 + 7, z4, z0))
+
+/*
+** mls_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** fmls za\.s\[\1, 0, vgx4\], {z4\.s - z7\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (mls_w8p8_z4_z4, svfloat32x4_t,
+ svmls_za32_f32_vg1x4 (w8 + 8, z4, z4),
+ svmls_za32_vg1x4 (w8 + 8, z4, z4))
+
+/*
+** mls_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fmls za\.s\[\1, 0, vgx4\], {z4\.s - z7\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (mls_w8m1_z4_z0, svfloat32x4_t,
+ svmls_za32_f32_vg1x4 (w8 - 1, z4, z0),
+ svmls_za32_vg1x4 (w8 - 1, z4, z0))
+
+/*
+** mls_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmls za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_0_z1_z0, svfloat32x4_t, svfloat32_t,
+ svmls_single_za32_f32_vg1x4 (0, z1, z0),
+ svmls_za32_vg1x4 (0, z1, z0))
+
+/*
+** mls_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** fmls za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0_z1_z0, svfloat32x4_t, svfloat32_t,
+ svmls_single_za32_f32_vg1x4 (w0, z1, z0),
+ svmls_za32_vg1x4 (w0, z1, z0))
+
+/*
+** mls_single_w8_z1_z0:
+** fmls za\.s\[w8, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z1_z0, svfloat32x4_t, svfloat32_t,
+ svmls_single_za32_f32_vg1x4 (w8, z1, z0),
+ svmls_za32_vg1x4 (w8, z1, z0))
+
+/*
+** mls_single_w8p7_z1_z0:
+** fmls za\.s\[w8, 7, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p7_z1_z0, svfloat32x4_t, svfloat32_t,
+ svmls_single_za32_f32_vg1x4 (w8 + 7, z1, z0),
+ svmls_za32_vg1x4 (w8 + 7, z1, z0))
+
+/*
+** mls_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** fmls za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p8_z1_z0, svfloat32x4_t, svfloat32_t,
+ svmls_single_za32_f32_vg1x4 (w8 + 8, z1, z0),
+ svmls_za32_vg1x4 (w8 + 8, z1, z0))
+
+/*
+** mls_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmls za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0m1_z1_z0, svfloat32x4_t, svfloat32_t,
+ svmls_single_za32_f32_vg1x4 (w0 - 1, z1, z0),
+ svmls_za32_vg1x4 (w0 - 1, z1, z0))
+
+/*
+** mls_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** fmls za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}, z15\.s
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mls_single_w8_z0_z15, svfloat32x4_t, svfloat32_t,
+ svmls_single_za32_f32_vg1x4 (w8, z0, z15),
+ svmls_za32_vg1x4 (w8, z0, z15))
+
+/*
+** mls_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** fmls za\.s\[w8, 0, vgx4\], {z20\.s - z23\.s}, \1\.s
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z20_z16, svfloat32x4_t, svfloat32_t,
+ svmls_single_za32_f32_vg1x4 (w8, z20, z16),
+ svmls_za32_vg1x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s16_vg2x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s16_vg2x1.c
new file mode 100644
index 0000000..0c73d04
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s16_vg2x1.c
@@ -0,0 +1,148 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlsl za\.s\[\1, 0:1\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_0_z0_z0, svint16_t,
+ svmls_za32_s16_vg2x1 (0, z0, z0),
+ svmls_za32_vg2x1 (0, z0, z0))
+
+/*
+** mls_w0_z0_z3:
+** mov (w8|w9|w10|w11), w0
+** smlsl za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w0_z0_z3, svint16_t,
+ svmls_za32_s16_vg2x1 (w0, z0, z3),
+ svmls_za32_vg2x1 (w0, z0, z3))
+
+/*
+** mls_w7_z0_z3:
+** mov (w8|w9|w10|w11), w7
+** smlsl za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w7_z0_z3, svint16_t,
+ svmls_za32_s16_vg2x1 (w7, z0, z3),
+ svmls_za32_vg2x1 (w7, z0, z3))
+
+/*
+** mls_w8_z7_z3:
+** smlsl za\.s\[w8, 0:1\], z7\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8_z7_z3, svint16_t,
+ svmls_za32_s16_vg2x1 (w8, z7, z3),
+ svmls_za32_vg2x1 (w8, z7, z3))
+
+/*
+** mls_w8_z31_z16:
+** mov (z[0-7])\.d, z16\.d
+**	smlsl	za\.s\[w8, 0:1\], z31\.h, \1\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8_z31_z16, svint16_t,
+ svmls_za32_s16_vg2x1 (w8, z31, z16),
+ svmls_za32_vg2x1 (w8, z31, z16))
+
+/*
+** mls_w8p1_z0_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlsl za\.s\[\1, 0:1\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p1_z0_z0, svint16_t,
+ svmls_za32_s16_vg2x1 (w8 + 1, z0, z0),
+ svmls_za32_vg2x1 (w8 + 1, z0, z0))
+
+/*
+** mls_w8p2_z23_z0:
+** smlsl za\.s\[w8, 2:3\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p2_z23_z0, svint16_t,
+ svmls_za32_s16_vg2x1 (w8 + 2, z23, z0),
+ svmls_za32_vg2x1 (w8 + 2, z23, z0))
+
+/*
+** mls_w11p6_z23_z0:
+** smlsl za\.s\[w11, 6:7\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w11p6_z23_z0, svint16_t,
+ svmls_za32_s16_vg2x1 (w11 + 6, z23, z0),
+ svmls_za32_vg2x1 (w11 + 6, z23, z0))
+
+/*
+** mls_w8p7_z7_z7:
+** add (w8|w9|w10|w11), w8, #?7
+** smlsl za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p7_z7_z7, svint16_t,
+ svmls_za32_s16_vg2x1 (w8 + 7, z7, z7),
+ svmls_za32_vg2x1 (w8 + 7, z7, z7))
+
+/*
+** mls_w11p10_z23_z0:
+** smlsl za\.s\[w11, 10:11\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w11p10_z23_z0, svint16_t,
+ svmls_za32_s16_vg2x1 (w11 + 10, z23, z0),
+ svmls_za32_vg2x1 (w11 + 10, z23, z0))
+
+/*
+** mls_w8p14_z23_z0:
+** smlsl za\.s\[w8, 14:15\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p14_z23_z0, svint16_t,
+ svmls_za32_s16_vg2x1 (w8 + 14, z23, z0),
+ svmls_za32_vg2x1 (w8 + 14, z23, z0))
+
+/*
+** mls_w8p15_z7_z7:
+** add (w8|w9|w10|w11), w8, #?15
+** smlsl za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p15_z7_z7, svint16_t,
+ svmls_za32_s16_vg2x1 (w8 + 15, z7, z7),
+ svmls_za32_vg2x1 (w8 + 15, z7, z7))
+
+/*
+** mls_w8p16_z7_z7:
+** add (w8|w9|w10|w11), w8, #?16
+** smlsl za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p16_z7_z7, svint16_t,
+ svmls_za32_s16_vg2x1 (w8 + 16, z7, z7),
+ svmls_za32_vg2x1 (w8 + 16, z7, z7))
+
+/*
+** mls_w8m1_z16_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlsl za\.s\[\1, 0:1\], z16\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8m1_z16_z0, svint16_t,
+ svmls_za32_s16_vg2x1 (w8 - 1, z16, z0),
+ svmls_za32_vg2x1 (w8 - 1, z16, z0))
+
+/*
+** mls_w12_z0_z3:
+** mov (w8|w9|w10|w11), w12
+** smlsl za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w12_z0_z3, svint16_t,
+ svmls_za32_s16_vg2x1 (w12, z0, z3),
+ svmls_za32_vg2x1 (w12, z0, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s16_vg2x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s16_vg2x2.c
new file mode 100644
index 0000000..7b5d934
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s16_vg2x2.c
@@ -0,0 +1,247 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_0_z0_z0, svint16x2_t,
+ svmls_za32_s16_vg2x2 (0, z0, z0),
+ svmls_za32_vg2x2 (0, z0, z0))
+
+/*
+** mls_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** smlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w0_z0_z0, svint16x2_t,
+ svmls_za32_s16_vg2x2 (w0, z0, z0),
+ svmls_za32_vg2x2 (w0, z0, z0))
+
+/*
+** mls_w8_z0_z4:
+** smlsl za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z4, svint16x2_t,
+ svmls_za32_s16_vg2x2 (w8, z0, z4),
+ svmls_za32_vg2x2 (w8, z0, z4))
+
+/*
+** mls_w8_z4_z18:
+** smlsl za\.s\[w8, 0:1, vgx2\], {z4\.h - z5\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z4_z18, svint16x2_t,
+ svmls_za32_s16_vg2x2 (w8, z4, z18),
+ svmls_za32_vg2x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mls_w8_z0_z23:
+** ...
+** smlsl za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z23, svint16x2_t,
+ svmls_za32_s16_vg2x2 (w8, z0, z23),
+ svmls_za32_vg2x2 (w8, z0, z23))
+
+/*
+** mls_w8_z23_z0:
+** ...
+** smlsl za\.s\[w8, 0:1, vgx2\], [^\n]+, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z23_z0, svint16x2_t,
+ svmls_za32_s16_vg2x2 (w8, z23, z0),
+ svmls_za32_vg2x2 (w8, z23, z0))
+
+/*
+** mls_w8_z18_z28:
+** smlsl za\.s\[w8, 0:1, vgx2\], {z18\.h - z19\.h}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z18_z28, svint16x2_t,
+ svmls_za32_s16_vg2x2 (w8, z18, z28),
+ svmls_za32_vg2x2 (w8, z18, z28))
+
+/*
+** mls_w8_z28_z4:
+** smlsl za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z28_z4, svint16x2_t,
+ svmls_za32_s16_vg2x2 (w8, z28, z4),
+ svmls_za32_vg2x2 (w8, z28, z4))
+
+/*
+** mls_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlsl za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p1_z4_z0, svint16x2_t,
+ svmls_za32_s16_vg2x2 (w8 + 1, z4, z0),
+ svmls_za32_vg2x2 (w8 + 1, z4, z0))
+
+/*
+** mls_w8p2_z4_z0:
+** smlsl za\.s\[w8, 2:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p2_z4_z0, svint16x2_t,
+ svmls_za32_s16_vg2x2 (w8 + 2, z4, z0),
+ svmls_za32_vg2x2 (w8 + 2, z4, z0))
+
+/*
+** mls_w8p6_z4_z0:
+** smlsl za\.s\[w8, 6:7, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p6_z4_z0, svint16x2_t,
+ svmls_za32_s16_vg2x2 (w8 + 6, z4, z0),
+ svmls_za32_vg2x2 (w8 + 6, z4, z0))
+
+/*
+** mls_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlsl za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p7_z4_z0, svint16x2_t,
+ svmls_za32_s16_vg2x2 (w8 + 7, z4, z0),
+ svmls_za32_vg2x2 (w8 + 7, z4, z0))
+
+/*
+** mls_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** smlsl za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p8_z4_z4, svint16x2_t,
+ svmls_za32_s16_vg2x2 (w8 + 8, z4, z4),
+ svmls_za32_vg2x2 (w8 + 8, z4, z4))
+
+/*
+** mls_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlsl za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8m1_z4_z0, svint16x2_t,
+ svmls_za32_s16_vg2x2 (w8 - 1, z4, z0),
+ svmls_za32_vg2x2 (w8 - 1, z4, z0))
+
+/*
+** mls_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_0_z1_z0, svint16x2_t, svint16_t,
+ svmls_single_za32_s16_vg2x2 (0, z1, z0),
+ svmls_za32_vg2x2 (0, z1, z0))
+
+/*
+** mls_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** smlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0_z1_z0, svint16x2_t, svint16_t,
+ svmls_single_za32_s16_vg2x2 (w0, z1, z0),
+ svmls_za32_vg2x2 (w0, z1, z0))
+
+/*
+** mls_single_w8_z1_z0:
+** smlsl za\.s\[w8, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z1_z0, svint16x2_t, svint16_t,
+ svmls_single_za32_s16_vg2x2 (w8, z1, z0),
+ svmls_za32_vg2x2 (w8, z1, z0))
+
+/*
+** mls_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p1_z1_z0, svint16x2_t, svint16_t,
+ svmls_single_za32_s16_vg2x2 (w8 + 1, z1, z0),
+ svmls_za32_vg2x2 (w8 + 1, z1, z0))
+
+/*
+** mls_single_w8p4_z20_z0:
+** smlsl za\.s\[w8, 4:5, vgx2\], {z20\.h - z21\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p4_z20_z0, svint16x2_t, svint16_t,
+ svmls_single_za32_s16_vg2x2 (w8 + 4, z20, z0),
+ svmls_za32_vg2x2 (w8 + 4, z20, z0))
+
+/*
+** mls_single_w8p6_z27_z0:
+** smlsl za\.s\[w8, 6:7, vgx2\], {z27\.h - z28\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p6_z27_z0, svint16x2_t, svint16_t,
+ svmls_single_za32_s16_vg2x2 (w8 + 6, z27, z0),
+ svmls_za32_vg2x2 (w8 + 6, z27, z0))
+
+/*
+** mls_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p7_z1_z0, svint16x2_t, svint16_t,
+ svmls_single_za32_s16_vg2x2 (w8 + 7, z1, z0),
+ svmls_za32_vg2x2 (w8 + 7, z1, z0))
+
+/*
+** mls_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** smlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p8_z1_z0, svint16x2_t, svint16_t,
+ svmls_single_za32_s16_vg2x2 (w8 + 8, z1, z0),
+ svmls_za32_vg2x2 (w8 + 8, z1, z0))
+
+/*
+** mls_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0m1_z1_z0, svint16x2_t, svint16_t,
+ svmls_single_za32_s16_vg2x2 (w0 - 1, z1, z0),
+ svmls_za32_vg2x2 (w0 - 1, z1, z0))
+
+/*
+** mls_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** smlsl za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mls_single_w8_z0_z15, svint16x2_t, svint16_t,
+ svmls_single_za32_s16_vg2x2 (w8, z0, z15),
+ svmls_za32_vg2x2 (w8, z0, z15))
+
+/*
+** mls_single_w8_z20_z16:
+**	mov	(z[0-7])\.d, z16\.d
+** smlsl za\.s\[w8, 0:1, vgx2\], {z20\.h - z21\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z20_z16, svint16x2_t, svint16_t,
+ svmls_single_za32_s16_vg2x2 (w8, z20, z16),
+ svmls_za32_vg2x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s16_vg2x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s16_vg2x4.c
new file mode 100644
index 0000000..d026f74
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s16_vg2x4.c
@@ -0,0 +1,258 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_0_z0_z0, svint16x4_t,
+ svmls_za32_s16_vg2x4 (0, z0, z0),
+ svmls_za32_vg2x4 (0, z0, z0))
+
+/*
+** mls_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** smlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w0_z0_z0, svint16x4_t,
+ svmls_za32_s16_vg2x4 (w0, z0, z0),
+ svmls_za32_vg2x4 (w0, z0, z0))
+
+/*
+** mls_w8_z0_z4:
+** smlsl za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z4, svint16x4_t,
+ svmls_za32_s16_vg2x4 (w8, z0, z4),
+ svmls_za32_vg2x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mls_w8_z0_z18:
+** ...
+** smlsl za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z18, svint16x4_t,
+ svmls_za32_s16_vg2x4 (w8, z0, z18),
+ svmls_za32_vg2x4 (w8, z0, z18))
+
+/*
+** mls_w8_z18_z0:
+** ...
+** smlsl za\.s\[w8, 0:1, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z18_z0, svint16x4_t,
+ svmls_za32_s16_vg2x4 (w8, z18, z0),
+ svmls_za32_vg2x4 (w8, z18, z0))
+
+/*
+** mls_w8_z0_z23:
+** ...
+** smlsl za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z23, svint16x4_t,
+ svmls_za32_s16_vg2x4 (w8, z0, z23),
+ svmls_za32_vg2x4 (w8, z0, z23))
+
+/*
+** mls_w8_z23_z0:
+** ...
+** smlsl za\.s\[w8, 0:1, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z23_z0, svint16x4_t,
+ svmls_za32_s16_vg2x4 (w8, z23, z0),
+ svmls_za32_vg2x4 (w8, z23, z0))
+
+/*
+** mls_w8_z4_z28:
+** smlsl za\.s\[w8, 0:1, vgx4\], {z4\.h - z7\.h}, {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z4_z28, svint16x4_t,
+ svmls_za32_s16_vg2x4 (w8, z4, z28),
+ svmls_za32_vg2x4 (w8, z4, z28))
+
+/*
+** mls_w8_z28_z0:
+** smlsl za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z28_z0, svint16x4_t,
+ svmls_za32_s16_vg2x4 (w8, z28, z0),
+ svmls_za32_vg2x4 (w8, z28, z0))
+
+/*
+** mls_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlsl za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p1_z4_z0, svint16x4_t,
+ svmls_za32_s16_vg2x4 (w8 + 1, z4, z0),
+ svmls_za32_vg2x4 (w8 + 1, z4, z0))
+
+/*
+** mls_w8p2_z4_z0:
+** smlsl za\.s\[w8, 2:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p2_z4_z0, svint16x4_t,
+ svmls_za32_s16_vg2x4 (w8 + 2, z4, z0),
+ svmls_za32_vg2x4 (w8 + 2, z4, z0))
+
+/*
+** mls_w8p6_z4_z0:
+** smlsl za\.s\[w8, 6:7, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p6_z4_z0, svint16x4_t,
+ svmls_za32_s16_vg2x4 (w8 + 6, z4, z0),
+ svmls_za32_vg2x4 (w8 + 6, z4, z0))
+
+/*
+** mls_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlsl za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p7_z4_z0, svint16x4_t,
+ svmls_za32_s16_vg2x4 (w8 + 7, z4, z0),
+ svmls_za32_vg2x4 (w8 + 7, z4, z0))
+
+/*
+** mls_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** smlsl za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p8_z4_z4, svint16x4_t,
+ svmls_za32_s16_vg2x4 (w8 + 8, z4, z4),
+ svmls_za32_vg2x4 (w8 + 8, z4, z4))
+
+/*
+** mls_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlsl za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8m1_z4_z0, svint16x4_t,
+ svmls_za32_s16_vg2x4 (w8 - 1, z4, z0),
+ svmls_za32_vg2x4 (w8 - 1, z4, z0))
+
+/*
+** mls_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_0_z1_z0, svint16x4_t, svint16_t,
+ svmls_single_za32_s16_vg2x4 (0, z1, z0),
+ svmls_za32_vg2x4 (0, z1, z0))
+
+/*
+** mls_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** smlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0_z1_z0, svint16x4_t, svint16_t,
+ svmls_single_za32_s16_vg2x4 (w0, z1, z0),
+ svmls_za32_vg2x4 (w0, z1, z0))
+
+/*
+** mls_single_w8_z1_z0:
+** smlsl za\.s\[w8, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z1_z0, svint16x4_t, svint16_t,
+ svmls_single_za32_s16_vg2x4 (w8, z1, z0),
+ svmls_za32_vg2x4 (w8, z1, z0))
+
+/*
+** mls_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p1_z1_z0, svint16x4_t, svint16_t,
+ svmls_single_za32_s16_vg2x4 (w8 + 1, z1, z0),
+ svmls_za32_vg2x4 (w8 + 1, z1, z0))
+
+/*
+** mls_single_w8p4_z20_z0:
+** smlsl za\.s\[w8, 4:5, vgx4\], {z20\.h - z23\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p4_z20_z0, svint16x4_t, svint16_t,
+ svmls_single_za32_s16_vg2x4 (w8 + 4, z20, z0),
+ svmls_za32_vg2x4 (w8 + 4, z20, z0))
+
+/*
+** mls_single_w8p6_z27_z0:
+** smlsl za\.s\[w8, 6:7, vgx4\], {z27\.h - z30\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p6_z27_z0, svint16x4_t, svint16_t,
+ svmls_single_za32_s16_vg2x4 (w8 + 6, z27, z0),
+ svmls_za32_vg2x4 (w8 + 6, z27, z0))
+
+/*
+** mls_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p7_z1_z0, svint16x4_t, svint16_t,
+ svmls_single_za32_s16_vg2x4 (w8 + 7, z1, z0),
+ svmls_za32_vg2x4 (w8 + 7, z1, z0))
+
+/*
+** mls_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** smlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p8_z1_z0, svint16x4_t, svint16_t,
+ svmls_single_za32_s16_vg2x4 (w8 + 8, z1, z0),
+ svmls_za32_vg2x4 (w8 + 8, z1, z0))
+
+/*
+** mls_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0m1_z1_z0, svint16x4_t, svint16_t,
+ svmls_single_za32_s16_vg2x4 (w0 - 1, z1, z0),
+ svmls_za32_vg2x4 (w0 - 1, z1, z0))
+
+/*
+** mls_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** smlsl za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mls_single_w8_z0_z15, svint16x4_t, svint16_t,
+ svmls_single_za32_s16_vg2x4 (w8, z0, z15),
+ svmls_za32_vg2x4 (w8, z0, z15))
+
+/*
+** mls_single_w8_z20_z16:
+**	mov	(z[0-7])\.d, z16\.d
+** smlsl za\.s\[w8, 0:1, vgx4\], {z20\.h - z23\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z20_z16, svint16x4_t, svint16_t,
+ svmls_single_za32_s16_vg2x4 (w8, z20, z16),
+ svmls_za32_vg2x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s8_vg4x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s8_vg4x1.c
new file mode 100644
index 0000000..0937c49
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s8_vg4x1.c
@@ -0,0 +1,149 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlsll za\.s\[\1, 0:3\], z0\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mls_0_z0_z0, svint8_t,
+ svmls_za32_s8_vg4x1 (0, z0, z0),
+ svmls_za32_vg4x1 (0, z0, z0))
+
+/*
+** mls_w0_z0_z3:
+** mov (w8|w9|w10|w11), w0
+** smlsll za\.s\[\1, 0:3\], z0\.b, z3\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w0_z0_z3, svint8_t,
+ svmls_za32_s8_vg4x1 (w0, z0, z3),
+ svmls_za32_vg4x1 (w0, z0, z3))
+
+/*
+** mls_w7_z0_z3:
+** mov (w8|w9|w10|w11), w7
+** smlsll za\.s\[\1, 0:3\], z0\.b, z3\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w7_z0_z3, svint8_t,
+ svmls_za32_s8_vg4x1 (w7, z0, z3),
+ svmls_za32_vg4x1 (w7, z0, z3))
+
+/*
+** mls_w8_z7_z3:
+** smlsll za\.s\[w8, 0:3\], z7\.b, z3\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w8_z7_z3, svint8_t,
+ svmls_za32_s8_vg4x1 (w8, z7, z3),
+ svmls_za32_vg4x1 (w8, z7, z3))
+
+/*
+** mls_w8_z31_z16:
+** mov (z[0-7])\.d, z16\.d
+**	smlsll	za\.s\[w8, 0:3\], z31\.b, \1\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w8_z31_z16, svint8_t,
+ svmls_za32_s8_vg4x1 (w8, z31, z16),
+ svmls_za32_vg4x1 (w8, z31, z16))
+
+/*
+** mls_w8p1_z0_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlsll za\.s\[\1, 0:3\], z0\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w8p1_z0_z0, svint8_t,
+ svmls_za32_s8_vg4x1 (w8 + 1, z0, z0),
+ svmls_za32_vg4x1 (w8 + 1, z0, z0))
+
+/*
+** mls_w10p4_z23_z0:
+** smlsll za\.s\[w10, 4:7\], z23\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w10p4_z23_z0, svint8_t,
+ svmls_za32_s8_vg4x1 (w10 + 4, z23, z0),
+ svmls_za32_vg4x1 (w10 + 4, z23, z0))
+
+/*
+** mls_w11p6_z23_z0:
+** add (w8|w9|w10|w11), w11, #?6
+** smlsll za\.s\[\1, 0:3\], z23\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w11p6_z23_z0, svint8_t,
+ svmls_za32_s8_vg4x1 (w11 + 6, z23, z0),
+ svmls_za32_vg4x1 (w11 + 6, z23, z0))
+
+/*
+** mls_w9p8_z7_z7:
+** smlsll za\.s\[w9, 8:11\], z7\.b, z7\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w9p8_z7_z7, svint8_t,
+ svmls_za32_s8_vg4x1 (w9 + 8, z7, z7),
+ svmls_za32_vg4x1 (w9 + 8, z7, z7))
+
+/*
+** mls_w11p12_z23_z0:
+** smlsll za\.s\[w11, 12:15\], z23\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w11p12_z23_z0, svint8_t,
+ svmls_za32_s8_vg4x1 (w11 + 12, z23, z0),
+ svmls_za32_vg4x1 (w11 + 12, z23, z0))
+
+/*
+** mls_w8p14_z23_z0:
+** add (w8|w9|w10|w11), w8, #?14
+** smlsll za\.s\[\1, 0:3\], z23\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w8p14_z23_z0, svint8_t,
+ svmls_za32_s8_vg4x1 (w8 + 14, z23, z0),
+ svmls_za32_vg4x1 (w8 + 14, z23, z0))
+
+/*
+** mls_w8p15_z7_z7:
+** add (w8|w9|w10|w11), w8, #?15
+** smlsll za\.s\[\1, 0:3\], z7\.b, z7\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w8p15_z7_z7, svint8_t,
+ svmls_za32_s8_vg4x1 (w8 + 15, z7, z7),
+ svmls_za32_vg4x1 (w8 + 15, z7, z7))
+
+/*
+** mls_w8p16_z7_z7:
+** add (w8|w9|w10|w11), w8, #?16
+** smlsll za\.s\[\1, 0:3\], z7\.b, z7\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w8p16_z7_z7, svint8_t,
+ svmls_za32_s8_vg4x1 (w8 + 16, z7, z7),
+ svmls_za32_vg4x1 (w8 + 16, z7, z7))
+
+/*
+** mls_w8m1_z16_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlsll za\.s\[\1, 0:3\], z16\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w8m1_z16_z0, svint8_t,
+ svmls_za32_s8_vg4x1 (w8 - 1, z16, z0),
+ svmls_za32_vg4x1 (w8 - 1, z16, z0))
+
+/*
+** mls_w12_z0_z3:
+** mov (w8|w9|w10|w11), w12
+** smlsll za\.s\[\1, 0:3\], z0\.b, z3\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w12_z0_z3, svint8_t,
+ svmls_za32_s8_vg4x1 (w12, z0, z3),
+ svmls_za32_vg4x1 (w12, z0, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s8_vg4x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s8_vg4x2.c
new file mode 100644
index 0000000..4dcfac5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s8_vg4x2.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlsll za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mls_0_z0_z0, svint8x2_t,
+ svmls_za32_s8_vg4x2 (0, z0, z0),
+ svmls_za32_vg4x2 (0, z0, z0))
+
+/*
+** mls_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** smlsll za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w0_z0_z0, svint8x2_t,
+ svmls_za32_s8_vg4x2 (w0, z0, z0),
+ svmls_za32_vg4x2 (w0, z0, z0))
+
+/*
+** mls_w8_z0_z4:
+** smlsll za\.s\[w8, 0:3, vgx2\], {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z4, svint8x2_t,
+ svmls_za32_s8_vg4x2 (w8, z0, z4),
+ svmls_za32_vg4x2 (w8, z0, z4))
+
+/*
+** mls_w8_z4_z18:
+** smlsll za\.s\[w8, 0:3, vgx2\], {z4\.b - z5\.b}, {z18\.b - z19\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z4_z18, svint8x2_t,
+ svmls_za32_s8_vg4x2 (w8, z4, z18),
+ svmls_za32_vg4x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mls_w8_z0_z23:
+** ...
+** smlsll za\.s\[w8, 0:3, vgx2\], {z0\.b - z1\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z23, svint8x2_t,
+ svmls_za32_s8_vg4x2 (w8, z0, z23),
+ svmls_za32_vg4x2 (w8, z0, z23))
+
+/*
+** mls_w8_z23_z0:
+** ...
+** smlsll za\.s\[w8, 0:3, vgx2\], [^\n]+, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z23_z0, svint8x2_t,
+ svmls_za32_s8_vg4x2 (w8, z23, z0),
+ svmls_za32_vg4x2 (w8, z23, z0))
+
+/*
+** mls_w8_z18_z28:
+** smlsll za\.s\[w8, 0:3, vgx2\], {z18\.b - z19\.b}, {z28\.b - z29\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z18_z28, svint8x2_t,
+ svmls_za32_s8_vg4x2 (w8, z18, z28),
+ svmls_za32_vg4x2 (w8, z18, z28))
+
+/*
+** mls_w8_z28_z4:
+** smlsll za\.s\[w8, 0:3, vgx2\], {z28\.b - z29\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z28_z4, svint8x2_t,
+ svmls_za32_s8_vg4x2 (w8, z28, z4),
+ svmls_za32_vg4x2 (w8, z28, z4))
+
+/*
+** mls_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlsll za\.s\[\1, 0:3, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8p1_z4_z0, svint8x2_t,
+ svmls_za32_s8_vg4x2 (w8 + 1, z4, z0),
+ svmls_za32_vg4x2 (w8 + 1, z4, z0))
+
+/*
+** mls_w8p2_z4_z0:
+** add (w8|w9|w10|w11), w8, #?2
+**	smlsll	za\.s\[\1, 0:3, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8p2_z4_z0, svint8x2_t,
+ svmls_za32_s8_vg4x2 (w8 + 2, z4, z0),
+ svmls_za32_vg4x2 (w8 + 2, z4, z0))
+
+/*
+** mls_w11p4_z4_z0:
+** smlsll za\.s\[w11, 4:7, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w11p4_z4_z0, svint8x2_t,
+ svmls_za32_s8_vg4x2 (w11 + 4, z4, z0),
+ svmls_za32_vg4x2 (w11 + 4, z4, z0))
+
+/*
+** mls_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlsll za\.s\[\1, 0:3, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8p7_z4_z0, svint8x2_t,
+ svmls_za32_s8_vg4x2 (w8 + 7, z4, z0),
+ svmls_za32_vg4x2 (w8 + 7, z4, z0))
+
+/*
+** mls_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** smlsll za\.s\[\1, 0:3, vgx2\], {z4\.b - z5\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8p8_z4_z4, svint8x2_t,
+ svmls_za32_s8_vg4x2 (w8 + 8, z4, z4),
+ svmls_za32_vg4x2 (w8 + 8, z4, z4))
+
+/*
+** mls_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlsll za\.s\[\1, 0:3, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8m1_z4_z0, svint8x2_t,
+ svmls_za32_s8_vg4x2 (w8 - 1, z4, z0),
+ svmls_za32_vg4x2 (w8 - 1, z4, z0))
+
+/*
+** mls_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlsll za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_0_z1_z0, svint8x2_t, svint8_t,
+ svmls_single_za32_s8_vg4x2 (0, z1, z0),
+ svmls_za32_vg4x2 (0, z1, z0))
+
+/*
+** mls_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** smlsll za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0_z1_z0, svint8x2_t, svint8_t,
+ svmls_single_za32_s8_vg4x2 (w0, z1, z0),
+ svmls_za32_vg4x2 (w0, z1, z0))
+
+/*
+** mls_single_w8_z1_z0:
+** smlsll za\.s\[w8, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z1_z0, svint8x2_t, svint8_t,
+ svmls_single_za32_s8_vg4x2 (w8, z1, z0),
+ svmls_za32_vg4x2 (w8, z1, z0))
+
+/*
+** mls_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlsll za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p1_z1_z0, svint8x2_t, svint8_t,
+ svmls_single_za32_s8_vg4x2 (w8 + 1, z1, z0),
+ svmls_za32_vg4x2 (w8 + 1, z1, z0))
+
+/*
+** mls_single_w8p2_z20_z0:
+** add (w8|w9|w10|w11), w8, #?2
+** smlsll za\.s\[\1, 0:3, vgx2\], {z20\.b - z21\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p2_z20_z0, svint8x2_t, svint8_t,
+ svmls_single_za32_s8_vg4x2 (w8 + 2, z20, z0),
+ svmls_za32_vg4x2 (w8 + 2, z20, z0))
+
+/*
+** mls_single_w11p4_z27_z0:
+** smlsll za\.s\[w11, 4:7, vgx2\], {z27\.b - z28\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w11p4_z27_z0, svint8x2_t, svint8_t,
+ svmls_single_za32_s8_vg4x2 (w11 + 4, z27, z0),
+ svmls_za32_vg4x2 (w11 + 4, z27, z0))
+
+/*
+** mls_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlsll za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p7_z1_z0, svint8x2_t, svint8_t,
+ svmls_single_za32_s8_vg4x2 (w8 + 7, z1, z0),
+ svmls_za32_vg4x2 (w8 + 7, z1, z0))
+
+/*
+** mls_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** smlsll za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p8_z1_z0, svint8x2_t, svint8_t,
+ svmls_single_za32_s8_vg4x2 (w8 + 8, z1, z0),
+ svmls_za32_vg4x2 (w8 + 8, z1, z0))
+
+/*
+** mls_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlsll za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0m1_z1_z0, svint8x2_t, svint8_t,
+ svmls_single_za32_s8_vg4x2 (w0 - 1, z1, z0),
+ svmls_za32_vg4x2 (w0 - 1, z1, z0))
+
+/*
+** mls_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** smlsll za\.s\[w8, 0:3, vgx2\], {z0\.b - z1\.b}, z15\.b
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mls_single_w8_z0_z15, svint8x2_t, svint8_t,
+ svmls_single_za32_s8_vg4x2 (w8, z0, z15),
+ svmls_za32_vg4x2 (w8, z0, z15))
+
+/*
+** mls_single_w8_z20_z16:
+**	mov	(z[0-7])\.d, z16\.d
+** smlsll za\.s\[w8, 0:3, vgx2\], {z20\.b - z21\.b}, \1\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z20_z16, svint8x2_t, svint8_t,
+ svmls_single_za32_s8_vg4x2 (w8, z20, z16),
+ svmls_za32_vg4x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s8_vg4x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s8_vg4x4.c
new file mode 100644
index 0000000..86e6627
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_s8_vg4x4.c
@@ -0,0 +1,260 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlsll za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mls_0_z0_z0, svint8x4_t,
+ svmls_za32_s8_vg4x4 (0, z0, z0),
+ svmls_za32_vg4x4 (0, z0, z0))
+
+/*
+** mls_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** smlsll za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w0_z0_z0, svint8x4_t,
+ svmls_za32_s8_vg4x4 (w0, z0, z0),
+ svmls_za32_vg4x4 (w0, z0, z0))
+
+/*
+** mls_w8_z0_z4:
+** smlsll za\.s\[w8, 0:3, vgx4\], {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z4, svint8x4_t,
+ svmls_za32_s8_vg4x4 (w8, z0, z4),
+ svmls_za32_vg4x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mls_w8_z0_z18:
+** ...
+** smlsll za\.s\[w8, 0:3, vgx4\], {z0\.b - z3\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z18, svint8x4_t,
+ svmls_za32_s8_vg4x4 (w8, z0, z18),
+ svmls_za32_vg4x4 (w8, z0, z18))
+
+/*
+** mls_w8_z18_z0:
+** ...
+** smlsll za\.s\[w8, 0:3, vgx4\], [^\n]+, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z18_z0, svint8x4_t,
+ svmls_za32_s8_vg4x4 (w8, z18, z0),
+ svmls_za32_vg4x4 (w8, z18, z0))
+
+/*
+** mls_w8_z0_z23:
+** ...
+** smlsll za\.s\[w8, 0:3, vgx4\], {z0\.b - z3\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z23, svint8x4_t,
+ svmls_za32_s8_vg4x4 (w8, z0, z23),
+ svmls_za32_vg4x4 (w8, z0, z23))
+
+/*
+** mls_w8_z23_z0:
+** ...
+** smlsll za\.s\[w8, 0:3, vgx4\], [^\n]+, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z23_z0, svint8x4_t,
+ svmls_za32_s8_vg4x4 (w8, z23, z0),
+ svmls_za32_vg4x4 (w8, z23, z0))
+
+/*
+** mls_w8_z4_z28:
+** smlsll za\.s\[w8, 0:3, vgx4\], {z4\.b - z7\.b}, {z28\.b - z31\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z4_z28, svint8x4_t,
+ svmls_za32_s8_vg4x4 (w8, z4, z28),
+ svmls_za32_vg4x4 (w8, z4, z28))
+
+/*
+** mls_w8_z28_z0:
+** smlsll za\.s\[w8, 0:3, vgx4\], {z28\.b - z31\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z28_z0, svint8x4_t,
+ svmls_za32_s8_vg4x4 (w8, z28, z0),
+ svmls_za32_vg4x4 (w8, z28, z0))
+
+/*
+** mls_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlsll za\.s\[\1, 0:3, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8p1_z4_z0, svint8x4_t,
+ svmls_za32_s8_vg4x4 (w8 + 1, z4, z0),
+ svmls_za32_vg4x4 (w8 + 1, z4, z0))
+
+/*
+** mls_w8p2_z4_z0:
+** add (w8|w9|w10|w11), w8, #?2
+** smlsll za\.s\[\1, 0:3, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8p2_z4_z0, svint8x4_t,
+ svmls_za32_s8_vg4x4 (w8 + 2, z4, z0),
+ svmls_za32_vg4x4 (w8 + 2, z4, z0))
+
+/*
+** mls_w11p4_z4_z0:
+** smlsll za\.s\[w11, 4:7, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w11p4_z4_z0, svint8x4_t,
+ svmls_za32_s8_vg4x4 (w11 + 4, z4, z0),
+ svmls_za32_vg4x4 (w11 + 4, z4, z0))
+
+/*
+** mls_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlsll za\.s\[\1, 0:3, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8p7_z4_z0, svint8x4_t,
+ svmls_za32_s8_vg4x4 (w8 + 7, z4, z0),
+ svmls_za32_vg4x4 (w8 + 7, z4, z0))
+
+/*
+** mls_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** smlsll za\.s\[\1, 0:3, vgx4\], {z4\.b - z7\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8p8_z4_z4, svint8x4_t,
+ svmls_za32_s8_vg4x4 (w8 + 8, z4, z4),
+ svmls_za32_vg4x4 (w8 + 8, z4, z4))
+
+/*
+** mls_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlsll za\.s\[\1, 0:3, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8m1_z4_z0, svint8x4_t,
+ svmls_za32_s8_vg4x4 (w8 - 1, z4, z0),
+ svmls_za32_vg4x4 (w8 - 1, z4, z0))
+
+/*
+** mls_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlsll za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_0_z1_z0, svint8x4_t, svint8_t,
+ svmls_single_za32_s8_vg4x4 (0, z1, z0),
+ svmls_za32_vg4x4 (0, z1, z0))
+
+/*
+** mls_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** smlsll za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0_z1_z0, svint8x4_t, svint8_t,
+ svmls_single_za32_s8_vg4x4 (w0, z1, z0),
+ svmls_za32_vg4x4 (w0, z1, z0))
+
+/*
+** mls_single_w8_z1_z0:
+** smlsll za\.s\[w8, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z1_z0, svint8x4_t, svint8_t,
+ svmls_single_za32_s8_vg4x4 (w8, z1, z0),
+ svmls_za32_vg4x4 (w8, z1, z0))
+
+/*
+** mls_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlsll za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p1_z1_z0, svint8x4_t, svint8_t,
+ svmls_single_za32_s8_vg4x4 (w8 + 1, z1, z0),
+ svmls_za32_vg4x4 (w8 + 1, z1, z0))
+
+/*
+** mls_single_w8p4_z20_z0:
+** smlsll za\.s\[w8, 4:7, vgx4\], {z20\.b - z23\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p4_z20_z0, svint8x4_t, svint8_t,
+ svmls_single_za32_s8_vg4x4 (w8 + 4, z20, z0),
+ svmls_za32_vg4x4 (w8 + 4, z20, z0))
+
+/*
+** mls_single_w8p6_z27_z0:
+** add (w8|w9|w10|w11), w8, #?6
+** smlsll za\.s\[\1, 0:3, vgx4\], {z27\.b - z30\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p6_z27_z0, svint8x4_t, svint8_t,
+ svmls_single_za32_s8_vg4x4 (w8 + 6, z27, z0),
+ svmls_za32_vg4x4 (w8 + 6, z27, z0))
+
+/*
+** mls_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlsll za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p7_z1_z0, svint8x4_t, svint8_t,
+ svmls_single_za32_s8_vg4x4 (w8 + 7, z1, z0),
+ svmls_za32_vg4x4 (w8 + 7, z1, z0))
+
+/*
+** mls_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** smlsll za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p8_z1_z0, svint8x4_t, svint8_t,
+ svmls_single_za32_s8_vg4x4 (w8 + 8, z1, z0),
+ svmls_za32_vg4x4 (w8 + 8, z1, z0))
+
+/*
+** mls_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlsll za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0m1_z1_z0, svint8x4_t, svint8_t,
+ svmls_single_za32_s8_vg4x4 (w0 - 1, z1, z0),
+ svmls_za32_vg4x4 (w0 - 1, z1, z0))
+
+/*
+** mls_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** smlsll za\.s\[w8, 0:3, vgx4\], {z0\.b - z3\.b}, z15\.b
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mls_single_w8_z0_z15, svint8x4_t, svint8_t,
+ svmls_single_za32_s8_vg4x4 (w8, z0, z15),
+ svmls_za32_vg4x4 (w8, z0, z15))
+
+/*
+** mls_single_w8_z20_z16:
+**	mov	(z[0-7])\.d, z16\.d
+** smlsll za\.s\[w8, 0:3, vgx4\], {z20\.b - z23\.b}, \1\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z20_z16, svint8x4_t, svint8_t,
+ svmls_single_za32_s8_vg4x4 (w8, z20, z16),
+ svmls_za32_vg4x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u16_vg2x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u16_vg2x1.c
new file mode 100644
index 0000000..fcc7057b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u16_vg2x1.c
@@ -0,0 +1,148 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlsl za\.s\[\1, 0:1\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_0_z0_z0, svuint16_t,
+ svmls_za32_u16_vg2x1 (0, z0, z0),
+ svmls_za32_vg2x1 (0, z0, z0))
+
+/*
+** mls_w0_z0_z3:
+** mov (w8|w9|w10|w11), w0
+** umlsl za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w0_z0_z3, svuint16_t,
+ svmls_za32_u16_vg2x1 (w0, z0, z3),
+ svmls_za32_vg2x1 (w0, z0, z3))
+
+/*
+** mls_w7_z0_z3:
+** mov (w8|w9|w10|w11), w7
+** umlsl za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w7_z0_z3, svuint16_t,
+ svmls_za32_u16_vg2x1 (w7, z0, z3),
+ svmls_za32_vg2x1 (w7, z0, z3))
+
+/*
+** mls_w8_z7_z3:
+** umlsl za\.s\[w8, 0:1\], z7\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8_z7_z3, svuint16_t,
+ svmls_za32_u16_vg2x1 (w8, z7, z3),
+ svmls_za32_vg2x1 (w8, z7, z3))
+
+/*
+** mls_w8_z31_z16:
+** mov (z[0-7])\.d, z16\.d
+**	umlsl	za\.s\[w8, 0:1\], z31\.h, \1\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8_z31_z16, svuint16_t,
+ svmls_za32_u16_vg2x1 (w8, z31, z16),
+ svmls_za32_vg2x1 (w8, z31, z16))
+
+/*
+** mls_w8p1_z0_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlsl za\.s\[\1, 0:1\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p1_z0_z0, svuint16_t,
+ svmls_za32_u16_vg2x1 (w8 + 1, z0, z0),
+ svmls_za32_vg2x1 (w8 + 1, z0, z0))
+
+/*
+** mls_w8p2_z23_z0:
+** umlsl za\.s\[w8, 2:3\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p2_z23_z0, svuint16_t,
+ svmls_za32_u16_vg2x1 (w8 + 2, z23, z0),
+ svmls_za32_vg2x1 (w8 + 2, z23, z0))
+
+/*
+** mls_w11p6_z23_z0:
+** umlsl za\.s\[w11, 6:7\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w11p6_z23_z0, svuint16_t,
+ svmls_za32_u16_vg2x1 (w11 + 6, z23, z0),
+ svmls_za32_vg2x1 (w11 + 6, z23, z0))
+
+/*
+** mls_w8p7_z7_z7:
+** add (w8|w9|w10|w11), w8, #?7
+** umlsl za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p7_z7_z7, svuint16_t,
+ svmls_za32_u16_vg2x1 (w8 + 7, z7, z7),
+ svmls_za32_vg2x1 (w8 + 7, z7, z7))
+
+/*
+** mls_w11p10_z23_z0:
+** umlsl za\.s\[w11, 10:11\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w11p10_z23_z0, svuint16_t,
+ svmls_za32_u16_vg2x1 (w11 + 10, z23, z0),
+ svmls_za32_vg2x1 (w11 + 10, z23, z0))
+
+/*
+** mls_w8p14_z23_z0:
+** umlsl za\.s\[w8, 14:15\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p14_z23_z0, svuint16_t,
+ svmls_za32_u16_vg2x1 (w8 + 14, z23, z0),
+ svmls_za32_vg2x1 (w8 + 14, z23, z0))
+
+/*
+** mls_w8p15_z7_z7:
+** add (w8|w9|w10|w11), w8, #?15
+** umlsl za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p15_z7_z7, svuint16_t,
+ svmls_za32_u16_vg2x1 (w8 + 15, z7, z7),
+ svmls_za32_vg2x1 (w8 + 15, z7, z7))
+
+/*
+** mls_w8p16_z7_z7:
+** add (w8|w9|w10|w11), w8, #?16
+** umlsl za\.s\[\1, 0:1\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p16_z7_z7, svuint16_t,
+ svmls_za32_u16_vg2x1 (w8 + 16, z7, z7),
+ svmls_za32_vg2x1 (w8 + 16, z7, z7))
+
+/*
+** mls_w8m1_z16_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlsl za\.s\[\1, 0:1\], z16\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8m1_z16_z0, svuint16_t,
+ svmls_za32_u16_vg2x1 (w8 - 1, z16, z0),
+ svmls_za32_vg2x1 (w8 - 1, z16, z0))
+
+/*
+** mls_w12_z0_z3:
+** mov (w8|w9|w10|w11), w12
+** umlsl za\.s\[\1, 0:1\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w12_z0_z3, svuint16_t,
+ svmls_za32_u16_vg2x1 (w12, z0, z3),
+ svmls_za32_vg2x1 (w12, z0, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u16_vg2x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u16_vg2x2.c
new file mode 100644
index 0000000..ae283d7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u16_vg2x2.c
@@ -0,0 +1,247 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_0_z0_z0, svuint16x2_t,
+ svmls_za32_u16_vg2x2 (0, z0, z0),
+ svmls_za32_vg2x2 (0, z0, z0))
+
+/*
+** mls_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** umlsl za\.s\[\1, 0:1, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w0_z0_z0, svuint16x2_t,
+ svmls_za32_u16_vg2x2 (w0, z0, z0),
+ svmls_za32_vg2x2 (w0, z0, z0))
+
+/*
+** mls_w8_z0_z4:
+** umlsl za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z4, svuint16x2_t,
+ svmls_za32_u16_vg2x2 (w8, z0, z4),
+ svmls_za32_vg2x2 (w8, z0, z4))
+
+/*
+** mls_w8_z4_z18:
+** umlsl za\.s\[w8, 0:1, vgx2\], {z4\.h - z5\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z4_z18, svuint16x2_t,
+ svmls_za32_u16_vg2x2 (w8, z4, z18),
+ svmls_za32_vg2x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mls_w8_z0_z23:
+** ...
+** umlsl za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z23, svuint16x2_t,
+ svmls_za32_u16_vg2x2 (w8, z0, z23),
+ svmls_za32_vg2x2 (w8, z0, z23))
+
+/*
+** mls_w8_z23_z0:
+** ...
+** umlsl za\.s\[w8, 0:1, vgx2\], [^\n]+, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z23_z0, svuint16x2_t,
+ svmls_za32_u16_vg2x2 (w8, z23, z0),
+ svmls_za32_vg2x2 (w8, z23, z0))
+
+/*
+** mls_w8_z18_z28:
+** umlsl za\.s\[w8, 0:1, vgx2\], {z18\.h - z19\.h}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z18_z28, svuint16x2_t,
+ svmls_za32_u16_vg2x2 (w8, z18, z28),
+ svmls_za32_vg2x2 (w8, z18, z28))
+
+/*
+** mls_w8_z28_z4:
+** umlsl za\.s\[w8, 0:1, vgx2\], {z28\.h - z29\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z28_z4, svuint16x2_t,
+ svmls_za32_u16_vg2x2 (w8, z28, z4),
+ svmls_za32_vg2x2 (w8, z28, z4))
+
+/*
+** mls_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlsl za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p1_z4_z0, svuint16x2_t,
+ svmls_za32_u16_vg2x2 (w8 + 1, z4, z0),
+ svmls_za32_vg2x2 (w8 + 1, z4, z0))
+
+/*
+** mls_w8p2_z4_z0:
+** umlsl za\.s\[w8, 2:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p2_z4_z0, svuint16x2_t,
+ svmls_za32_u16_vg2x2 (w8 + 2, z4, z0),
+ svmls_za32_vg2x2 (w8 + 2, z4, z0))
+
+/*
+** mls_w8p6_z4_z0:
+** umlsl za\.s\[w8, 6:7, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p6_z4_z0, svuint16x2_t,
+ svmls_za32_u16_vg2x2 (w8 + 6, z4, z0),
+ svmls_za32_vg2x2 (w8 + 6, z4, z0))
+
+/*
+** mls_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlsl za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p7_z4_z0, svuint16x2_t,
+ svmls_za32_u16_vg2x2 (w8 + 7, z4, z0),
+ svmls_za32_vg2x2 (w8 + 7, z4, z0))
+
+/*
+** mls_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** umlsl za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p8_z4_z4, svuint16x2_t,
+ svmls_za32_u16_vg2x2 (w8 + 8, z4, z4),
+ svmls_za32_vg2x2 (w8 + 8, z4, z4))
+
+/*
+** mls_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlsl za\.s\[\1, 0:1, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8m1_z4_z0, svuint16x2_t,
+ svmls_za32_u16_vg2x2 (w8 - 1, z4, z0),
+ svmls_za32_vg2x2 (w8 - 1, z4, z0))
+
+/*
+** mls_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_0_z1_z0, svuint16x2_t, svuint16_t,
+ svmls_single_za32_u16_vg2x2 (0, z1, z0),
+ svmls_za32_vg2x2 (0, z1, z0))
+
+/*
+** mls_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** umlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0_z1_z0, svuint16x2_t, svuint16_t,
+ svmls_single_za32_u16_vg2x2 (w0, z1, z0),
+ svmls_za32_vg2x2 (w0, z1, z0))
+
+/*
+** mls_single_w8_z1_z0:
+** umlsl za\.s\[w8, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z1_z0, svuint16x2_t, svuint16_t,
+ svmls_single_za32_u16_vg2x2 (w8, z1, z0),
+ svmls_za32_vg2x2 (w8, z1, z0))
+
+/*
+** mls_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p1_z1_z0, svuint16x2_t, svuint16_t,
+ svmls_single_za32_u16_vg2x2 (w8 + 1, z1, z0),
+ svmls_za32_vg2x2 (w8 + 1, z1, z0))
+
+/*
+** mls_single_w8p4_z20_z0:
+** umlsl za\.s\[w8, 4:5, vgx2\], {z20\.h - z21\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p4_z20_z0, svuint16x2_t, svuint16_t,
+ svmls_single_za32_u16_vg2x2 (w8 + 4, z20, z0),
+ svmls_za32_vg2x2 (w8 + 4, z20, z0))
+
+/*
+** mls_single_w8p6_z27_z0:
+** umlsl za\.s\[w8, 6:7, vgx2\], {z27\.h - z28\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p6_z27_z0, svuint16x2_t, svuint16_t,
+ svmls_single_za32_u16_vg2x2 (w8 + 6, z27, z0),
+ svmls_za32_vg2x2 (w8 + 6, z27, z0))
+
+/*
+** mls_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p7_z1_z0, svuint16x2_t, svuint16_t,
+ svmls_single_za32_u16_vg2x2 (w8 + 7, z1, z0),
+ svmls_za32_vg2x2 (w8 + 7, z1, z0))
+
+/*
+** mls_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** umlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p8_z1_z0, svuint16x2_t, svuint16_t,
+ svmls_single_za32_u16_vg2x2 (w8 + 8, z1, z0),
+ svmls_za32_vg2x2 (w8 + 8, z1, z0))
+
+/*
+** mls_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlsl za\.s\[\1, 0:1, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0m1_z1_z0, svuint16x2_t, svuint16_t,
+ svmls_single_za32_u16_vg2x2 (w0 - 1, z1, z0),
+ svmls_za32_vg2x2 (w0 - 1, z1, z0))
+
+/*
+** mls_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** umlsl za\.s\[w8, 0:1, vgx2\], {z0\.h - z1\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mls_single_w8_z0_z15, svuint16x2_t, svuint16_t,
+ svmls_single_za32_u16_vg2x2 (w8, z0, z15),
+ svmls_za32_vg2x2 (w8, z0, z15))
+
+/*
+** mls_single_w8_z20_z16:
+**	mov	(z[0-7])\.d, z16\.d
+** umlsl za\.s\[w8, 0:1, vgx2\], {z20\.h - z21\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z20_z16, svuint16x2_t, svuint16_t,
+ svmls_single_za32_u16_vg2x2 (w8, z20, z16),
+ svmls_za32_vg2x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u16_vg2x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u16_vg2x4.c
new file mode 100644
index 0000000..813e0d1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u16_vg2x4.c
@@ -0,0 +1,258 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_0_z0_z0, svuint16x4_t,
+ svmls_za32_u16_vg2x4 (0, z0, z0),
+ svmls_za32_vg2x4 (0, z0, z0))
+
+/*
+** mls_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** umlsl za\.s\[\1, 0:1, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w0_z0_z0, svuint16x4_t,
+ svmls_za32_u16_vg2x4 (w0, z0, z0),
+ svmls_za32_vg2x4 (w0, z0, z0))
+
+/*
+** mls_w8_z0_z4:
+** umlsl za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z4, svuint16x4_t,
+ svmls_za32_u16_vg2x4 (w8, z0, z4),
+ svmls_za32_vg2x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mls_w8_z0_z18:
+** ...
+** umlsl za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z18, svuint16x4_t,
+ svmls_za32_u16_vg2x4 (w8, z0, z18),
+ svmls_za32_vg2x4 (w8, z0, z18))
+
+/*
+** mls_w8_z18_z0:
+** ...
+** umlsl za\.s\[w8, 0:1, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z18_z0, svuint16x4_t,
+ svmls_za32_u16_vg2x4 (w8, z18, z0),
+ svmls_za32_vg2x4 (w8, z18, z0))
+
+/*
+** mls_w8_z0_z23:
+** ...
+** umlsl za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z23, svuint16x4_t,
+ svmls_za32_u16_vg2x4 (w8, z0, z23),
+ svmls_za32_vg2x4 (w8, z0, z23))
+
+/*
+** mls_w8_z23_z0:
+** ...
+** umlsl za\.s\[w8, 0:1, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z23_z0, svuint16x4_t,
+ svmls_za32_u16_vg2x4 (w8, z23, z0),
+ svmls_za32_vg2x4 (w8, z23, z0))
+
+/*
+** mls_w8_z4_z28:
+** umlsl za\.s\[w8, 0:1, vgx4\], {z4\.h - z7\.h}, {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z4_z28, svuint16x4_t,
+ svmls_za32_u16_vg2x4 (w8, z4, z28),
+ svmls_za32_vg2x4 (w8, z4, z28))
+
+/*
+** mls_w8_z28_z0:
+** umlsl za\.s\[w8, 0:1, vgx4\], {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z28_z0, svuint16x4_t,
+ svmls_za32_u16_vg2x4 (w8, z28, z0),
+ svmls_za32_vg2x4 (w8, z28, z0))
+
+/*
+** mls_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlsl za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p1_z4_z0, svuint16x4_t,
+ svmls_za32_u16_vg2x4 (w8 + 1, z4, z0),
+ svmls_za32_vg2x4 (w8 + 1, z4, z0))
+
+/*
+** mls_w8p2_z4_z0:
+** umlsl za\.s\[w8, 2:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p2_z4_z0, svuint16x4_t,
+ svmls_za32_u16_vg2x4 (w8 + 2, z4, z0),
+ svmls_za32_vg2x4 (w8 + 2, z4, z0))
+
+/*
+** mls_w8p6_z4_z0:
+** umlsl za\.s\[w8, 6:7, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p6_z4_z0, svuint16x4_t,
+ svmls_za32_u16_vg2x4 (w8 + 6, z4, z0),
+ svmls_za32_vg2x4 (w8 + 6, z4, z0))
+
+/*
+** mls_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlsl za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p7_z4_z0, svuint16x4_t,
+ svmls_za32_u16_vg2x4 (w8 + 7, z4, z0),
+ svmls_za32_vg2x4 (w8 + 7, z4, z0))
+
+/*
+** mls_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** umlsl za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p8_z4_z4, svuint16x4_t,
+ svmls_za32_u16_vg2x4 (w8 + 8, z4, z4),
+ svmls_za32_vg2x4 (w8 + 8, z4, z4))
+
+/*
+** mls_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlsl za\.s\[\1, 0:1, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8m1_z4_z0, svuint16x4_t,
+ svmls_za32_u16_vg2x4 (w8 - 1, z4, z0),
+ svmls_za32_vg2x4 (w8 - 1, z4, z0))
+
+/*
+** mls_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_0_z1_z0, svuint16x4_t, svuint16_t,
+ svmls_single_za32_u16_vg2x4 (0, z1, z0),
+ svmls_za32_vg2x4 (0, z1, z0))
+
+/*
+** mls_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** umlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0_z1_z0, svuint16x4_t, svuint16_t,
+ svmls_single_za32_u16_vg2x4 (w0, z1, z0),
+ svmls_za32_vg2x4 (w0, z1, z0))
+
+/*
+** mls_single_w8_z1_z0:
+** umlsl za\.s\[w8, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z1_z0, svuint16x4_t, svuint16_t,
+ svmls_single_za32_u16_vg2x4 (w8, z1, z0),
+ svmls_za32_vg2x4 (w8, z1, z0))
+
+/*
+** mls_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p1_z1_z0, svuint16x4_t, svuint16_t,
+ svmls_single_za32_u16_vg2x4 (w8 + 1, z1, z0),
+ svmls_za32_vg2x4 (w8 + 1, z1, z0))
+
+/*
+** mls_single_w8p4_z20_z0:
+** umlsl za\.s\[w8, 4:5, vgx4\], {z20\.h - z23\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p4_z20_z0, svuint16x4_t, svuint16_t,
+ svmls_single_za32_u16_vg2x4 (w8 + 4, z20, z0),
+ svmls_za32_vg2x4 (w8 + 4, z20, z0))
+
+/*
+** mls_single_w8p6_z27_z0:
+** umlsl za\.s\[w8, 6:7, vgx4\], {z27\.h - z30\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p6_z27_z0, svuint16x4_t, svuint16_t,
+ svmls_single_za32_u16_vg2x4 (w8 + 6, z27, z0),
+ svmls_za32_vg2x4 (w8 + 6, z27, z0))
+
+/*
+** mls_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p7_z1_z0, svuint16x4_t, svuint16_t,
+ svmls_single_za32_u16_vg2x4 (w8 + 7, z1, z0),
+ svmls_za32_vg2x4 (w8 + 7, z1, z0))
+
+/*
+** mls_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** umlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p8_z1_z0, svuint16x4_t, svuint16_t,
+ svmls_single_za32_u16_vg2x4 (w8 + 8, z1, z0),
+ svmls_za32_vg2x4 (w8 + 8, z1, z0))
+
+/*
+** mls_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlsl za\.s\[\1, 0:1, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0m1_z1_z0, svuint16x4_t, svuint16_t,
+ svmls_single_za32_u16_vg2x4 (w0 - 1, z1, z0),
+ svmls_za32_vg2x4 (w0 - 1, z1, z0))
+
+/*
+** mls_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** umlsl za\.s\[w8, 0:1, vgx4\], {z0\.h - z3\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mls_single_w8_z0_z15, svuint16x4_t, svuint16_t,
+ svmls_single_za32_u16_vg2x4 (w8, z0, z15),
+ svmls_za32_vg2x4 (w8, z0, z15))
+
+/*
+** mls_single_w8_z20_z16:
+**	mov	(z[0-7])\.d, z16\.d
+** umlsl za\.s\[w8, 0:1, vgx4\], {z20\.h - z23\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z20_z16, svuint16x4_t, svuint16_t,
+ svmls_single_za32_u16_vg2x4 (w8, z20, z16),
+ svmls_za32_vg2x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u8_vg4x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u8_vg4x1.c
new file mode 100644
index 0000000..064bbe3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u8_vg4x1.c
@@ -0,0 +1,149 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlsll za\.s\[\1, 0:3\], z0\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mls_0_z0_z0, svuint8_t,
+ svmls_za32_u8_vg4x1 (0, z0, z0),
+ svmls_za32_vg4x1 (0, z0, z0))
+
+/*
+** mls_w0_z0_z3:
+** mov (w8|w9|w10|w11), w0
+** umlsll za\.s\[\1, 0:3\], z0\.b, z3\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w0_z0_z3, svuint8_t,
+ svmls_za32_u8_vg4x1 (w0, z0, z3),
+ svmls_za32_vg4x1 (w0, z0, z3))
+
+/*
+** mls_w7_z0_z3:
+** mov (w8|w9|w10|w11), w7
+** umlsll za\.s\[\1, 0:3\], z0\.b, z3\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w7_z0_z3, svuint8_t,
+ svmls_za32_u8_vg4x1 (w7, z0, z3),
+ svmls_za32_vg4x1 (w7, z0, z3))
+
+/*
+** mls_w8_z7_z3:
+** umlsll za\.s\[w8, 0:3\], z7\.b, z3\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w8_z7_z3, svuint8_t,
+ svmls_za32_u8_vg4x1 (w8, z7, z3),
+ svmls_za32_vg4x1 (w8, z7, z3))
+
+/*
+** mls_w8_z31_z16:
+** mov (z[0-7])\.d, z16\.d
+**	umlsll	za\.s\[w8, 0:3\], z31\.b, \1\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w8_z31_z16, svuint8_t,
+ svmls_za32_u8_vg4x1 (w8, z31, z16),
+ svmls_za32_vg4x1 (w8, z31, z16))
+
+/*
+** mls_w8p1_z0_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlsll za\.s\[\1, 0:3\], z0\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w8p1_z0_z0, svuint8_t,
+ svmls_za32_u8_vg4x1 (w8 + 1, z0, z0),
+ svmls_za32_vg4x1 (w8 + 1, z0, z0))
+
+/*
+** mls_w10p4_z23_z0:
+** umlsll za\.s\[w10, 4:7\], z23\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w10p4_z23_z0, svuint8_t,
+ svmls_za32_u8_vg4x1 (w10 + 4, z23, z0),
+ svmls_za32_vg4x1 (w10 + 4, z23, z0))
+
+/*
+** mls_w11p6_z23_z0:
+** add (w8|w9|w10|w11), w11, #?6
+** umlsll za\.s\[\1, 0:3\], z23\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w11p6_z23_z0, svuint8_t,
+ svmls_za32_u8_vg4x1 (w11 + 6, z23, z0),
+ svmls_za32_vg4x1 (w11 + 6, z23, z0))
+
+/*
+** mls_w9p8_z7_z7:
+** umlsll za\.s\[w9, 8:11\], z7\.b, z7\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w9p8_z7_z7, svuint8_t,
+ svmls_za32_u8_vg4x1 (w9 + 8, z7, z7),
+ svmls_za32_vg4x1 (w9 + 8, z7, z7))
+
+/*
+** mls_w11p12_z23_z0:
+** umlsll za\.s\[w11, 12:15\], z23\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w11p12_z23_z0, svuint8_t,
+ svmls_za32_u8_vg4x1 (w11 + 12, z23, z0),
+ svmls_za32_vg4x1 (w11 + 12, z23, z0))
+
+/*
+** mls_w8p14_z23_z0:
+** add (w8|w9|w10|w11), w8, #?14
+** umlsll za\.s\[\1, 0:3\], z23\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w8p14_z23_z0, svuint8_t,
+ svmls_za32_u8_vg4x1 (w8 + 14, z23, z0),
+ svmls_za32_vg4x1 (w8 + 14, z23, z0))
+
+/*
+** mls_w8p15_z7_z7:
+** add (w8|w9|w10|w11), w8, #?15
+** umlsll za\.s\[\1, 0:3\], z7\.b, z7\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w8p15_z7_z7, svuint8_t,
+ svmls_za32_u8_vg4x1 (w8 + 15, z7, z7),
+ svmls_za32_vg4x1 (w8 + 15, z7, z7))
+
+/*
+** mls_w8p16_z7_z7:
+** add (w8|w9|w10|w11), w8, #?16
+** umlsll za\.s\[\1, 0:3\], z7\.b, z7\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w8p16_z7_z7, svuint8_t,
+ svmls_za32_u8_vg4x1 (w8 + 16, z7, z7),
+ svmls_za32_vg4x1 (w8 + 16, z7, z7))
+
+/*
+** mls_w8m1_z16_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlsll za\.s\[\1, 0:3\], z16\.b, z0\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w8m1_z16_z0, svuint8_t,
+ svmls_za32_u8_vg4x1 (w8 - 1, z16, z0),
+ svmls_za32_vg4x1 (w8 - 1, z16, z0))
+
+/*
+** mls_w12_z0_z3:
+** mov (w8|w9|w10|w11), w12
+** umlsll za\.s\[\1, 0:3\], z0\.b, z3\.b
+** ret
+*/
+TEST_ZA_X1 (mls_w12_z0_z3, svuint8_t,
+ svmls_za32_u8_vg4x1 (w12, z0, z3),
+ svmls_za32_vg4x1 (w12, z0, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u8_vg4x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u8_vg4x2.c
new file mode 100644
index 0000000..652c5be
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u8_vg4x2.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlsll za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mls_0_z0_z0, svuint8x2_t,
+ svmls_za32_u8_vg4x2 (0, z0, z0),
+ svmls_za32_vg4x2 (0, z0, z0))
+
+/*
+** mls_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** umlsll za\.s\[\1, 0:3, vgx2\], {z0\.b - z1\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w0_z0_z0, svuint8x2_t,
+ svmls_za32_u8_vg4x2 (w0, z0, z0),
+ svmls_za32_vg4x2 (w0, z0, z0))
+
+/*
+** mls_w8_z0_z4:
+** umlsll za\.s\[w8, 0:3, vgx2\], {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z4, svuint8x2_t,
+ svmls_za32_u8_vg4x2 (w8, z0, z4),
+ svmls_za32_vg4x2 (w8, z0, z4))
+
+/*
+** mls_w8_z4_z18:
+** umlsll za\.s\[w8, 0:3, vgx2\], {z4\.b - z5\.b}, {z18\.b - z19\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z4_z18, svuint8x2_t,
+ svmls_za32_u8_vg4x2 (w8, z4, z18),
+ svmls_za32_vg4x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mls_w8_z0_z23:
+** ...
+** umlsll za\.s\[w8, 0:3, vgx2\], {z0\.b - z1\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z23, svuint8x2_t,
+ svmls_za32_u8_vg4x2 (w8, z0, z23),
+ svmls_za32_vg4x2 (w8, z0, z23))
+
+/*
+** mls_w8_z23_z0:
+** ...
+** umlsll za\.s\[w8, 0:3, vgx2\], [^\n]+, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z23_z0, svuint8x2_t,
+ svmls_za32_u8_vg4x2 (w8, z23, z0),
+ svmls_za32_vg4x2 (w8, z23, z0))
+
+/*
+** mls_w8_z18_z28:
+** umlsll za\.s\[w8, 0:3, vgx2\], {z18\.b - z19\.b}, {z28\.b - z29\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z18_z28, svuint8x2_t,
+ svmls_za32_u8_vg4x2 (w8, z18, z28),
+ svmls_za32_vg4x2 (w8, z18, z28))
+
+/*
+** mls_w8_z28_z4:
+** umlsll za\.s\[w8, 0:3, vgx2\], {z28\.b - z29\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z28_z4, svuint8x2_t,
+ svmls_za32_u8_vg4x2 (w8, z28, z4),
+ svmls_za32_vg4x2 (w8, z28, z4))
+
+/*
+** mls_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlsll za\.s\[\1, 0:3, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8p1_z4_z0, svuint8x2_t,
+ svmls_za32_u8_vg4x2 (w8 + 1, z4, z0),
+ svmls_za32_vg4x2 (w8 + 1, z4, z0))
+
+/*
+** mls_w8p2_z4_z0:
+** add (w8|w9|w10|w11), w8, #?2
+** umlsll za\.s\[w8, 0:3, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8p2_z4_z0, svuint8x2_t,
+ svmls_za32_u8_vg4x2 (w8 + 2, z4, z0),
+ svmls_za32_vg4x2 (w8 + 2, z4, z0))
+
+/*
+** mls_w11p4_z4_z0:
+** umlsll za\.s\[w11, 4:7, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w11p4_z4_z0, svuint8x2_t,
+ svmls_za32_u8_vg4x2 (w11 + 4, z4, z0),
+ svmls_za32_vg4x2 (w11 + 4, z4, z0))
+
+/*
+** mls_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlsll za\.s\[\1, 0:3, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8p7_z4_z0, svuint8x2_t,
+ svmls_za32_u8_vg4x2 (w8 + 7, z4, z0),
+ svmls_za32_vg4x2 (w8 + 7, z4, z0))
+
+/*
+** mls_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** umlsll za\.s\[\1, 0:3, vgx2\], {z4\.b - z5\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8p8_z4_z4, svuint8x2_t,
+ svmls_za32_u8_vg4x2 (w8 + 8, z4, z4),
+ svmls_za32_vg4x2 (w8 + 8, z4, z4))
+
+/*
+** mls_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlsll za\.s\[\1, 0:3, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8m1_z4_z0, svuint8x2_t,
+ svmls_za32_u8_vg4x2 (w8 - 1, z4, z0),
+ svmls_za32_vg4x2 (w8 - 1, z4, z0))
+
+/*
+** mls_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlsll za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_0_z1_z0, svuint8x2_t, svuint8_t,
+ svmls_single_za32_u8_vg4x2 (0, z1, z0),
+ svmls_za32_vg4x2 (0, z1, z0))
+
+/*
+** mls_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** umlsll za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0_z1_z0, svuint8x2_t, svuint8_t,
+ svmls_single_za32_u8_vg4x2 (w0, z1, z0),
+ svmls_za32_vg4x2 (w0, z1, z0))
+
+/*
+** mls_single_w8_z1_z0:
+** umlsll za\.s\[w8, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z1_z0, svuint8x2_t, svuint8_t,
+ svmls_single_za32_u8_vg4x2 (w8, z1, z0),
+ svmls_za32_vg4x2 (w8, z1, z0))
+
+/*
+** mls_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlsll za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p1_z1_z0, svuint8x2_t, svuint8_t,
+ svmls_single_za32_u8_vg4x2 (w8 + 1, z1, z0),
+ svmls_za32_vg4x2 (w8 + 1, z1, z0))
+
+/*
+** mls_single_w8p2_z20_z0:
+** add (w8|w9|w10|w11), w8, #?2
+** umlsll za\.s\[\1, 0:3, vgx2\], {z20\.b - z21\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p2_z20_z0, svuint8x2_t, svuint8_t,
+ svmls_single_za32_u8_vg4x2 (w8 + 2, z20, z0),
+ svmls_za32_vg4x2 (w8 + 2, z20, z0))
+
+/*
+** mls_single_w11p4_z27_z0:
+** umlsll za\.s\[w11, 4:7, vgx2\], {z27\.b - z28\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w11p4_z27_z0, svuint8x2_t, svuint8_t,
+ svmls_single_za32_u8_vg4x2 (w11 + 4, z27, z0),
+ svmls_za32_vg4x2 (w11 + 4, z27, z0))
+
+/*
+** mls_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlsll za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p7_z1_z0, svuint8x2_t, svuint8_t,
+ svmls_single_za32_u8_vg4x2 (w8 + 7, z1, z0),
+ svmls_za32_vg4x2 (w8 + 7, z1, z0))
+
+/*
+** mls_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** umlsll za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p8_z1_z0, svuint8x2_t, svuint8_t,
+ svmls_single_za32_u8_vg4x2 (w8 + 8, z1, z0),
+ svmls_za32_vg4x2 (w8 + 8, z1, z0))
+
+/*
+** mls_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlsll za\.s\[\1, 0:3, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0m1_z1_z0, svuint8x2_t, svuint8_t,
+ svmls_single_za32_u8_vg4x2 (w0 - 1, z1, z0),
+ svmls_za32_vg4x2 (w0 - 1, z1, z0))
+
+/*
+** mls_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** umlsll za\.s\[w8, 0:3, vgx2\], {z0\.b - z1\.b}, z15\.b
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mls_single_w8_z0_z15, svuint8x2_t, svuint8_t,
+ svmls_single_za32_u8_vg4x2 (w8, z0, z15),
+ svmls_za32_vg4x2 (w8, z0, z15))
+
+/*
+** mls_single_w8_z20_z16:
+**	mov	(z[0-7])\.d, z16\.d
+** umlsll za\.s\[w8, 0:3, vgx2\], {z20\.b - z21\.b}, \1\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z20_z16, svuint8x2_t, svuint8_t,
+ svmls_single_za32_u8_vg4x2 (w8, z20, z16),
+ svmls_za32_vg4x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u8_vg4x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u8_vg4x4.c
new file mode 100644
index 0000000..94e0556
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za32_u8_vg4x4.c
@@ -0,0 +1,260 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlsll za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mls_0_z0_z0, svuint8x4_t,
+ svmls_za32_u8_vg4x4 (0, z0, z0),
+ svmls_za32_vg4x4 (0, z0, z0))
+
+/*
+** mls_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** umlsll za\.s\[\1, 0:3, vgx4\], {z0\.b - z3\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w0_z0_z0, svuint8x4_t,
+ svmls_za32_u8_vg4x4 (w0, z0, z0),
+ svmls_za32_vg4x4 (w0, z0, z0))
+
+/*
+** mls_w8_z0_z4:
+** umlsll za\.s\[w8, 0:3, vgx4\], {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z4, svuint8x4_t,
+ svmls_za32_u8_vg4x4 (w8, z0, z4),
+ svmls_za32_vg4x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mls_w8_z0_z18:
+** ...
+** umlsll za\.s\[w8, 0:3, vgx4\], {z0\.b - z3\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z18, svuint8x4_t,
+ svmls_za32_u8_vg4x4 (w8, z0, z18),
+ svmls_za32_vg4x4 (w8, z0, z18))
+
+/*
+** mls_w8_z18_z0:
+** ...
+** umlsll za\.s\[w8, 0:3, vgx4\], [^\n]+, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z18_z0, svuint8x4_t,
+ svmls_za32_u8_vg4x4 (w8, z18, z0),
+ svmls_za32_vg4x4 (w8, z18, z0))
+
+/*
+** mls_w8_z0_z23:
+** ...
+** umlsll za\.s\[w8, 0:3, vgx4\], {z0\.b - z3\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z23, svuint8x4_t,
+ svmls_za32_u8_vg4x4 (w8, z0, z23),
+ svmls_za32_vg4x4 (w8, z0, z23))
+
+/*
+** mls_w8_z23_z0:
+** ...
+** umlsll za\.s\[w8, 0:3, vgx4\], [^\n]+, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z23_z0, svuint8x4_t,
+ svmls_za32_u8_vg4x4 (w8, z23, z0),
+ svmls_za32_vg4x4 (w8, z23, z0))
+
+/*
+** mls_w8_z4_z28:
+** umlsll za\.s\[w8, 0:3, vgx4\], {z4\.b - z7\.b}, {z28\.b - z31\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z4_z28, svuint8x4_t,
+ svmls_za32_u8_vg4x4 (w8, z4, z28),
+ svmls_za32_vg4x4 (w8, z4, z28))
+
+/*
+** mls_w8_z28_z0:
+** umlsll za\.s\[w8, 0:3, vgx4\], {z28\.b - z31\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z28_z0, svuint8x4_t,
+ svmls_za32_u8_vg4x4 (w8, z28, z0),
+ svmls_za32_vg4x4 (w8, z28, z0))
+
+/*
+** mls_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlsll za\.s\[\1, 0:3, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8p1_z4_z0, svuint8x4_t,
+ svmls_za32_u8_vg4x4 (w8 + 1, z4, z0),
+ svmls_za32_vg4x4 (w8 + 1, z4, z0))
+
+/*
+** mls_w8p2_z4_z0:
+** add (w8|w9|w10|w11), w8, #?2
+** umlsll za\.s\[\1, 0:3, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8p2_z4_z0, svuint8x4_t,
+ svmls_za32_u8_vg4x4 (w8 + 2, z4, z0),
+ svmls_za32_vg4x4 (w8 + 2, z4, z0))
+
+/*
+** mls_w11p4_z4_z0:
+** umlsll za\.s\[w11, 4:7, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w11p4_z4_z0, svuint8x4_t,
+ svmls_za32_u8_vg4x4 (w11 + 4, z4, z0),
+ svmls_za32_vg4x4 (w11 + 4, z4, z0))
+
+/*
+** mls_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlsll za\.s\[\1, 0:3, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8p7_z4_z0, svuint8x4_t,
+ svmls_za32_u8_vg4x4 (w8 + 7, z4, z0),
+ svmls_za32_vg4x4 (w8 + 7, z4, z0))
+
+/*
+** mls_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** umlsll za\.s\[\1, 0:3, vgx4\], {z4\.b - z7\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8p8_z4_z4, svuint8x4_t,
+ svmls_za32_u8_vg4x4 (w8 + 8, z4, z4),
+ svmls_za32_vg4x4 (w8 + 8, z4, z4))
+
+/*
+** mls_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlsll za\.s\[\1, 0:3, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (mls_w8m1_z4_z0, svuint8x4_t,
+ svmls_za32_u8_vg4x4 (w8 - 1, z4, z0),
+ svmls_za32_vg4x4 (w8 - 1, z4, z0))
+
+/*
+** mls_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlsll za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_0_z1_z0, svuint8x4_t, svuint8_t,
+ svmls_single_za32_u8_vg4x4 (0, z1, z0),
+ svmls_za32_vg4x4 (0, z1, z0))
+
+/*
+** mls_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** umlsll za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0_z1_z0, svuint8x4_t, svuint8_t,
+ svmls_single_za32_u8_vg4x4 (w0, z1, z0),
+ svmls_za32_vg4x4 (w0, z1, z0))
+
+/*
+** mls_single_w8_z1_z0:
+** umlsll za\.s\[w8, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z1_z0, svuint8x4_t, svuint8_t,
+ svmls_single_za32_u8_vg4x4 (w8, z1, z0),
+ svmls_za32_vg4x4 (w8, z1, z0))
+
+/*
+** mls_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlsll za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p1_z1_z0, svuint8x4_t, svuint8_t,
+ svmls_single_za32_u8_vg4x4 (w8 + 1, z1, z0),
+ svmls_za32_vg4x4 (w8 + 1, z1, z0))
+
+/*
+** mls_single_w8p4_z20_z0:
+** umlsll za\.s\[w8, 4:7, vgx4\], {z20\.b - z23\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p4_z20_z0, svuint8x4_t, svuint8_t,
+ svmls_single_za32_u8_vg4x4 (w8 + 4, z20, z0),
+ svmls_za32_vg4x4 (w8 + 4, z20, z0))
+
+/*
+** mls_single_w8p6_z27_z0:
+** add (w8|w9|w10|w11), w8, #?6
+** umlsll za\.s\[\1, 0:3, vgx4\], {z27\.b - z30\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p6_z27_z0, svuint8x4_t, svuint8_t,
+ svmls_single_za32_u8_vg4x4 (w8 + 6, z27, z0),
+ svmls_za32_vg4x4 (w8 + 6, z27, z0))
+
+/*
+** mls_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlsll za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p7_z1_z0, svuint8x4_t, svuint8_t,
+ svmls_single_za32_u8_vg4x4 (w8 + 7, z1, z0),
+ svmls_za32_vg4x4 (w8 + 7, z1, z0))
+
+/*
+** mls_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** umlsll za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p8_z1_z0, svuint8x4_t, svuint8_t,
+ svmls_single_za32_u8_vg4x4 (w8 + 8, z1, z0),
+ svmls_za32_vg4x4 (w8 + 8, z1, z0))
+
+/*
+** mls_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlsll za\.s\[\1, 0:3, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0m1_z1_z0, svuint8x4_t, svuint8_t,
+ svmls_single_za32_u8_vg4x4 (w0 - 1, z1, z0),
+ svmls_za32_vg4x4 (w0 - 1, z1, z0))
+
+/*
+** mls_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** umlsll za\.s\[w8, 0:3, vgx4\], {z0\.b - z3\.b}, z15\.b
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mls_single_w8_z0_z15, svuint8x4_t, svuint8_t,
+ svmls_single_za32_u8_vg4x4 (w8, z0, z15),
+ svmls_za32_vg4x4 (w8, z0, z15))
+
+/*
+** mls_single_w8_z20_z16:
+**	mov	(z[0-7])\.d, z16\.d
+** umlsll za\.s\[w8, 0:3, vgx4\], {z20\.b - z23\.b}, \1\.b
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z20_z16, svuint8x4_t, svuint8_t,
+ svmls_single_za32_u8_vg4x4 (w8, z20, z16),
+ svmls_za32_vg4x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_f64_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_f64_vg1x2.c
new file mode 100644
index 0000000..e39b281
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_f64_vg1x2.c
@@ -0,0 +1,182 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-f64f64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmls za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (mls_0_z0_z0, svfloat64x2_t,
+ svmls_za64_f64_vg1x2 (0, z0, z0),
+ svmls_za64_vg1x2 (0, z0, z0))
+
+/*
+** mls_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** fmls za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (mls_w0_z0_z0, svfloat64x2_t,
+ svmls_za64_f64_vg1x2 (w0, z0, z0),
+ svmls_za64_vg1x2 (w0, z0, z0))
+
+/*
+** mls_w8_z0_z4:
+** fmls za\.d\[w8, 0, vgx2\], {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z4, svfloat64x2_t,
+ svmls_za64_f64_vg1x2 (w8, z0, z4),
+ svmls_za64_vg1x2 (w8, z0, z4))
+
+/*
+** mls_w8_z4_z18:
+** fmls za\.d\[w8, 0, vgx2\], {z4\.d - z5\.d}, {z18\.d - z19\.d}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z4_z18, svfloat64x2_t,
+ svmls_za64_f64_vg1x2 (w8, z4, z18),
+ svmls_za64_vg1x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mls_w8_z23_z0:
+** ...
+** fmls za\.d\[w8, 0, vgx2\], [^\n]+, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z23_z0, svfloat64x2_t,
+ svmls_za64_f64_vg1x2 (w8, z23, z0),
+ svmls_za64_vg1x2 (w8, z23, z0))
+
+/*
+** mls_w8_z18_z23:
+** ...
+** fmls za\.d\[w8, 0, vgx2\], {z18\.d - z19\.d}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z18_z23, svfloat64x2_t,
+ svmls_za64_f64_vg1x2 (w8, z18, z23),
+ svmls_za64_vg1x2 (w8, z18, z23))
+
+/*
+** mls_w8_z4_z28:
+** fmls za\.d\[w8, 0, vgx2\], {z4\.d - z5\.d}, {z28\.d - z29\.d}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z4_z28, svfloat64x2_t,
+ svmls_za64_f64_vg1x2 (w8, z4, z28),
+ svmls_za64_vg1x2 (w8, z4, z28))
+
+/*
+** mls_w8p7_z4_z0:
+** fmls za\.d\[w8, 7, vgx2\], {z4\.d - z5\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (mls_w8p7_z4_z0, svfloat64x2_t,
+ svmls_za64_f64_vg1x2 (w8 + 7, z4, z0),
+ svmls_za64_vg1x2 (w8 + 7, z4, z0))
+
+/*
+** mls_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** fmls za\.d\[\1, 0, vgx2\], {z4\.d - z5\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_ZA_XN (mls_w8p8_z4_z4, svfloat64x2_t,
+ svmls_za64_f64_vg1x2 (w8 + 8, z4, z4),
+ svmls_za64_vg1x2 (w8 + 8, z4, z4))
+
+/*
+** mls_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fmls za\.d\[\1, 0, vgx2\], {z4\.d - z5\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (mls_w8m1_z4_z0, svfloat64x2_t,
+ svmls_za64_f64_vg1x2 (w8 - 1, z4, z0),
+ svmls_za64_vg1x2 (w8 - 1, z4, z0))
+
+/*
+** mls_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmls za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_0_z1_z0, svfloat64x2_t, svfloat64_t,
+ svmls_single_za64_f64_vg1x2 (0, z1, z0),
+ svmls_za64_vg1x2 (0, z1, z0))
+
+/*
+** mls_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** fmls za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0_z1_z0, svfloat64x2_t, svfloat64_t,
+ svmls_single_za64_f64_vg1x2 (w0, z1, z0),
+ svmls_za64_vg1x2 (w0, z1, z0))
+
+/*
+** mls_single_w8_z1_z0:
+** fmls za\.d\[w8, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z1_z0, svfloat64x2_t, svfloat64_t,
+ svmls_single_za64_f64_vg1x2 (w8, z1, z0),
+ svmls_za64_vg1x2 (w8, z1, z0))
+
+/*
+** mls_single_w8p7_z1_z0:
+** fmls za\.d\[w8, 7, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p7_z1_z0, svfloat64x2_t, svfloat64_t,
+ svmls_single_za64_f64_vg1x2 (w8 + 7, z1, z0),
+ svmls_za64_vg1x2 (w8 + 7, z1, z0))
+
+/*
+** mls_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** fmls za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p8_z1_z0, svfloat64x2_t, svfloat64_t,
+ svmls_single_za64_f64_vg1x2 (w8 + 8, z1, z0),
+ svmls_za64_vg1x2 (w8 + 8, z1, z0))
+
+/*
+** mls_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmls za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0m1_z1_z0, svfloat64x2_t, svfloat64_t,
+ svmls_single_za64_f64_vg1x2 (w0 - 1, z1, z0),
+ svmls_za64_vg1x2 (w0 - 1, z1, z0))
+
+/*
+** mls_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** fmls za\.d\[w8, 0, vgx2\], {z0\.d - z1\.d}, z15\.d
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mls_single_w8_z0_z15, svfloat64x2_t, svfloat64_t,
+ svmls_single_za64_f64_vg1x2 (w8, z0, z15),
+ svmls_za64_vg1x2 (w8, z0, z15))
+
+/*
+** mls_single_w8_z20_z16:
+**	mov	(z[0-7])\.d, z16\.d
+** fmls za\.d\[w8, 0, vgx2\], {z20\.d - z21\.d}, \1\.d
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z20_z16, svfloat64x2_t, svfloat64_t,
+ svmls_single_za64_f64_vg1x2 (w8, z20, z16),
+ svmls_za64_vg1x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_f64_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_f64_vg1x4.c
new file mode 100644
index 0000000..0858f1f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_f64_vg1x4.c
@@ -0,0 +1,174 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-f64f64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmls za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (mls_0_z0_z0, svfloat64x4_t,
+ svmls_za64_f64_vg1x4 (0, z0, z0),
+ svmls_za64_vg1x4 (0, z0, z0))
+
+/*
+** mls_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** fmls za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (mls_w0_z0_z0, svfloat64x4_t,
+ svmls_za64_f64_vg1x4 (w0, z0, z0),
+ svmls_za64_vg1x4 (w0, z0, z0))
+
+/*
+** mls_w8_z0_z4:
+** fmls za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z4, svfloat64x4_t,
+ svmls_za64_f64_vg1x4 (w8, z0, z4),
+ svmls_za64_vg1x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mls_w8_z0_z18:
+** ...
+** fmls za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z18, svfloat64x4_t,
+ svmls_za64_f64_vg1x4 (w8, z0, z18),
+ svmls_za64_vg1x4 (w8, z0, z18))
+
+/*
+** mls_w8_z18_z28:
+** ...
+** fmls za\.d\[w8, 0, vgx4\], [^\n]+, {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z18_z28, svfloat64x4_t,
+ svmls_za64_f64_vg1x4 (w8, z18, z28),
+ svmls_za64_vg1x4 (w8, z18, z28))
+
+/*
+** mls_w8_z28_z23:
+** ...
+** fmls za\.d\[w8, 0, vgx4\], {z28\.d - z31\.d}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z28_z23, svfloat64x4_t,
+ svmls_za64_f64_vg1x4 (w8, z28, z23),
+ svmls_za64_vg1x4 (w8, z28, z23))
+
+/*
+** mls_w8p7_z4_z0:
+** fmls za\.d\[w8, 7, vgx4\], {z4\.d - z7\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (mls_w8p7_z4_z0, svfloat64x4_t,
+ svmls_za64_f64_vg1x4 (w8 + 7, z4, z0),
+ svmls_za64_vg1x4 (w8 + 7, z4, z0))
+
+/*
+** mls_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** fmls za\.d\[\1, 0, vgx4\], {z4\.d - z7\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (mls_w8p8_z4_z4, svfloat64x4_t,
+ svmls_za64_f64_vg1x4 (w8 + 8, z4, z4),
+ svmls_za64_vg1x4 (w8 + 8, z4, z4))
+
+/*
+** mls_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fmls za\.d\[\1, 0, vgx4\], {z4\.d - z7\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (mls_w8m1_z4_z0, svfloat64x4_t,
+ svmls_za64_f64_vg1x4 (w8 - 1, z4, z0),
+ svmls_za64_vg1x4 (w8 - 1, z4, z0))
+
+/*
+** mls_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** fmls za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_0_z1_z0, svfloat64x4_t, svfloat64_t,
+ svmls_single_za64_f64_vg1x4 (0, z1, z0),
+ svmls_za64_vg1x4 (0, z1, z0))
+
+/*
+** mls_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** fmls za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0_z1_z0, svfloat64x4_t, svfloat64_t,
+ svmls_single_za64_f64_vg1x4 (w0, z1, z0),
+ svmls_za64_vg1x4 (w0, z1, z0))
+
+/*
+** mls_single_w8_z1_z0:
+** fmls za\.d\[w8, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z1_z0, svfloat64x4_t, svfloat64_t,
+ svmls_single_za64_f64_vg1x4 (w8, z1, z0),
+ svmls_za64_vg1x4 (w8, z1, z0))
+
+/*
+** mls_single_w8p7_z1_z0:
+** fmls za\.d\[w8, 7, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p7_z1_z0, svfloat64x4_t, svfloat64_t,
+ svmls_single_za64_f64_vg1x4 (w8 + 7, z1, z0),
+ svmls_za64_vg1x4 (w8 + 7, z1, z0))
+
+/*
+** mls_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** fmls za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p8_z1_z0, svfloat64x4_t, svfloat64_t,
+ svmls_single_za64_f64_vg1x4 (w8 + 8, z1, z0),
+ svmls_za64_vg1x4 (w8 + 8, z1, z0))
+
+/*
+** mls_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** fmls za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0m1_z1_z0, svfloat64x4_t, svfloat64_t,
+ svmls_single_za64_f64_vg1x4 (w0 - 1, z1, z0),
+ svmls_za64_vg1x4 (w0 - 1, z1, z0))
+
+/*
+** mls_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** fmls za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}, z15\.d
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mls_single_w8_z0_z15, svfloat64x4_t, svfloat64_t,
+ svmls_single_za64_f64_vg1x4 (w8, z0, z15),
+ svmls_za64_vg1x4 (w8, z0, z15))
+
+/*
+** mls_single_w8_z20_z16:
+**	mov	(z[0-7])\.d, z16\.d
+** fmls za\.d\[w8, 0, vgx4\], {z20\.d - z23\.d}, \1\.d
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z20_z16, svfloat64x4_t, svfloat64_t,
+ svmls_single_za64_f64_vg1x4 (w8, z20, z16),
+ svmls_za64_vg1x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_s16_vg4x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_s16_vg4x1.c
new file mode 100644
index 0000000..101d667
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_s16_vg4x1.c
@@ -0,0 +1,151 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlsll za\.d\[\1, 0:3\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_0_z0_z0, svint16_t,
+ svmls_za64_s16_vg4x1 (0, z0, z0),
+ svmls_za64_vg4x1 (0, z0, z0))
+
+/*
+** mls_w0_z0_z3:
+** mov (w8|w9|w10|w11), w0
+** smlsll za\.d\[\1, 0:3\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w0_z0_z3, svint16_t,
+ svmls_za64_s16_vg4x1 (w0, z0, z3),
+ svmls_za64_vg4x1 (w0, z0, z3))
+
+/*
+** mls_w7_z0_z3:
+** mov (w8|w9|w10|w11), w7
+** smlsll za\.d\[\1, 0:3\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w7_z0_z3, svint16_t,
+ svmls_za64_s16_vg4x1 (w7, z0, z3),
+ svmls_za64_vg4x1 (w7, z0, z3))
+
+/*
+** mls_w8_z7_z3:
+** smlsll za\.d\[w8, 0:3\], z7\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8_z7_z3, svint16_t,
+ svmls_za64_s16_vg4x1 (w8, z7, z3),
+ svmls_za64_vg4x1 (w8, z7, z3))
+
+/*
+** mls_w8_z31_z16:
+** mov (z[0-7])\.d, z16\.d
+**	smlsll	za\.d\[w8, 0:3\], z31\.h, \1\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8_z31_z16, svint16_t,
+ svmls_za64_s16_vg4x1 (w8, z31, z16),
+ svmls_za64_vg4x1 (w8, z31, z16))
+
+/*
+** mls_w8p1_z0_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlsll za\.d\[\1, 0:3\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p1_z0_z0, svint16_t,
+ svmls_za64_s16_vg4x1 (w8 + 1, z0, z0),
+ svmls_za64_vg4x1 (w8 + 1, z0, z0))
+
+/*
+** mls_w10p4_z23_z0:
+** smlsll za\.d\[w10, 4:7\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w10p4_z23_z0, svint16_t,
+ svmls_za64_s16_vg4x1 (w10 + 4, z23, z0),
+ svmls_za64_vg4x1 (w10 + 4, z23, z0))
+
+/*
+** mls_w11p6_z23_z0:
+** add (w8|w9|w10|w11), w11, #?6
+** smlsll za\.d\[\1, 0:3\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w11p6_z23_z0, svint16_t,
+ svmls_za64_s16_vg4x1 (w11 + 6, z23, z0),
+ svmls_za64_vg4x1 (w11 + 6, z23, z0))
+
+/*
+** mls_w9p8_z7_z7:
+** smlsll za\.d\[w9, 8:11\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w9p8_z7_z7, svint16_t,
+ svmls_za64_s16_vg4x1 (w9 + 8, z7, z7),
+ svmls_za64_vg4x1 (w9 + 8, z7, z7))
+
+/*
+** mls_w11p12_z23_z0:
+** smlsll za\.d\[w11, 12:15\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w11p12_z23_z0, svint16_t,
+ svmls_za64_s16_vg4x1 (w11 + 12, z23, z0),
+ svmls_za64_vg4x1 (w11 + 12, z23, z0))
+
+/*
+** mls_w8p14_z23_z0:
+** add (w8|w9|w10|w11), w8, #?14
+** smlsll za\.d\[\1, 0:3\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p14_z23_z0, svint16_t,
+ svmls_za64_s16_vg4x1 (w8 + 14, z23, z0),
+ svmls_za64_vg4x1 (w8 + 14, z23, z0))
+
+/*
+** mls_w8p15_z7_z7:
+** add (w8|w9|w10|w11), w8, #?15
+** smlsll za\.d\[\1, 0:3\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p15_z7_z7, svint16_t,
+ svmls_za64_s16_vg4x1 (w8 + 15, z7, z7),
+ svmls_za64_vg4x1 (w8 + 15, z7, z7))
+
+/*
+** mls_w8p16_z7_z7:
+** add (w8|w9|w10|w11), w8, #?16
+** smlsll za\.d\[\1, 0:3\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p16_z7_z7, svint16_t,
+ svmls_za64_s16_vg4x1 (w8 + 16, z7, z7),
+ svmls_za64_vg4x1 (w8 + 16, z7, z7))
+
+/*
+** mls_w8m1_z16_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlsll za\.d\[\1, 0:3\], z16\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8m1_z16_z0, svint16_t,
+ svmls_za64_s16_vg4x1 (w8 - 1, z16, z0),
+ svmls_za64_vg4x1 (w8 - 1, z16, z0))
+
+/*
+** mls_w12_z0_z3:
+** mov (w8|w9|w10|w11), w12
+** smlsll za\.d\[\1, 0:3\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w12_z0_z3, svint16_t,
+ svmls_za64_s16_vg4x1 (w12, z0, z3),
+ svmls_za64_vg4x1 (w12, z0, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_s16_vg4x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_s16_vg4x2.c
new file mode 100644
index 0000000..441a812
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_s16_vg4x2.c
@@ -0,0 +1,251 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlsll za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_0_z0_z0, svint16x2_t,
+ svmls_za64_s16_vg4x2 (0, z0, z0),
+ svmls_za64_vg4x2 (0, z0, z0))
+
+/*
+** mls_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** smlsll za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w0_z0_z0, svint16x2_t,
+ svmls_za64_s16_vg4x2 (w0, z0, z0),
+ svmls_za64_vg4x2 (w0, z0, z0))
+
+/*
+** mls_w8_z0_z4:
+** smlsll za\.d\[w8, 0:3, vgx2\], {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z4, svint16x2_t,
+ svmls_za64_s16_vg4x2 (w8, z0, z4),
+ svmls_za64_vg4x2 (w8, z0, z4))
+
+/*
+** mls_w8_z4_z18:
+** smlsll za\.d\[w8, 0:3, vgx2\], {z4\.h - z5\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z4_z18, svint16x2_t,
+ svmls_za64_s16_vg4x2 (w8, z4, z18),
+ svmls_za64_vg4x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mls_w8_z0_z23:
+** ...
+** smlsll za\.d\[w8, 0:3, vgx2\], {z0\.h - z1\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z23, svint16x2_t,
+ svmls_za64_s16_vg4x2 (w8, z0, z23),
+ svmls_za64_vg4x2 (w8, z0, z23))
+
+/*
+** mls_w8_z23_z0:
+** ...
+** smlsll za\.d\[w8, 0:3, vgx2\], [^\n]+, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z23_z0, svint16x2_t,
+ svmls_za64_s16_vg4x2 (w8, z23, z0),
+ svmls_za64_vg4x2 (w8, z23, z0))
+
+/*
+** mls_w8_z18_z28:
+** smlsll za\.d\[w8, 0:3, vgx2\], {z18\.h - z19\.h}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z18_z28, svint16x2_t,
+ svmls_za64_s16_vg4x2 (w8, z18, z28),
+ svmls_za64_vg4x2 (w8, z18, z28))
+
+/*
+** mls_w8_z28_z4:
+** smlsll za\.d\[w8, 0:3, vgx2\], {z28\.h - z29\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z28_z4, svint16x2_t,
+ svmls_za64_s16_vg4x2 (w8, z28, z4),
+ svmls_za64_vg4x2 (w8, z28, z4))
+
+/*
+** mls_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlsll za\.d\[\1, 0:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p1_z4_z0, svint16x2_t,
+ svmls_za64_s16_vg4x2 (w8 + 1, z4, z0),
+ svmls_za64_vg4x2 (w8 + 1, z4, z0))
+
+/*
+** mls_w8p2_z4_z0:
+** add (w8|w9|w10|w11), w8, #?2
+** smlsll za\.d\[w8, 0:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p2_z4_z0, svint16x2_t,
+ svmls_za64_s16_vg4x2 (w8 + 2, z4, z0),
+ svmls_za64_vg4x2 (w8 + 2, z4, z0))
+
+/*
+** mls_w11p4_z4_z0:
+** smlsll za\.d\[w11, 4:7, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w11p4_z4_z0, svint16x2_t,
+ svmls_za64_s16_vg4x2 (w11 + 4, z4, z0),
+ svmls_za64_vg4x2 (w11 + 4, z4, z0))
+
+/*
+** mls_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlsll za\.d\[\1, 0:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p7_z4_z0, svint16x2_t,
+ svmls_za64_s16_vg4x2 (w8 + 7, z4, z0),
+ svmls_za64_vg4x2 (w8 + 7, z4, z0))
+
+/*
+** mls_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** smlsll za\.d\[\1, 0:3, vgx2\], {z4\.h - z5\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p8_z4_z4, svint16x2_t,
+ svmls_za64_s16_vg4x2 (w8 + 8, z4, z4),
+ svmls_za64_vg4x2 (w8 + 8, z4, z4))
+
+/*
+** mls_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlsll za\.d\[\1, 0:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8m1_z4_z0, svint16x2_t,
+ svmls_za64_s16_vg4x2 (w8 - 1, z4, z0),
+ svmls_za64_vg4x2 (w8 - 1, z4, z0))
+
+/*
+** mls_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlsll za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_0_z1_z0, svint16x2_t, svint16_t,
+ svmls_single_za64_s16_vg4x2 (0, z1, z0),
+ svmls_za64_vg4x2 (0, z1, z0))
+
+/*
+** mls_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** smlsll za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0_z1_z0, svint16x2_t, svint16_t,
+ svmls_single_za64_s16_vg4x2 (w0, z1, z0),
+ svmls_za64_vg4x2 (w0, z1, z0))
+
+/*
+** mls_single_w8_z1_z0:
+** smlsll za\.d\[w8, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z1_z0, svint16x2_t, svint16_t,
+ svmls_single_za64_s16_vg4x2 (w8, z1, z0),
+ svmls_za64_vg4x2 (w8, z1, z0))
+
+/*
+** mls_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlsll za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p1_z1_z0, svint16x2_t, svint16_t,
+ svmls_single_za64_s16_vg4x2 (w8 + 1, z1, z0),
+ svmls_za64_vg4x2 (w8 + 1, z1, z0))
+
+/*
+** mls_single_w8p2_z20_z0:
+** add (w8|w9|w10|w11), w8, #?2
+** smlsll za\.d\[\1, 0:3, vgx2\], {z20\.h - z21\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p2_z20_z0, svint16x2_t, svint16_t,
+ svmls_single_za64_s16_vg4x2 (w8 + 2, z20, z0),
+ svmls_za64_vg4x2 (w8 + 2, z20, z0))
+
+/*
+** mls_single_w11p4_z27_z0:
+** smlsll za\.d\[w11, 4:7, vgx2\], {z27\.h - z28\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w11p4_z27_z0, svint16x2_t, svint16_t,
+ svmls_single_za64_s16_vg4x2 (w11 + 4, z27, z0),
+ svmls_za64_vg4x2 (w11 + 4, z27, z0))
+
+/*
+** mls_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlsll za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p7_z1_z0, svint16x2_t, svint16_t,
+ svmls_single_za64_s16_vg4x2 (w8 + 7, z1, z0),
+ svmls_za64_vg4x2 (w8 + 7, z1, z0))
+
+/*
+** mls_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** smlsll za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p8_z1_z0, svint16x2_t, svint16_t,
+ svmls_single_za64_s16_vg4x2 (w8 + 8, z1, z0),
+ svmls_za64_vg4x2 (w8 + 8, z1, z0))
+
+/*
+** mls_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlsll za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0m1_z1_z0, svint16x2_t, svint16_t,
+ svmls_single_za64_s16_vg4x2 (w0 - 1, z1, z0),
+ svmls_za64_vg4x2 (w0 - 1, z1, z0))
+
+/*
+** mls_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** smlsll za\.d\[w8, 0:3, vgx2\], {z0\.h - z1\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mls_single_w8_z0_z15, svint16x2_t, svint16_t,
+ svmls_single_za64_s16_vg4x2 (w8, z0, z15),
+ svmls_za64_vg4x2 (w8, z0, z15))
+
+/*
+** mls_single_w8_z20_z16:
+**	mov	(z[0-7])\.d, z16\.d
+** smlsll za\.d\[w8, 0:3, vgx2\], {z20\.h - z21\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z20_z16, svint16x2_t, svint16_t,
+ svmls_single_za64_s16_vg4x2 (w8, z20, z16),
+ svmls_za64_vg4x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_s16_vg4x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_s16_vg4x4.c
new file mode 100644
index 0000000..b555caa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_s16_vg4x4.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlsll za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_0_z0_z0, svint16x4_t,
+ svmls_za64_s16_vg4x4 (0, z0, z0),
+ svmls_za64_vg4x4 (0, z0, z0))
+
+/*
+** mls_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** smlsll za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w0_z0_z0, svint16x4_t,
+ svmls_za64_s16_vg4x4 (w0, z0, z0),
+ svmls_za64_vg4x4 (w0, z0, z0))
+
+/*
+** mls_w8_z0_z4:
+** smlsll za\.d\[w8, 0:3, vgx4\], {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z4, svint16x4_t,
+ svmls_za64_s16_vg4x4 (w8, z0, z4),
+ svmls_za64_vg4x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mls_w8_z0_z18:
+** ...
+** smlsll za\.d\[w8, 0:3, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z18, svint16x4_t,
+ svmls_za64_s16_vg4x4 (w8, z0, z18),
+ svmls_za64_vg4x4 (w8, z0, z18))
+
+/*
+** mls_w8_z18_z0:
+** ...
+** smlsll za\.d\[w8, 0:3, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z18_z0, svint16x4_t,
+ svmls_za64_s16_vg4x4 (w8, z18, z0),
+ svmls_za64_vg4x4 (w8, z18, z0))
+
+/*
+** mls_w8_z0_z23:
+** ...
+** smlsll za\.d\[w8, 0:3, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z23, svint16x4_t,
+ svmls_za64_s16_vg4x4 (w8, z0, z23),
+ svmls_za64_vg4x4 (w8, z0, z23))
+
+/*
+** mls_w8_z23_z0:
+** ...
+** smlsll za\.d\[w8, 0:3, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z23_z0, svint16x4_t,
+ svmls_za64_s16_vg4x4 (w8, z23, z0),
+ svmls_za64_vg4x4 (w8, z23, z0))
+
+/*
+** mls_w8_z4_z28:
+** smlsll za\.d\[w8, 0:3, vgx4\], {z4\.h - z7\.h}, {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z4_z28, svint16x4_t,
+ svmls_za64_s16_vg4x4 (w8, z4, z28),
+ svmls_za64_vg4x4 (w8, z4, z28))
+
+/*
+** mls_w8_z28_z0:
+** smlsll za\.d\[w8, 0:3, vgx4\], {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z28_z0, svint16x4_t,
+ svmls_za64_s16_vg4x4 (w8, z28, z0),
+ svmls_za64_vg4x4 (w8, z28, z0))
+
+/*
+** mls_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlsll za\.d\[\1, 0:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p1_z4_z0, svint16x4_t,
+ svmls_za64_s16_vg4x4 (w8 + 1, z4, z0),
+ svmls_za64_vg4x4 (w8 + 1, z4, z0))
+
+/*
+** mls_w8p2_z4_z0:
+** add (w8|w9|w10|w11), w8, #?2
+** smlsll za\.d\[\1, 0:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p2_z4_z0, svint16x4_t,
+ svmls_za64_s16_vg4x4 (w8 + 2, z4, z0),
+ svmls_za64_vg4x4 (w8 + 2, z4, z0))
+
+/*
+** mls_w11p4_z4_z0:
+** smlsll za\.d\[w11, 4:7, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w11p4_z4_z0, svint16x4_t,
+ svmls_za64_s16_vg4x4 (w11 + 4, z4, z0),
+ svmls_za64_vg4x4 (w11 + 4, z4, z0))
+
+/*
+** mls_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlsll za\.d\[\1, 0:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p7_z4_z0, svint16x4_t,
+ svmls_za64_s16_vg4x4 (w8 + 7, z4, z0),
+ svmls_za64_vg4x4 (w8 + 7, z4, z0))
+
+/*
+** mls_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** smlsll za\.d\[\1, 0:3, vgx4\], {z4\.h - z7\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p8_z4_z4, svint16x4_t,
+ svmls_za64_s16_vg4x4 (w8 + 8, z4, z4),
+ svmls_za64_vg4x4 (w8 + 8, z4, z4))
+
+/*
+** mls_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** smlsll za\.d\[\1, 0:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8m1_z4_z0, svint16x4_t,
+ svmls_za64_s16_vg4x4 (w8 - 1, z4, z0),
+ svmls_za64_vg4x4 (w8 - 1, z4, z0))
+
+/*
+** mls_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** smlsll za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_0_z1_z0, svint16x4_t, svint16_t,
+ svmls_single_za64_s16_vg4x4 (0, z1, z0),
+ svmls_za64_vg4x4 (0, z1, z0))
+
+/*
+** mls_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** smlsll za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0_z1_z0, svint16x4_t, svint16_t,
+ svmls_single_za64_s16_vg4x4 (w0, z1, z0),
+ svmls_za64_vg4x4 (w0, z1, z0))
+
+/*
+** mls_single_w8_z1_z0:
+** smlsll za\.d\[w8, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z1_z0, svint16x4_t, svint16_t,
+ svmls_single_za64_s16_vg4x4 (w8, z1, z0),
+ svmls_za64_vg4x4 (w8, z1, z0))
+
+/*
+** mls_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** smlsll za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p1_z1_z0, svint16x4_t, svint16_t,
+ svmls_single_za64_s16_vg4x4 (w8 + 1, z1, z0),
+ svmls_za64_vg4x4 (w8 + 1, z1, z0))
+
+/*
+** mls_single_w8p4_z20_z0:
+** smlsll za\.d\[w8, 4:7, vgx4\], {z20\.h - z23\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p4_z20_z0, svint16x4_t, svint16_t,
+ svmls_single_za64_s16_vg4x4 (w8 + 4, z20, z0),
+ svmls_za64_vg4x4 (w8 + 4, z20, z0))
+
+/*
+** mls_single_w8p6_z27_z0:
+** add (w8|w9|w10|w11), w8, #?6
+** smlsll za\.d\[\1, 0:3, vgx4\], {z27\.h - z30\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p6_z27_z0, svint16x4_t, svint16_t,
+ svmls_single_za64_s16_vg4x4 (w8 + 6, z27, z0),
+ svmls_za64_vg4x4 (w8 + 6, z27, z0))
+
+/*
+** mls_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** smlsll za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p7_z1_z0, svint16x4_t, svint16_t,
+ svmls_single_za64_s16_vg4x4 (w8 + 7, z1, z0),
+ svmls_za64_vg4x4 (w8 + 7, z1, z0))
+
+/*
+** mls_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** smlsll za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p8_z1_z0, svint16x4_t, svint16_t,
+ svmls_single_za64_s16_vg4x4 (w8 + 8, z1, z0),
+ svmls_za64_vg4x4 (w8 + 8, z1, z0))
+
+/*
+** mls_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** smlsll za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0m1_z1_z0, svint16x4_t, svint16_t,
+ svmls_single_za64_s16_vg4x4 (w0 - 1, z1, z0),
+ svmls_za64_vg4x4 (w0 - 1, z1, z0))
+
+/*
+** mls_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** smlsll za\.d\[w8, 0:3, vgx4\], {z0\.h - z3\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mls_single_w8_z0_z15, svint16x4_t, svint16_t,
+ svmls_single_za64_s16_vg4x4 (w8, z0, z15),
+ svmls_za64_vg4x4 (w8, z0, z15))
+
+/*
+** mls_single_w8_z20_z16:
+**	mov	(z[0-7])\.d, z16\.d
+** smlsll za\.d\[w8, 0:3, vgx4\], {z20\.h - z23\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z20_z16, svint16x4_t, svint16_t,
+ svmls_single_za64_s16_vg4x4 (w8, z20, z16),
+ svmls_za64_vg4x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_u16_vg4x1.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_u16_vg4x1.c
new file mode 100644
index 0000000..196b3348
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_u16_vg4x1.c
@@ -0,0 +1,151 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlsll za\.d\[\1, 0:3\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_0_z0_z0, svuint16_t,
+ svmls_za64_u16_vg4x1 (0, z0, z0),
+ svmls_za64_vg4x1 (0, z0, z0))
+
+/*
+** mls_w0_z0_z3:
+** mov (w8|w9|w10|w11), w0
+** umlsll za\.d\[\1, 0:3\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w0_z0_z3, svuint16_t,
+ svmls_za64_u16_vg4x1 (w0, z0, z3),
+ svmls_za64_vg4x1 (w0, z0, z3))
+
+/*
+** mls_w7_z0_z3:
+** mov (w8|w9|w10|w11), w7
+** umlsll za\.d\[\1, 0:3\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w7_z0_z3, svuint16_t,
+ svmls_za64_u16_vg4x1 (w7, z0, z3),
+ svmls_za64_vg4x1 (w7, z0, z3))
+
+/*
+** mls_w8_z7_z3:
+** umlsll za\.d\[w8, 0:3\], z7\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8_z7_z3, svuint16_t,
+ svmls_za64_u16_vg4x1 (w8, z7, z3),
+ svmls_za64_vg4x1 (w8, z7, z3))
+
+/*
+** mls_w8_z31_z16:
+** mov (z[0-7])\.d, z16\.d
+**	umlsll	za\.d\[w8, 0:3\], z31\.h, \1\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8_z31_z16, svuint16_t,
+ svmls_za64_u16_vg4x1 (w8, z31, z16),
+ svmls_za64_vg4x1 (w8, z31, z16))
+
+/*
+** mls_w8p1_z0_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlsll za\.d\[\1, 0:3\], z0\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p1_z0_z0, svuint16_t,
+ svmls_za64_u16_vg4x1 (w8 + 1, z0, z0),
+ svmls_za64_vg4x1 (w8 + 1, z0, z0))
+
+/*
+** mls_w10p4_z23_z0:
+** umlsll za\.d\[w10, 4:7\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w10p4_z23_z0, svuint16_t,
+ svmls_za64_u16_vg4x1 (w10 + 4, z23, z0),
+ svmls_za64_vg4x1 (w10 + 4, z23, z0))
+
+/*
+** mls_w11p6_z23_z0:
+** add (w8|w9|w10|w11), w11, #?6
+** umlsll za\.d\[\1, 0:3\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w11p6_z23_z0, svuint16_t,
+ svmls_za64_u16_vg4x1 (w11 + 6, z23, z0),
+ svmls_za64_vg4x1 (w11 + 6, z23, z0))
+
+/*
+** mls_w9p8_z7_z7:
+** umlsll za\.d\[w9, 8:11\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w9p8_z7_z7, svuint16_t,
+ svmls_za64_u16_vg4x1 (w9 + 8, z7, z7),
+ svmls_za64_vg4x1 (w9 + 8, z7, z7))
+
+/*
+** mls_w11p12_z23_z0:
+** umlsll za\.d\[w11, 12:15\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w11p12_z23_z0, svuint16_t,
+ svmls_za64_u16_vg4x1 (w11 + 12, z23, z0),
+ svmls_za64_vg4x1 (w11 + 12, z23, z0))
+
+/*
+** mls_w8p14_z23_z0:
+** add (w8|w9|w10|w11), w8, #?14
+** umlsll za\.d\[\1, 0:3\], z23\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p14_z23_z0, svuint16_t,
+ svmls_za64_u16_vg4x1 (w8 + 14, z23, z0),
+ svmls_za64_vg4x1 (w8 + 14, z23, z0))
+
+/*
+** mls_w8p15_z7_z7:
+** add (w8|w9|w10|w11), w8, #?15
+** umlsll za\.d\[\1, 0:3\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p15_z7_z7, svuint16_t,
+ svmls_za64_u16_vg4x1 (w8 + 15, z7, z7),
+ svmls_za64_vg4x1 (w8 + 15, z7, z7))
+
+/*
+** mls_w8p16_z7_z7:
+** add (w8|w9|w10|w11), w8, #?16
+** umlsll za\.d\[\1, 0:3\], z7\.h, z7\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8p16_z7_z7, svuint16_t,
+ svmls_za64_u16_vg4x1 (w8 + 16, z7, z7),
+ svmls_za64_vg4x1 (w8 + 16, z7, z7))
+
+/*
+** mls_w8m1_z16_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlsll za\.d\[\1, 0:3\], z16\.h, z0\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w8m1_z16_z0, svuint16_t,
+ svmls_za64_u16_vg4x1 (w8 - 1, z16, z0),
+ svmls_za64_vg4x1 (w8 - 1, z16, z0))
+
+/*
+** mls_w12_z0_z3:
+** mov (w8|w9|w10|w11), w12
+** umlsll za\.d\[\1, 0:3\], z0\.h, z3\.h
+** ret
+*/
+TEST_ZA_X1 (mls_w12_z0_z3, svuint16_t,
+ svmls_za64_u16_vg4x1 (w12, z0, z3),
+ svmls_za64_vg4x1 (w12, z0, z3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_u16_vg4x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_u16_vg4x2.c
new file mode 100644
index 0000000..4c05f24
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_u16_vg4x2.c
@@ -0,0 +1,251 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlsll za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_0_z0_z0, svuint16x2_t,
+ svmls_za64_u16_vg4x2 (0, z0, z0),
+ svmls_za64_vg4x2 (0, z0, z0))
+
+/*
+** mls_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** umlsll za\.d\[\1, 0:3, vgx2\], {z0\.h - z1\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w0_z0_z0, svuint16x2_t,
+ svmls_za64_u16_vg4x2 (w0, z0, z0),
+ svmls_za64_vg4x2 (w0, z0, z0))
+
+/*
+** mls_w8_z0_z4:
+** umlsll za\.d\[w8, 0:3, vgx2\], {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z4, svuint16x2_t,
+ svmls_za64_u16_vg4x2 (w8, z0, z4),
+ svmls_za64_vg4x2 (w8, z0, z4))
+
+/*
+** mls_w8_z4_z18:
+** umlsll za\.d\[w8, 0:3, vgx2\], {z4\.h - z5\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z4_z18, svuint16x2_t,
+ svmls_za64_u16_vg4x2 (w8, z4, z18),
+ svmls_za64_vg4x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mls_w8_z0_z23:
+** ...
+** umlsll za\.d\[w8, 0:3, vgx2\], {z0\.h - z1\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z23, svuint16x2_t,
+ svmls_za64_u16_vg4x2 (w8, z0, z23),
+ svmls_za64_vg4x2 (w8, z0, z23))
+
+/*
+** mls_w8_z23_z0:
+** ...
+** umlsll za\.d\[w8, 0:3, vgx2\], [^\n]+, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z23_z0, svuint16x2_t,
+ svmls_za64_u16_vg4x2 (w8, z23, z0),
+ svmls_za64_vg4x2 (w8, z23, z0))
+
+/*
+** mls_w8_z18_z28:
+** umlsll za\.d\[w8, 0:3, vgx2\], {z18\.h - z19\.h}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z18_z28, svuint16x2_t,
+ svmls_za64_u16_vg4x2 (w8, z18, z28),
+ svmls_za64_vg4x2 (w8, z18, z28))
+
+/*
+** mls_w8_z28_z4:
+** umlsll za\.d\[w8, 0:3, vgx2\], {z28\.h - z29\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z28_z4, svuint16x2_t,
+ svmls_za64_u16_vg4x2 (w8, z28, z4),
+ svmls_za64_vg4x2 (w8, z28, z4))
+
+/*
+** mls_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlsll za\.d\[\1, 0:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p1_z4_z0, svuint16x2_t,
+ svmls_za64_u16_vg4x2 (w8 + 1, z4, z0),
+ svmls_za64_vg4x2 (w8 + 1, z4, z0))
+
+/*
+** mls_w8p2_z4_z0:
+** add (w8|w9|w10|w11), w8, #?2
+**	umlsll	za\.d\[\1, 0:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p2_z4_z0, svuint16x2_t,
+ svmls_za64_u16_vg4x2 (w8 + 2, z4, z0),
+ svmls_za64_vg4x2 (w8 + 2, z4, z0))
+
+/*
+** mls_w11p4_z4_z0:
+** umlsll za\.d\[w11, 4:7, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w11p4_z4_z0, svuint16x2_t,
+ svmls_za64_u16_vg4x2 (w11 + 4, z4, z0),
+ svmls_za64_vg4x2 (w11 + 4, z4, z0))
+
+/*
+** mls_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlsll za\.d\[\1, 0:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p7_z4_z0, svuint16x2_t,
+ svmls_za64_u16_vg4x2 (w8 + 7, z4, z0),
+ svmls_za64_vg4x2 (w8 + 7, z4, z0))
+
+/*
+** mls_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** umlsll za\.d\[\1, 0:3, vgx2\], {z4\.h - z5\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p8_z4_z4, svuint16x2_t,
+ svmls_za64_u16_vg4x2 (w8 + 8, z4, z4),
+ svmls_za64_vg4x2 (w8 + 8, z4, z4))
+
+/*
+** mls_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlsll za\.d\[\1, 0:3, vgx2\], {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8m1_z4_z0, svuint16x2_t,
+ svmls_za64_u16_vg4x2 (w8 - 1, z4, z0),
+ svmls_za64_vg4x2 (w8 - 1, z4, z0))
+
+/*
+** mls_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlsll za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_0_z1_z0, svuint16x2_t, svuint16_t,
+ svmls_single_za64_u16_vg4x2 (0, z1, z0),
+ svmls_za64_vg4x2 (0, z1, z0))
+
+/*
+** mls_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** umlsll za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0_z1_z0, svuint16x2_t, svuint16_t,
+ svmls_single_za64_u16_vg4x2 (w0, z1, z0),
+ svmls_za64_vg4x2 (w0, z1, z0))
+
+/*
+** mls_single_w8_z1_z0:
+** umlsll za\.d\[w8, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z1_z0, svuint16x2_t, svuint16_t,
+ svmls_single_za64_u16_vg4x2 (w8, z1, z0),
+ svmls_za64_vg4x2 (w8, z1, z0))
+
+/*
+** mls_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlsll za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p1_z1_z0, svuint16x2_t, svuint16_t,
+ svmls_single_za64_u16_vg4x2 (w8 + 1, z1, z0),
+ svmls_za64_vg4x2 (w8 + 1, z1, z0))
+
+/*
+** mls_single_w8p2_z20_z0:
+** add (w8|w9|w10|w11), w8, #?2
+** umlsll za\.d\[\1, 0:3, vgx2\], {z20\.h - z21\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p2_z20_z0, svuint16x2_t, svuint16_t,
+ svmls_single_za64_u16_vg4x2 (w8 + 2, z20, z0),
+ svmls_za64_vg4x2 (w8 + 2, z20, z0))
+
+/*
+** mls_single_w11p4_z27_z0:
+** umlsll za\.d\[w11, 4:7, vgx2\], {z27\.h - z28\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w11p4_z27_z0, svuint16x2_t, svuint16_t,
+ svmls_single_za64_u16_vg4x2 (w11 + 4, z27, z0),
+ svmls_za64_vg4x2 (w11 + 4, z27, z0))
+
+/*
+** mls_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlsll za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p7_z1_z0, svuint16x2_t, svuint16_t,
+ svmls_single_za64_u16_vg4x2 (w8 + 7, z1, z0),
+ svmls_za64_vg4x2 (w8 + 7, z1, z0))
+
+/*
+** mls_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** umlsll za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p8_z1_z0, svuint16x2_t, svuint16_t,
+ svmls_single_za64_u16_vg4x2 (w8 + 8, z1, z0),
+ svmls_za64_vg4x2 (w8 + 8, z1, z0))
+
+/*
+** mls_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlsll za\.d\[\1, 0:3, vgx2\], {z1\.h - z2\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0m1_z1_z0, svuint16x2_t, svuint16_t,
+ svmls_single_za64_u16_vg4x2 (w0 - 1, z1, z0),
+ svmls_za64_vg4x2 (w0 - 1, z1, z0))
+
+/*
+** mls_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** umlsll za\.d\[w8, 0:3, vgx2\], {z0\.h - z1\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mls_single_w8_z0_z15, svuint16x2_t, svuint16_t,
+ svmls_single_za64_u16_vg4x2 (w8, z0, z15),
+ svmls_za64_vg4x2 (w8, z0, z15))
+
+/*
+** mls_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** umlsll za\.d\[w8, 0:3, vgx2\], {z20\.h - z21\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z20_z16, svuint16x2_t, svuint16_t,
+ svmls_single_za64_u16_vg4x2 (w8, z20, z16),
+ svmls_za64_vg4x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_u16_vg4x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_u16_vg4x4.c
new file mode 100644
index 0000000..6ceb826
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mls_za64_u16_vg4x4.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** mls_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlsll za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_0_z0_z0, svuint16x4_t,
+ svmls_za64_u16_vg4x4 (0, z0, z0),
+ svmls_za64_vg4x4 (0, z0, z0))
+
+/*
+** mls_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** umlsll za\.d\[\1, 0:3, vgx4\], {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w0_z0_z0, svuint16x4_t,
+ svmls_za64_u16_vg4x4 (w0, z0, z0),
+ svmls_za64_vg4x4 (w0, z0, z0))
+
+/*
+** mls_w8_z0_z4:
+** umlsll za\.d\[w8, 0:3, vgx4\], {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z4, svuint16x4_t,
+ svmls_za64_u16_vg4x4 (w8, z0, z4),
+ svmls_za64_vg4x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** mls_w8_z0_z18:
+** ...
+** umlsll za\.d\[w8, 0:3, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z18, svuint16x4_t,
+ svmls_za64_u16_vg4x4 (w8, z0, z18),
+ svmls_za64_vg4x4 (w8, z0, z18))
+
+/*
+** mls_w8_z18_z0:
+** ...
+** umlsll za\.d\[w8, 0:3, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z18_z0, svuint16x4_t,
+ svmls_za64_u16_vg4x4 (w8, z18, z0),
+ svmls_za64_vg4x4 (w8, z18, z0))
+
+/*
+** mls_w8_z0_z23:
+** ...
+** umlsll za\.d\[w8, 0:3, vgx4\], {z0\.h - z3\.h}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (mls_w8_z0_z23, svuint16x4_t,
+ svmls_za64_u16_vg4x4 (w8, z0, z23),
+ svmls_za64_vg4x4 (w8, z0, z23))
+
+/*
+** mls_w8_z23_z0:
+** ...
+** umlsll za\.d\[w8, 0:3, vgx4\], [^\n]+, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z23_z0, svuint16x4_t,
+ svmls_za64_u16_vg4x4 (w8, z23, z0),
+ svmls_za64_vg4x4 (w8, z23, z0))
+
+/*
+** mls_w8_z4_z28:
+** umlsll za\.d\[w8, 0:3, vgx4\], {z4\.h - z7\.h}, {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z4_z28, svuint16x4_t,
+ svmls_za64_u16_vg4x4 (w8, z4, z28),
+ svmls_za64_vg4x4 (w8, z4, z28))
+
+/*
+** mls_w8_z28_z0:
+** umlsll za\.d\[w8, 0:3, vgx4\], {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8_z28_z0, svuint16x4_t,
+ svmls_za64_u16_vg4x4 (w8, z28, z0),
+ svmls_za64_vg4x4 (w8, z28, z0))
+
+/*
+** mls_w8p1_z4_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlsll za\.d\[\1, 0:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p1_z4_z0, svuint16x4_t,
+ svmls_za64_u16_vg4x4 (w8 + 1, z4, z0),
+ svmls_za64_vg4x4 (w8 + 1, z4, z0))
+
+/*
+** mls_w8p2_z4_z0:
+** add (w8|w9|w10|w11), w8, #?2
+** umlsll za\.d\[\1, 0:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p2_z4_z0, svuint16x4_t,
+ svmls_za64_u16_vg4x4 (w8 + 2, z4, z0),
+ svmls_za64_vg4x4 (w8 + 2, z4, z0))
+
+/*
+** mls_w11p4_z4_z0:
+** umlsll za\.d\[w11, 4:7, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w11p4_z4_z0, svuint16x4_t,
+ svmls_za64_u16_vg4x4 (w11 + 4, z4, z0),
+ svmls_za64_vg4x4 (w11 + 4, z4, z0))
+
+/*
+** mls_w8p7_z4_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlsll za\.d\[\1, 0:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p7_z4_z0, svuint16x4_t,
+ svmls_za64_u16_vg4x4 (w8 + 7, z4, z0),
+ svmls_za64_vg4x4 (w8 + 7, z4, z0))
+
+/*
+** mls_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** umlsll za\.d\[\1, 0:3, vgx4\], {z4\.h - z7\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8p8_z4_z4, svuint16x4_t,
+ svmls_za64_u16_vg4x4 (w8 + 8, z4, z4),
+ svmls_za64_vg4x4 (w8 + 8, z4, z4))
+
+/*
+** mls_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** umlsll za\.d\[\1, 0:3, vgx4\], {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (mls_w8m1_z4_z0, svuint16x4_t,
+ svmls_za64_u16_vg4x4 (w8 - 1, z4, z0),
+ svmls_za64_vg4x4 (w8 - 1, z4, z0))
+
+/*
+** mls_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** umlsll za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_0_z1_z0, svuint16x4_t, svuint16_t,
+ svmls_single_za64_u16_vg4x4 (0, z1, z0),
+ svmls_za64_vg4x4 (0, z1, z0))
+
+/*
+** mls_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** umlsll za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0_z1_z0, svuint16x4_t, svuint16_t,
+ svmls_single_za64_u16_vg4x4 (w0, z1, z0),
+ svmls_za64_vg4x4 (w0, z1, z0))
+
+/*
+** mls_single_w8_z1_z0:
+** umlsll za\.d\[w8, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z1_z0, svuint16x4_t, svuint16_t,
+ svmls_single_za64_u16_vg4x4 (w8, z1, z0),
+ svmls_za64_vg4x4 (w8, z1, z0))
+
+/*
+** mls_single_w8p1_z1_z0:
+** add (w8|w9|w10|w11), w8, #?1
+** umlsll za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p1_z1_z0, svuint16x4_t, svuint16_t,
+ svmls_single_za64_u16_vg4x4 (w8 + 1, z1, z0),
+ svmls_za64_vg4x4 (w8 + 1, z1, z0))
+
+/*
+** mls_single_w8p4_z20_z0:
+** umlsll za\.d\[w8, 4:7, vgx4\], {z20\.h - z23\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p4_z20_z0, svuint16x4_t, svuint16_t,
+ svmls_single_za64_u16_vg4x4 (w8 + 4, z20, z0),
+ svmls_za64_vg4x4 (w8 + 4, z20, z0))
+
+/*
+** mls_single_w8p6_z27_z0:
+** add (w8|w9|w10|w11), w8, #?6
+** umlsll za\.d\[\1, 0:3, vgx4\], {z27\.h - z30\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p6_z27_z0, svuint16x4_t, svuint16_t,
+ svmls_single_za64_u16_vg4x4 (w8 + 6, z27, z0),
+ svmls_za64_vg4x4 (w8 + 6, z27, z0))
+
+/*
+** mls_single_w8p7_z1_z0:
+** add (w8|w9|w10|w11), w8, #?7
+** umlsll za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p7_z1_z0, svuint16x4_t, svuint16_t,
+ svmls_single_za64_u16_vg4x4 (w8 + 7, z1, z0),
+ svmls_za64_vg4x4 (w8 + 7, z1, z0))
+
+/*
+** mls_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** umlsll za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8p8_z1_z0, svuint16x4_t, svuint16_t,
+ svmls_single_za64_u16_vg4x4 (w8 + 8, z1, z0),
+ svmls_za64_vg4x4 (w8 + 8, z1, z0))
+
+/*
+** mls_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** umlsll za\.d\[\1, 0:3, vgx4\], {z1\.h - z4\.h}, z0\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w0m1_z1_z0, svuint16x4_t, svuint16_t,
+ svmls_single_za64_u16_vg4x4 (w0 - 1, z1, z0),
+ svmls_za64_vg4x4 (w0 - 1, z1, z0))
+
+/*
+** mls_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** umlsll za\.d\[w8, 0:3, vgx4\], {z0\.h - z3\.h}, z15\.h
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (mls_single_w8_z0_z15, svuint16x4_t, svuint16_t,
+ svmls_single_za64_u16_vg4x4 (w8, z0, z15),
+ svmls_za64_vg4x4 (w8, z0, z15))
+
+/*
+** mls_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** umlsll za\.d\[w8, 0:3, vgx4\], {z20\.h - z23\.h}, \1\.h
+** ret
+*/
+TEST_ZA_SINGLE (mls_single_w8_z20_z16, svuint16x4_t, svuint16_t,
+ svmls_single_za64_u16_vg4x4 (w8, z20, z16),
+ svmls_za64_vg4x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mopa_za32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mopa_za32.c
new file mode 100644
index 0000000..7b24387
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mopa_za32.c
@@ -0,0 +1,48 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mopa_za32_s16_0_p0_p1_z0_z1:
+** smopa za0\.s, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mopa_za32_s16_0_p0_p1_z0_z1, svint16_t,
+ svmopa_za32_s16_m (0, p0, p1, z0, z1),
+ svmopa_za32_m (0, p0, p1, z0, z1))
+
+/*
+** mopa_za32_s16_0_p1_p0_z1_z0:
+** smopa za0\.s, p1/m, p0/m, z1\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mopa_za32_s16_0_p1_p0_z1_z0, svint16_t,
+ svmopa_za32_s16_m (0, p1, p0, z1, z0),
+ svmopa_za32_m (0, p1, p0, z1, z0))
+
+/*
+** mopa_za32_s16_3_p0_p1_z0_z1:
+** smopa za3\.s, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mopa_za32_s16_3_p0_p1_z0_z1, svint16_t,
+ svmopa_za32_s16_m (3, p0, p1, z0, z1),
+ svmopa_za32_m (3, p0, p1, z0, z1))
+
+/*
+** mopa_za32_u16_0_p0_p1_z0_z1:
+** umopa za0\.s, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mopa_za32_u16_0_p0_p1_z0_z1, svuint16_t,
+ svmopa_za32_u16_m (0, p0, p1, z0, z1),
+ svmopa_za32_m (0, p0, p1, z0, z1))
+
+/*
+** mopa_za32_u16_3_p0_p1_z0_z1:
+** umopa za3\.s, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mopa_za32_u16_3_p0_p1_z0_z1, svuint16_t,
+ svmopa_za32_u16_m (3, p0, p1, z0, z1),
+ svmopa_za32_m (3, p0, p1, z0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mops_za32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mops_za32.c
new file mode 100644
index 0000000..04a104a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/mops_za32.c
@@ -0,0 +1,48 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** mops_za32_s16_0_p0_p1_z0_z1:
+** smops za0\.s, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mops_za32_s16_0_p0_p1_z0_z1, svint16_t,
+ svmops_za32_s16_m (0, p0, p1, z0, z1),
+ svmops_za32_m (0, p0, p1, z0, z1))
+
+/*
+** mops_za32_s16_0_p1_p0_z1_z0:
+** smops za0\.s, p1/m, p0/m, z1\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mops_za32_s16_0_p1_p0_z1_z0, svint16_t,
+ svmops_za32_s16_m (0, p1, p0, z1, z0),
+ svmops_za32_m (0, p1, p0, z1, z0))
+
+/*
+** mops_za32_s16_3_p0_p1_z0_z1:
+** smops za3\.s, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mops_za32_s16_3_p0_p1_z0_z1, svint16_t,
+ svmops_za32_s16_m (3, p0, p1, z0, z1),
+ svmops_za32_m (3, p0, p1, z0, z1))
+
+/*
+** mops_za32_u16_0_p0_p1_z0_z1:
+** umops za0\.s, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mops_za32_u16_0_p0_p1_z0_z1, svuint16_t,
+ svmops_za32_u16_m (0, p0, p1, z0, z1),
+ svmops_za32_m (0, p0, p1, z0, z1))
+
+/*
+** mops_za32_u16_3_p0_p1_z0_z1:
+** umops za3\.s, p0/m, p1/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZA (mops_za32_u16_3_p0_p1_z0_z1, svuint16_t,
+ svmops_za32_u16_m (3, p0, p1, z0, z1),
+ svmops_za32_m (3, p0, p1, z0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c16.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c16.c
new file mode 100644
index 0000000..b03a8ae
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c16.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** pext_p2_pn0_0:
+** mov p([0-9]+)\.b, p0\.b
+** pext p2\.h, pn\1\[0\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p2_pn0_0, svbool_t,
+ p2 = svpext_c16 (pn0, 0),
+ p2 = svpext_c16 (pn0, 0))
+
+/*
+** pext_p5_pn7_1:
+** mov p([0-9]+)\.b, p7\.b
+** pext p5\.h, pn\1\[1\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p5_pn7_1, svbool_t,
+ p5 = svpext_c16 (pn7, 1),
+ p5 = svpext_c16 (pn7, 1))
+
+/*
+** pext_p9_pn8_2:
+** pext p9\.h, pn8\[2\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p9_pn8_2, svbool_t,
+ p9 = svpext_c16 (pn8, 2),
+ p9 = svpext_c16 (pn8, 2))
+
+/*
+** pext_p12_pn11_3:
+** pext p12\.h, pn11\[3\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p12_pn11_3, svbool_t,
+ p12 = svpext_c16 (pn11, 3),
+ p12 = svpext_c16 (pn11, 3))
+
+/*
+** pext_p2_pn15_0:
+** pext p2\.h, pn15\[0\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p2_pn15_0, svbool_t,
+ p2 = svpext_c16 (pn15, 0),
+ p2 = svpext_c16 (pn15, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c16_x2.c
new file mode 100644
index 0000000..c07d820
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c16_x2.c
@@ -0,0 +1,54 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** pext_p2_pn0_0:
+** mov p([0-9]+)\.b, p0\.b
+** pext {p2\.h, p3\.h}, pn\1\[0\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p2_pn0_0, svboolx2_t,
+ p2 = svpext_c16_x2 (pn0, 0),
+ p2 = svpext_c16_x2 (pn0, 0))
+
+/*
+** pext_p5_pn7_1:
+** mov p([0-9]+)\.b, p7\.b
+** pext {[^}]+}, pn\1\[1\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_EXTRACT_PN (pext_p5_pn7_1, svboolx2_t,
+ p5 = svpext_c16_x2 (pn7, 1),
+ p5 = svpext_c16_x2 (pn7, 1))
+
+/*
+** pext_p9_pn8_0:
+** pext {[^}]+}, pn8\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_EXTRACT_PN (pext_p9_pn8_0, svboolx2_t,
+ p9 = svpext_c16_x2 (pn8, 0),
+ p9 = svpext_c16_x2 (pn8, 0))
+
+/*
+** pext_p12_pn11_1:
+** pext {p12\.h, p13\.h}, pn11\[1\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p12_pn11_1, svboolx2_t,
+ p12 = svpext_c16_x2 (pn11, 1),
+ p12 = svpext_c16_x2 (pn11, 1))
+
+/*
+** pext_p2_pn15_0:
+** pext {p2\.h, p3\.h}, pn15\[0\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p2_pn15_0, svboolx2_t,
+ p2 = svpext_c16_x2 (pn15, 0),
+ p2 = svpext_c16_x2 (pn15, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c32.c
new file mode 100644
index 0000000..2a63942
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c32.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** pext_p2_pn0_0:
+** mov p([0-9]+)\.b, p0\.b
+** pext p2\.s, pn\1\[0\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p2_pn0_0, svbool_t,
+ p2 = svpext_c32 (pn0, 0),
+ p2 = svpext_c32 (pn0, 0))
+
+/*
+** pext_p5_pn7_1:
+** mov p([0-9]+)\.b, p7\.b
+** pext p5\.s, pn\1\[1\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p5_pn7_1, svbool_t,
+ p5 = svpext_c32 (pn7, 1),
+ p5 = svpext_c32 (pn7, 1))
+
+/*
+** pext_p9_pn8_2:
+** pext p9\.s, pn8\[2\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p9_pn8_2, svbool_t,
+ p9 = svpext_c32 (pn8, 2),
+ p9 = svpext_c32 (pn8, 2))
+
+/*
+** pext_p12_pn11_3:
+** pext p12\.s, pn11\[3\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p12_pn11_3, svbool_t,
+ p12 = svpext_c32 (pn11, 3),
+ p12 = svpext_c32 (pn11, 3))
+
+/*
+** pext_p2_pn15_0:
+** pext p2\.s, pn15\[0\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p2_pn15_0, svbool_t,
+ p2 = svpext_c32 (pn15, 0),
+ p2 = svpext_c32 (pn15, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c32_x2.c
new file mode 100644
index 0000000..1629b44
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c32_x2.c
@@ -0,0 +1,54 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** pext_p2_pn0_0:
+** mov p([0-9]+)\.b, p0\.b
+** pext {p2\.s, p3\.s}, pn\1\[0\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p2_pn0_0, svboolx2_t,
+ p2 = svpext_c32_x2 (pn0, 0),
+ p2 = svpext_c32_x2 (pn0, 0))
+
+/*
+** pext_p5_pn7_1:
+** mov p([0-9]+)\.b, p7\.b
+** pext {[^}]+}, pn\1\[1\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_EXTRACT_PN (pext_p5_pn7_1, svboolx2_t,
+ p5 = svpext_c32_x2 (pn7, 1),
+ p5 = svpext_c32_x2 (pn7, 1))
+
+/*
+** pext_p9_pn8_0:
+** pext {[^}]+}, pn8\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_EXTRACT_PN (pext_p9_pn8_0, svboolx2_t,
+ p9 = svpext_c32_x2 (pn8, 0),
+ p9 = svpext_c32_x2 (pn8, 0))
+
+/*
+** pext_p12_pn11_1:
+** pext {p12\.s, p13\.s}, pn11\[1\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p12_pn11_1, svboolx2_t,
+ p12 = svpext_c32_x2 (pn11, 1),
+ p12 = svpext_c32_x2 (pn11, 1))
+
+/*
+** pext_p2_pn15_0:
+** pext {p2\.s, p3\.s}, pn15\[0\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p2_pn15_0, svboolx2_t,
+ p2 = svpext_c32_x2 (pn15, 0),
+ p2 = svpext_c32_x2 (pn15, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c64.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c64.c
new file mode 100644
index 0000000..8c93d7b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c64.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** pext_p2_pn0_0:
+** mov p([0-9]+)\.b, p0\.b
+** pext p2\.d, pn\1\[0\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p2_pn0_0, svbool_t,
+ p2 = svpext_c64 (pn0, 0),
+ p2 = svpext_c64 (pn0, 0))
+
+/*
+** pext_p5_pn7_1:
+** mov p([0-9]+)\.b, p7\.b
+** pext p5\.d, pn\1\[1\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p5_pn7_1, svbool_t,
+ p5 = svpext_c64 (pn7, 1),
+ p5 = svpext_c64 (pn7, 1))
+
+/*
+** pext_p9_pn8_2:
+** pext p9\.d, pn8\[2\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p9_pn8_2, svbool_t,
+ p9 = svpext_c64 (pn8, 2),
+ p9 = svpext_c64 (pn8, 2))
+
+/*
+** pext_p12_pn11_3:
+** pext p12\.d, pn11\[3\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p12_pn11_3, svbool_t,
+ p12 = svpext_c64 (pn11, 3),
+ p12 = svpext_c64 (pn11, 3))
+
+/*
+** pext_p2_pn15_0:
+** pext p2\.d, pn15\[0\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p2_pn15_0, svbool_t,
+ p2 = svpext_c64 (pn15, 0),
+ p2 = svpext_c64 (pn15, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c64_x2.c
new file mode 100644
index 0000000..ab12c8a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c64_x2.c
@@ -0,0 +1,54 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** pext_p2_pn0_0:
+** mov p([0-9]+)\.b, p0\.b
+** pext {p2\.d, p3\.d}, pn\1\[0\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p2_pn0_0, svboolx2_t,
+ p2 = svpext_c64_x2 (pn0, 0),
+ p2 = svpext_c64_x2 (pn0, 0))
+
+/*
+** pext_p5_pn7_1:
+** mov p([0-9]+)\.b, p7\.b
+** pext {[^}]+}, pn\1\[1\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_EXTRACT_PN (pext_p5_pn7_1, svboolx2_t,
+ p5 = svpext_c64_x2 (pn7, 1),
+ p5 = svpext_c64_x2 (pn7, 1))
+
+/*
+** pext_p9_pn8_0:
+** pext {[^}]+}, pn8\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_EXTRACT_PN (pext_p9_pn8_0, svboolx2_t,
+ p9 = svpext_c64_x2 (pn8, 0),
+ p9 = svpext_c64_x2 (pn8, 0))
+
+/*
+** pext_p12_pn11_1:
+** pext {p12\.d, p13\.d}, pn11\[1\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p12_pn11_1, svboolx2_t,
+ p12 = svpext_c64_x2 (pn11, 1),
+ p12 = svpext_c64_x2 (pn11, 1))
+
+/*
+** pext_p2_pn15_0:
+** pext {p2\.d, p3\.d}, pn15\[0\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p2_pn15_0, svboolx2_t,
+ p2 = svpext_c64_x2 (pn15, 0),
+ p2 = svpext_c64_x2 (pn15, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c8.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c8.c
new file mode 100644
index 0000000..8f882ce
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c8.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** pext_p2_pn0_0:
+** mov p([0-9]+)\.b, p0\.b
+** pext p2\.b, pn\1\[0\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p2_pn0_0, svbool_t,
+ p2 = svpext_c8 (pn0, 0),
+ p2 = svpext_c8 (pn0, 0))
+
+/*
+** pext_p5_pn7_1:
+** mov p([0-9]+)\.b, p7\.b
+** pext p5\.b, pn\1\[1\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p5_pn7_1, svbool_t,
+ p5 = svpext_c8 (pn7, 1),
+ p5 = svpext_c8 (pn7, 1))
+
+/*
+** pext_p9_pn8_2:
+** pext p9\.b, pn8\[2\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p9_pn8_2, svbool_t,
+ p9 = svpext_c8 (pn8, 2),
+ p9 = svpext_c8 (pn8, 2))
+
+/*
+** pext_p12_pn11_3:
+** pext p12\.b, pn11\[3\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p12_pn11_3, svbool_t,
+ p12 = svpext_c8 (pn11, 3),
+ p12 = svpext_c8 (pn11, 3))
+
+/*
+** pext_p2_pn15_0:
+** pext p2\.b, pn15\[0\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p2_pn15_0, svbool_t,
+ p2 = svpext_c8 (pn15, 0),
+ p2 = svpext_c8 (pn15, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c8_x2.c
new file mode 100644
index 0000000..814e36d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pext_c8_x2.c
@@ -0,0 +1,54 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** pext_p2_pn0_0:
+** mov p([0-9]+)\.b, p0\.b
+** pext {p2\.b, p3\.b}, pn\1\[0\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p2_pn0_0, svboolx2_t,
+ p2 = svpext_c8_x2 (pn0, 0),
+ p2 = svpext_c8_x2 (pn0, 0))
+
+/*
+** pext_p5_pn7_1:
+** mov p([0-9]+)\.b, p7\.b
+** pext {[^}]+}, pn\1\[1\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_EXTRACT_PN (pext_p5_pn7_1, svboolx2_t,
+ p5 = svpext_c8_x2 (pn7, 1),
+ p5 = svpext_c8_x2 (pn7, 1))
+
+/*
+** pext_p9_pn8_0:
+** pext {[^}]+}, pn8\[0\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_EXTRACT_PN (pext_p9_pn8_0, svboolx2_t,
+ p9 = svpext_c8_x2 (pn8, 0),
+ p9 = svpext_c8_x2 (pn8, 0))
+
+/*
+** pext_p12_pn11_1:
+** pext {p12\.b, p13\.b}, pn11\[1\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p12_pn11_1, svboolx2_t,
+ p12 = svpext_c8_x2 (pn11, 1),
+ p12 = svpext_c8_x2 (pn11, 1))
+
+/*
+** pext_p2_pn15_0:
+** pext {p2\.b, p3\.b}, pn15\[0\]
+** ret
+*/
+TEST_EXTRACT_PN (pext_p2_pn15_0, svboolx2_t,
+ p2 = svpext_c8_x2 (pn15, 0),
+ p2 = svpext_c8_x2 (pn15, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pfalse_c.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pfalse_c.c
new file mode 100644
index 0000000..ebd3c00
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/pfalse_c.c
@@ -0,0 +1,39 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** pfalse_pn0:
+** pfalse p0\.b
+** ret
+*/
+TEST_PN (pfalse_pn0,
+ pn0 = svpfalse_c (),
+ pn0 = svpfalse_c ())
+
+/*
+** pfalse_pn7:
+** pfalse p7\.b
+** ret
+*/
+TEST_PN (pfalse_pn7,
+ pn7 = svpfalse_c (),
+ pn7 = svpfalse_c ())
+
+/*
+** pfalse_pn8:
+** pfalse p8\.b
+** ret
+*/
+TEST_PN (pfalse_pn8,
+ pn8 = svpfalse_c (),
+ pn8 = svpfalse_c ())
+
+/*
+** pfalse_pn15:
+** pfalse p15\.b
+** ret
+*/
+TEST_PN (pfalse_pn15,
+ pn15 = svpfalse_c (),
+ pn15 = svpfalse_c ())
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_b16.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_b16.c
new file mode 100644
index 0000000..5df2aa0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_b16.c
@@ -0,0 +1,89 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** psel_p0_p2_p7_0:
+** mov [wx](1[2-5]), #?0
+** psel p0, p2, p7\.h\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p0_p2_p7_0, svbool_t,
+ p0 = svpsel_b16 (p2, p7, 0),
+ p0 = svpsel_b16 (p2, p7, 0))
+
+/*
+** psel_p2_p7_p8_w11:
+** mov [wx](1[2-5]), [wx]11
+** psel p2, p7, p8\.h\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p2_p7_p8_w11, svbool_t,
+ p2 = svpsel_b16 (p7, p8, w11),
+ p2 = svpsel_b16 (p7, p8, w11))
+
+/*
+** psel_p7_p8_p13_w12:
+** psel p7, p8, p13\.h\[w12, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p7_p8_p13_w12, svbool_t,
+ p7 = svpsel_b16 (p8, p13, w12),
+ p7 = svpsel_b16 (p8, p13, w12))
+
+/*
+** psel_p8_p13_p15_w15:
+** psel p8, p13, p15\.h\[w15, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p8_p13_p15_w15, svbool_t,
+ p8 = svpsel_b16 (p13, p15, w15),
+ p8 = svpsel_b16 (p13, p15, w15))
+
+/*
+** psel_p13_p15_p0_w16:
+** mov [wx](1[2-5]), [wx]16
+** psel p13, p15, p0\.h\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p13_p15_p0_w16, svbool_t,
+ p13 = svpsel_b16 (p15, p0, w16),
+ p13 = svpsel_b16 (p15, p0, w16))
+
+/*
+** psel_p15_p13_p8_w12p1:
+** psel p15, p13, p8\.h\[w12, 1\]
+** ret
+*/
+TEST_SELECT_P (psel_p15_p13_p8_w12p1, svbool_t,
+ p15 = svpsel_b16 (p13, p8, w12 + 1),
+ p15 = svpsel_b16 (p13, p8, w12 + 1))
+
+/*
+** psel_p13_p8_p7_w12p7:
+** psel p13, p8, p7\.h\[w12, 7\]
+** ret
+*/
+TEST_SELECT_P (psel_p13_p8_p7_w12p7, svbool_t,
+ p13 = svpsel_b16 (p8, p7, w12 + 7),
+ p13 = svpsel_b16 (p8, p7, w12 + 7))
+
+/*
+** psel_p0_p0_p0_w12p8:
+** add (w[0-9]+), w12, #?8
+** psel p0, p0, p0\.h\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p0_p0_p0_w12p8, svbool_t,
+ p0 = svpsel_b16 (p0, p0, w12 + 8),
+ p0 = svpsel_b16 (p0, p0, w12 + 8))
+
+/*
+** psel_p15_p15_p15_w12m1:
+** sub (w[0-9]+), w12, #?1
+** psel p15, p15, p15\.h\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p15_p15_p15_w12m1, svbool_t,
+ p15 = svpsel_b16 (p15, p15, w12 - 1),
+ p15 = svpsel_b16 (p15, p15, w12 - 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_b32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_b32.c
new file mode 100644
index 0000000..8489dca
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_b32.c
@@ -0,0 +1,89 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** psel_p0_p2_p7_0:
+** mov [wx](1[2-5]), #?0
+** psel p0, p2, p7\.s\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p0_p2_p7_0, svbool_t,
+ p0 = svpsel_b32 (p2, p7, 0),
+ p0 = svpsel_b32 (p2, p7, 0))
+
+/*
+** psel_p2_p7_p8_w11:
+** mov [wx](1[2-5]), [wx]11
+** psel p2, p7, p8\.s\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p2_p7_p8_w11, svbool_t,
+ p2 = svpsel_b32 (p7, p8, w11),
+ p2 = svpsel_b32 (p7, p8, w11))
+
+/*
+** psel_p7_p8_p13_w12:
+** psel p7, p8, p13\.s\[w12, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p7_p8_p13_w12, svbool_t,
+ p7 = svpsel_b32 (p8, p13, w12),
+ p7 = svpsel_b32 (p8, p13, w12))
+
+/*
+** psel_p8_p13_p15_w15:
+** psel p8, p13, p15\.s\[w15, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p8_p13_p15_w15, svbool_t,
+ p8 = svpsel_b32 (p13, p15, w15),
+ p8 = svpsel_b32 (p13, p15, w15))
+
+/*
+** psel_p13_p15_p0_w16:
+** mov [wx](1[2-5]), [wx]16
+** psel p13, p15, p0\.s\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p13_p15_p0_w16, svbool_t,
+ p13 = svpsel_b32 (p15, p0, w16),
+ p13 = svpsel_b32 (p15, p0, w16))
+
+/*
+** psel_p15_p13_p8_w12p1:
+** psel p15, p13, p8\.s\[w12, 1\]
+** ret
+*/
+TEST_SELECT_P (psel_p15_p13_p8_w12p1, svbool_t,
+ p15 = svpsel_b32 (p13, p8, w12 + 1),
+ p15 = svpsel_b32 (p13, p8, w12 + 1))
+
+/*
+** psel_p13_p8_p7_w12p3:
+** psel p13, p8, p7\.s\[w12, 3\]
+** ret
+*/
+TEST_SELECT_P (psel_p13_p8_p7_w12p3, svbool_t,
+ p13 = svpsel_b32 (p8, p7, w12 + 3),
+ p13 = svpsel_b32 (p8, p7, w12 + 3))
+
+/*
+** psel_p0_p0_p0_w12p4:
+** add (w[0-9]+), w12, #?4
+** psel p0, p0, p0\.s\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p0_p0_p0_w12p4, svbool_t,
+ p0 = svpsel_b32 (p0, p0, w12 + 4),
+ p0 = svpsel_b32 (p0, p0, w12 + 4))
+
+/*
+** psel_p15_p15_p15_w12m1:
+** sub (w[0-9]+), w12, #?1
+** psel p15, p15, p15\.s\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p15_p15_p15_w12m1, svbool_t,
+ p15 = svpsel_b32 (p15, p15, w12 - 1),
+ p15 = svpsel_b32 (p15, p15, w12 - 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_b64.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_b64.c
new file mode 100644
index 0000000..5d2d8b4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_b64.c
@@ -0,0 +1,80 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** psel_p0_p2_p7_0:
+** mov [wx](1[2-5]), #?0
+** psel p0, p2, p7\.d\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p0_p2_p7_0, svbool_t,
+ p0 = svpsel_b64 (p2, p7, 0),
+ p0 = svpsel_b64 (p2, p7, 0))
+
+/*
+** psel_p2_p7_p8_w11:
+** mov [wx](1[2-5]), [wx]11
+** psel p2, p7, p8\.d\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p2_p7_p8_w11, svbool_t,
+ p2 = svpsel_b64 (p7, p8, w11),
+ p2 = svpsel_b64 (p7, p8, w11))
+
+/*
+** psel_p7_p8_p13_w12:
+** psel p7, p8, p13\.d\[w12, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p7_p8_p13_w12, svbool_t,
+ p7 = svpsel_b64 (p8, p13, w12),
+ p7 = svpsel_b64 (p8, p13, w12))
+
+/*
+** psel_p8_p13_p15_w15:
+** psel p8, p13, p15\.d\[w15, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p8_p13_p15_w15, svbool_t,
+ p8 = svpsel_b64 (p13, p15, w15),
+ p8 = svpsel_b64 (p13, p15, w15))
+
+/*
+** psel_p13_p15_p0_w16:
+** mov [wx](1[2-5]), [wx]16
+** psel p13, p15, p0\.d\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p13_p15_p0_w16, svbool_t,
+ p13 = svpsel_b64 (p15, p0, w16),
+ p13 = svpsel_b64 (p15, p0, w16))
+
+/*
+** psel_p15_p13_p8_w12p1:
+** psel p15, p13, p8\.d\[w12, 1\]
+** ret
+*/
+TEST_SELECT_P (psel_p15_p13_p8_w12p1, svbool_t,
+ p15 = svpsel_b64 (p13, p8, w12 + 1),
+ p15 = svpsel_b64 (p13, p8, w12 + 1))
+
+/*
+** psel_p0_p0_p0_w12p2:
+** add (w[0-9]+), w12, #?2
+** psel p0, p0, p0\.d\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p0_p0_p0_w12p2, svbool_t,
+ p0 = svpsel_b64 (p0, p0, w12 + 2),
+ p0 = svpsel_b64 (p0, p0, w12 + 2))
+
+/*
+** psel_p15_p15_p15_w12m1:
+** sub (w[0-9]+), w12, #?1
+** psel p15, p15, p15\.d\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p15_p15_p15_w12m1, svbool_t,
+ p15 = svpsel_b64 (p15, p15, w12 - 1),
+ p15 = svpsel_b64 (p15, p15, w12 - 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_b8.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_b8.c
new file mode 100644
index 0000000..4387358
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_b8.c
@@ -0,0 +1,89 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** psel_p0_p2_p7_0:
+** mov [wx](1[2-5]), #?0
+** psel p0, p2, p7\.b\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p0_p2_p7_0, svbool_t,
+ p0 = svpsel_b8 (p2, p7, 0),
+ p0 = svpsel_b8 (p2, p7, 0))
+
+/*
+** psel_p2_p7_p8_w11:
+** mov [wx](1[2-5]), [wx]11
+** psel p2, p7, p8\.b\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p2_p7_p8_w11, svbool_t,
+ p2 = svpsel_b8 (p7, p8, w11),
+ p2 = svpsel_b8 (p7, p8, w11))
+
+/*
+** psel_p7_p8_p13_w12:
+** psel p7, p8, p13\.b\[w12, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p7_p8_p13_w12, svbool_t,
+ p7 = svpsel_b8 (p8, p13, w12),
+ p7 = svpsel_b8 (p8, p13, w12))
+
+/*
+** psel_p8_p13_p15_w15:
+** psel p8, p13, p15\.b\[w15, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p8_p13_p15_w15, svbool_t,
+ p8 = svpsel_b8 (p13, p15, w15),
+ p8 = svpsel_b8 (p13, p15, w15))
+
+/*
+** psel_p13_p15_p0_w16:
+** mov [wx](1[2-5]), [wx]16
+** psel p13, p15, p0\.b\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p13_p15_p0_w16, svbool_t,
+ p13 = svpsel_b8 (p15, p0, w16),
+ p13 = svpsel_b8 (p15, p0, w16))
+
+/*
+** psel_p15_p13_p8_w12p1:
+** psel p15, p13, p8\.b\[w12, 1\]
+** ret
+*/
+TEST_SELECT_P (psel_p15_p13_p8_w12p1, svbool_t,
+ p15 = svpsel_b8 (p13, p8, w12 + 1),
+ p15 = svpsel_b8 (p13, p8, w12 + 1))
+
+/*
+** psel_p13_p8_p7_w12p15:
+** psel p13, p8, p7\.b\[w12, 15\]
+** ret
+*/
+TEST_SELECT_P (psel_p13_p8_p7_w12p15, svbool_t,
+ p13 = svpsel_b8 (p8, p7, w12 + 15),
+ p13 = svpsel_b8 (p8, p7, w12 + 15))
+
+/*
+** psel_p0_p0_p0_w12p16:
+** add (w[0-9]+), w12, #?16
+** psel p0, p0, p0\.b\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p0_p0_p0_w12p16, svbool_t,
+ p0 = svpsel_b8 (p0, p0, w12 + 16),
+ p0 = svpsel_b8 (p0, p0, w12 + 16))
+
+/*
+** psel_p15_p15_p15_w12m1:
+** sub (w[0-9]+), w12, #?1
+** psel p15, p15, p15\.b\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p15_p15_p15_w12m1, svbool_t,
+ p15 = svpsel_b8 (p15, p15, w12 - 1),
+ p15 = svpsel_b8 (p15, p15, w12 - 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_c16.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_c16.c
new file mode 100644
index 0000000..1cf45e6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_c16.c
@@ -0,0 +1,89 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** psel_p0_p2_p7_0:
+** mov [wx](1[2-5]), #?0
+** psel p0, p2, p7\.h\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p0_p2_p7_0, svcount_t,
+ p0 = svpsel_c16 (p2, p7, 0),
+ p0 = svpsel_c16 (p2, p7, 0))
+
+/*
+** psel_p2_p0_p8_w11:
+** mov [wx](1[2-5]), [wx]11
+** psel p2, p0, p8\.h\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p2_p0_p8_w11, svcount_t,
+ p2 = svpsel_c16 (p0, p8, w11),
+ p2 = svpsel_c16 (p0, p8, w11))
+
+/*
+** psel_p2_p13_p15_w12:
+** psel p2, p13, p15\.h\[w12, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p2_p13_p15_w12, svcount_t,
+ p2 = svpsel_c16 (p13, p15, w12),
+ p2 = svpsel_c16 (p13, p15, w12))
+
+/*
+** psel_p0_p13_p15_w15:
+** psel p0, p13, p15\.h\[w15, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p0_p13_p15_w15, svcount_t,
+ p0 = svpsel_c16 (p13, p15, w15),
+ p0 = svpsel_c16 (p13, p15, w15))
+
+/*
+** psel_p13_p0_p15_w16:
+** mov [wx](1[2-5]), [wx]16
+** psel p13, p0, p15\.h\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p13_p0_p15_w16, svcount_t,
+ p13 = svpsel_c16 (p0, p15, w16),
+ p13 = svpsel_c16 (p0, p15, w16))
+
+/*
+** psel_p2_p13_p8_w12p1:
+** psel p2, p13, p8\.h\[w12, 1\]
+** ret
+*/
+TEST_SELECT_P (psel_p2_p13_p8_w12p1, svcount_t,
+ p2 = svpsel_c16 (p13, p8, w12 + 1),
+ p2 = svpsel_c16 (p13, p8, w12 + 1))
+
+/*
+** psel_p13_p0_p7_w12p7:
+** psel p13, p0, p7\.h\[w12, 7\]
+** ret
+*/
+TEST_SELECT_P (psel_p13_p0_p7_w12p7, svcount_t,
+ p13 = svpsel_c16 (p0, p7, w12 + 7),
+ p13 = svpsel_c16 (p0, p7, w12 + 7))
+
+/*
+** psel_p0_p0_p15_w12p8:
+** add (w[0-9]+), w12, #?8
+** psel p0, p0, p15\.h\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p0_p0_p15_w12p8, svcount_t,
+ p0 = svpsel_c16 (p0, p15, w12 + 8),
+ p0 = svpsel_c16 (p0, p15, w12 + 8))
+
+/*
+** psel_p13_p13_p7_w12m1:
+** sub (w[0-9]+), w12, #?1
+** psel p13, p13, p7\.h\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p13_p13_p7_w12m1, svcount_t,
+ p13 = svpsel_c16 (p13, p7, w12 - 1),
+ p13 = svpsel_c16 (p13, p7, w12 - 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_c32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_c32.c
new file mode 100644
index 0000000..5246a03
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_c32.c
@@ -0,0 +1,89 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** psel_p0_p2_p7_0:
+** mov [wx](1[2-5]), #?0
+** psel p0, p2, p7\.s\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p0_p2_p7_0, svcount_t,
+ p0 = svpsel_c32 (p2, p7, 0),
+ p0 = svpsel_c32 (p2, p7, 0))
+
+/*
+** psel_p2_p13_p8_w11:
+** mov [wx](1[2-5]), [wx]11
+** psel p2, p13, p8\.s\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p2_p13_p8_w11, svcount_t,
+ p2 = svpsel_c32 (p13, p8, w11),
+ p2 = svpsel_c32 (p13, p8, w11))
+
+/*
+** psel_p0_p13_p15_w12:
+** psel p0, p13, p15\.s\[w12, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p0_p13_p15_w12, svcount_t,
+ p0 = svpsel_c32 (p13, p15, w12),
+ p0 = svpsel_c32 (p13, p15, w12))
+
+/*
+** psel_p2_p0_p15_w15:
+** psel p2, p0, p15\.s\[w15, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p2_p0_p15_w15, svcount_t,
+ p2 = svpsel_c32 (p0, p15, w15),
+ p2 = svpsel_c32 (p0, p15, w15))
+
+/*
+** psel_p13_p0_p7_w16:
+** mov [wx](1[2-5]), [wx]16
+** psel p13, p0, p7\.s\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p13_p0_p7_w16, svcount_t,
+ p13 = svpsel_c32 (p0, p7, w16),
+ p13 = svpsel_c32 (p0, p7, w16))
+
+/*
+** psel_p2_p13_p8_w12p1:
+** psel p2, p13, p8\.s\[w12, 1\]
+** ret
+*/
+TEST_SELECT_P (psel_p2_p13_p8_w12p1, svcount_t,
+ p2 = svpsel_c32 (p13, p8, w12 + 1),
+ p2 = svpsel_c32 (p13, p8, w12 + 1))
+
+/*
+** psel_p13_p0_p7_w12p3:
+** psel p13, p0, p7\.s\[w12, 3\]
+** ret
+*/
+TEST_SELECT_P (psel_p13_p0_p7_w12p3, svcount_t,
+ p13 = svpsel_c32 (p0, p7, w12 + 3),
+ p13 = svpsel_c32 (p0, p7, w12 + 3))
+
+/*
+** psel_p0_p0_p7_w12p4:
+** add (w[0-9]+), w12, #?4
+** psel p0, p0, p7\.s\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p0_p0_p7_w12p4, svcount_t,
+ p0 = svpsel_c32 (p0, p7, w12 + 4),
+ p0 = svpsel_c32 (p0, p7, w12 + 4))
+
+/*
+** psel_p13_p13_p15_w12m1:
+** sub (w[0-9]+), w12, #?1
+** psel p13, p13, p15\.s\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p13_p13_p15_w12m1, svcount_t,
+ p13 = svpsel_c32 (p13, p15, w12 - 1),
+ p13 = svpsel_c32 (p13, p15, w12 - 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_c64.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_c64.c
new file mode 100644
index 0000000..97304fd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_c64.c
@@ -0,0 +1,80 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** psel_p0_p2_p7_0:
+** mov [wx](1[2-5]), #?0
+** psel p0, p2, p7\.d\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p0_p2_p7_0, svcount_t,
+ p0 = svpsel_c64 (p2, p7, 0),
+ p0 = svpsel_c64 (p2, p7, 0))
+
+/*
+** psel_p2_p13_p8_w11:
+** mov [wx](1[2-5]), [wx]11
+** psel p2, p13, p8\.d\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p2_p13_p8_w11, svcount_t,
+ p2 = svpsel_c64 (p13, p8, w11),
+ p2 = svpsel_c64 (p13, p8, w11))
+
+/*
+** psel_p2_p0_p15_w12:
+** psel p2, p0, p15\.d\[w12, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p2_p0_p15_w12, svcount_t,
+ p2 = svpsel_c64 (p0, p15, w12),
+ p2 = svpsel_c64 (p0, p15, w12))
+
+/*
+** psel_p0_p13_p15_w15:
+** psel p0, p13, p15\.d\[w15, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p0_p13_p15_w15, svcount_t,
+ p0 = svpsel_c64 (p13, p15, w15),
+ p0 = svpsel_c64 (p13, p15, w15))
+
+/*
+** psel_p13_p0_p15_w16:
+** mov [wx](1[2-5]), [wx]16
+** psel p13, p0, p15\.d\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p13_p0_p15_w16, svcount_t,
+ p13 = svpsel_c64 (p0, p15, w16),
+ p13 = svpsel_c64 (p0, p15, w16))
+
+/*
+** psel_p2_p13_p8_w12p1:
+** psel p2, p13, p8\.d\[w12, 1\]
+** ret
+*/
+TEST_SELECT_P (psel_p2_p13_p8_w12p1, svcount_t,
+ p2 = svpsel_c64 (p13, p8, w12 + 1),
+ p2 = svpsel_c64 (p13, p8, w12 + 1))
+
+/*
+** psel_p0_p0_p8_w12p2:
+** add (w[0-9]+), w12, #?2
+** psel p0, p0, p8\.d\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p0_p0_p8_w12p2, svcount_t,
+ p0 = svpsel_c64 (p0, p8, w12 + 2),
+ p0 = svpsel_c64 (p0, p8, w12 + 2))
+
+/*
+** psel_p13_p13_p15_w12m1:
+** sub (w[0-9]+), w12, #?1
+** psel p13, p13, p15\.d\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p13_p13_p15_w12m1, svcount_t,
+ p13 = svpsel_c64 (p13, p15, w12 - 1),
+ p13 = svpsel_c64 (p13, p15, w12 - 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_c8.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_c8.c
new file mode 100644
index 0000000..95973a1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/psel_c8.c
@@ -0,0 +1,89 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** psel_p0_p2_p7_0:
+** mov [wx](1[2-5]), #?0
+** psel p0, p2, p7\.b\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p0_p2_p7_0, svcount_t,
+ p0 = svpsel_c8 (p2, p7, 0),
+ p0 = svpsel_c8 (p2, p7, 0))
+
+/*
+** psel_p2_p0_p8_w11:
+** mov [wx](1[2-5]), [wx]11
+** psel p2, p0, p8\.b\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p2_p0_p8_w11, svcount_t,
+ p2 = svpsel_c8 (p0, p8, w11),
+ p2 = svpsel_c8 (p0, p8, w11))
+
+/*
+** psel_p0_p13_p15_w12:
+** psel p0, p13, p15\.b\[w12, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p0_p13_p15_w12, svcount_t,
+ p0 = svpsel_c8 (p13, p15, w12),
+ p0 = svpsel_c8 (p13, p15, w12))
+
+/*
+** psel_p13_p0_p8_w15:
+** psel p13, p0, p8\.b\[w15, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p13_p0_p8_w15, svcount_t,
+ p13 = svpsel_c8 (p0, p8, w15),
+ p13 = svpsel_c8 (p0, p8, w15))
+
+/*
+** psel_p2_p13_p7_w16:
+** mov [wx](1[2-5]), [wx]16
+** psel p2, p13, p7\.b\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p2_p13_p7_w16, svcount_t,
+ p2 = svpsel_c8 (p13, p7, w16),
+ p2 = svpsel_c8 (p13, p7, w16))
+
+/*
+** psel_p0_p13_p8_w12p1:
+** psel p0, p13, p8\.b\[w12, 1\]
+** ret
+*/
+TEST_SELECT_P (psel_p0_p13_p8_w12p1, svcount_t,
+ p0 = svpsel_c8 (p13, p8, w12 + 1),
+ p0 = svpsel_c8 (p13, p8, w12 + 1))
+
+/*
+** psel_p13_p2_p7_w12p15:
+** psel p13, p2, p7\.b\[w12, 15\]
+** ret
+*/
+TEST_SELECT_P (psel_p13_p2_p7_w12p15, svcount_t,
+ p13 = svpsel_c8 (p2, p7, w12 + 15),
+ p13 = svpsel_c8 (p2, p7, w12 + 15))
+
+/*
+** psel_p0_p0_p15_w12p16:
+** add (w[0-9]+), w12, #?16
+** psel p0, p0, p15\.b\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p0_p0_p15_w12p16, svcount_t,
+ p0 = svpsel_c8 (p0, p15, w12 + 16),
+ p0 = svpsel_c8 (p0, p15, w12 + 16))
+
+/*
+** psel_p13_p13_p15_w12m1:
+** sub (w[0-9]+), w12, #?1
+** psel p13, p13, p15\.b\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_p13_p13_p15_w12m1, svcount_t,
+ p13 = svpsel_c8 (p13, p15, w12 - 1),
+ p13 = svpsel_c8 (p13, p15, w12 - 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ptrue_c16.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ptrue_c16.c
new file mode 100644
index 0000000..3e157bf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ptrue_c16.c
@@ -0,0 +1,41 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ptrue_pn0:
+** ptrue pn([8-9]|1[0-5])\.h
+** mov p0\.b, p\1\.b
+** ret
+*/
+TEST_PN (ptrue_pn0,
+ pn0 = svptrue_c16 (),
+ pn0 = svptrue_c16 ())
+
+/*
+** ptrue_pn7:
+** ptrue pn([8-9]|1[0-5])\.h
+** mov p7\.b, p\1\.b
+** ret
+*/
+TEST_PN (ptrue_pn7,
+ pn7 = svptrue_c16 (),
+ pn7 = svptrue_c16 ())
+
+/*
+** ptrue_pn8:
+** ptrue pn8\.h
+** ret
+*/
+TEST_PN (ptrue_pn8,
+ pn8 = svptrue_c16 (),
+ pn8 = svptrue_c16 ())
+
+/*
+** ptrue_pn15:
+** ptrue pn15\.h
+** ret
+*/
+TEST_PN (ptrue_pn15,
+ pn15 = svptrue_c16 (),
+ pn15 = svptrue_c16 ())
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ptrue_c32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ptrue_c32.c
new file mode 100644
index 0000000..49ad1ad
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ptrue_c32.c
@@ -0,0 +1,41 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ptrue_pn0:
+** ptrue pn([8-9]|1[0-5])\.s
+** mov p0\.b, p\1\.b
+** ret
+*/
+TEST_PN (ptrue_pn0,
+ pn0 = svptrue_c32 (),
+ pn0 = svptrue_c32 ())
+
+/*
+** ptrue_pn7:
+** ptrue pn([8-9]|1[0-5])\.s
+** mov p7\.b, p\1\.b
+** ret
+*/
+TEST_PN (ptrue_pn7,
+ pn7 = svptrue_c32 (),
+ pn7 = svptrue_c32 ())
+
+/*
+** ptrue_pn8:
+** ptrue pn8\.s
+** ret
+*/
+TEST_PN (ptrue_pn8,
+ pn8 = svptrue_c32 (),
+ pn8 = svptrue_c32 ())
+
+/*
+** ptrue_pn15:
+** ptrue pn15\.s
+** ret
+*/
+TEST_PN (ptrue_pn15,
+ pn15 = svptrue_c32 (),
+ pn15 = svptrue_c32 ())
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ptrue_c64.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ptrue_c64.c
new file mode 100644
index 0000000..746b892
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ptrue_c64.c
@@ -0,0 +1,41 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ptrue_pn0:
+** ptrue pn([8-9]|1[0-5])\.d
+** mov p0\.b, p\1\.b
+** ret
+*/
+TEST_PN (ptrue_pn0,
+ pn0 = svptrue_c64 (),
+ pn0 = svptrue_c64 ())
+
+/*
+** ptrue_pn7:
+** ptrue pn([8-9]|1[0-5])\.d
+** mov p7\.b, p\1\.b
+** ret
+*/
+TEST_PN (ptrue_pn7,
+ pn7 = svptrue_c64 (),
+ pn7 = svptrue_c64 ())
+
+/*
+** ptrue_pn8:
+** ptrue pn8\.d
+** ret
+*/
+TEST_PN (ptrue_pn8,
+ pn8 = svptrue_c64 (),
+ pn8 = svptrue_c64 ())
+
+/*
+** ptrue_pn15:
+** ptrue pn15\.d
+** ret
+*/
+TEST_PN (ptrue_pn15,
+ pn15 = svptrue_c64 (),
+ pn15 = svptrue_c64 ())
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ptrue_c8.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ptrue_c8.c
new file mode 100644
index 0000000..60b4d72
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/ptrue_c8.c
@@ -0,0 +1,41 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** ptrue_pn0:
+** ptrue pn([8-9]|1[0-5])\.b
+** mov p0\.b, p\1\.b
+** ret
+*/
+TEST_PN (ptrue_pn0,
+ pn0 = svptrue_c8 (),
+ pn0 = svptrue_c8 ())
+
+/*
+** ptrue_pn7:
+** ptrue pn([8-9]|1[0-5])\.b
+** mov p7\.b, p\1\.b
+** ret
+*/
+TEST_PN (ptrue_pn7,
+ pn7 = svptrue_c8 (),
+ pn7 = svptrue_c8 ())
+
+/*
+** ptrue_pn8:
+** ptrue pn8\.b
+** ret
+*/
+TEST_PN (ptrue_pn8,
+ pn8 = svptrue_c8 (),
+ pn8 = svptrue_c8 ())
+
+/*
+** ptrue_pn15:
+** ptrue pn15\.b
+** ret
+*/
+TEST_PN (ptrue_pn15,
+ pn15 = svptrue_c8 (),
+ pn15 = svptrue_c8 ())
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_s16_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_s16_s32_x2.c
new file mode 100644
index 0000000..aa1b76f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_s16_s32_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qcvt_z0_z0:
+** sqcvt z0\.h, {z0\.s - z1\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvt_z0_z0, svint32x2_t, svint16_t,
+ z0_res = svqcvt_s16_s32_x2 (z0),
+ z0_res = svqcvt_s16 (z0))
+
+/*
+** qcvt_z0_z6:
+** sqcvt z0\.h, {z6\.s - z7\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvt_z0_z6, svint32x2_t, svint16_t,
+ z0_res = svqcvt_s16_s32_x2 (z6),
+ z0_res = svqcvt_s16 (z6))
+
+/*
+** qcvt_z0_z29:
+** mov [^\n]+
+** mov [^\n]+
+** sqcvt z0\.h, [^\n]+
+** ret
+*/
+TEST_X2_NARROW (qcvt_z0_z29, svint32x2_t, svint16_t,
+ z0_res = svqcvt_s16_s32_x2 (z29),
+ z0_res = svqcvt_s16 (z29))
+
+/*
+** qcvt_z5_z0:
+** sqcvt z5\.h, {z0\.s - z1\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvt_z5_z0, svint32x2_t, svint16_t,
+ z5 = svqcvt_s16_s32_x2 (z0),
+ z5 = svqcvt_s16 (z0))
+
+/*
+** qcvt_z22_z16:
+** sqcvt z22\.h, {z16\.s - z17\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvt_z22_z16, svint32x2_t, svint16_t,
+ z22 = svqcvt_s16_s32_x2 (z16),
+ z22 = svqcvt_s16 (z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_s16_s64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_s16_s64_x4.c
new file mode 100644
index 0000000..4d0d768
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_s16_s64_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qcvt_z0_z0:
+** sqcvt z0\.h, {z0\.d - z3\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z0_z0, svint64x4_t, svint16_t,
+ z0_res = svqcvt_s16_s64_x4 (z0),
+ z0_res = svqcvt_s16 (z0))
+
+/*
+** qcvt_z0_z4:
+** sqcvt z0\.h, {z4\.d - z7\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z0_z4, svint64x4_t, svint16_t,
+ z0_res = svqcvt_s16_s64_x4 (z4),
+ z0_res = svqcvt_s16 (z4))
+
+/*
+** qcvt_z0_z21:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqcvt z0\.h, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvt_z0_z21, svint64x4_t, svint16_t,
+ z0_res = svqcvt_s16_s64_x4 (z21),
+ z0_res = svqcvt_s16 (z21))
+
+/*
+** qcvt_z25_z26:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqcvt z25\.h, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvt_z25_z26, svint64x4_t, svint16_t,
+ z25 = svqcvt_s16_s64_x4 (z26),
+ z25 = svqcvt_s16 (z26))
+
+/*
+** qcvt_z25_z0:
+** sqcvt z25\.h, {z0\.d - z3\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z25_z0, svint64x4_t, svint16_t,
+ z25 = svqcvt_s16_s64_x4 (z0),
+ z25 = svqcvt_s16 (z0))
+
+/*
+** qcvt_z22_z16:
+** sqcvt z22\.h, {z16\.d - z19\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z22_z16, svint64x4_t, svint16_t,
+ z22_res = svqcvt_s16_s64_x4 (z16),
+ z22_res = svqcvt_s16 (z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_s8_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_s8_s32_x4.c
new file mode 100644
index 0000000..2b568be2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_s8_s32_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qcvt_z0_z0:
+** sqcvt z0\.b, {z0\.s - z3\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z0_z0, svint32x4_t, svint8_t,
+ z0_res = svqcvt_s8_s32_x4 (z0),
+ z0_res = svqcvt_s8 (z0))
+
+/*
+** qcvt_z0_z4:
+** sqcvt z0\.b, {z4\.s - z7\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z0_z4, svint32x4_t, svint8_t,
+ z0_res = svqcvt_s8_s32_x4 (z4),
+ z0_res = svqcvt_s8 (z4))
+
+/*
+** qcvt_z0_z21:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqcvt z0\.b, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvt_z0_z21, svint32x4_t, svint8_t,
+ z0_res = svqcvt_s8_s32_x4 (z21),
+ z0_res = svqcvt_s8 (z21))
+
+/*
+** qcvt_z25_z26:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqcvt z25\.b, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvt_z25_z26, svint32x4_t, svint8_t,
+ z25 = svqcvt_s8_s32_x4 (z26),
+ z25 = svqcvt_s8 (z26))
+
+/*
+** qcvt_z25_z0:
+** sqcvt z25\.b, {z0\.s - z3\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z25_z0, svint32x4_t, svint8_t,
+ z25 = svqcvt_s8_s32_x4 (z0),
+ z25 = svqcvt_s8 (z0))
+
+/*
+** qcvt_z22_z16:
+** sqcvt z22\.b, {z16\.s - z19\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z22_z16, svint32x4_t, svint8_t,
+ z22_res = svqcvt_s8_s32_x4 (z16),
+ z22_res = svqcvt_s8 (z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u16_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u16_s32_x2.c
new file mode 100644
index 0000000..e87e9e4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u16_s32_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qcvt_z0_z0:
+** sqcvtu z0\.h, {z0\.s - z1\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvt_z0_z0, svint32x2_t, svuint16_t,
+ z0_res = svqcvt_u16_s32_x2 (z0),
+ z0_res = svqcvt_u16 (z0))
+
+/*
+** qcvt_z0_z6:
+** sqcvtu z0\.h, {z6\.s - z7\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvt_z0_z6, svint32x2_t, svuint16_t,
+ z0_res = svqcvt_u16_s32_x2 (z6),
+ z0_res = svqcvt_u16 (z6))
+
+/*
+** qcvt_z0_z29:
+** mov [^\n]+
+** mov [^\n]+
+** sqcvtu z0\.h, [^\n]+
+** ret
+*/
+TEST_X2_NARROW (qcvt_z0_z29, svint32x2_t, svuint16_t,
+ z0_res = svqcvt_u16_s32_x2 (z29),
+ z0_res = svqcvt_u16 (z29))
+
+/*
+** qcvt_z5_z0:
+** sqcvtu z5\.h, {z0\.s - z1\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvt_z5_z0, svint32x2_t, svuint16_t,
+ z5 = svqcvt_u16_s32_x2 (z0),
+ z5 = svqcvt_u16 (z0))
+
+/*
+** qcvt_z22_z16:
+** sqcvtu z22\.h, {z16\.s - z17\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvt_z22_z16, svint32x2_t, svuint16_t,
+ z22 = svqcvt_u16_s32_x2 (z16),
+ z22 = svqcvt_u16 (z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u16_s64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u16_s64_x4.c
new file mode 100644
index 0000000..288f966
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u16_s64_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qcvt_z0_z0:
+** sqcvtu z0\.h, {z0\.d - z3\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z0_z0, svint64x4_t, svuint16_t,
+ z0_res = svqcvt_u16_s64_x4 (z0),
+ z0_res = svqcvt_u16 (z0))
+
+/*
+** qcvt_z0_z4:
+** sqcvtu z0\.h, {z4\.d - z7\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z0_z4, svint64x4_t, svuint16_t,
+ z0_res = svqcvt_u16_s64_x4 (z4),
+ z0_res = svqcvt_u16 (z4))
+
+/*
+** qcvt_z0_z21:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqcvtu z0\.h, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvt_z0_z21, svint64x4_t, svuint16_t,
+ z0_res = svqcvt_u16_s64_x4 (z21),
+ z0_res = svqcvt_u16 (z21))
+
+/*
+** qcvt_z25_z26:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqcvtu z25\.h, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvt_z25_z26, svint64x4_t, svuint16_t,
+ z25 = svqcvt_u16_s64_x4 (z26),
+ z25 = svqcvt_u16 (z26))
+
+/*
+** qcvt_z25_z0:
+** sqcvtu z25\.h, {z0\.d - z3\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z25_z0, svint64x4_t, svuint16_t,
+ z25 = svqcvt_u16_s64_x4 (z0),
+ z25 = svqcvt_u16 (z0))
+
+/*
+** qcvt_z22_z16:
+** sqcvtu z22\.h, {z16\.d - z19\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z22_z16, svint64x4_t, svuint16_t,
+ z22_res = svqcvt_u16_s64_x4 (z16),
+ z22_res = svqcvt_u16 (z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u16_u32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u16_u32_x2.c
new file mode 100644
index 0000000..77c2848
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u16_u32_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qcvt_z0_z0:
+** uqcvt z0\.h, {z0\.s - z1\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvt_z0_z0, svuint32x2_t, svuint16_t,
+ z0_res = svqcvt_u16_u32_x2 (z0),
+ z0_res = svqcvt_u16 (z0))
+
+/*
+** qcvt_z0_z6:
+** uqcvt z0\.h, {z6\.s - z7\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvt_z0_z6, svuint32x2_t, svuint16_t,
+ z0_res = svqcvt_u16_u32_x2 (z6),
+ z0_res = svqcvt_u16 (z6))
+
+/*
+** qcvt_z0_z29:
+** mov [^\n]+
+** mov [^\n]+
+** uqcvt z0\.h, [^\n]+
+** ret
+*/
+TEST_X2_NARROW (qcvt_z0_z29, svuint32x2_t, svuint16_t,
+ z0_res = svqcvt_u16_u32_x2 (z29),
+ z0_res = svqcvt_u16 (z29))
+
+/*
+** qcvt_z5_z0:
+** uqcvt z5\.h, {z0\.s - z1\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvt_z5_z0, svuint32x2_t, svuint16_t,
+ z5 = svqcvt_u16_u32_x2 (z0),
+ z5 = svqcvt_u16 (z0))
+
+/*
+** qcvt_z22_z16:
+** uqcvt z22\.h, {z16\.s - z17\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvt_z22_z16, svuint32x2_t, svuint16_t,
+ z22 = svqcvt_u16_u32_x2 (z16),
+ z22 = svqcvt_u16 (z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u16_u64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u16_u64_x4.c
new file mode 100644
index 0000000..4ff7275
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u16_u64_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qcvt_z0_z0:
+** uqcvt z0\.h, {z0\.d - z3\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z0_z0, svuint64x4_t, svuint16_t,
+ z0_res = svqcvt_u16_u64_x4 (z0),
+ z0_res = svqcvt_u16 (z0))
+
+/*
+** qcvt_z0_z4:
+** uqcvt z0\.h, {z4\.d - z7\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z0_z4, svuint64x4_t, svuint16_t,
+ z0_res = svqcvt_u16_u64_x4 (z4),
+ z0_res = svqcvt_u16 (z4))
+
+/*
+** qcvt_z0_z21:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uqcvt z0\.h, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvt_z0_z21, svuint64x4_t, svuint16_t,
+ z0_res = svqcvt_u16_u64_x4 (z21),
+ z0_res = svqcvt_u16 (z21))
+
+/*
+** qcvt_z25_z26:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uqcvt z25\.h, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvt_z25_z26, svuint64x4_t, svuint16_t,
+ z25 = svqcvt_u16_u64_x4 (z26),
+ z25 = svqcvt_u16 (z26))
+
+/*
+** qcvt_z25_z0:
+** uqcvt z25\.h, {z0\.d - z3\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z25_z0, svuint64x4_t, svuint16_t,
+ z25 = svqcvt_u16_u64_x4 (z0),
+ z25 = svqcvt_u16 (z0))
+
+/*
+** qcvt_z22_z16:
+** uqcvt z22\.h, {z16\.d - z19\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z22_z16, svuint64x4_t, svuint16_t,
+ z22_res = svqcvt_u16_u64_x4 (z16),
+ z22_res = svqcvt_u16 (z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u8_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u8_s32_x4.c
new file mode 100644
index 0000000..c9848ba
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u8_s32_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qcvt_z0_z0:
+** sqcvtu z0\.b, {z0\.s - z3\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z0_z0, svint32x4_t, svuint8_t,
+ z0_res = svqcvt_u8_s32_x4 (z0),
+ z0_res = svqcvt_u8 (z0))
+
+/*
+** qcvt_z0_z4:
+** sqcvtu z0\.b, {z4\.s - z7\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z0_z4, svint32x4_t, svuint8_t,
+ z0_res = svqcvt_u8_s32_x4 (z4),
+ z0_res = svqcvt_u8 (z4))
+
+/*
+** qcvt_z0_z21:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqcvtu z0\.b, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvt_z0_z21, svint32x4_t, svuint8_t,
+ z0_res = svqcvt_u8_s32_x4 (z21),
+ z0_res = svqcvt_u8 (z21))
+
+/*
+** qcvt_z25_z26:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqcvtu z25\.b, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvt_z25_z26, svint32x4_t, svuint8_t,
+ z25 = svqcvt_u8_s32_x4 (z26),
+ z25 = svqcvt_u8 (z26))
+
+/*
+** qcvt_z25_z0:
+** sqcvtu z25\.b, {z0\.s - z3\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z25_z0, svint32x4_t, svuint8_t,
+ z25 = svqcvt_u8_s32_x4 (z0),
+ z25 = svqcvt_u8 (z0))
+
+/*
+** qcvt_z22_z16:
+** sqcvtu z22\.b, {z16\.s - z19\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z22_z16, svint32x4_t, svuint8_t,
+ z22_res = svqcvt_u8_s32_x4 (z16),
+ z22_res = svqcvt_u8 (z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u8_u32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u8_u32_x4.c
new file mode 100644
index 0000000..8eb7fb6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvt_u8_u32_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qcvt_z0_z0:
+** uqcvt z0\.b, {z0\.s - z3\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z0_z0, svuint32x4_t, svuint8_t,
+ z0_res = svqcvt_u8_u32_x4 (z0),
+ z0_res = svqcvt_u8 (z0))
+
+/*
+** qcvt_z0_z4:
+** uqcvt z0\.b, {z4\.s - z7\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z0_z4, svuint32x4_t, svuint8_t,
+ z0_res = svqcvt_u8_u32_x4 (z4),
+ z0_res = svqcvt_u8 (z4))
+
+/*
+** qcvt_z0_z21:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uqcvt z0\.b, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvt_z0_z21, svuint32x4_t, svuint8_t,
+ z0_res = svqcvt_u8_u32_x4 (z21),
+ z0_res = svqcvt_u8 (z21))
+
+/*
+** qcvt_z25_z26:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uqcvt z25\.b, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvt_z25_z26, svuint32x4_t, svuint8_t,
+ z25 = svqcvt_u8_u32_x4 (z26),
+ z25 = svqcvt_u8 (z26))
+
+/*
+** qcvt_z25_z0:
+** uqcvt z25\.b, {z0\.s - z3\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z25_z0, svuint32x4_t, svuint8_t,
+ z25 = svqcvt_u8_u32_x4 (z0),
+ z25 = svqcvt_u8 (z0))
+
+/*
+** qcvt_z22_z16:
+** uqcvt z22\.b, {z16\.s - z19\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvt_z22_z16, svuint32x4_t, svuint8_t,
+ z22_res = svqcvt_u8_u32_x4 (z16),
+ z22_res = svqcvt_u8 (z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_s16_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_s16_s32_x2.c
new file mode 100644
index 0000000..5cee69f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_s16_s32_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qcvtn_z0_z0:
+** sqcvtn z0\.h, {z0\.s - z1\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvtn_z0_z0, svint32x2_t, svint16_t,
+ z0_res = svqcvtn_s16_s32_x2 (z0),
+ z0_res = svqcvtn_s16 (z0))
+
+/*
+** qcvtn_z0_z6:
+** sqcvtn z0\.h, {z6\.s - z7\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvtn_z0_z6, svint32x2_t, svint16_t,
+ z0_res = svqcvtn_s16_s32_x2 (z6),
+ z0_res = svqcvtn_s16 (z6))
+
+/*
+** qcvtn_z0_z29:
+** mov [^\n]+
+** mov [^\n]+
+** sqcvtn z0\.h, [^\n]+
+** ret
+*/
+TEST_X2_NARROW (qcvtn_z0_z29, svint32x2_t, svint16_t,
+ z0_res = svqcvtn_s16_s32_x2 (z29),
+ z0_res = svqcvtn_s16 (z29))
+
+/*
+** qcvtn_z5_z0:
+** sqcvtn z5\.h, {z0\.s - z1\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvtn_z5_z0, svint32x2_t, svint16_t,
+ z5 = svqcvtn_s16_s32_x2 (z0),
+ z5 = svqcvtn_s16 (z0))
+
+/*
+** qcvtn_z22_z16:
+** sqcvtn z22\.h, {z16\.s - z17\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvtn_z22_z16, svint32x2_t, svint16_t,
+ z22 = svqcvtn_s16_s32_x2 (z16),
+ z22 = svqcvtn_s16 (z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_s16_s64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_s16_s64_x4.c
new file mode 100644
index 0000000..fedb0a4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_s16_s64_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qcvtn_z0_z0:
+** sqcvtn z0\.h, {z0\.d - z3\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z0_z0, svint64x4_t, svint16_t,
+ z0_res = svqcvtn_s16_s64_x4 (z0),
+ z0_res = svqcvtn_s16 (z0))
+
+/*
+** qcvtn_z0_z4:
+** sqcvtn z0\.h, {z4\.d - z7\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z0_z4, svint64x4_t, svint16_t,
+ z0_res = svqcvtn_s16_s64_x4 (z4),
+ z0_res = svqcvtn_s16 (z4))
+
+/*
+** qcvtn_z0_z21:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqcvtn z0\.h, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z0_z21, svint64x4_t, svint16_t,
+ z0_res = svqcvtn_s16_s64_x4 (z21),
+ z0_res = svqcvtn_s16 (z21))
+
+/*
+** qcvtn_z25_z26:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqcvtn z25\.h, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z25_z26, svint64x4_t, svint16_t,
+ z25 = svqcvtn_s16_s64_x4 (z26),
+ z25 = svqcvtn_s16 (z26))
+
+/*
+** qcvtn_z25_z0:
+** sqcvtn z25\.h, {z0\.d - z3\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z25_z0, svint64x4_t, svint16_t,
+ z25 = svqcvtn_s16_s64_x4 (z0),
+ z25 = svqcvtn_s16 (z0))
+
+/*
+** qcvtn_z22_z16:
+** sqcvtn z22\.h, {z16\.d - z19\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z22_z16, svint64x4_t, svint16_t,
+ z22_res = svqcvtn_s16_s64_x4 (z16),
+ z22_res = svqcvtn_s16 (z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_s8_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_s8_s32_x4.c
new file mode 100644
index 0000000..a03e065
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_s8_s32_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qcvtn_z0_z0:
+** sqcvtn z0\.b, {z0\.s - z3\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z0_z0, svint32x4_t, svint8_t,
+ z0_res = svqcvtn_s8_s32_x4 (z0),
+ z0_res = svqcvtn_s8 (z0))
+
+/*
+** qcvtn_z0_z4:
+** sqcvtn z0\.b, {z4\.s - z7\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z0_z4, svint32x4_t, svint8_t,
+ z0_res = svqcvtn_s8_s32_x4 (z4),
+ z0_res = svqcvtn_s8 (z4))
+
+/*
+** qcvtn_z0_z21:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqcvtn z0\.b, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z0_z21, svint32x4_t, svint8_t,
+ z0_res = svqcvtn_s8_s32_x4 (z21),
+ z0_res = svqcvtn_s8 (z21))
+
+/*
+** qcvtn_z25_z26:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqcvtn z25\.b, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z25_z26, svint32x4_t, svint8_t,
+ z25 = svqcvtn_s8_s32_x4 (z26),
+ z25 = svqcvtn_s8 (z26))
+
+/*
+** qcvtn_z25_z0:
+** sqcvtn z25\.b, {z0\.s - z3\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z25_z0, svint32x4_t, svint8_t,
+ z25 = svqcvtn_s8_s32_x4 (z0),
+ z25 = svqcvtn_s8 (z0))
+
+/*
+** qcvtn_z22_z16:
+** sqcvtn z22\.b, {z16\.s - z19\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z22_z16, svint32x4_t, svint8_t,
+ z22_res = svqcvtn_s8_s32_x4 (z16),
+ z22_res = svqcvtn_s8 (z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u16_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u16_s32_x2.c
new file mode 100644
index 0000000..3033ac3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u16_s32_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qcvtn_z0_z0:
+** sqcvtun z0\.h, {z0\.s - z1\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvtn_z0_z0, svint32x2_t, svuint16_t,
+ z0_res = svqcvtn_u16_s32_x2 (z0),
+ z0_res = svqcvtn_u16 (z0))
+
+/*
+** qcvtn_z0_z6:
+** sqcvtun z0\.h, {z6\.s - z7\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvtn_z0_z6, svint32x2_t, svuint16_t,
+ z0_res = svqcvtn_u16_s32_x2 (z6),
+ z0_res = svqcvtn_u16 (z6))
+
+/*
+** qcvtn_z0_z29:
+** mov [^\n]+
+** mov [^\n]+
+** sqcvtun z0\.h, [^\n]+
+** ret
+*/
+TEST_X2_NARROW (qcvtn_z0_z29, svint32x2_t, svuint16_t,
+ z0_res = svqcvtn_u16_s32_x2 (z29),
+ z0_res = svqcvtn_u16 (z29))
+
+/*
+** qcvtn_z5_z0:
+** sqcvtun z5\.h, {z0\.s - z1\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvtn_z5_z0, svint32x2_t, svuint16_t,
+ z5 = svqcvtn_u16_s32_x2 (z0),
+ z5 = svqcvtn_u16 (z0))
+
+/*
+** qcvtn_z22_z16:
+** sqcvtun z22\.h, {z16\.s - z17\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvtn_z22_z16, svint32x2_t, svuint16_t,
+ z22 = svqcvtn_u16_s32_x2 (z16),
+ z22 = svqcvtn_u16 (z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u16_s64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u16_s64_x4.c
new file mode 100644
index 0000000..384c154
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u16_s64_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qcvtn_z0_z0:
+** sqcvtun z0\.h, {z0\.d - z3\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z0_z0, svint64x4_t, svuint16_t,
+ z0_res = svqcvtn_u16_s64_x4 (z0),
+ z0_res = svqcvtn_u16 (z0))
+
+/*
+** qcvtn_z0_z4:
+** sqcvtun z0\.h, {z4\.d - z7\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z0_z4, svint64x4_t, svuint16_t,
+ z0_res = svqcvtn_u16_s64_x4 (z4),
+ z0_res = svqcvtn_u16 (z4))
+
+/*
+** qcvtn_z0_z21:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqcvtun z0\.h, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z0_z21, svint64x4_t, svuint16_t,
+ z0_res = svqcvtn_u16_s64_x4 (z21),
+ z0_res = svqcvtn_u16 (z21))
+
+/*
+** qcvtn_z25_z26:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqcvtun z25\.h, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z25_z26, svint64x4_t, svuint16_t,
+ z25 = svqcvtn_u16_s64_x4 (z26),
+ z25 = svqcvtn_u16 (z26))
+
+/*
+** qcvtn_z25_z0:
+** sqcvtun z25\.h, {z0\.d - z3\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z25_z0, svint64x4_t, svuint16_t,
+ z25 = svqcvtn_u16_s64_x4 (z0),
+ z25 = svqcvtn_u16 (z0))
+
+/*
+** qcvtn_z22_z16:
+** sqcvtun z22\.h, {z16\.d - z19\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z22_z16, svint64x4_t, svuint16_t,
+ z22_res = svqcvtn_u16_s64_x4 (z16),
+ z22_res = svqcvtn_u16 (z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u16_u32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u16_u32_x2.c
new file mode 100644
index 0000000..8416aec
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u16_u32_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qcvtn_z0_z0:
+** uqcvtn z0\.h, {z0\.s - z1\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvtn_z0_z0, svuint32x2_t, svuint16_t,
+ z0_res = svqcvtn_u16_u32_x2 (z0),
+ z0_res = svqcvtn_u16 (z0))
+
+/*
+** qcvtn_z0_z6:
+** uqcvtn z0\.h, {z6\.s - z7\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvtn_z0_z6, svuint32x2_t, svuint16_t,
+ z0_res = svqcvtn_u16_u32_x2 (z6),
+ z0_res = svqcvtn_u16 (z6))
+
+/*
+** qcvtn_z0_z29:
+** mov [^\n]+
+** mov [^\n]+
+** uqcvtn z0\.h, [^\n]+
+** ret
+*/
+TEST_X2_NARROW (qcvtn_z0_z29, svuint32x2_t, svuint16_t,
+ z0_res = svqcvtn_u16_u32_x2 (z29),
+ z0_res = svqcvtn_u16 (z29))
+
+/*
+** qcvtn_z5_z0:
+** uqcvtn z5\.h, {z0\.s - z1\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvtn_z5_z0, svuint32x2_t, svuint16_t,
+ z5 = svqcvtn_u16_u32_x2 (z0),
+ z5 = svqcvtn_u16 (z0))
+
+/*
+** qcvtn_z22_z16:
+** uqcvtn z22\.h, {z16\.s - z17\.s}
+** ret
+*/
+TEST_X2_NARROW (qcvtn_z22_z16, svuint32x2_t, svuint16_t,
+ z22 = svqcvtn_u16_u32_x2 (z16),
+ z22 = svqcvtn_u16 (z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u16_u64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u16_u64_x4.c
new file mode 100644
index 0000000..7de742b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u16_u64_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qcvtn_z0_z0:
+** uqcvtn z0\.h, {z0\.d - z3\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z0_z0, svuint64x4_t, svuint16_t,
+ z0_res = svqcvtn_u16_u64_x4 (z0),
+ z0_res = svqcvtn_u16 (z0))
+
+/*
+** qcvtn_z0_z4:
+** uqcvtn z0\.h, {z4\.d - z7\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z0_z4, svuint64x4_t, svuint16_t,
+ z0_res = svqcvtn_u16_u64_x4 (z4),
+ z0_res = svqcvtn_u16 (z4))
+
+/*
+** qcvtn_z0_z21:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uqcvtn z0\.h, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z0_z21, svuint64x4_t, svuint16_t,
+ z0_res = svqcvtn_u16_u64_x4 (z21),
+ z0_res = svqcvtn_u16 (z21))
+
+/*
+** qcvtn_z25_z26:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uqcvtn z25\.h, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z25_z26, svuint64x4_t, svuint16_t,
+ z25 = svqcvtn_u16_u64_x4 (z26),
+ z25 = svqcvtn_u16 (z26))
+
+/*
+** qcvtn_z25_z0:
+** uqcvtn z25\.h, {z0\.d - z3\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z25_z0, svuint64x4_t, svuint16_t,
+ z25 = svqcvtn_u16_u64_x4 (z0),
+ z25 = svqcvtn_u16 (z0))
+
+/*
+** qcvtn_z22_z16:
+** uqcvtn z22\.h, {z16\.d - z19\.d}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z22_z16, svuint64x4_t, svuint16_t,
+ z22_res = svqcvtn_u16_u64_x4 (z16),
+ z22_res = svqcvtn_u16 (z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u8_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u8_s32_x4.c
new file mode 100644
index 0000000..6228223
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u8_s32_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qcvtn_z0_z0:
+** sqcvtun z0\.b, {z0\.s - z3\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z0_z0, svint32x4_t, svuint8_t,
+ z0_res = svqcvtn_u8_s32_x4 (z0),
+ z0_res = svqcvtn_u8 (z0))
+
+/*
+** qcvtn_z0_z4:
+** sqcvtun z0\.b, {z4\.s - z7\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z0_z4, svint32x4_t, svuint8_t,
+ z0_res = svqcvtn_u8_s32_x4 (z4),
+ z0_res = svqcvtn_u8 (z4))
+
+/*
+** qcvtn_z0_z21:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqcvtun z0\.b, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z0_z21, svint32x4_t, svuint8_t,
+ z0_res = svqcvtn_u8_s32_x4 (z21),
+ z0_res = svqcvtn_u8 (z21))
+
+/*
+** qcvtn_z25_z26:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqcvtun z25\.b, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z25_z26, svint32x4_t, svuint8_t,
+ z25 = svqcvtn_u8_s32_x4 (z26),
+ z25 = svqcvtn_u8 (z26))
+
+/*
+** qcvtn_z25_z0:
+** sqcvtun z25\.b, {z0\.s - z3\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z25_z0, svint32x4_t, svuint8_t,
+ z25 = svqcvtn_u8_s32_x4 (z0),
+ z25 = svqcvtn_u8 (z0))
+
+/*
+** qcvtn_z22_z16:
+** sqcvtun z22\.b, {z16\.s - z19\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z22_z16, svint32x4_t, svuint8_t,
+ z22_res = svqcvtn_u8_s32_x4 (z16),
+ z22_res = svqcvtn_u8 (z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u8_u32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u8_u32_x4.c
new file mode 100644
index 0000000..9c721ca
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qcvtn_u8_u32_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qcvtn_z0_z0:
+** uqcvtn z0\.b, {z0\.s - z3\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z0_z0, svuint32x4_t, svuint8_t,
+ z0_res = svqcvtn_u8_u32_x4 (z0),
+ z0_res = svqcvtn_u8 (z0))
+
+/*
+** qcvtn_z0_z4:
+** uqcvtn z0\.b, {z4\.s - z7\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z0_z4, svuint32x4_t, svuint8_t,
+ z0_res = svqcvtn_u8_u32_x4 (z4),
+ z0_res = svqcvtn_u8 (z4))
+
+/*
+** qcvtn_z0_z21:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uqcvtn z0\.b, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z0_z21, svuint32x4_t, svuint8_t,
+ z0_res = svqcvtn_u8_u32_x4 (z21),
+ z0_res = svqcvtn_u8 (z21))
+
+/*
+** qcvtn_z25_z26:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uqcvtn z25\.b, [^\n]+
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z25_z26, svuint32x4_t, svuint8_t,
+ z25 = svqcvtn_u8_u32_x4 (z26),
+ z25 = svqcvtn_u8 (z26))
+
+/*
+** qcvtn_z25_z0:
+** uqcvtn z25\.b, {z0\.s - z3\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z25_z0, svuint32x4_t, svuint8_t,
+ z25 = svqcvtn_u8_u32_x4 (z0),
+ z25 = svqcvtn_u8 (z0))
+
+/*
+** qcvtn_z22_z16:
+** uqcvtn z22\.b, {z16\.s - z19\.s}
+** ret
+*/
+TEST_X4_NARROW (qcvtn_z22_z16, svuint32x4_t, svuint8_t,
+ z22_res = svqcvtn_u8_u32_x4 (z16),
+ z22_res = svqcvtn_u8 (z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s16_x2.c
new file mode 100644
index 0000000..f8585ca
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s16_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qdmulh_z0_z0_z4:
+** sqdmulh {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (qdmulh_z0_z0_z4, svint16x2_t, z0,
+ svqdmulh_s16_x2 (z0, z4),
+ svqdmulh (z0, z4))
+
+/*
+** qdmulh_z0_z4_z0:
+** sqdmulh {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (qdmulh_z0_z4_z0, svint16x2_t, z0,
+ svqdmulh_s16_x2 (z4, z0),
+ svqdmulh (z4, z0))
+
+/*
+** qdmulh_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, {z28\.h - z29\.h}
+** |
+** sqdmulh [^\n]+, {z28\.h - z29\.h}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (qdmulh_z0_z4_z28, svint16x2_t, z0,
+ svqdmulh_s16_x2 (z4, z28),
+ svqdmulh (z4, z28))
+
+/*
+** qdmulh_z18_z18_z4:
+** sqdmulh {z18\.h - z19\.h}, {z18\.h - z19\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (qdmulh_z18_z18_z4, svint16x2_t, z18,
+ svqdmulh_s16_x2 (z18, z4),
+ svqdmulh (z18, z4))
+
+/*
+** qdmulh_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, {z18\.h - z19\.h}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (qdmulh_z23_z23_z18, svint16x2_t, z23,
+ svqdmulh_s16_x2 (z23, z18),
+ svqdmulh (z23, z18))
+
+/*
+** qdmulh_z28_z28_z0:
+** sqdmulh {z28\.h - z29\.h}, {z28\.h - z29\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_XN (qdmulh_z28_z28_z0, svint16x2_t, z28,
+ svqdmulh_s16_x2 (z28, z0),
+ svqdmulh (z28, z0))
+
+/*
+** qdmulh_z0_z0_z18:
+** sqdmulh {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_XN (qdmulh_z0_z0_z18, svint16x2_t, z0,
+ svqdmulh_s16_x2 (z0, z18),
+ svqdmulh (z0, z18))
+
+/*
+** qdmulh_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z4\.h - z5\.h}, {z4\.h - z5\.h}, [^\n]+
+** |
+** sqdmulh {z4\.h - z5\.h}, {z4\.h - z5\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (qdmulh_z4_z4_z23, svint16x2_t, z4,
+ svqdmulh_s16_x2 (z4, z23),
+ svqdmulh (z4, z23))
+
+/*
+** qdmulh_single_z24_z24_z0:
+** sqdmulh {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z24_z0, svint16x2_t, svint16_t, z24,
+ svqdmulh_single_s16_x2 (z24, z0),
+ svqdmulh (z24, z0))
+
+/*
+** qdmulh_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** |
+** sqdmulh {z28\.h - z29\.h}, {z28\.h - z29\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z28_z0, svint16x2_t, svint16_t, z24,
+ svqdmulh_single_s16_x2 (z28, z0),
+ svqdmulh (z28, z0))
+
+/*
+** qdmulh_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** sqdmulh {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z1_z0, svint16x2_t, svint16_t, z24,
+ svqdmulh_single_s16_x2 (z1, z0),
+ svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z1_z24_z0:
+** sqdmulh {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z1_z24_z0, svint16x2_t, svint16_t, z1,
+ svqdmulh_single_s16_x2 (z24, z0),
+ svqdmulh (z24, z0))
+
+/*
+** qdmulh_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z1_z1_z0, svint16x2_t, svint16_t, z1,
+ svqdmulh_single_s16_x2 (z1, z0),
+ svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z18_z18_z0:
+** sqdmulh {z18\.h - z19\.h}, {z18\.h - z19\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z18_z18_z0, svint16x2_t, svint16_t, z18,
+ svqdmulh_single_s16_x2 (z18, z0),
+ svqdmulh (z18, z0))
+
+/*
+** qdmulh_single_awkward:
+** ...
+** sqdmulh ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (qdmulh_single_awkward, svint16x2_t, svint16_t,
+ z0_res = svqdmulh_single_s16_x2 (z1, z0),
+ z0_res = svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z0_z0_z15:
+** ...
+** sqdmulh {z0\.h - z1\.h}, {z0\.h - z1\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (qdmulh_single_z0_z0_z15, svint16x2_t, svint16_t,
+ z0 = svqdmulh_single_s16_x2 (z0, z15),
+ z0 = svqdmulh (z0, z15))
+
+/*
+** qdmulh_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** sqdmulh {z24\.h - z25\.h}, {z24\.h - z25\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z24_z16, svint16x2_t, svint16_t, z24,
+ svqdmulh_single_s16_x2 (z24, z16),
+ svqdmulh (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s16_x4.c
new file mode 100644
index 0000000..f33ec05
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s16_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qdmulh_z0_z0_z4:
+** sqdmulh {z0\.h - z3\.h}, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (qdmulh_z0_z0_z4, svint16x4_t, z0,
+ svqdmulh_s16_x4 (z0, z4),
+ svqdmulh (z0, z4))
+
+/*
+** qdmulh_z0_z4_z0:
+** sqdmulh {z0\.h - z3\.h}, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (qdmulh_z0_z4_z0, svint16x4_t, z0,
+ svqdmulh_s16_x4 (z4, z0),
+ svqdmulh (z4, z0))
+
+/*
+** qdmulh_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, {z28\.h - z31\.h}
+** |
+** sqdmulh [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (qdmulh_z0_z4_z28, svint16x4_t, z0,
+ svqdmulh_s16_x4 (z4, z28),
+ svqdmulh (z4, z28))
+
+/*
+** qdmulh_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, {z4\.h - z7\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (qdmulh_z18_z18_z4, svint16x4_t, z18,
+ svqdmulh_s16_x4 (z18, z4),
+ svqdmulh (z18, z4))
+
+/*
+** qdmulh_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (qdmulh_z23_z23_z28, svint16x4_t, z23,
+ svqdmulh_s16_x4 (z23, z28),
+ svqdmulh (z23, z28))
+
+/*
+** qdmulh_z28_z28_z0:
+** sqdmulh {z28\.h - z31\.h}, {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (qdmulh_z28_z28_z0, svint16x4_t, z28,
+ svqdmulh_s16_x4 (z28, z0),
+ svqdmulh (z28, z0))
+
+/*
+** qdmulh_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z0\.h - z3\.h}, {z0\.h - z3\.h}, [^\n]+
+** |
+** sqdmulh {z0\.h - z3\.h}, {z0\.h - z3\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (qdmulh_z0_z0_z18, svint16x4_t, z0,
+ svqdmulh_s16_x4 (z0, z18),
+ svqdmulh (z0, z18))
+
+/*
+** qdmulh_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z4\.h - z7\.h}, {z4\.h - z7\.h}, [^\n]+
+** |
+** sqdmulh {z4\.h - z7\.h}, {z4\.h - z7\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (qdmulh_z4_z4_z23, svint16x4_t, z4,
+ svqdmulh_s16_x4 (z4, z23),
+ svqdmulh (z4, z23))
+
+/*
+** qdmulh_single_z24_z24_z0:
+** sqdmulh {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z24_z0, svint16x4_t, svint16_t, z24,
+ svqdmulh_single_s16_x4 (z24, z0),
+ svqdmulh (z24, z0))
+
+/*
+** qdmulh_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** |
+** sqdmulh {z28\.h - z31\.h}, {z28\.h - z31\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z28_z0, svint16x4_t, svint16_t, z24,
+ svqdmulh_single_s16_x4 (z28, z0),
+ svqdmulh (z28, z0))
+
+/*
+** qdmulh_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z1_z0, svint16x4_t, svint16_t, z24,
+ svqdmulh_single_s16_x4 (z1, z0),
+ svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z1_z24_z0:
+** sqdmulh {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z1_z24_z0, svint16x4_t, svint16_t, z1,
+ svqdmulh_single_s16_x4 (z24, z0),
+ svqdmulh (z24, z0))
+
+/*
+** qdmulh_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z1_z1_z0, svint16x4_t, svint16_t, z1,
+ svqdmulh_single_s16_x4 (z1, z0),
+ svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z18_z18_z0, svint16x4_t, svint16_t, z18,
+ svqdmulh_single_s16_x4 (z18, z0),
+ svqdmulh (z18, z0))
+
+/*
+** qdmulh_single_awkward:
+** ...
+** sqdmulh ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (qdmulh_single_awkward, svint16x4_t, svint16_t,
+ z0_res = svqdmulh_single_s16_x4 (z1, z0),
+ z0_res = svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z0_z0_z15:
+** ...
+** sqdmulh {z0\.h - z3\.h}, {z0\.h - z3\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (qdmulh_single_z0_z0_z15, svint16x4_t, svint16_t,
+ z0 = svqdmulh_single_s16_x4 (z0, z15),
+ z0 = svqdmulh (z0, z15))
+
+/*
+** qdmulh_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** sqdmulh {z24\.h - z27\.h}, {z24\.h - z27\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z24_z16, svint16x4_t, svint16_t, z24,
+ svqdmulh_single_s16_x4 (z24, z16),
+ svqdmulh (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s32_x2.c
new file mode 100644
index 0000000..973aa4d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s32_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qdmulh_z0_z0_z4:
+** sqdmulh {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (qdmulh_z0_z0_z4, svint32x2_t, z0,
+ svqdmulh_s32_x2 (z0, z4),
+ svqdmulh (z0, z4))
+
+/*
+** qdmulh_z0_z4_z0:
+** sqdmulh {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (qdmulh_z0_z4_z0, svint32x2_t, z0,
+ svqdmulh_s32_x2 (z4, z0),
+ svqdmulh (z4, z0))
+
+/*
+** qdmulh_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, {z28\.s - z29\.s}
+** |
+** sqdmulh [^\n]+, {z28\.s - z29\.s}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (qdmulh_z0_z4_z28, svint32x2_t, z0,
+ svqdmulh_s32_x2 (z4, z28),
+ svqdmulh (z4, z28))
+
+/*
+** qdmulh_z18_z18_z4:
+** sqdmulh {z18\.s - z19\.s}, {z18\.s - z19\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (qdmulh_z18_z18_z4, svint32x2_t, z18,
+ svqdmulh_s32_x2 (z18, z4),
+ svqdmulh (z18, z4))
+
+/*
+** qdmulh_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, {z18\.s - z19\.s}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (qdmulh_z23_z23_z18, svint32x2_t, z23,
+ svqdmulh_s32_x2 (z23, z18),
+ svqdmulh (z23, z18))
+
+/*
+** qdmulh_z28_z28_z0:
+** sqdmulh {z28\.s - z29\.s}, {z28\.s - z29\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_XN (qdmulh_z28_z28_z0, svint32x2_t, z28,
+ svqdmulh_s32_x2 (z28, z0),
+ svqdmulh (z28, z0))
+
+/*
+** qdmulh_z0_z0_z18:
+** sqdmulh {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z18\.s - z19\.s}
+** ret
+*/
+TEST_XN (qdmulh_z0_z0_z18, svint32x2_t, z0,
+ svqdmulh_s32_x2 (z0, z18),
+ svqdmulh (z0, z18))
+
+/*
+** qdmulh_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z4\.s - z5\.s}, {z4\.s - z5\.s}, [^\n]+
+** |
+** sqdmulh {z4\.s - z5\.s}, {z4\.s - z5\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (qdmulh_z4_z4_z23, svint32x2_t, z4,
+ svqdmulh_s32_x2 (z4, z23),
+ svqdmulh (z4, z23))
+
+/*
+** qdmulh_single_z24_z24_z0:
+** sqdmulh {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z24_z0, svint32x2_t, svint32_t, z24,
+ svqdmulh_single_s32_x2 (z24, z0),
+ svqdmulh (z24, z0))
+
+/*
+** qdmulh_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** |
+** sqdmulh {z28\.s - z29\.s}, {z28\.s - z29\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z28_z0, svint32x2_t, svint32_t, z24,
+ svqdmulh_single_s32_x2 (z28, z0),
+ svqdmulh (z28, z0))
+
+/*
+** qdmulh_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** sqdmulh {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z1_z0, svint32x2_t, svint32_t, z24,
+ svqdmulh_single_s32_x2 (z1, z0),
+ svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z1_z24_z0:
+** sqdmulh {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z1_z24_z0, svint32x2_t, svint32_t, z1,
+ svqdmulh_single_s32_x2 (z24, z0),
+ svqdmulh (z24, z0))
+
+/*
+** qdmulh_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z1_z1_z0, svint32x2_t, svint32_t, z1,
+ svqdmulh_single_s32_x2 (z1, z0),
+ svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z18_z18_z0:
+** sqdmulh {z18\.s - z19\.s}, {z18\.s - z19\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z18_z18_z0, svint32x2_t, svint32_t, z18,
+ svqdmulh_single_s32_x2 (z18, z0),
+ svqdmulh (z18, z0))
+
+/*
+** qdmulh_single_awkward:
+** ...
+** sqdmulh ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (qdmulh_single_awkward, svint32x2_t, svint32_t,
+ z0_res = svqdmulh_single_s32_x2 (z1, z0),
+ z0_res = svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z0_z0_z15:
+** ...
+** sqdmulh {z0\.s - z1\.s}, {z0\.s - z1\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (qdmulh_single_z0_z0_z15, svint32x2_t, svint32_t,
+ z0 = svqdmulh_single_s32_x2 (z0, z15),
+ z0 = svqdmulh (z0, z15))
+
+/*
+** qdmulh_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** sqdmulh {z24\.s - z25\.s}, {z24\.s - z25\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z24_z16, svint32x2_t, svint32_t, z24,
+ svqdmulh_single_s32_x2 (z24, z16),
+ svqdmulh (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s32_x4.c
new file mode 100644
index 0000000..a40b925
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s32_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qdmulh_z0_z0_z4:
+** sqdmulh {z0\.s - z3\.s}, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (qdmulh_z0_z0_z4, svint32x4_t, z0,
+ svqdmulh_s32_x4 (z0, z4),
+ svqdmulh (z0, z4))
+
+/*
+** qdmulh_z0_z4_z0:
+** sqdmulh {z0\.s - z3\.s}, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (qdmulh_z0_z4_z0, svint32x4_t, z0,
+ svqdmulh_s32_x4 (z4, z0),
+ svqdmulh (z4, z0))
+
+/*
+** qdmulh_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, {z28\.s - z31\.s}
+** |
+** sqdmulh [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (qdmulh_z0_z4_z28, svint32x4_t, z0,
+ svqdmulh_s32_x4 (z4, z28),
+ svqdmulh (z4, z28))
+
+/*
+** qdmulh_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, {z4\.s - z7\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (qdmulh_z18_z18_z4, svint32x4_t, z18,
+ svqdmulh_s32_x4 (z18, z4),
+ svqdmulh (z18, z4))
+
+/*
+** qdmulh_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (qdmulh_z23_z23_z28, svint32x4_t, z23,
+ svqdmulh_s32_x4 (z23, z28),
+ svqdmulh (z23, z28))
+
+/*
+** qdmulh_z28_z28_z0:
+** sqdmulh {z28\.s - z31\.s}, {z28\.s - z31\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (qdmulh_z28_z28_z0, svint32x4_t, z28,
+ svqdmulh_s32_x4 (z28, z0),
+ svqdmulh (z28, z0))
+
+/*
+** qdmulh_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z0\.s - z3\.s}, {z0\.s - z3\.s}, [^\n]+
+** |
+** sqdmulh {z0\.s - z3\.s}, {z0\.s - z3\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (qdmulh_z0_z0_z18, svint32x4_t, z0,
+ svqdmulh_s32_x4 (z0, z18),
+ svqdmulh (z0, z18))
+
+/*
+** qdmulh_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z4\.s - z7\.s}, {z4\.s - z7\.s}, [^\n]+
+** |
+** sqdmulh {z4\.s - z7\.s}, {z4\.s - z7\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (qdmulh_z4_z4_z23, svint32x4_t, z4,
+ svqdmulh_s32_x4 (z4, z23),
+ svqdmulh (z4, z23))
+
+/*
+** qdmulh_single_z24_z24_z0:
+** sqdmulh {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z24_z0, svint32x4_t, svint32_t, z24,
+ svqdmulh_single_s32_x4 (z24, z0),
+ svqdmulh (z24, z0))
+
+/*
+** qdmulh_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** |
+** sqdmulh {z28\.s - z31\.s}, {z28\.s - z31\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z28_z0, svint32x4_t, svint32_t, z24,
+ svqdmulh_single_s32_x4 (z28, z0),
+ svqdmulh (z28, z0))
+
+/*
+** qdmulh_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z1_z0, svint32x4_t, svint32_t, z24,
+ svqdmulh_single_s32_x4 (z1, z0),
+ svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z1_z24_z0:
+** sqdmulh {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z1_z24_z0, svint32x4_t, svint32_t, z1,
+ svqdmulh_single_s32_x4 (z24, z0),
+ svqdmulh (z24, z0))
+
+/*
+** qdmulh_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z1_z1_z0, svint32x4_t, svint32_t, z1,
+ svqdmulh_single_s32_x4 (z1, z0),
+ svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z18_z18_z0, svint32x4_t, svint32_t, z18,
+ svqdmulh_single_s32_x4 (z18, z0),
+ svqdmulh (z18, z0))
+
+/*
+** qdmulh_single_awkward:
+** ...
+** sqdmulh ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (qdmulh_single_awkward, svint32x4_t, svint32_t,
+ z0_res = svqdmulh_single_s32_x4 (z1, z0),
+ z0_res = svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z0_z0_z15:
+** ...
+** sqdmulh {z0\.s - z3\.s}, {z0\.s - z3\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (qdmulh_single_z0_z0_z15, svint32x4_t, svint32_t,
+ z0 = svqdmulh_single_s32_x4 (z0, z15),
+ z0 = svqdmulh (z0, z15))
+
+/*
+** qdmulh_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** sqdmulh {z24\.s - z27\.s}, {z24\.s - z27\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z24_z16, svint32x4_t, svint32_t, z24,
+ svqdmulh_single_s32_x4 (z24, z16),
+ svqdmulh (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s64_x2.c
new file mode 100644
index 0000000..bb85007
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s64_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qdmulh_z0_z0_z4:
+** sqdmulh {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (qdmulh_z0_z0_z4, svint64x2_t, z0,
+ svqdmulh_s64_x2 (z0, z4),
+ svqdmulh (z0, z4))
+
+/*
+** qdmulh_z0_z4_z0:
+** sqdmulh {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (qdmulh_z0_z4_z0, svint64x2_t, z0,
+ svqdmulh_s64_x2 (z4, z0),
+ svqdmulh (z4, z0))
+
+/*
+** qdmulh_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, {z28\.d - z29\.d}
+** |
+** sqdmulh [^\n]+, {z28\.d - z29\.d}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (qdmulh_z0_z4_z28, svint64x2_t, z0,
+ svqdmulh_s64_x2 (z4, z28),
+ svqdmulh (z4, z28))
+
+/*
+** qdmulh_z18_z18_z4:
+** sqdmulh {z18\.d - z19\.d}, {z18\.d - z19\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (qdmulh_z18_z18_z4, svint64x2_t, z18,
+ svqdmulh_s64_x2 (z18, z4),
+ svqdmulh (z18, z4))
+
+/*
+** qdmulh_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, {z18\.d - z19\.d}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (qdmulh_z23_z23_z18, svint64x2_t, z23,
+ svqdmulh_s64_x2 (z23, z18),
+ svqdmulh (z23, z18))
+
+/*
+** qdmulh_z28_z28_z0:
+** sqdmulh {z28\.d - z29\.d}, {z28\.d - z29\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_XN (qdmulh_z28_z28_z0, svint64x2_t, z28,
+ svqdmulh_s64_x2 (z28, z0),
+ svqdmulh (z28, z0))
+
+/*
+** qdmulh_z0_z0_z18:
+** sqdmulh {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z18\.d - z19\.d}
+** ret
+*/
+TEST_XN (qdmulh_z0_z0_z18, svint64x2_t, z0,
+ svqdmulh_s64_x2 (z0, z18),
+ svqdmulh (z0, z18))
+
+/*
+** qdmulh_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z4\.d - z5\.d}, {z4\.d - z5\.d}, [^\n]+
+** |
+** sqdmulh {z4\.d - z5\.d}, {z4\.d - z5\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (qdmulh_z4_z4_z23, svint64x2_t, z4,
+ svqdmulh_s64_x2 (z4, z23),
+ svqdmulh (z4, z23))
+
+/*
+** qdmulh_single_z24_z24_z0:
+** sqdmulh {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z24_z0, svint64x2_t, svint64_t, z24,
+ svqdmulh_single_s64_x2 (z24, z0),
+ svqdmulh (z24, z0))
+
+/*
+** qdmulh_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** |
+** sqdmulh {z28\.d - z29\.d}, {z28\.d - z29\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z28_z0, svint64x2_t, svint64_t, z24,
+ svqdmulh_single_s64_x2 (z28, z0),
+ svqdmulh (z28, z0))
+
+/*
+** qdmulh_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** sqdmulh {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z1_z0, svint64x2_t, svint64_t, z24,
+ svqdmulh_single_s64_x2 (z1, z0),
+ svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z1_z24_z0:
+** sqdmulh {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z1_z24_z0, svint64x2_t, svint64_t, z1,
+ svqdmulh_single_s64_x2 (z24, z0),
+ svqdmulh (z24, z0))
+
+/*
+** qdmulh_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z1_z1_z0, svint64x2_t, svint64_t, z1,
+ svqdmulh_single_s64_x2 (z1, z0),
+ svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z18_z18_z0:
+** sqdmulh {z18\.d - z19\.d}, {z18\.d - z19\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z18_z18_z0, svint64x2_t, svint64_t, z18,
+ svqdmulh_single_s64_x2 (z18, z0),
+ svqdmulh (z18, z0))
+
+/*
+** qdmulh_single_awkward:
+** ...
+** sqdmulh ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (qdmulh_single_awkward, svint64x2_t, svint64_t,
+ z0_res = svqdmulh_single_s64_x2 (z1, z0),
+ z0_res = svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z0_z0_z15:
+** ...
+** sqdmulh {z0\.d - z1\.d}, {z0\.d - z1\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (qdmulh_single_z0_z0_z15, svint64x2_t, svint64_t,
+ z0 = svqdmulh_single_s64_x2 (z0, z15),
+ z0 = svqdmulh (z0, z15))
+
+/*
+** qdmulh_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** sqdmulh {z24\.d - z25\.d}, {z24\.d - z25\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z24_z16, svint64x2_t, svint64_t, z24,
+ svqdmulh_single_s64_x2 (z24, z16),
+ svqdmulh (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s64_x4.c
new file mode 100644
index 0000000..00dbc73
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s64_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qdmulh_z0_z0_z4:
+** sqdmulh {z0\.d - z3\.d}, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (qdmulh_z0_z0_z4, svint64x4_t, z0,
+ svqdmulh_s64_x4 (z0, z4),
+ svqdmulh (z0, z4))
+
+/*
+** qdmulh_z0_z4_z0:
+** sqdmulh {z0\.d - z3\.d}, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (qdmulh_z0_z4_z0, svint64x4_t, z0,
+ svqdmulh_s64_x4 (z4, z0),
+ svqdmulh (z4, z0))
+
+/*
+** qdmulh_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, {z28\.d - z31\.d}
+** |
+** sqdmulh [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (qdmulh_z0_z4_z28, svint64x4_t, z0,
+ svqdmulh_s64_x4 (z4, z28),
+ svqdmulh (z4, z28))
+
+/*
+** qdmulh_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, {z4\.d - z7\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (qdmulh_z18_z18_z4, svint64x4_t, z18,
+ svqdmulh_s64_x4 (z18, z4),
+ svqdmulh (z18, z4))
+
+/*
+** qdmulh_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (qdmulh_z23_z23_z28, svint64x4_t, z23,
+ svqdmulh_s64_x4 (z23, z28),
+ svqdmulh (z23, z28))
+
+/*
+** qdmulh_z28_z28_z0:
+** sqdmulh {z28\.d - z31\.d}, {z28\.d - z31\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (qdmulh_z28_z28_z0, svint64x4_t, z28,
+ svqdmulh_s64_x4 (z28, z0),
+ svqdmulh (z28, z0))
+
+/*
+** qdmulh_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z0\.d - z3\.d}, {z0\.d - z3\.d}, [^\n]+
+** |
+** sqdmulh {z0\.d - z3\.d}, {z0\.d - z3\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (qdmulh_z0_z0_z18, svint64x4_t, z0,
+ svqdmulh_s64_x4 (z0, z18),
+ svqdmulh (z0, z18))
+
+/*
+** qdmulh_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z4\.d - z7\.d}, {z4\.d - z7\.d}, [^\n]+
+** |
+** sqdmulh {z4\.d - z7\.d}, {z4\.d - z7\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (qdmulh_z4_z4_z23, svint64x4_t, z4,
+ svqdmulh_s64_x4 (z4, z23),
+ svqdmulh (z4, z23))
+
+/*
+** qdmulh_single_z24_z24_z0:
+** sqdmulh {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z24_z0, svint64x4_t, svint64_t, z24,
+ svqdmulh_single_s64_x4 (z24, z0),
+ svqdmulh (z24, z0))
+
+/*
+** qdmulh_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** |
+** sqdmulh {z28\.d - z31\.d}, {z28\.d - z31\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z28_z0, svint64x4_t, svint64_t, z24,
+ svqdmulh_single_s64_x4 (z28, z0),
+ svqdmulh (z28, z0))
+
+/*
+** qdmulh_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z1_z0, svint64x4_t, svint64_t, z24,
+ svqdmulh_single_s64_x4 (z1, z0),
+ svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z1_z24_z0:
+** sqdmulh {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z1_z24_z0, svint64x4_t, svint64_t, z1,
+ svqdmulh_single_s64_x4 (z24, z0),
+ svqdmulh (z24, z0))
+
+/*
+** qdmulh_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z1_z1_z0, svint64x4_t, svint64_t, z1,
+ svqdmulh_single_s64_x4 (z1, z0),
+ svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z18_z18_z0, svint64x4_t, svint64_t, z18,
+ svqdmulh_single_s64_x4 (z18, z0),
+ svqdmulh (z18, z0))
+
+/*
+** qdmulh_single_awkward:
+** ...
+** sqdmulh ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (qdmulh_single_awkward, svint64x4_t, svint64_t,
+ z0_res = svqdmulh_single_s64_x4 (z1, z0),
+ z0_res = svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z0_z0_z15:
+** ...
+** sqdmulh {z0\.d - z3\.d}, {z0\.d - z3\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (qdmulh_single_z0_z0_z15, svint64x4_t, svint64_t,
+ z0 = svqdmulh_single_s64_x4 (z0, z15),
+ z0 = svqdmulh (z0, z15))
+
+/*
+** qdmulh_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** sqdmulh {z24\.d - z27\.d}, {z24\.d - z27\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z24_z16, svint64x4_t, svint64_t, z24,
+ svqdmulh_single_s64_x4 (z24, z16),
+ svqdmulh (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s8_x2.c
new file mode 100644
index 0000000..e033534
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s8_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qdmulh_z0_z0_z4:
+** sqdmulh {z0\.b - z1\.b}, {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (qdmulh_z0_z0_z4, svint8x2_t, z0,
+ svqdmulh_s8_x2 (z0, z4),
+ svqdmulh (z0, z4))
+
+/*
+** qdmulh_z0_z4_z0:
+** sqdmulh {z0\.b - z1\.b}, {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (qdmulh_z0_z4_z0, svint8x2_t, z0,
+ svqdmulh_s8_x2 (z4, z0),
+ svqdmulh (z4, z0))
+
+/*
+** qdmulh_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, {z28\.b - z29\.b}
+** |
+** sqdmulh [^\n]+, {z28\.b - z29\.b}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (qdmulh_z0_z4_z28, svint8x2_t, z0,
+ svqdmulh_s8_x2 (z4, z28),
+ svqdmulh (z4, z28))
+
+/*
+** qdmulh_z18_z18_z4:
+** sqdmulh {z18\.b - z19\.b}, {z18\.b - z19\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (qdmulh_z18_z18_z4, svint8x2_t, z18,
+ svqdmulh_s8_x2 (z18, z4),
+ svqdmulh (z18, z4))
+
+/*
+** qdmulh_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, {z18\.b - z19\.b}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (qdmulh_z23_z23_z18, svint8x2_t, z23,
+ svqdmulh_s8_x2 (z23, z18),
+ svqdmulh (z23, z18))
+
+/*
+** qdmulh_z28_z28_z0:
+** sqdmulh {z28\.b - z29\.b}, {z28\.b - z29\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_XN (qdmulh_z28_z28_z0, svint8x2_t, z28,
+ svqdmulh_s8_x2 (z28, z0),
+ svqdmulh (z28, z0))
+
+/*
+** qdmulh_z0_z0_z18:
+** sqdmulh {z0\.b - z1\.b}, {z0\.b - z1\.b}, {z18\.b - z19\.b}
+** ret
+*/
+TEST_XN (qdmulh_z0_z0_z18, svint8x2_t, z0,
+ svqdmulh_s8_x2 (z0, z18),
+ svqdmulh (z0, z18))
+
+/*
+** qdmulh_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z4\.b - z5\.b}, {z4\.b - z5\.b}, [^\n]+
+** |
+** sqdmulh {z4\.b - z5\.b}, {z4\.b - z5\.b}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (qdmulh_z4_z4_z23, svint8x2_t, z4,
+ svqdmulh_s8_x2 (z4, z23),
+ svqdmulh (z4, z23))
+
+/*
+** qdmulh_single_z24_z24_z0:
+** sqdmulh {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z24_z0, svint8x2_t, svint8_t, z24,
+ svqdmulh_single_s8_x2 (z24, z0),
+ svqdmulh (z24, z0))
+
+/*
+** qdmulh_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** |
+** sqdmulh {z28\.b - z29\.b}, {z28\.b - z29\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z28_z0, svint8x2_t, svint8_t, z24,
+ svqdmulh_single_s8_x2 (z28, z0),
+ svqdmulh (z28, z0))
+
+/*
+** qdmulh_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** sqdmulh {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z1_z0, svint8x2_t, svint8_t, z24,
+ svqdmulh_single_s8_x2 (z1, z0),
+ svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z1_z24_z0:
+** sqdmulh {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z1_z24_z0, svint8x2_t, svint8_t, z1,
+ svqdmulh_single_s8_x2 (z24, z0),
+ svqdmulh (z24, z0))
+
+/*
+** qdmulh_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh ({z[0-9]+\.b - z[0-9]+\.b}), \1, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z1_z1_z0, svint8x2_t, svint8_t, z1,
+ svqdmulh_single_s8_x2 (z1, z0),
+ svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z18_z18_z0:
+** sqdmulh {z18\.b - z19\.b}, {z18\.b - z19\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z18_z18_z0, svint8x2_t, svint8_t, z18,
+ svqdmulh_single_s8_x2 (z18, z0),
+ svqdmulh (z18, z0))
+
+/*
+** qdmulh_single_awkward:
+** ...
+** sqdmulh ({z[0-9]+\.b - z[0-9]+\.b}), \1, z[0-9]+\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (qdmulh_single_awkward, svint8x2_t, svint8_t,
+ z0_res = svqdmulh_single_s8_x2 (z1, z0),
+ z0_res = svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z0_z0_z15:
+** ...
+** sqdmulh {z0\.b - z1\.b}, {z0\.b - z1\.b}, z15\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (qdmulh_single_z0_z0_z15, svint8x2_t, svint8_t,
+ z0 = svqdmulh_single_s8_x2 (z0, z15),
+ z0 = svqdmulh (z0, z15))
+
+/*
+** qdmulh_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** sqdmulh {z24\.b - z25\.b}, {z24\.b - z25\.b}, \1\.b
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z24_z16, svint8x2_t, svint8_t, z24,
+ svqdmulh_single_s8_x2 (z24, z16),
+ svqdmulh (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s8_x4.c
new file mode 100644
index 0000000..fcdae50
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qdmulh_s8_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qdmulh_z0_z0_z4:
+** sqdmulh {z0\.b - z3\.b}, {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_XN (qdmulh_z0_z0_z4, svint8x4_t, z0,
+ svqdmulh_s8_x4 (z0, z4),
+ svqdmulh (z0, z4))
+
+/*
+** qdmulh_z0_z4_z0:
+** sqdmulh {z0\.b - z3\.b}, {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_XN (qdmulh_z0_z4_z0, svint8x4_t, z0,
+ svqdmulh_s8_x4 (z4, z0),
+ svqdmulh (z4, z0))
+
+/*
+** qdmulh_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, {z28\.b - z31\.b}
+** |
+** sqdmulh [^\n]+, {z28\.b - z31\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (qdmulh_z0_z4_z28, svint8x4_t, z0,
+ svqdmulh_s8_x4 (z4, z28),
+ svqdmulh (z4, z28))
+
+/*
+** qdmulh_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, {z4\.b - z7\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (qdmulh_z18_z18_z4, svint8x4_t, z18,
+ svqdmulh_s8_x4 (z18, z4),
+ svqdmulh (z18, z4))
+
+/*
+** qdmulh_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, {z28\.b - z31\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (qdmulh_z23_z23_z28, svint8x4_t, z23,
+ svqdmulh_s8_x4 (z23, z28),
+ svqdmulh (z23, z28))
+
+/*
+** qdmulh_z28_z28_z0:
+** sqdmulh {z28\.b - z31\.b}, {z28\.b - z31\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_XN (qdmulh_z28_z28_z0, svint8x4_t, z28,
+ svqdmulh_s8_x4 (z28, z0),
+ svqdmulh (z28, z0))
+
+/*
+** qdmulh_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z0\.b - z3\.b}, {z0\.b - z3\.b}, [^\n]+
+** |
+** sqdmulh {z0\.b - z3\.b}, {z0\.b - z3\.b}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (qdmulh_z0_z0_z18, svint8x4_t, z0,
+ svqdmulh_s8_x4 (z0, z18),
+ svqdmulh (z0, z18))
+
+/*
+** qdmulh_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z4\.b - z7\.b}, {z4\.b - z7\.b}, [^\n]+
+** |
+** sqdmulh {z4\.b - z7\.b}, {z4\.b - z7\.b}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (qdmulh_z4_z4_z23, svint8x4_t, z4,
+ svqdmulh_s8_x4 (z4, z23),
+ svqdmulh (z4, z23))
+
+/*
+** qdmulh_single_z24_z24_z0:
+** sqdmulh {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z24_z0, svint8x4_t, svint8_t, z24,
+ svqdmulh_single_s8_x4 (z24, z0),
+ svqdmulh (z24, z0))
+
+/*
+** qdmulh_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** |
+** sqdmulh {z28\.b - z31\.b}, {z28\.b - z31\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z28_z0, svint8x4_t, svint8_t, z24,
+ svqdmulh_single_s8_x4 (z28, z0),
+ svqdmulh (z28, z0))
+
+/*
+** qdmulh_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z1_z0, svint8x4_t, svint8_t, z24,
+ svqdmulh_single_s8_x4 (z1, z0),
+ svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z1_z24_z0:
+** sqdmulh {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z1_z24_z0, svint8x4_t, svint8_t, z1,
+ svqdmulh_single_s8_x4 (z24, z0),
+ svqdmulh (z24, z0))
+
+/*
+** qdmulh_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh ({z[0-9]+\.b - z[0-9]+\.b}), \1, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z1_z1_z0, svint8x4_t, svint8_t, z1,
+ svqdmulh_single_s8_x4 (z1, z0),
+ svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqdmulh [^\n]+, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z18_z18_z0, svint8x4_t, svint8_t, z18,
+ svqdmulh_single_s8_x4 (z18, z0),
+ svqdmulh (z18, z0))
+
+/*
+** qdmulh_single_awkward:
+** ...
+** sqdmulh ({z[0-9]+\.b - z[0-9]+\.b}), \1, z[0-9]+\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (qdmulh_single_awkward, svint8x4_t, svint8_t,
+ z0_res = svqdmulh_single_s8_x4 (z1, z0),
+ z0_res = svqdmulh (z1, z0))
+
+/*
+** qdmulh_single_z0_z0_z15:
+** ...
+** sqdmulh {z0\.b - z3\.b}, {z0\.b - z3\.b}, z15\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (qdmulh_single_z0_z0_z15, svint8x4_t, svint8_t,
+ z0 = svqdmulh_single_s8_x4 (z0, z15),
+ z0 = svqdmulh (z0, z15))
+
+/*
+** qdmulh_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** sqdmulh {z24\.b - z27\.b}, {z24\.b - z27\.b}, \1\.b
+** ret
+*/
+TEST_XN_SINGLE (qdmulh_single_z24_z24_z16, svint8x4_t, svint8_t, z24,
+ svqdmulh_single_s8_x4 (z24, z16),
+ svqdmulh (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_s16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_s16_x2.c
new file mode 100644
index 0000000..00377b7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_s16_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qrshr_z0_z0_1:
+** sqrshr z0\.h, {z0\.s - z1\.s}, #1
+** ret
+*/
+TEST_X2_NARROW (qrshr_z0_z0_1, svint32x2_t, svint16_t,
+ z0_res = svqrshr_n_s16_s32_x2 (z0, 1),
+ z0_res = svqrshr_s16 (z0, 1))
+
+/*
+** qrshr_z0_z6_16:
+** sqrshr z0\.h, {z6\.s - z7\.s}, #16
+** ret
+*/
+TEST_X2_NARROW (qrshr_z0_z6_16, svint32x2_t, svint16_t,
+ z0_res = svqrshr_n_s16_s32_x2 (z6, 16),
+ z0_res = svqrshr_s16 (z6, 16))
+
+/*
+** qrshr_z0_z29_13:
+** mov [^\n]+
+** mov [^\n]+
+** sqrshr z0\.h, [^\n]+, #13
+** ret
+*/
+TEST_X2_NARROW (qrshr_z0_z29_13, svint32x2_t, svint16_t,
+ z0_res = svqrshr_n_s16_s32_x2 (z29, 13),
+ z0_res = svqrshr_s16 (z29, 13))
+
+/*
+** qrshr_z5_z0_11:
+** sqrshr z5\.h, {z0\.s - z1\.s}, #11
+** ret
+*/
+TEST_X2_NARROW (qrshr_z5_z0_11, svint32x2_t, svint16_t,
+ z5 = svqrshr_n_s16_s32_x2 (z0, 11),
+ z5 = svqrshr_s16 (z0, 11))
+
+/*
+** qrshr_z22_z16_15:
+** sqrshr z22\.h, {z16\.s - z17\.s}, #15
+** ret
+*/
+TEST_X2_NARROW (qrshr_z22_z16_15, svint32x2_t, svint16_t,
+ z22 = svqrshr_n_s16_s32_x2 (z16, 15),
+ z22 = svqrshr_s16 (z16, 15))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_s16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_s16_x4.c
new file mode 100644
index 0000000..42b083c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_s16_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qrshr_z0_z0_1:
+** sqrshr z0\.h, {z0\.d - z3\.d}, #1
+** ret
+*/
+TEST_X4_NARROW (qrshr_z0_z0_1, svint64x4_t, svint16_t,
+ z0_res = svqrshr_n_s16_s64_x4 (z0, 1),
+ z0_res = svqrshr_s16 (z0, 1))
+
+/*
+** qrshr_z0_z4_64:
+** sqrshr z0\.h, {z4\.d - z7\.d}, #64
+** ret
+*/
+TEST_X4_NARROW (qrshr_z0_z4_64, svint64x4_t, svint16_t,
+ z0_res = svqrshr_n_s16_s64_x4 (z4, 64),
+ z0_res = svqrshr_s16 (z4, 64))
+
+/*
+** qrshr_z0_z21_33:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqrshr z0\.h, [^\n]+, #33
+** ret
+*/
+TEST_X4_NARROW (qrshr_z0_z21_33, svint64x4_t, svint16_t,
+ z0_res = svqrshr_n_s16_s64_x4 (z21, 33),
+ z0_res = svqrshr_s16 (z21, 33))
+
+/*
+** qrshr_z25_z26_12:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqrshr z25\.h, [^\n]+, #12
+** ret
+*/
+TEST_X4_NARROW (qrshr_z25_z26_12, svint64x4_t, svint16_t,
+ z25 = svqrshr_n_s16_s64_x4 (z26, 12),
+ z25 = svqrshr_s16 (z26, 12))
+
+/*
+** qrshr_z25_z0_32:
+** sqrshr z25\.h, {z0\.d - z3\.d}, #32
+** ret
+*/
+TEST_X4_NARROW (qrshr_z25_z0_32, svint64x4_t, svint16_t,
+ z25 = svqrshr_n_s16_s64_x4 (z0, 32),
+ z25 = svqrshr_s16 (z0, 32))
+
+/*
+** qrshr_z22_z16_63:
+** sqrshr z22\.h, {z16\.d - z19\.d}, #63
+** ret
+*/
+TEST_X4_NARROW (qrshr_z22_z16_63, svint64x4_t, svint16_t,
+ z22_res = svqrshr_n_s16_s64_x4 (z16, 63),
+ z22_res = svqrshr_s16 (z16, 63))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_s8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_s8_x4.c
new file mode 100644
index 0000000..f01ace7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_s8_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qrshr_z0_z0_1:
+** sqrshr z0\.b, {z0\.s - z3\.s}, #1
+** ret
+*/
+TEST_X4_NARROW (qrshr_z0_z0_1, svint32x4_t, svint8_t,
+ z0_res = svqrshr_n_s8_s32_x4 (z0, 1),
+ z0_res = svqrshr_s8 (z0, 1))
+
+/*
+** qrshr_z0_z4_32:
+** sqrshr z0\.b, {z4\.s - z7\.s}, #32
+** ret
+*/
+TEST_X4_NARROW (qrshr_z0_z4_32, svint32x4_t, svint8_t,
+ z0_res = svqrshr_n_s8_s32_x4 (z4, 32),
+ z0_res = svqrshr_s8 (z4, 32))
+
+/*
+** qrshr_z0_z21_2:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqrshr z0\.b, [^\n]+, #2
+** ret
+*/
+TEST_X4_NARROW (qrshr_z0_z21_2, svint32x4_t, svint8_t,
+ z0_res = svqrshr_n_s8_s32_x4 (z21, 2),
+ z0_res = svqrshr_s8 (z21, 2))
+
+/*
+** qrshr_z25_z26_12:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqrshr z25\.b, [^\n]+, #12
+** ret
+*/
+TEST_X4_NARROW (qrshr_z25_z26_12, svint32x4_t, svint8_t,
+ z25 = svqrshr_n_s8_s32_x4 (z26, 12),
+ z25 = svqrshr_s8 (z26, 12))
+
+/*
+** qrshr_z25_z0_16:
+** sqrshr z25\.b, {z0\.s - z3\.s}, #16
+** ret
+*/
+TEST_X4_NARROW (qrshr_z25_z0_16, svint32x4_t, svint8_t,
+ z25 = svqrshr_n_s8_s32_x4 (z0, 16),
+ z25 = svqrshr_s8 (z0, 16))
+
+/*
+** qrshr_z22_z16_31:
+** sqrshr z22\.b, {z16\.s - z19\.s}, #31
+** ret
+*/
+TEST_X4_NARROW (qrshr_z22_z16_31, svint32x4_t, svint8_t,
+ z22_res = svqrshr_n_s8_s32_x4 (z16, 31),
+ z22_res = svqrshr_s8 (z16, 31))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_u16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_u16_x2.c
new file mode 100644
index 0000000..aa035f7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_u16_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qrshr_z0_z0_1:
+** uqrshr z0\.h, {z0\.s - z1\.s}, #1
+** ret
+*/
+TEST_X2_NARROW (qrshr_z0_z0_1, svuint32x2_t, svuint16_t,
+ z0_res = svqrshr_n_u16_u32_x2 (z0, 1),
+ z0_res = svqrshr_u16 (z0, 1))
+
+/*
+** qrshr_z0_z6_16:
+** uqrshr z0\.h, {z6\.s - z7\.s}, #16
+** ret
+*/
+TEST_X2_NARROW (qrshr_z0_z6_16, svuint32x2_t, svuint16_t,
+ z0_res = svqrshr_n_u16_u32_x2 (z6, 16),
+ z0_res = svqrshr_u16 (z6, 16))
+
+/*
+** qrshr_z0_z29_13:
+** mov [^\n]+
+** mov [^\n]+
+** uqrshr z0\.h, [^\n]+, #13
+** ret
+*/
+TEST_X2_NARROW (qrshr_z0_z29_13, svuint32x2_t, svuint16_t,
+ z0_res = svqrshr_n_u16_u32_x2 (z29, 13),
+ z0_res = svqrshr_u16 (z29, 13))
+
+/*
+** qrshr_z5_z0_11:
+** uqrshr z5\.h, {z0\.s - z1\.s}, #11
+** ret
+*/
+TEST_X2_NARROW (qrshr_z5_z0_11, svuint32x2_t, svuint16_t,
+ z5 = svqrshr_n_u16_u32_x2 (z0, 11),
+ z5 = svqrshr_u16 (z0, 11))
+
+/*
+** qrshr_z22_z16_15:
+** uqrshr z22\.h, {z16\.s - z17\.s}, #15
+** ret
+*/
+TEST_X2_NARROW (qrshr_z22_z16_15, svuint32x2_t, svuint16_t,
+ z22 = svqrshr_n_u16_u32_x2 (z16, 15),
+ z22 = svqrshr_u16 (z16, 15))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_u16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_u16_x4.c
new file mode 100644
index 0000000..0065a41
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_u16_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qrshr_z0_z0_1:
+** uqrshr z0\.h, {z0\.d - z3\.d}, #1
+** ret
+*/
+TEST_X4_NARROW (qrshr_z0_z0_1, svuint64x4_t, svuint16_t,
+ z0_res = svqrshr_n_u16_u64_x4 (z0, 1),
+ z0_res = svqrshr_u16 (z0, 1))
+
+/*
+** qrshr_z0_z4_64:
+** uqrshr z0\.h, {z4\.d - z7\.d}, #64
+** ret
+*/
+TEST_X4_NARROW (qrshr_z0_z4_64, svuint64x4_t, svuint16_t,
+ z0_res = svqrshr_n_u16_u64_x4 (z4, 64),
+ z0_res = svqrshr_u16 (z4, 64))
+
+/*
+** qrshr_z0_z21_33:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uqrshr z0\.h, [^\n]+, #33
+** ret
+*/
+TEST_X4_NARROW (qrshr_z0_z21_33, svuint64x4_t, svuint16_t,
+ z0_res = svqrshr_n_u16_u64_x4 (z21, 33),
+ z0_res = svqrshr_u16 (z21, 33))
+
+/*
+** qrshr_z25_z26_12:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uqrshr z25\.h, [^\n]+, #12
+** ret
+*/
+TEST_X4_NARROW (qrshr_z25_z26_12, svuint64x4_t, svuint16_t,
+ z25 = svqrshr_n_u16_u64_x4 (z26, 12),
+ z25 = svqrshr_u16 (z26, 12))
+
+/*
+** qrshr_z25_z0_32:
+** uqrshr z25\.h, {z0\.d - z3\.d}, #32
+** ret
+*/
+TEST_X4_NARROW (qrshr_z25_z0_32, svuint64x4_t, svuint16_t,
+ z25 = svqrshr_n_u16_u64_x4 (z0, 32),
+ z25 = svqrshr_u16 (z0, 32))
+
+/*
+** qrshr_z22_z16_63:
+** uqrshr z22\.h, {z16\.d - z19\.d}, #63
+** ret
+*/
+TEST_X4_NARROW (qrshr_z22_z16_63, svuint64x4_t, svuint16_t,
+ z22_res = svqrshr_n_u16_u64_x4 (z16, 63),
+ z22_res = svqrshr_u16 (z16, 63))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_u8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_u8_x4.c
new file mode 100644
index 0000000..6b73b3a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshr_u8_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qrshr_z0_z0_1:
+** uqrshr z0\.b, {z0\.s - z3\.s}, #1
+** ret
+*/
+TEST_X4_NARROW (qrshr_z0_z0_1, svuint32x4_t, svuint8_t,
+ z0_res = svqrshr_n_u8_u32_x4 (z0, 1),
+ z0_res = svqrshr_u8 (z0, 1))
+
+/*
+** qrshr_z0_z4_32:
+** uqrshr z0\.b, {z4\.s - z7\.s}, #32
+** ret
+*/
+TEST_X4_NARROW (qrshr_z0_z4_32, svuint32x4_t, svuint8_t,
+ z0_res = svqrshr_n_u8_u32_x4 (z4, 32),
+ z0_res = svqrshr_u8 (z4, 32))
+
+/*
+** qrshr_z0_z21_2:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uqrshr z0\.b, [^\n]+, #2
+** ret
+*/
+TEST_X4_NARROW (qrshr_z0_z21_2, svuint32x4_t, svuint8_t,
+ z0_res = svqrshr_n_u8_u32_x4 (z21, 2),
+ z0_res = svqrshr_u8 (z21, 2))
+
+/*
+** qrshr_z25_z26_12:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uqrshr z25\.b, [^\n]+, #12
+** ret
+*/
+TEST_X4_NARROW (qrshr_z25_z26_12, svuint32x4_t, svuint8_t,
+ z25 = svqrshr_n_u8_u32_x4 (z26, 12),
+ z25 = svqrshr_u8 (z26, 12))
+
+/*
+** qrshr_z25_z0_16:
+** uqrshr z25\.b, {z0\.s - z3\.s}, #16
+** ret
+*/
+TEST_X4_NARROW (qrshr_z25_z0_16, svuint32x4_t, svuint8_t,
+ z25 = svqrshr_n_u8_u32_x4 (z0, 16),
+ z25 = svqrshr_u8 (z0, 16))
+
+/*
+** qrshr_z22_z16_31:
+** uqrshr z22\.b, {z16\.s - z19\.s}, #31
+** ret
+*/
+TEST_X4_NARROW (qrshr_z22_z16_31, svuint32x4_t, svuint8_t,
+ z22_res = svqrshr_n_u8_u32_x4 (z16, 31),
+ z22_res = svqrshr_u8 (z16, 31))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_s16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_s16_x2.c
new file mode 100644
index 0000000..208b7e4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_s16_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qrshrn_z0_z0_1:
+** sqrshrn z0\.h, {z0\.s - z1\.s}, #1
+** ret
+*/
+TEST_X2_NARROW (qrshrn_z0_z0_1, svint32x2_t, svint16_t,
+ z0_res = svqrshrn_n_s16_s32_x2 (z0, 1),
+ z0_res = svqrshrn_s16 (z0, 1))
+
+/*
+** qrshrn_z0_z6_16:
+** sqrshrn z0\.h, {z6\.s - z7\.s}, #16
+** ret
+*/
+TEST_X2_NARROW (qrshrn_z0_z6_16, svint32x2_t, svint16_t,
+ z0_res = svqrshrn_n_s16_s32_x2 (z6, 16),
+ z0_res = svqrshrn_s16 (z6, 16))
+
+/*
+** qrshrn_z0_z29_13:
+** mov [^\n]+
+** mov [^\n]+
+** sqrshrn z0\.h, [^\n]+, #13
+** ret
+*/
+TEST_X2_NARROW (qrshrn_z0_z29_13, svint32x2_t, svint16_t,
+ z0_res = svqrshrn_n_s16_s32_x2 (z29, 13),
+ z0_res = svqrshrn_s16 (z29, 13))
+
+/*
+** qrshrn_z5_z0_11:
+** sqrshrn z5\.h, {z0\.s - z1\.s}, #11
+** ret
+*/
+TEST_X2_NARROW (qrshrn_z5_z0_11, svint32x2_t, svint16_t,
+ z5 = svqrshrn_n_s16_s32_x2 (z0, 11),
+ z5 = svqrshrn_s16 (z0, 11))
+
+/*
+** qrshrn_z22_z16_15:
+** sqrshrn z22\.h, {z16\.s - z17\.s}, #15
+** ret
+*/
+TEST_X2_NARROW (qrshrn_z22_z16_15, svint32x2_t, svint16_t,
+ z22 = svqrshrn_n_s16_s32_x2 (z16, 15),
+ z22 = svqrshrn_s16 (z16, 15))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_s16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_s16_x4.c
new file mode 100644
index 0000000..c3f84ce
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_s16_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qrshrn_z0_z0_1:
+** sqrshrn z0\.h, {z0\.d - z3\.d}, #1
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z0_z0_1, svint64x4_t, svint16_t,
+ z0_res = svqrshrn_n_s16_s64_x4 (z0, 1),
+ z0_res = svqrshrn_s16 (z0, 1))
+
+/*
+** qrshrn_z0_z4_64:
+** sqrshrn z0\.h, {z4\.d - z7\.d}, #64
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z0_z4_64, svint64x4_t, svint16_t,
+ z0_res = svqrshrn_n_s16_s64_x4 (z4, 64),
+ z0_res = svqrshrn_s16 (z4, 64))
+
+/*
+** qrshrn_z0_z21_33:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqrshrn z0\.h, [^\n]+, #33
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z0_z21_33, svint64x4_t, svint16_t,
+ z0_res = svqrshrn_n_s16_s64_x4 (z21, 33),
+ z0_res = svqrshrn_s16 (z21, 33))
+
+/*
+** qrshrn_z25_z26_12:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqrshrn z25\.h, [^\n]+, #12
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z25_z26_12, svint64x4_t, svint16_t,
+ z25 = svqrshrn_n_s16_s64_x4 (z26, 12),
+ z25 = svqrshrn_s16 (z26, 12))
+
+/*
+** qrshrn_z25_z0_32:
+** sqrshrn z25\.h, {z0\.d - z3\.d}, #32
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z25_z0_32, svint64x4_t, svint16_t,
+ z25 = svqrshrn_n_s16_s64_x4 (z0, 32),
+ z25 = svqrshrn_s16 (z0, 32))
+
+/*
+** qrshrn_z22_z16_63:
+** sqrshrn z22\.h, {z16\.d - z19\.d}, #63
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z22_z16_63, svint64x4_t, svint16_t,
+ z22_res = svqrshrn_n_s16_s64_x4 (z16, 63),
+ z22_res = svqrshrn_s16 (z16, 63))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_s8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_s8_x4.c
new file mode 100644
index 0000000..c2fde52
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_s8_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qrshrn_z0_z0_1:
+** sqrshrn z0\.b, {z0\.s - z3\.s}, #1
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z0_z0_1, svint32x4_t, svint8_t,
+ z0_res = svqrshrn_n_s8_s32_x4 (z0, 1),
+ z0_res = svqrshrn_s8 (z0, 1))
+
+/*
+** qrshrn_z0_z4_32:
+** sqrshrn z0\.b, {z4\.s - z7\.s}, #32
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z0_z4_32, svint32x4_t, svint8_t,
+ z0_res = svqrshrn_n_s8_s32_x4 (z4, 32),
+ z0_res = svqrshrn_s8 (z4, 32))
+
+/*
+** qrshrn_z0_z21_2:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqrshrn z0\.b, [^\n]+, #2
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z0_z21_2, svint32x4_t, svint8_t,
+ z0_res = svqrshrn_n_s8_s32_x4 (z21, 2),
+ z0_res = svqrshrn_s8 (z21, 2))
+
+/*
+** qrshrn_z25_z26_12:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqrshrn z25\.b, [^\n]+, #12
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z25_z26_12, svint32x4_t, svint8_t,
+ z25 = svqrshrn_n_s8_s32_x4 (z26, 12),
+ z25 = svqrshrn_s8 (z26, 12))
+
+/*
+** qrshrn_z25_z0_16:
+** sqrshrn z25\.b, {z0\.s - z3\.s}, #16
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z25_z0_16, svint32x4_t, svint8_t,
+ z25 = svqrshrn_n_s8_s32_x4 (z0, 16),
+ z25 = svqrshrn_s8 (z0, 16))
+
+/*
+** qrshrn_z22_z16_31:
+** sqrshrn z22\.b, {z16\.s - z19\.s}, #31
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z22_z16_31, svint32x4_t, svint8_t,
+ z22_res = svqrshrn_n_s8_s32_x4 (z16, 31),
+ z22_res = svqrshrn_s8 (z16, 31))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_u16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_u16_x2.c
new file mode 100644
index 0000000..66029c6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_u16_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qrshrn_z0_z0_1:
+** uqrshrn z0\.h, {z0\.s - z1\.s}, #1
+** ret
+*/
+TEST_X2_NARROW (qrshrn_z0_z0_1, svuint32x2_t, svuint16_t,
+ z0_res = svqrshrn_n_u16_u32_x2 (z0, 1),
+ z0_res = svqrshrn_u16 (z0, 1))
+
+/*
+** qrshrn_z0_z6_16:
+** uqrshrn z0\.h, {z6\.s - z7\.s}, #16
+** ret
+*/
+TEST_X2_NARROW (qrshrn_z0_z6_16, svuint32x2_t, svuint16_t,
+ z0_res = svqrshrn_n_u16_u32_x2 (z6, 16),
+ z0_res = svqrshrn_u16 (z6, 16))
+
+/*
+** qrshrn_z0_z29_13:
+** mov [^\n]+
+** mov [^\n]+
+** uqrshrn z0\.h, [^\n]+, #13
+** ret
+*/
+TEST_X2_NARROW (qrshrn_z0_z29_13, svuint32x2_t, svuint16_t,
+ z0_res = svqrshrn_n_u16_u32_x2 (z29, 13),
+ z0_res = svqrshrn_u16 (z29, 13))
+
+/*
+** qrshrn_z5_z0_11:
+** uqrshrn z5\.h, {z0\.s - z1\.s}, #11
+** ret
+*/
+TEST_X2_NARROW (qrshrn_z5_z0_11, svuint32x2_t, svuint16_t,
+ z5 = svqrshrn_n_u16_u32_x2 (z0, 11),
+ z5 = svqrshrn_u16 (z0, 11))
+
+/*
+** qrshrn_z22_z16_15:
+** uqrshrn z22\.h, {z16\.s - z17\.s}, #15
+** ret
+*/
+TEST_X2_NARROW (qrshrn_z22_z16_15, svuint32x2_t, svuint16_t,
+ z22 = svqrshrn_n_u16_u32_x2 (z16, 15),
+ z22 = svqrshrn_u16 (z16, 15))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_u16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_u16_x4.c
new file mode 100644
index 0000000..6680a90
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_u16_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qrshrn_z0_z0_1:
+** uqrshrn z0\.h, {z0\.d - z3\.d}, #1
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z0_z0_1, svuint64x4_t, svuint16_t,
+ z0_res = svqrshrn_n_u16_u64_x4 (z0, 1),
+ z0_res = svqrshrn_u16 (z0, 1))
+
+/*
+** qrshrn_z0_z4_64:
+** uqrshrn z0\.h, {z4\.d - z7\.d}, #64
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z0_z4_64, svuint64x4_t, svuint16_t,
+ z0_res = svqrshrn_n_u16_u64_x4 (z4, 64),
+ z0_res = svqrshrn_u16 (z4, 64))
+
+/*
+** qrshrn_z0_z21_33:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uqrshrn z0\.h, [^\n]+, #33
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z0_z21_33, svuint64x4_t, svuint16_t,
+ z0_res = svqrshrn_n_u16_u64_x4 (z21, 33),
+ z0_res = svqrshrn_u16 (z21, 33))
+
+/*
+** qrshrn_z25_z26_12:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uqrshrn z25\.h, [^\n]+, #12
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z25_z26_12, svuint64x4_t, svuint16_t,
+ z25 = svqrshrn_n_u16_u64_x4 (z26, 12),
+ z25 = svqrshrn_u16 (z26, 12))
+
+/*
+** qrshrn_z25_z0_32:
+** uqrshrn z25\.h, {z0\.d - z3\.d}, #32
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z25_z0_32, svuint64x4_t, svuint16_t,
+ z25 = svqrshrn_n_u16_u64_x4 (z0, 32),
+ z25 = svqrshrn_u16 (z0, 32))
+
+/*
+** qrshrn_z22_z16_63:
+** uqrshrn z22\.h, {z16\.d - z19\.d}, #63
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z22_z16_63, svuint64x4_t, svuint16_t,
+ z22_res = svqrshrn_n_u16_u64_x4 (z16, 63),
+ z22_res = svqrshrn_u16 (z16, 63))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_u8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_u8_x4.c
new file mode 100644
index 0000000..7755f95
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrn_u8_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qrshrn_z0_z0_1:
+** uqrshrn z0\.b, {z0\.s - z3\.s}, #1
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z0_z0_1, svuint32x4_t, svuint8_t,
+ z0_res = svqrshrn_n_u8_u32_x4 (z0, 1),
+ z0_res = svqrshrn_u8 (z0, 1))
+
+/*
+** qrshrn_z0_z4_32:
+** uqrshrn z0\.b, {z4\.s - z7\.s}, #32
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z0_z4_32, svuint32x4_t, svuint8_t,
+ z0_res = svqrshrn_n_u8_u32_x4 (z4, 32),
+ z0_res = svqrshrn_u8 (z4, 32))
+
+/*
+** qrshrn_z0_z21_2:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uqrshrn z0\.b, [^\n]+, #2
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z0_z21_2, svuint32x4_t, svuint8_t,
+ z0_res = svqrshrn_n_u8_u32_x4 (z21, 2),
+ z0_res = svqrshrn_u8 (z21, 2))
+
+/*
+** qrshrn_z25_z26_12:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uqrshrn z25\.b, [^\n]+, #12
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z25_z26_12, svuint32x4_t, svuint8_t,
+ z25 = svqrshrn_n_u8_u32_x4 (z26, 12),
+ z25 = svqrshrn_u8 (z26, 12))
+
+/*
+** qrshrn_z25_z0_16:
+** uqrshrn z25\.b, {z0\.s - z3\.s}, #16
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z25_z0_16, svuint32x4_t, svuint8_t,
+ z25 = svqrshrn_n_u8_u32_x4 (z0, 16),
+ z25 = svqrshrn_u8 (z0, 16))
+
+/*
+** qrshrn_z22_z16_31:
+** uqrshrn z22\.b, {z16\.s - z19\.s}, #31
+** ret
+*/
+TEST_X4_NARROW (qrshrn_z22_z16_31, svuint32x4_t, svuint8_t,
+ z22_res = svqrshrn_n_u8_u32_x4 (z16, 31),
+ z22_res = svqrshrn_u8 (z16, 31))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshru_u16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshru_u16_x2.c
new file mode 100644
index 0000000..872ce7a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshru_u16_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qrshru_z0_z0_1:
+** sqrshru z0\.h, {z0\.s - z1\.s}, #1
+** ret
+*/
+TEST_X2_NARROW (qrshru_z0_z0_1, svint32x2_t, svuint16_t,
+ z0_res = svqrshru_n_u16_s32_x2 (z0, 1),
+ z0_res = svqrshru_u16 (z0, 1))
+
+/*
+** qrshru_z0_z6_16:
+** sqrshru z0\.h, {z6\.s - z7\.s}, #16
+** ret
+*/
+TEST_X2_NARROW (qrshru_z0_z6_16, svint32x2_t, svuint16_t,
+ z0_res = svqrshru_n_u16_s32_x2 (z6, 16),
+ z0_res = svqrshru_u16 (z6, 16))
+
+/*
+** qrshru_z0_z29_13:
+** mov [^\n]+
+** mov [^\n]+
+** sqrshru z0\.h, [^\n]+, #13
+** ret
+*/
+TEST_X2_NARROW (qrshru_z0_z29_13, svint32x2_t, svuint16_t,
+ z0_res = svqrshru_n_u16_s32_x2 (z29, 13),
+ z0_res = svqrshru_u16 (z29, 13))
+
+/*
+** qrshru_z5_z0_11:
+** sqrshru z5\.h, {z0\.s - z1\.s}, #11
+** ret
+*/
+TEST_X2_NARROW (qrshru_z5_z0_11, svint32x2_t, svuint16_t,
+ z5 = svqrshru_n_u16_s32_x2 (z0, 11),
+ z5 = svqrshru_u16 (z0, 11))
+
+/*
+** qrshru_z22_z16_15:
+** sqrshru z22\.h, {z16\.s - z17\.s}, #15
+** ret
+*/
+TEST_X2_NARROW (qrshru_z22_z16_15, svint32x2_t, svuint16_t,
+ z22 = svqrshru_n_u16_s32_x2 (z16, 15),
+ z22 = svqrshru_u16 (z16, 15))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshru_u16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshru_u16_x4.c
new file mode 100644
index 0000000..dc830b2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshru_u16_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qrshru_z0_z0_1:
+** sqrshru z0\.h, {z0\.d - z3\.d}, #1
+** ret
+*/
+TEST_X4_NARROW (qrshru_z0_z0_1, svint64x4_t, svuint16_t,
+ z0_res = svqrshru_n_u16_s64_x4 (z0, 1),
+ z0_res = svqrshru_u16 (z0, 1))
+
+/*
+** qrshru_z0_z4_64:
+** sqrshru z0\.h, {z4\.d - z7\.d}, #64
+** ret
+*/
+TEST_X4_NARROW (qrshru_z0_z4_64, svint64x4_t, svuint16_t,
+ z0_res = svqrshru_n_u16_s64_x4 (z4, 64),
+ z0_res = svqrshru_u16 (z4, 64))
+
+/*
+** qrshru_z0_z21_33:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqrshru z0\.h, [^\n]+, #33
+** ret
+*/
+TEST_X4_NARROW (qrshru_z0_z21_33, svint64x4_t, svuint16_t,
+ z0_res = svqrshru_n_u16_s64_x4 (z21, 33),
+ z0_res = svqrshru_u16 (z21, 33))
+
+/*
+** qrshru_z25_z26_12:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqrshru z25\.h, [^\n]+, #12
+** ret
+*/
+TEST_X4_NARROW (qrshru_z25_z26_12, svint64x4_t, svuint16_t,
+ z25 = svqrshru_n_u16_s64_x4 (z26, 12),
+ z25 = svqrshru_u16 (z26, 12))
+
+/*
+** qrshru_z25_z0_32:
+** sqrshru z25\.h, {z0\.d - z3\.d}, #32
+** ret
+*/
+TEST_X4_NARROW (qrshru_z25_z0_32, svint64x4_t, svuint16_t,
+ z25 = svqrshru_n_u16_s64_x4 (z0, 32),
+ z25 = svqrshru_u16 (z0, 32))
+
+/*
+** qrshru_z22_z16_63:
+** sqrshru z22\.h, {z16\.d - z19\.d}, #63
+** ret
+*/
+TEST_X4_NARROW (qrshru_z22_z16_63, svint64x4_t, svuint16_t,
+ z22_res = svqrshru_n_u16_s64_x4 (z16, 63),
+ z22_res = svqrshru_u16 (z16, 63))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshru_u8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshru_u8_x4.c
new file mode 100644
index 0000000..3adaa18
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshru_u8_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qrshru_z0_z0_1:
+** sqrshru z0\.b, {z0\.s - z3\.s}, #1
+** ret
+*/
+TEST_X4_NARROW (qrshru_z0_z0_1, svint32x4_t, svuint8_t,
+ z0_res = svqrshru_n_u8_s32_x4 (z0, 1),
+ z0_res = svqrshru_u8 (z0, 1))
+
+/*
+** qrshru_z0_z4_32:
+** sqrshru z0\.b, {z4\.s - z7\.s}, #32
+** ret
+*/
+TEST_X4_NARROW (qrshru_z0_z4_32, svint32x4_t, svuint8_t,
+ z0_res = svqrshru_n_u8_s32_x4 (z4, 32),
+ z0_res = svqrshru_u8 (z4, 32))
+
+/*
+** qrshru_z0_z21_2:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqrshru z0\.b, [^\n]+, #2
+** ret
+*/
+TEST_X4_NARROW (qrshru_z0_z21_2, svint32x4_t, svuint8_t,
+ z0_res = svqrshru_n_u8_s32_x4 (z21, 2),
+ z0_res = svqrshru_u8 (z21, 2))
+
+/*
+** qrshru_z25_z26_12:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqrshru z25\.b, [^\n]+, #12
+** ret
+*/
+TEST_X4_NARROW (qrshru_z25_z26_12, svint32x4_t, svuint8_t,
+ z25 = svqrshru_n_u8_s32_x4 (z26, 12),
+ z25 = svqrshru_u8 (z26, 12))
+
+/*
+** qrshru_z25_z0_16:
+** sqrshru z25\.b, {z0\.s - z3\.s}, #16
+** ret
+*/
+TEST_X4_NARROW (qrshru_z25_z0_16, svint32x4_t, svuint8_t,
+ z25 = svqrshru_n_u8_s32_x4 (z0, 16),
+ z25 = svqrshru_u8 (z0, 16))
+
+/*
+** qrshru_z22_z16_31:
+** sqrshru z22\.b, {z16\.s - z19\.s}, #31
+** ret
+*/
+TEST_X4_NARROW (qrshru_z22_z16_31, svint32x4_t, svuint8_t,
+ z22_res = svqrshru_n_u8_s32_x4 (z16, 31),
+ z22_res = svqrshru_u8 (z16, 31))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrun_u16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrun_u16_x2.c
new file mode 100644
index 0000000..a6dfa23
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrun_u16_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qrshrun_z0_z0_1:
+** sqrshrun z0\.h, {z0\.s - z1\.s}, #1
+** ret
+*/
+TEST_X2_NARROW (qrshrun_z0_z0_1, svint32x2_t, svuint16_t,
+ z0_res = svqrshrun_n_u16_s32_x2 (z0, 1),
+ z0_res = svqrshrun_u16 (z0, 1))
+
+/*
+** qrshrun_z0_z6_16:
+** sqrshrun z0\.h, {z6\.s - z7\.s}, #16
+** ret
+*/
+TEST_X2_NARROW (qrshrun_z0_z6_16, svint32x2_t, svuint16_t,
+ z0_res = svqrshrun_n_u16_s32_x2 (z6, 16),
+ z0_res = svqrshrun_u16 (z6, 16))
+
+/*
+** qrshrun_z0_z29_13:
+** mov [^\n]+
+** mov [^\n]+
+** sqrshrun z0\.h, [^\n]+, #13
+** ret
+*/
+TEST_X2_NARROW (qrshrun_z0_z29_13, svint32x2_t, svuint16_t,
+ z0_res = svqrshrun_n_u16_s32_x2 (z29, 13),
+ z0_res = svqrshrun_u16 (z29, 13))
+
+/*
+** qrshrun_z5_z0_11:
+** sqrshrun z5\.h, {z0\.s - z1\.s}, #11
+** ret
+*/
+TEST_X2_NARROW (qrshrun_z5_z0_11, svint32x2_t, svuint16_t,
+ z5 = svqrshrun_n_u16_s32_x2 (z0, 11),
+ z5 = svqrshrun_u16 (z0, 11))
+
+/*
+** qrshrun_z22_z16_15:
+** sqrshrun z22\.h, {z16\.s - z17\.s}, #15
+** ret
+*/
+TEST_X2_NARROW (qrshrun_z22_z16_15, svint32x2_t, svuint16_t,
+ z22 = svqrshrun_n_u16_s32_x2 (z16, 15),
+ z22 = svqrshrun_u16 (z16, 15))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrun_u16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrun_u16_x4.c
new file mode 100644
index 0000000..4226f7f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrun_u16_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qrshrun_z0_z0_1:
+** sqrshrun z0\.h, {z0\.d - z3\.d}, #1
+** ret
+*/
+TEST_X4_NARROW (qrshrun_z0_z0_1, svint64x4_t, svuint16_t,
+ z0_res = svqrshrun_n_u16_s64_x4 (z0, 1),
+ z0_res = svqrshrun_u16 (z0, 1))
+
+/*
+** qrshrun_z0_z4_64:
+** sqrshrun z0\.h, {z4\.d - z7\.d}, #64
+** ret
+*/
+TEST_X4_NARROW (qrshrun_z0_z4_64, svint64x4_t, svuint16_t,
+ z0_res = svqrshrun_n_u16_s64_x4 (z4, 64),
+ z0_res = svqrshrun_u16 (z4, 64))
+
+/*
+** qrshrun_z0_z21_33:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqrshrun z0\.h, [^\n]+, #33
+** ret
+*/
+TEST_X4_NARROW (qrshrun_z0_z21_33, svint64x4_t, svuint16_t,
+ z0_res = svqrshrun_n_u16_s64_x4 (z21, 33),
+ z0_res = svqrshrun_u16 (z21, 33))
+
+/*
+** qrshrun_z25_z26_12:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqrshrun z25\.h, [^\n]+, #12
+** ret
+*/
+TEST_X4_NARROW (qrshrun_z25_z26_12, svint64x4_t, svuint16_t,
+ z25 = svqrshrun_n_u16_s64_x4 (z26, 12),
+ z25 = svqrshrun_u16 (z26, 12))
+
+/*
+** qrshrun_z25_z0_32:
+** sqrshrun z25\.h, {z0\.d - z3\.d}, #32
+** ret
+*/
+TEST_X4_NARROW (qrshrun_z25_z0_32, svint64x4_t, svuint16_t,
+ z25 = svqrshrun_n_u16_s64_x4 (z0, 32),
+ z25 = svqrshrun_u16 (z0, 32))
+
+/*
+** qrshrun_z22_z16_63:
+** sqrshrun z22\.h, {z16\.d - z19\.d}, #63
+** ret
+*/
+TEST_X4_NARROW (qrshrun_z22_z16_63, svint64x4_t, svuint16_t,
+ z22_res = svqrshrun_n_u16_s64_x4 (z16, 63),
+ z22_res = svqrshrun_u16 (z16, 63))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrun_u8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrun_u8_x4.c
new file mode 100644
index 0000000..6a75201
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/qrshrun_u8_x4.c
@@ -0,0 +1,65 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** qrshrun_z0_z0_1:
+** sqrshrun z0\.b, {z0\.s - z3\.s}, #1
+** ret
+*/
+TEST_X4_NARROW (qrshrun_z0_z0_1, svint32x4_t, svuint8_t,
+ z0_res = svqrshrun_n_u8_s32_x4 (z0, 1),
+ z0_res = svqrshrun_u8 (z0, 1))
+
+/*
+** qrshrun_z0_z4_32:
+** sqrshrun z0\.b, {z4\.s - z7\.s}, #32
+** ret
+*/
+TEST_X4_NARROW (qrshrun_z0_z4_32, svint32x4_t, svuint8_t,
+ z0_res = svqrshrun_n_u8_s32_x4 (z4, 32),
+ z0_res = svqrshrun_u8 (z4, 32))
+
+/*
+** qrshrun_z0_z21_2:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqrshrun z0\.b, [^\n]+, #2
+** ret
+*/
+TEST_X4_NARROW (qrshrun_z0_z21_2, svint32x4_t, svuint8_t,
+ z0_res = svqrshrun_n_u8_s32_x4 (z21, 2),
+ z0_res = svqrshrun_u8 (z21, 2))
+
+/*
+** qrshrun_z25_z26_12:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sqrshrun z25\.b, [^\n]+, #12
+** ret
+*/
+TEST_X4_NARROW (qrshrun_z25_z26_12, svint32x4_t, svuint8_t,
+ z25 = svqrshrun_n_u8_s32_x4 (z26, 12),
+ z25 = svqrshrun_u8 (z26, 12))
+
+/*
+** qrshrun_z25_z0_16:
+** sqrshrun z25\.b, {z0\.s - z3\.s}, #16
+** ret
+*/
+TEST_X4_NARROW (qrshrun_z25_z0_16, svint32x4_t, svuint8_t,
+ z25 = svqrshrun_n_u8_s32_x4 (z0, 16),
+ z25 = svqrshrun_u8 (z0, 16))
+
+/*
+** qrshrun_z22_z16_31:
+** sqrshrun z22\.b, {z16\.s - z19\.s}, #31
+** ret
+*/
+TEST_X4_NARROW (qrshrun_z22_z16_31, svint32x4_t, svuint8_t,
+ z22_res = svqrshrun_n_u8_s32_x4 (z16, 31),
+ z22_res = svqrshrun_u8 (z16, 31))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za16_vg2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za16_vg2.c
new file mode 100644
index 0000000..a24b848
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za16_vg2.c
@@ -0,0 +1,140 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_za16_s16_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova {z0\.h - z1\.h}, za0h\.h\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_s16_z0_0_0, svint16x2_t,
+ z0 = svread_hor_za16_s16_vg2 (0, 0),
+ z0 = svread_hor_za16_s16_vg2 (0, 0))
+
+/*
+** read_za16_u16_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova {z4\.h - z5\.h}, za1h\.h\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_u16_z4_1_1, svuint16x2_t,
+ z4 = svread_hor_za16_u16_vg2 (1, 1),
+ z4 = svread_hor_za16_u16_vg2 (1, 1))
+
+/*
+** read_za16_f16_z28_0_w11:
+** mov (w1[2-5]), w11
+** mova {z28\.h - z29\.h}, za0h\.h\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_f16_z28_0_w11, svfloat16x2_t,
+ z28 = svread_hor_za16_f16_vg2 (0, w11),
+ z28 = svread_hor_za16_f16_vg2 (0, w11))
+
+/*
+** read_za16_bf16_z0_1_w12:
+** mova {z0\.h - z1\.h}, za1h\.h\[w12, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_bf16_z0_1_w12, svbfloat16x2_t,
+ z0 = svread_hor_za16_bf16_vg2 (1, w12),
+ z0 = svread_hor_za16_bf16_vg2 (1, w12))
+
+/*
+** read_za16_u16_z18_0_w15:
+** mova {z18\.h - z19\.h}, za0h\.h\[w15, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_u16_z18_0_w15, svuint16x2_t,
+ z18 = svread_hor_za16_u16_vg2 (0, w15),
+ z18 = svread_hor_za16_u16_vg2 (0, w15))
+
+/*
+** read_za16_s16_z23_1_w12p6:
+** mova {[^\n]+}, za1h\.h\[w12, 6:7\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_s16_z23_1_w12p6, svint16x2_t,
+ z23 = svread_hor_za16_s16_vg2 (1, w12 + 6),
+ z23 = svread_hor_za16_s16_vg2 (1, w12 + 6))
+
+/*
+** read_za16_f16_z4_0_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova {z4\.h - z5\.h}, za0h\.h\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_f16_z4_0_w12p1, svfloat16x2_t,
+ z4 = svread_hor_za16_f16_vg2 (0, w12 + 1),
+ z4 = svread_hor_za16_f16_vg2 (0, w12 + 1))
+
+/*
+** read_za16_s16_z28_1_w12p2:
+** mova {z28\.h - z29\.h}, za1h\.h\[w12, 2:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_s16_z28_1_w12p2, svint16x2_t,
+ z28 = svread_hor_za16_s16_vg2 (1, w12 + 2),
+ z28 = svread_hor_za16_s16_vg2 (1, w12 + 2))
+
+/*
+** read_za16_u16_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova {z0\.h - z1\.h}, za0h\.h\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_u16_z0_0_w15p3, svuint16x2_t,
+ z0 = svread_hor_za16_u16_vg2 (0, w15 + 3),
+ z0 = svread_hor_za16_u16_vg2 (0, w15 + 3))
+
+/*
+** read_za16_bf16_z4_1_w15p4:
+** mova {z4\.h - z5\.h}, za1h\.h\[w15, 4:5\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_bf16_z4_1_w15p4, svbfloat16x2_t,
+ z4 = svread_hor_za16_bf16_vg2 (1, w15 + 4),
+ z4 = svread_hor_za16_bf16_vg2 (1, w15 + 4))
+
+/*
+** read_za16_u16_z28_0_w12p7:
+** add (w[0-9]+), w12, #?7
+** mova {z28\.h - z29\.h}, za0h\.h\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_u16_z28_0_w12p7, svuint16x2_t,
+ z28 = svread_hor_za16_u16_vg2 (0, w12 + 7),
+ z28 = svread_hor_za16_u16_vg2 (0, w12 + 7))
+
+/*
+** read_za16_s16_z0_1_w15p8:
+** add (w[0-9]+), w15, #?8
+** mova {z0\.h - z1\.h}, za1h\.h\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_s16_z0_1_w15p8, svint16x2_t,
+ z0 = svread_hor_za16_s16_vg2 (1, w15 + 8),
+ z0 = svread_hor_za16_s16_vg2 (1, w15 + 8))
+
+/*
+** read_za16_u16_z4_0_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova {z4\.h - z5\.h}, za0h\.h\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_u16_z4_0_w12m1, svuint16x2_t,
+ z4 = svread_hor_za16_u16_vg2 (0, w12 - 1),
+ z4 = svread_hor_za16_u16_vg2 (0, w12 - 1))
+
+/*
+** read_za16_u16_z18_1_w16:
+** mov (w1[2-5]), w16
+** mova {z18\.h - z19\.h}, za1h\.h\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_u16_z18_1_w16, svuint16x2_t,
+ z18 = svread_hor_za16_u16_vg2 (1, w16),
+ z18 = svread_hor_za16_u16_vg2 (1, w16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za16_vg4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za16_vg4.c
new file mode 100644
index 0000000..c6c8d14
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za16_vg4.c
@@ -0,0 +1,138 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_za16_s16_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova {z0\.h - z3\.h}, za0h\.h\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_s16_z0_0_0, svint16x4_t,
+ z0 = svread_hor_za16_s16_vg4 (0, 0),
+ z0 = svread_hor_za16_s16_vg4 (0, 0))
+
+/*
+** read_za16_u16_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova {z4\.h - z7\.h}, za1h\.h\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_u16_z4_1_1, svuint16x4_t,
+ z4 = svread_hor_za16_u16_vg4 (1, 1),
+ z4 = svread_hor_za16_u16_vg4 (1, 1))
+
+/*
+** read_za16_f16_z28_0_w11:
+** mov (w1[2-5]), w11
+** mova {z28\.h - z31\.h}, za0h\.h\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_f16_z28_0_w11, svfloat16x4_t,
+ z28 = svread_hor_za16_f16_vg4 (0, w11),
+ z28 = svread_hor_za16_f16_vg4 (0, w11))
+
+/*
+** read_za16_s16_z0_1_w12:
+** mova {z0\.h - z3\.h}, za1h\.h\[w12, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_s16_z0_1_w12, svint16x4_t,
+ z0 = svread_hor_za16_s16_vg4 (1, w12),
+ z0 = svread_hor_za16_s16_vg4 (1, w12))
+
+/*
+** read_za16_u16_z18_0_w15:
+** mova {[^\n]+}, za0h\.h\[w15, 0:3\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_u16_z18_0_w15, svuint16x4_t,
+ z18 = svread_hor_za16_u16_vg4 (0, w15),
+ z18 = svread_hor_za16_u16_vg4 (0, w15))
+
+/*
+** read_za16_bf16_z23_1_w12p4:
+** mova {[^\n]+}, za1h\.h\[w12, 4:7\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_bf16_z23_1_w12p4, svbfloat16x4_t,
+ z23 = svread_hor_za16_bf16_vg4 (1, w12 + 4),
+ z23 = svread_hor_za16_bf16_vg4 (1, w12 + 4))
+
+/*
+** read_za16_u16_z4_0_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova {z4\.h - z7\.h}, za0h\.h\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_u16_z4_0_w12p1, svuint16x4_t,
+ z4 = svread_hor_za16_u16_vg4 (0, w12 + 1),
+ z4 = svread_hor_za16_u16_vg4 (0, w12 + 1))
+
+/*
+** read_za16_s16_z28_1_w12p2:
+** add (w[0-9]+), w12, #?2
+** mova {z28\.h - z31\.h}, za1h\.h\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_s16_z28_1_w12p2, svint16x4_t,
+ z28 = svread_hor_za16_s16_vg4 (1, w12 + 2),
+ z28 = svread_hor_za16_s16_vg4 (1, w12 + 2))
+
+/*
+** read_za16_f16_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova {z0\.h - z3\.h}, za0h\.h\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_f16_z0_0_w15p3, svfloat16x4_t,
+ z0 = svread_hor_za16_f16_vg4 (0, w15 + 3),
+ z0 = svread_hor_za16_f16_vg4 (0, w15 + 3))
+
+/*
+** read_za16_u16_z28_1_w12p6:
+** add (w[0-9]+), w12, #?6
+** mova {z28\.h - z31\.h}, za1h\.h\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_u16_z28_1_w12p6, svuint16x4_t,
+ z28 = svread_hor_za16_u16_vg4 (1, w12 + 6),
+ z28 = svread_hor_za16_u16_vg4 (1, w12 + 6))
+
+/*
+** read_za16_s16_z0_0_w15p8:
+** add (w[0-9]+), w15, #?8
+** mova {z0\.h - z3\.h}, za0h\.h\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_s16_z0_0_w15p8, svint16x4_t,
+ z0 = svread_hor_za16_s16_vg4 (0, w15 + 8),
+ z0 = svread_hor_za16_s16_vg4 (0, w15 + 8))
+
+/*
+** read_za16_bf16_z4_1_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova {z4\.h - z7\.h}, za1h\.h\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_bf16_z4_1_w12m1, svbfloat16x4_t,
+ z4 = svread_hor_za16_bf16_vg4 (1, w12 - 1),
+ z4 = svread_hor_za16_bf16_vg4 (1, w12 - 1))
+
+/*
+** read_za16_u16_z28_0_w16:
+** mov (w1[2-5]), w16
+** mova {z28\.h - z31\.h}, za0h\.h\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_u16_z28_0_w16, svuint16x4_t,
+ z28 = svread_hor_za16_u16_vg4 (0, w16),
+ z28 = svread_hor_za16_u16_vg4 (0, w16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za32_vg2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za32_vg2.c
new file mode 100644
index 0000000..ce2ac70
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za32_vg2.c
@@ -0,0 +1,112 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_za32_s32_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova {z0\.s - z1\.s}, za0h\.s\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_s32_z0_0_0, svint32x2_t,
+ z0 = svread_hor_za32_s32_vg2 (0, 0),
+ z0 = svread_hor_za32_s32_vg2 (0, 0))
+
+/*
+** read_za32_u32_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova {z4\.s - z5\.s}, za1h\.s\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_u32_z4_1_1, svuint32x2_t,
+ z4 = svread_hor_za32_u32_vg2 (1, 1),
+ z4 = svread_hor_za32_u32_vg2 (1, 1))
+
+/*
+** read_za32_f32_z28_2_w11:
+** mov (w1[2-5]), w11
+** mova {z28\.s - z29\.s}, za2h\.s\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_f32_z28_2_w11, svfloat32x2_t,
+ z28 = svread_hor_za32_f32_vg2 (2, w11),
+ z28 = svread_hor_za32_f32_vg2 (2, w11))
+
+/*
+** read_za32_f32_z0_3_w12:
+** mova {z0\.s - z1\.s}, za3h\.s\[w12, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_f32_z0_3_w12, svfloat32x2_t,
+ z0 = svread_hor_za32_f32_vg2 (3, w12),
+ z0 = svread_hor_za32_f32_vg2 (3, w12))
+
+/*
+** read_za32_u32_z18_0_w15:
+** mova {z18\.s - z19\.s}, za0h\.s\[w15, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_u32_z18_0_w15, svuint32x2_t,
+ z18 = svread_hor_za32_u32_vg2 (0, w15),
+ z18 = svread_hor_za32_u32_vg2 (0, w15))
+
+/*
+** read_za32_s32_z23_1_w12p2:
+** mova {[^\n]+}, za1h\.s\[w12, 2:3\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_s32_z23_1_w12p2, svint32x2_t,
+ z23 = svread_hor_za32_s32_vg2 (1, w12 + 2),
+ z23 = svread_hor_za32_s32_vg2 (1, w12 + 2))
+
+/*
+** read_za32_f32_z4_2_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova {z4\.s - z5\.s}, za2h\.s\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_f32_z4_2_w12p1, svfloat32x2_t,
+ z4 = svread_hor_za32_f32_vg2 (2, w12 + 1),
+ z4 = svread_hor_za32_f32_vg2 (2, w12 + 1))
+
+/*
+** read_za32_u32_z0_3_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova {z0\.s - z1\.s}, za3h\.s\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_u32_z0_3_w15p3, svuint32x2_t,
+ z0 = svread_hor_za32_u32_vg2 (3, w15 + 3),
+ z0 = svread_hor_za32_u32_vg2 (3, w15 + 3))
+
+/*
+** read_za32_s32_z0_1_w15p4:
+** add (w[0-9]+), w15, #?4
+** mova {z0\.s - z1\.s}, za1h\.s\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_s32_z0_1_w15p4, svint32x2_t,
+ z0 = svread_hor_za32_s32_vg2 (1, w15 + 4),
+ z0 = svread_hor_za32_s32_vg2 (1, w15 + 4))
+
+/*
+** read_za32_u32_z4_3_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova {z4\.s - z5\.s}, za3h\.s\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_u32_z4_3_w12m1, svuint32x2_t,
+ z4 = svread_hor_za32_u32_vg2 (3, w12 - 1),
+ z4 = svread_hor_za32_u32_vg2 (3, w12 - 1))
+
+/*
+** read_za32_u32_z18_1_w16:
+** mov (w1[2-5]), w16
+** mova {z18\.s - z19\.s}, za1h\.s\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_u32_z18_1_w16, svuint32x2_t,
+ z18 = svread_hor_za32_u32_vg2 (1, w16),
+ z18 = svread_hor_za32_u32_vg2 (1, w16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za32_vg4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za32_vg4.c
new file mode 100644
index 0000000..2e8eb8d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za32_vg4.c
@@ -0,0 +1,129 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_za32_s32_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova {z0\.s - z3\.s}, za0h\.s\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_s32_z0_0_0, svint32x4_t,
+ z0 = svread_hor_za32_s32_vg4 (0, 0),
+ z0 = svread_hor_za32_s32_vg4 (0, 0))
+
+/*
+** read_za32_u32_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova {z4\.s - z7\.s}, za1h\.s\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_u32_z4_1_1, svuint32x4_t,
+ z4 = svread_hor_za32_u32_vg4 (1, 1),
+ z4 = svread_hor_za32_u32_vg4 (1, 1))
+
+/*
+** read_za32_f32_z28_2_w11:
+** mov (w1[2-5]), w11
+** mova {z28\.s - z31\.s}, za2h\.s\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_f32_z28_2_w11, svfloat32x4_t,
+ z28 = svread_hor_za32_f32_vg4 (2, w11),
+ z28 = svread_hor_za32_f32_vg4 (2, w11))
+
+/*
+** read_za32_s32_z0_3_w12:
+** mova {z0\.s - z3\.s}, za3h\.s\[w12, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_s32_z0_3_w12, svint32x4_t,
+ z0 = svread_hor_za32_s32_vg4 (3, w12),
+ z0 = svread_hor_za32_s32_vg4 (3, w12))
+
+/*
+** read_za32_u32_z18_0_w15:
+** mova {[^\n]+}, za0h\.s\[w15, 0:3\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_u32_z18_0_w15, svuint32x4_t,
+ z18 = svread_hor_za32_u32_vg4 (0, w15),
+ z18 = svread_hor_za32_u32_vg4 (0, w15))
+
+/*
+** read_za32_f32_z23_1_w12p4:
+** add (w[0-9]+), w12, #?4
+** mova {[^\n]+}, za1h\.s\[\1, 0:3\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_f32_z23_1_w12p4, svfloat32x4_t,
+ z23 = svread_hor_za32_f32_vg4 (1, w12 + 4),
+ z23 = svread_hor_za32_f32_vg4 (1, w12 + 4))
+
+/*
+** read_za32_u32_z4_2_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova {z4\.s - z7\.s}, za2h\.s\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_u32_z4_2_w12p1, svuint32x4_t,
+ z4 = svread_hor_za32_u32_vg4 (2, w12 + 1),
+ z4 = svread_hor_za32_u32_vg4 (2, w12 + 1))
+
+/*
+** read_za32_s32_z28_3_w12p2:
+** add (w[0-9]+), w12, #?2
+** mova {z28\.s - z31\.s}, za3h\.s\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_s32_z28_3_w12p2, svint32x4_t,
+ z28 = svread_hor_za32_s32_vg4 (3, w12 + 2),
+ z28 = svread_hor_za32_s32_vg4 (3, w12 + 2))
+
+/*
+** read_za32_f32_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova {z0\.s - z3\.s}, za0h\.s\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_f32_z0_0_w15p3, svfloat32x4_t,
+ z0 = svread_hor_za32_f32_vg4 (0, w15 + 3),
+ z0 = svread_hor_za32_f32_vg4 (0, w15 + 3))
+
+/*
+** read_za32_u32_z28_1_w12p4:
+** add (w[0-9]+), w12, #?4
+** mova {z28\.s - z31\.s}, za1h\.s\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_u32_z28_1_w12p4, svuint32x4_t,
+ z28 = svread_hor_za32_u32_vg4 (1, w12 + 4),
+ z28 = svread_hor_za32_u32_vg4 (1, w12 + 4))
+
+/*
+** read_za32_f32_z4_2_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova {z4\.s - z7\.s}, za2h\.s\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_f32_z4_2_w12m1, svfloat32x4_t,
+ z4 = svread_hor_za32_f32_vg4 (2, w12 - 1),
+ z4 = svread_hor_za32_f32_vg4 (2, w12 - 1))
+
+/*
+** read_za32_u32_z28_3_w16:
+** mov (w1[2-5]), w16
+** mova {z28\.s - z31\.s}, za3h\.s\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_u32_z28_3_w16, svuint32x4_t,
+ z28 = svread_hor_za32_u32_vg4 (3, w16),
+ z28 = svread_hor_za32_u32_vg4 (3, w16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za64_vg2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za64_vg2.c
new file mode 100644
index 0000000..d18468e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za64_vg2.c
@@ -0,0 +1,113 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_za64_s64_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova {z0\.d - z1\.d}, za0h\.d\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_s64_z0_0_0, svint64x2_t,
+ z0 = svread_hor_za64_s64_vg2 (0, 0),
+ z0 = svread_hor_za64_s64_vg2 (0, 0))
+
+/*
+** read_za64_u64_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova {z4\.d - z5\.d}, za1h\.d\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_u64_z4_1_1, svuint64x2_t,
+ z4 = svread_hor_za64_u64_vg2 (1, 1),
+ z4 = svread_hor_za64_u64_vg2 (1, 1))
+
+/*
+** read_za64_f64_z28_2_w11:
+** mov (w1[2-5]), w11
+** mova {z28\.d - z29\.d}, za2h\.d\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_f64_z28_2_w11, svfloat64x2_t,
+ z28 = svread_hor_za64_f64_vg2 (2, w11),
+ z28 = svread_hor_za64_f64_vg2 (2, w11))
+
+/*
+** read_za64_f64_z0_3_w12:
+** mova {z0\.d - z1\.d}, za3h\.d\[w12, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_f64_z0_3_w12, svfloat64x2_t,
+ z0 = svread_hor_za64_f64_vg2 (3, w12),
+ z0 = svread_hor_za64_f64_vg2 (3, w12))
+
+/*
+** read_za64_u64_z18_4_w15:
+** mova {z18\.d - z19\.d}, za4h\.d\[w15, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_u64_z18_4_w15, svuint64x2_t,
+ z18 = svread_hor_za64_u64_vg2 (4, w15),
+ z18 = svread_hor_za64_u64_vg2 (4, w15))
+
+/*
+** read_za64_s64_z23_5_w12p2:
+** add (w[0-9]+), w12, #?2
+** mova {[^\n]+}, za5h\.d\[\1, 0:1\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_s64_z23_5_w12p2, svint64x2_t,
+ z23 = svread_hor_za64_s64_vg2 (5, w12 + 2),
+ z23 = svread_hor_za64_s64_vg2 (5, w12 + 2))
+
+/*
+** read_za64_f64_z4_6_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova {z4\.d - z5\.d}, za6h\.d\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_f64_z4_6_w12p1, svfloat64x2_t,
+ z4 = svread_hor_za64_f64_vg2 (6, w12 + 1),
+ z4 = svread_hor_za64_f64_vg2 (6, w12 + 1))
+
+/*
+** read_za64_u64_z0_7_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova {z0\.d - z1\.d}, za7h\.d\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_u64_z0_7_w15p3, svuint64x2_t,
+ z0 = svread_hor_za64_u64_vg2 (7, w15 + 3),
+ z0 = svread_hor_za64_u64_vg2 (7, w15 + 3))
+
+/*
+** read_za64_s64_z0_1_w15p4:
+** add (w[0-9]+), w15, #?4
+** mova {z0\.d - z1\.d}, za1h\.d\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_s64_z0_1_w15p4, svint64x2_t,
+ z0 = svread_hor_za64_s64_vg2 (1, w15 + 4),
+ z0 = svread_hor_za64_s64_vg2 (1, w15 + 4))
+
+/*
+** read_za64_u64_z4_3_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova {z4\.d - z5\.d}, za3h\.d\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_u64_z4_3_w12m1, svuint64x2_t,
+ z4 = svread_hor_za64_u64_vg2 (3, w12 - 1),
+ z4 = svread_hor_za64_u64_vg2 (3, w12 - 1))
+
+/*
+** read_za64_u64_z18_1_w16:
+** mov (w1[2-5]), w16
+** mova {z18\.d - z19\.d}, za1h\.d\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_u64_z18_1_w16, svuint64x2_t,
+ z18 = svread_hor_za64_u64_vg2 (1, w16),
+ z18 = svread_hor_za64_u64_vg2 (1, w16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za64_vg4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za64_vg4.c
new file mode 100644
index 0000000..1dff701
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za64_vg4.c
@@ -0,0 +1,129 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_za64_s64_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova {z0\.d - z3\.d}, za0h\.d\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_s64_z0_0_0, svint64x4_t,
+ z0 = svread_hor_za64_s64_vg4 (0, 0),
+ z0 = svread_hor_za64_s64_vg4 (0, 0))
+
+/*
+** read_za64_u64_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova {z4\.d - z7\.d}, za1h\.d\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_u64_z4_1_1, svuint64x4_t,
+ z4 = svread_hor_za64_u64_vg4 (1, 1),
+ z4 = svread_hor_za64_u64_vg4 (1, 1))
+
+/*
+** read_za64_f64_z28_2_w11:
+** mov (w1[2-5]), w11
+** mova {z28\.d - z31\.d}, za2h\.d\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_f64_z28_2_w11, svfloat64x4_t,
+ z28 = svread_hor_za64_f64_vg4 (2, w11),
+ z28 = svread_hor_za64_f64_vg4 (2, w11))
+
+/*
+** read_za64_s64_z0_3_w12:
+** mova {z0\.d - z3\.d}, za3h\.d\[w12, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_s64_z0_3_w12, svint64x4_t,
+ z0 = svread_hor_za64_s64_vg4 (3, w12),
+ z0 = svread_hor_za64_s64_vg4 (3, w12))
+
+/*
+** read_za64_u64_z18_4_w15:
+** mova {[^\n]+}, za4h\.d\[w15, 0:3\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_u64_z18_4_w15, svuint64x4_t,
+ z18 = svread_hor_za64_u64_vg4 (4, w15),
+ z18 = svread_hor_za64_u64_vg4 (4, w15))
+
+/*
+** read_za64_f64_z23_5_w12p4:
+** add (w[0-9]+), w12, #?4
+** mova {[^\n]+}, za5h\.d\[\1, 0:3\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_f64_z23_5_w12p4, svfloat64x4_t,
+ z23 = svread_hor_za64_f64_vg4 (5, w12 + 4),
+ z23 = svread_hor_za64_f64_vg4 (5, w12 + 4))
+
+/*
+** read_za64_u64_z4_6_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova {z4\.d - z7\.d}, za6h\.d\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_u64_z4_6_w12p1, svuint64x4_t,
+ z4 = svread_hor_za64_u64_vg4 (6, w12 + 1),
+ z4 = svread_hor_za64_u64_vg4 (6, w12 + 1))
+
+/*
+** read_za64_s64_z28_7_w12p2:
+** add (w[0-9]+), w12, #?2
+** mova {z28\.d - z31\.d}, za7h\.d\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_s64_z28_7_w12p2, svint64x4_t,
+ z28 = svread_hor_za64_s64_vg4 (7, w12 + 2),
+ z28 = svread_hor_za64_s64_vg4 (7, w12 + 2))
+
+/*
+** read_za64_f64_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova {z0\.d - z3\.d}, za0h\.d\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_f64_z0_0_w15p3, svfloat64x4_t,
+ z0 = svread_hor_za64_f64_vg4 (0, w15 + 3),
+ z0 = svread_hor_za64_f64_vg4 (0, w15 + 3))
+
+/*
+** read_za64_u64_z28_1_w12p4:
+** add (w[0-9]+), w12, #?4
+** mova {z28\.d - z31\.d}, za1h\.d\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_u64_z28_1_w12p4, svuint64x4_t,
+ z28 = svread_hor_za64_u64_vg4 (1, w12 + 4),
+ z28 = svread_hor_za64_u64_vg4 (1, w12 + 4))
+
+/*
+** read_za64_f64_z4_2_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova {z4\.d - z7\.d}, za2h\.d\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_f64_z4_2_w12m1, svfloat64x4_t,
+ z4 = svread_hor_za64_f64_vg4 (2, w12 - 1),
+ z4 = svread_hor_za64_f64_vg4 (2, w12 - 1))
+
+/*
+** read_za64_u64_z28_3_w16:
+** mov (w1[2-5]), w16
+** mova {z28\.d - z31\.d}, za3h\.d\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_u64_z28_3_w16, svuint64x4_t,
+ z28 = svread_hor_za64_u64_vg4 (3, w16),
+ z28 = svread_hor_za64_u64_vg4 (3, w16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za8_vg2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za8_vg2.c
new file mode 100644
index 0000000..ec31a68
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za8_vg2.c
@@ -0,0 +1,140 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_za8_s8_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova {z0\.b - z1\.b}, za0h\.b\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z0_0_0, svint8x2_t,
+ z0 = svread_hor_za8_s8_vg2 (0, 0),
+ z0 = svread_hor_za8_s8_vg2 (0, 0))
+
+/*
+** read_za8_u8_z4_0_1:
+** mov (w1[2-5]), #?1
+** mova {z4\.b - z5\.b}, za0h\.b\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z4_0_1, svuint8x2_t,
+ z4 = svread_hor_za8_u8_vg2 (0, 1),
+ z4 = svread_hor_za8_u8_vg2 (0, 1))
+
+/*
+** read_za8_s8_z28_0_w11:
+** mov (w1[2-5]), w11
+** mova {z28\.b - z29\.b}, za0h\.b\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z28_0_w11, svint8x2_t,
+ z28 = svread_hor_za8_s8_vg2 (0, w11),
+ z28 = svread_hor_za8_s8_vg2 (0, w11))
+
+/*
+** read_za8_s8_z0_0_w12:
+** mova {z0\.b - z1\.b}, za0h\.b\[w12, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z0_0_w12, svint8x2_t,
+ z0 = svread_hor_za8_s8_vg2 (0, w12),
+ z0 = svread_hor_za8_s8_vg2 (0, w12))
+
+/*
+** read_za8_u8_z18_0_w15:
+** mova {z18\.b - z19\.b}, za0h\.b\[w15, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z18_0_w15, svuint8x2_t,
+ z18 = svread_hor_za8_u8_vg2 (0, w15),
+ z18 = svread_hor_za8_u8_vg2 (0, w15))
+
+/*
+** read_za8_s8_z23_0_w12p14:
+** mova {[^\n]+}, za0h\.b\[w12, 14:15\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z23_0_w12p14, svint8x2_t,
+ z23 = svread_hor_za8_s8_vg2 (0, w12 + 14),
+ z23 = svread_hor_za8_s8_vg2 (0, w12 + 14))
+
+/*
+** read_za8_u8_z4_0_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova {z4\.b - z5\.b}, za0h\.b\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z4_0_w12p1, svuint8x2_t,
+ z4 = svread_hor_za8_u8_vg2 (0, w12 + 1),
+ z4 = svread_hor_za8_u8_vg2 (0, w12 + 1))
+
+/*
+** read_za8_s8_z28_0_w12p2:
+** mova {z28\.b - z29\.b}, za0h\.b\[w12, 2:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z28_0_w12p2, svint8x2_t,
+ z28 = svread_hor_za8_s8_vg2 (0, w12 + 2),
+ z28 = svread_hor_za8_s8_vg2 (0, w12 + 2))
+
+/*
+** read_za8_u8_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova {z0\.b - z1\.b}, za0h\.b\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z0_0_w15p3, svuint8x2_t,
+ z0 = svread_hor_za8_u8_vg2 (0, w15 + 3),
+ z0 = svread_hor_za8_u8_vg2 (0, w15 + 3))
+
+/*
+** read_za8_u8_z4_0_w15p12:
+** mova {z4\.b - z5\.b}, za0h\.b\[w15, 12:13\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z4_0_w15p12, svuint8x2_t,
+ z4 = svread_hor_za8_u8_vg2 (0, w15 + 12),
+ z4 = svread_hor_za8_u8_vg2 (0, w15 + 12))
+
+/*
+** read_za8_u8_z28_0_w12p15:
+** add (w[0-9]+), w12, #?15
+** mova {z28\.b - z29\.b}, za0h\.b\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z28_0_w12p15, svuint8x2_t,
+ z28 = svread_hor_za8_u8_vg2 (0, w12 + 15),
+ z28 = svread_hor_za8_u8_vg2 (0, w12 + 15))
+
+/*
+** read_za8_s8_z0_0_w15p16:
+** add (w[0-9]+), w15, #?16
+** mova {z0\.b - z1\.b}, za0h\.b\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z0_0_w15p16, svint8x2_t,
+ z0 = svread_hor_za8_s8_vg2 (0, w15 + 16),
+ z0 = svread_hor_za8_s8_vg2 (0, w15 + 16))
+
+/*
+** read_za8_u8_z4_0_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova {z4\.b - z5\.b}, za0h\.b\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z4_0_w12m1, svuint8x2_t,
+ z4 = svread_hor_za8_u8_vg2 (0, w12 - 1),
+ z4 = svread_hor_za8_u8_vg2 (0, w12 - 1))
+
+/*
+** read_za8_u8_z18_0_w16:
+** mov (w1[2-5]), w16
+** mova {z18\.b - z19\.b}, za0h\.b\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z18_0_w16, svuint8x2_t,
+ z18 = svread_hor_za8_u8_vg2 (0, w16),
+ z18 = svread_hor_za8_u8_vg2 (0, w16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za8_vg4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za8_vg4.c
new file mode 100644
index 0000000..261cbea
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_hor_za8_vg4.c
@@ -0,0 +1,156 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_za8_s8_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova {z0\.b - z3\.b}, za0h\.b\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z0_0_0, svint8x4_t,
+ z0 = svread_hor_za8_s8_vg4 (0, 0),
+ z0 = svread_hor_za8_s8_vg4 (0, 0))
+
+/*
+** read_za8_u8_z4_0_1:
+** mov (w1[2-5]), #?1
+** mova {z4\.b - z7\.b}, za0h\.b\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z4_0_1, svuint8x4_t,
+ z4 = svread_hor_za8_u8_vg4 (0, 1),
+ z4 = svread_hor_za8_u8_vg4 (0, 1))
+
+/*
+** read_za8_s8_z28_0_w11:
+** mov (w1[2-5]), w11
+** mova {z28\.b - z31\.b}, za0h\.b\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z28_0_w11, svint8x4_t,
+ z28 = svread_hor_za8_s8_vg4 (0, w11),
+ z28 = svread_hor_za8_s8_vg4 (0, w11))
+
+/*
+** read_za8_s8_z0_0_w12:
+** mova {z0\.b - z3\.b}, za0h\.b\[w12, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z0_0_w12, svint8x4_t,
+ z0 = svread_hor_za8_s8_vg4 (0, w12),
+ z0 = svread_hor_za8_s8_vg4 (0, w12))
+
+/*
+** read_za8_u8_z18_0_w15:
+** mova {[^\n]+}, za0h\.b\[w15, 0:3\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z18_0_w15, svuint8x4_t,
+ z18 = svread_hor_za8_u8_vg4 (0, w15),
+ z18 = svread_hor_za8_u8_vg4 (0, w15))
+
+/*
+** read_za8_s8_z23_0_w12p12:
+** mova {[^\n]+}, za0h\.b\[w12, 12:15\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z23_0_w12p12, svint8x4_t,
+ z23 = svread_hor_za8_s8_vg4 (0, w12 + 12),
+ z23 = svread_hor_za8_s8_vg4 (0, w12 + 12))
+
+/*
+** read_za8_u8_z4_0_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova {z4\.b - z7\.b}, za0h\.b\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z4_0_w12p1, svuint8x4_t,
+ z4 = svread_hor_za8_u8_vg4 (0, w12 + 1),
+ z4 = svread_hor_za8_u8_vg4 (0, w12 + 1))
+
+/*
+** read_za8_s8_z28_0_w12p2:
+** add (w[0-9]+), w12, #?2
+** mova {z28\.b - z31\.b}, za0h\.b\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z28_0_w12p2, svint8x4_t,
+ z28 = svread_hor_za8_s8_vg4 (0, w12 + 2),
+ z28 = svread_hor_za8_s8_vg4 (0, w12 + 2))
+
+/*
+** read_za8_u8_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova {z0\.b - z3\.b}, za0h\.b\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z0_0_w15p3, svuint8x4_t,
+ z0 = svread_hor_za8_u8_vg4 (0, w15 + 3),
+ z0 = svread_hor_za8_u8_vg4 (0, w15 + 3))
+
+/*
+** read_za8_u8_z0_0_w12p4:
+** mova {z0\.b - z3\.b}, za0h\.b\[w12, 4:7\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z0_0_w12p4, svuint8x4_t,
+ z0 = svread_hor_za8_u8_vg4 (0, w12 + 4),
+ z0 = svread_hor_za8_u8_vg4 (0, w12 + 4))
+
+/*
+** read_za8_u8_z4_0_w15p12:
+** mova {z4\.b - z7\.b}, za0h\.b\[w15, 12:15\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z4_0_w15p12, svuint8x4_t,
+ z4 = svread_hor_za8_u8_vg4 (0, w15 + 12),
+ z4 = svread_hor_za8_u8_vg4 (0, w15 + 12))
+
+/*
+** read_za8_u8_z28_0_w12p14:
+** add (w[0-9]+), w12, #?14
+** mova {z28\.b - z31\.b}, za0h\.b\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z28_0_w12p14, svuint8x4_t,
+ z28 = svread_hor_za8_u8_vg4 (0, w12 + 14),
+ z28 = svread_hor_za8_u8_vg4 (0, w12 + 14))
+
+/*
+** read_za8_s8_z0_0_w15p16:
+** add (w[0-9]+), w15, #?16
+** mova {z0\.b - z3\.b}, za0h\.b\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z0_0_w15p16, svint8x4_t,
+ z0 = svread_hor_za8_s8_vg4 (0, w15 + 16),
+ z0 = svread_hor_za8_s8_vg4 (0, w15 + 16))
+
+/*
+** read_za8_u8_z4_0_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova {z4\.b - z7\.b}, za0h\.b\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z4_0_w12m1, svuint8x4_t,
+ z4 = svread_hor_za8_u8_vg4 (0, w12 - 1),
+ z4 = svread_hor_za8_u8_vg4 (0, w12 - 1))
+
+/*
+** read_za8_u8_z28_0_w16:
+** mov (w1[2-5]), w16
+** mova {z28\.b - z31\.b}, za0h\.b\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z28_0_w16, svuint8x4_t,
+ z28 = svread_hor_za8_u8_vg4 (0, w16),
+ z28 = svread_hor_za8_u8_vg4 (0, w16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za16_vg2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za16_vg2.c
new file mode 100644
index 0000000..a71d4d0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za16_vg2.c
@@ -0,0 +1,140 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_za16_s16_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova {z0\.h - z1\.h}, za0v\.h\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_s16_z0_0_0, svint16x2_t,
+ z0 = svread_ver_za16_s16_vg2 (0, 0),
+ z0 = svread_ver_za16_s16_vg2 (0, 0))
+
+/*
+** read_za16_u16_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova {z4\.h - z5\.h}, za1v\.h\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_u16_z4_1_1, svuint16x2_t,
+ z4 = svread_ver_za16_u16_vg2 (1, 1),
+ z4 = svread_ver_za16_u16_vg2 (1, 1))
+
+/*
+** read_za16_f16_z28_0_w11:
+** mov (w1[2-5]), w11
+** mova {z28\.h - z29\.h}, za0v\.h\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_f16_z28_0_w11, svfloat16x2_t,
+ z28 = svread_ver_za16_f16_vg2 (0, w11),
+ z28 = svread_ver_za16_f16_vg2 (0, w11))
+
+/*
+** read_za16_bf16_z0_1_w12:
+** mova {z0\.h - z1\.h}, za1v\.h\[w12, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_bf16_z0_1_w12, svbfloat16x2_t,
+ z0 = svread_ver_za16_bf16_vg2 (1, w12),
+ z0 = svread_ver_za16_bf16_vg2 (1, w12))
+
+/*
+** read_za16_u16_z18_0_w15:
+** mova {z18\.h - z19\.h}, za0v\.h\[w15, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_u16_z18_0_w15, svuint16x2_t,
+ z18 = svread_ver_za16_u16_vg2 (0, w15),
+ z18 = svread_ver_za16_u16_vg2 (0, w15))
+
+/*
+** read_za16_s16_z23_1_w12p6:
+** mova {[^\n]+}, za1v\.h\[w12, 6:7\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_s16_z23_1_w12p6, svint16x2_t,
+ z23 = svread_ver_za16_s16_vg2 (1, w12 + 6),
+ z23 = svread_ver_za16_s16_vg2 (1, w12 + 6))
+
+/*
+** read_za16_f16_z4_0_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova {z4\.h - z5\.h}, za0v\.h\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_f16_z4_0_w12p1, svfloat16x2_t,
+ z4 = svread_ver_za16_f16_vg2 (0, w12 + 1),
+ z4 = svread_ver_za16_f16_vg2 (0, w12 + 1))
+
+/*
+** read_za16_s16_z28_1_w12p2:
+** mova {z28\.h - z29\.h}, za1v\.h\[w12, 2:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_s16_z28_1_w12p2, svint16x2_t,
+ z28 = svread_ver_za16_s16_vg2 (1, w12 + 2),
+ z28 = svread_ver_za16_s16_vg2 (1, w12 + 2))
+
+/*
+** read_za16_u16_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova {z0\.h - z1\.h}, za0v\.h\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_u16_z0_0_w15p3, svuint16x2_t,
+ z0 = svread_ver_za16_u16_vg2 (0, w15 + 3),
+ z0 = svread_ver_za16_u16_vg2 (0, w15 + 3))
+
+/*
+** read_za16_bf16_z4_1_w15p4:
+** mova {z4\.h - z5\.h}, za1v\.h\[w15, 4:5\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_bf16_z4_1_w15p4, svbfloat16x2_t,
+ z4 = svread_ver_za16_bf16_vg2 (1, w15 + 4),
+ z4 = svread_ver_za16_bf16_vg2 (1, w15 + 4))
+
+/*
+** read_za16_u16_z28_0_w12p7:
+** add (w[0-9]+), w12, #?7
+** mova {z28\.h - z29\.h}, za0v\.h\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_u16_z28_0_w12p7, svuint16x2_t,
+ z28 = svread_ver_za16_u16_vg2 (0, w12 + 7),
+ z28 = svread_ver_za16_u16_vg2 (0, w12 + 7))
+
+/*
+** read_za16_s16_z0_1_w15p8:
+** add (w[0-9]+), w15, #?8
+** mova {z0\.h - z1\.h}, za1v\.h\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_s16_z0_1_w15p8, svint16x2_t,
+ z0 = svread_ver_za16_s16_vg2 (1, w15 + 8),
+ z0 = svread_ver_za16_s16_vg2 (1, w15 + 8))
+
+/*
+** read_za16_u16_z4_0_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova {z4\.h - z5\.h}, za0v\.h\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_u16_z4_0_w12m1, svuint16x2_t,
+ z4 = svread_ver_za16_u16_vg2 (0, w12 - 1),
+ z4 = svread_ver_za16_u16_vg2 (0, w12 - 1))
+
+/*
+** read_za16_u16_z18_1_w16:
+** mov (w1[2-5]), w16
+** mova {z18\.h - z19\.h}, za1v\.h\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_u16_z18_1_w16, svuint16x2_t,
+ z18 = svread_ver_za16_u16_vg2 (1, w16),
+ z18 = svread_ver_za16_u16_vg2 (1, w16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za16_vg4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za16_vg4.c
new file mode 100644
index 0000000..792351d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za16_vg4.c
@@ -0,0 +1,138 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_za16_s16_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova {z0\.h - z3\.h}, za0v\.h\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_s16_z0_0_0, svint16x4_t,
+ z0 = svread_ver_za16_s16_vg4 (0, 0),
+ z0 = svread_ver_za16_s16_vg4 (0, 0))
+
+/*
+** read_za16_u16_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova {z4\.h - z7\.h}, za1v\.h\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_u16_z4_1_1, svuint16x4_t,
+ z4 = svread_ver_za16_u16_vg4 (1, 1),
+ z4 = svread_ver_za16_u16_vg4 (1, 1))
+
+/*
+** read_za16_f16_z28_0_w11:
+** mov (w1[2-5]), w11
+** mova {z28\.h - z31\.h}, za0v\.h\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_f16_z28_0_w11, svfloat16x4_t,
+ z28 = svread_ver_za16_f16_vg4 (0, w11),
+ z28 = svread_ver_za16_f16_vg4 (0, w11))
+
+/*
+** read_za16_s16_z0_1_w12:
+** mova {z0\.h - z3\.h}, za1v\.h\[w12, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_s16_z0_1_w12, svint16x4_t,
+ z0 = svread_ver_za16_s16_vg4 (1, w12),
+ z0 = svread_ver_za16_s16_vg4 (1, w12))
+
+/*
+** read_za16_u16_z18_0_w15:
+** mova {[^\n]+}, za0v\.h\[w15, 0:3\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_u16_z18_0_w15, svuint16x4_t,
+ z18 = svread_ver_za16_u16_vg4 (0, w15),
+ z18 = svread_ver_za16_u16_vg4 (0, w15))
+
+/*
+** read_za16_bf16_z23_1_w12p4:
+** mova {[^\n]+}, za1v\.h\[w12, 4:7\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_bf16_z23_1_w12p4, svbfloat16x4_t,
+ z23 = svread_ver_za16_bf16_vg4 (1, w12 + 4),
+ z23 = svread_ver_za16_bf16_vg4 (1, w12 + 4))
+
+/*
+** read_za16_u16_z4_0_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova {z4\.h - z7\.h}, za0v\.h\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_u16_z4_0_w12p1, svuint16x4_t,
+ z4 = svread_ver_za16_u16_vg4 (0, w12 + 1),
+ z4 = svread_ver_za16_u16_vg4 (0, w12 + 1))
+
+/*
+** read_za16_s16_z28_1_w12p2:
+** add (w[0-9]+), w12, #?2
+** mova {z28\.h - z31\.h}, za1v\.h\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_s16_z28_1_w12p2, svint16x4_t,
+ z28 = svread_ver_za16_s16_vg4 (1, w12 + 2),
+ z28 = svread_ver_za16_s16_vg4 (1, w12 + 2))
+
+/*
+** read_za16_f16_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova {z0\.h - z3\.h}, za0v\.h\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_f16_z0_0_w15p3, svfloat16x4_t,
+ z0 = svread_ver_za16_f16_vg4 (0, w15 + 3),
+ z0 = svread_ver_za16_f16_vg4 (0, w15 + 3))
+
+/*
+** read_za16_u16_z28_1_w12p6:
+** add (w[0-9]+), w12, #?6
+** mova {z28\.h - z31\.h}, za1v\.h\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_u16_z28_1_w12p6, svuint16x4_t,
+ z28 = svread_ver_za16_u16_vg4 (1, w12 + 6),
+ z28 = svread_ver_za16_u16_vg4 (1, w12 + 6))
+
+/*
+** read_za16_s16_z0_0_w15p8:
+** add (w[0-9]+), w15, #?8
+** mova {z0\.h - z3\.h}, za0v\.h\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_s16_z0_0_w15p8, svint16x4_t,
+ z0 = svread_ver_za16_s16_vg4 (0, w15 + 8),
+ z0 = svread_ver_za16_s16_vg4 (0, w15 + 8))
+
+/*
+** read_za16_bf16_z4_1_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova {z4\.h - z7\.h}, za1v\.h\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_bf16_z4_1_w12m1, svbfloat16x4_t,
+ z4 = svread_ver_za16_bf16_vg4 (1, w12 - 1),
+ z4 = svread_ver_za16_bf16_vg4 (1, w12 - 1))
+
+/*
+** read_za16_u16_z28_0_w16:
+** mov (w1[2-5]), w16
+** mova {z28\.h - z31\.h}, za0v\.h\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za16_u16_z28_0_w16, svuint16x4_t,
+ z28 = svread_ver_za16_u16_vg4 (0, w16),
+ z28 = svread_ver_za16_u16_vg4 (0, w16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za32_vg2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za32_vg2.c
new file mode 100644
index 0000000..85fc7e2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za32_vg2.c
@@ -0,0 +1,112 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_za32_s32_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova {z0\.s - z1\.s}, za0v\.s\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_s32_z0_0_0, svint32x2_t,
+ z0 = svread_ver_za32_s32_vg2 (0, 0),
+ z0 = svread_ver_za32_s32_vg2 (0, 0))
+
+/*
+** read_za32_u32_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova {z4\.s - z5\.s}, za1v\.s\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_u32_z4_1_1, svuint32x2_t,
+ z4 = svread_ver_za32_u32_vg2 (1, 1),
+ z4 = svread_ver_za32_u32_vg2 (1, 1))
+
+/*
+** read_za32_f32_z28_2_w11:
+** mov (w1[2-5]), w11
+** mova {z28\.s - z29\.s}, za2v\.s\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_f32_z28_2_w11, svfloat32x2_t,
+ z28 = svread_ver_za32_f32_vg2 (2, w11),
+ z28 = svread_ver_za32_f32_vg2 (2, w11))
+
+/*
+** read_za32_f32_z0_3_w12:
+** mova {z0\.s - z1\.s}, za3v\.s\[w12, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_f32_z0_3_w12, svfloat32x2_t,
+ z0 = svread_ver_za32_f32_vg2 (3, w12),
+ z0 = svread_ver_za32_f32_vg2 (3, w12))
+
+/*
+** read_za32_u32_z18_0_w15:
+** mova {z18\.s - z19\.s}, za0v\.s\[w15, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_u32_z18_0_w15, svuint32x2_t,
+ z18 = svread_ver_za32_u32_vg2 (0, w15),
+ z18 = svread_ver_za32_u32_vg2 (0, w15))
+
+/*
+** read_za32_s32_z23_1_w12p2:
+** mova {[^\n]+}, za1v\.s\[w12, 2:3\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_s32_z23_1_w12p2, svint32x2_t,
+ z23 = svread_ver_za32_s32_vg2 (1, w12 + 2),
+ z23 = svread_ver_za32_s32_vg2 (1, w12 + 2))
+
+/*
+** read_za32_f32_z4_2_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova {z4\.s - z5\.s}, za2v\.s\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_f32_z4_2_w12p1, svfloat32x2_t,
+ z4 = svread_ver_za32_f32_vg2 (2, w12 + 1),
+ z4 = svread_ver_za32_f32_vg2 (2, w12 + 1))
+
+/*
+** read_za32_u32_z0_3_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova {z0\.s - z1\.s}, za3v\.s\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_u32_z0_3_w15p3, svuint32x2_t,
+ z0 = svread_ver_za32_u32_vg2 (3, w15 + 3),
+ z0 = svread_ver_za32_u32_vg2 (3, w15 + 3))
+
+/*
+** read_za32_s32_z0_1_w15p4:
+** add (w[0-9]+), w15, #?4
+** mova {z0\.s - z1\.s}, za1v\.s\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_s32_z0_1_w15p4, svint32x2_t,
+ z0 = svread_ver_za32_s32_vg2 (1, w15 + 4),
+ z0 = svread_ver_za32_s32_vg2 (1, w15 + 4))
+
+/*
+** read_za32_u32_z4_3_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova {z4\.s - z5\.s}, za3v\.s\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_u32_z4_3_w12m1, svuint32x2_t,
+ z4 = svread_ver_za32_u32_vg2 (3, w12 - 1),
+ z4 = svread_ver_za32_u32_vg2 (3, w12 - 1))
+
+/*
+** read_za32_u32_z18_1_w16:
+** mov (w1[2-5]), w16
+** mova {z18\.s - z19\.s}, za1v\.s\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_u32_z18_1_w16, svuint32x2_t,
+ z18 = svread_ver_za32_u32_vg2 (1, w16),
+ z18 = svread_ver_za32_u32_vg2 (1, w16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za32_vg4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za32_vg4.c
new file mode 100644
index 0000000..a7924ae
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za32_vg4.c
@@ -0,0 +1,129 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_za32_s32_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova {z0\.s - z3\.s}, za0v\.s\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_s32_z0_0_0, svint32x4_t,
+ z0 = svread_ver_za32_s32_vg4 (0, 0),
+ z0 = svread_ver_za32_s32_vg4 (0, 0))
+
+/*
+** read_za32_u32_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova {z4\.s - z7\.s}, za1v\.s\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_u32_z4_1_1, svuint32x4_t,
+ z4 = svread_ver_za32_u32_vg4 (1, 1),
+ z4 = svread_ver_za32_u32_vg4 (1, 1))
+
+/*
+** read_za32_f32_z28_2_w11:
+** mov (w1[2-5]), w11
+** mova {z28\.s - z31\.s}, za2v\.s\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_f32_z28_2_w11, svfloat32x4_t,
+ z28 = svread_ver_za32_f32_vg4 (2, w11),
+ z28 = svread_ver_za32_f32_vg4 (2, w11))
+
+/*
+** read_za32_s32_z0_3_w12:
+** mova {z0\.s - z3\.s}, za3v\.s\[w12, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_s32_z0_3_w12, svint32x4_t,
+ z0 = svread_ver_za32_s32_vg4 (3, w12),
+ z0 = svread_ver_za32_s32_vg4 (3, w12))
+
+/*
+** read_za32_u32_z18_0_w15:
+** mova {[^\n]+}, za0v\.s\[w15, 0:3\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_u32_z18_0_w15, svuint32x4_t,
+ z18 = svread_ver_za32_u32_vg4 (0, w15),
+ z18 = svread_ver_za32_u32_vg4 (0, w15))
+
+/*
+** read_za32_f32_z23_1_w12p4:
+** add (w[0-9]+), w12, #?4
+** mova {[^\n]+}, za1v\.s\[\1, 0:3\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_f32_z23_1_w12p4, svfloat32x4_t,
+ z23 = svread_ver_za32_f32_vg4 (1, w12 + 4),
+ z23 = svread_ver_za32_f32_vg4 (1, w12 + 4))
+
+/*
+** read_za32_u32_z4_2_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova {z4\.s - z7\.s}, za2v\.s\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_u32_z4_2_w12p1, svuint32x4_t,
+ z4 = svread_ver_za32_u32_vg4 (2, w12 + 1),
+ z4 = svread_ver_za32_u32_vg4 (2, w12 + 1))
+
+/*
+** read_za32_s32_z28_3_w12p2:
+** add (w[0-9]+), w12, #?2
+** mova {z28\.s - z31\.s}, za3v\.s\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_s32_z28_3_w12p2, svint32x4_t,
+ z28 = svread_ver_za32_s32_vg4 (3, w12 + 2),
+ z28 = svread_ver_za32_s32_vg4 (3, w12 + 2))
+
+/*
+** read_za32_f32_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova {z0\.s - z3\.s}, za0v\.s\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_f32_z0_0_w15p3, svfloat32x4_t,
+ z0 = svread_ver_za32_f32_vg4 (0, w15 + 3),
+ z0 = svread_ver_za32_f32_vg4 (0, w15 + 3))
+
+/*
+** read_za32_u32_z28_1_w12p4:
+** add (w[0-9]+), w12, #?4
+** mova {z28\.s - z31\.s}, za1v\.s\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_u32_z28_1_w12p4, svuint32x4_t,
+ z28 = svread_ver_za32_u32_vg4 (1, w12 + 4),
+ z28 = svread_ver_za32_u32_vg4 (1, w12 + 4))
+
+/*
+** read_za32_f32_z4_2_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova {z4\.s - z7\.s}, za2v\.s\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_f32_z4_2_w12m1, svfloat32x4_t,
+ z4 = svread_ver_za32_f32_vg4 (2, w12 - 1),
+ z4 = svread_ver_za32_f32_vg4 (2, w12 - 1))
+
+/*
+** read_za32_u32_z28_3_w16:
+** mov (w1[2-5]), w16
+** mova {z28\.s - z31\.s}, za3v\.s\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za32_u32_z28_3_w16, svuint32x4_t,
+ z28 = svread_ver_za32_u32_vg4 (3, w16),
+ z28 = svread_ver_za32_u32_vg4 (3, w16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za64_vg2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za64_vg2.c
new file mode 100644
index 0000000..6636eb1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za64_vg2.c
@@ -0,0 +1,113 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_za64_s64_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova {z0\.d - z1\.d}, za0v\.d\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_s64_z0_0_0, svint64x2_t,
+ z0 = svread_ver_za64_s64_vg2 (0, 0),
+ z0 = svread_ver_za64_s64_vg2 (0, 0))
+
+/*
+** read_za64_u64_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova {z4\.d - z5\.d}, za1v\.d\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_u64_z4_1_1, svuint64x2_t,
+ z4 = svread_ver_za64_u64_vg2 (1, 1),
+ z4 = svread_ver_za64_u64_vg2 (1, 1))
+
+/*
+** read_za64_f64_z28_2_w11:
+** mov (w1[2-5]), w11
+** mova {z28\.d - z29\.d}, za2v\.d\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_f64_z28_2_w11, svfloat64x2_t,
+ z28 = svread_ver_za64_f64_vg2 (2, w11),
+ z28 = svread_ver_za64_f64_vg2 (2, w11))
+
+/*
+** read_za64_f64_z0_3_w12:
+** mova {z0\.d - z1\.d}, za3v\.d\[w12, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_f64_z0_3_w12, svfloat64x2_t,
+ z0 = svread_ver_za64_f64_vg2 (3, w12),
+ z0 = svread_ver_za64_f64_vg2 (3, w12))
+
+/*
+** read_za64_u64_z18_4_w15:
+** mova {z18\.d - z19\.d}, za4v\.d\[w15, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_u64_z18_4_w15, svuint64x2_t,
+ z18 = svread_ver_za64_u64_vg2 (4, w15),
+ z18 = svread_ver_za64_u64_vg2 (4, w15))
+
+/*
+** read_za64_s64_z23_5_w12p2:
+** add (w[0-9]+), w12, #?2
+** mova {[^\n]+}, za5v\.d\[\1, 0:1\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_s64_z23_5_w12p2, svint64x2_t,
+ z23 = svread_ver_za64_s64_vg2 (5, w12 + 2),
+ z23 = svread_ver_za64_s64_vg2 (5, w12 + 2))
+
+/*
+** read_za64_f64_z4_6_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova {z4\.d - z5\.d}, za6v\.d\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_f64_z4_6_w12p1, svfloat64x2_t,
+ z4 = svread_ver_za64_f64_vg2 (6, w12 + 1),
+ z4 = svread_ver_za64_f64_vg2 (6, w12 + 1))
+
+/*
+** read_za64_u64_z0_7_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova {z0\.d - z1\.d}, za7v\.d\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_u64_z0_7_w15p3, svuint64x2_t,
+ z0 = svread_ver_za64_u64_vg2 (7, w15 + 3),
+ z0 = svread_ver_za64_u64_vg2 (7, w15 + 3))
+
+/*
+** read_za64_s64_z0_1_w15p4:
+** add (w[0-9]+), w15, #?4
+** mova {z0\.d - z1\.d}, za1v\.d\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_s64_z0_1_w15p4, svint64x2_t,
+ z0 = svread_ver_za64_s64_vg2 (1, w15 + 4),
+ z0 = svread_ver_za64_s64_vg2 (1, w15 + 4))
+
+/*
+** read_za64_u64_z4_3_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova {z4\.d - z5\.d}, za3v\.d\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_u64_z4_3_w12m1, svuint64x2_t,
+ z4 = svread_ver_za64_u64_vg2 (3, w12 - 1),
+ z4 = svread_ver_za64_u64_vg2 (3, w12 - 1))
+
+/*
+** read_za64_u64_z18_1_w16:
+** mov (w1[2-5]), w16
+** mova {z18\.d - z19\.d}, za1v\.d\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_u64_z18_1_w16, svuint64x2_t,
+ z18 = svread_ver_za64_u64_vg2 (1, w16),
+ z18 = svread_ver_za64_u64_vg2 (1, w16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za64_vg4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za64_vg4.c
new file mode 100644
index 0000000..2531f47
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za64_vg4.c
@@ -0,0 +1,129 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_za64_s64_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova {z0\.d - z3\.d}, za0v\.d\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_s64_z0_0_0, svint64x4_t,
+ z0 = svread_ver_za64_s64_vg4 (0, 0),
+ z0 = svread_ver_za64_s64_vg4 (0, 0))
+
+/*
+** read_za64_u64_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova {z4\.d - z7\.d}, za1v\.d\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_u64_z4_1_1, svuint64x4_t,
+ z4 = svread_ver_za64_u64_vg4 (1, 1),
+ z4 = svread_ver_za64_u64_vg4 (1, 1))
+
+/*
+** read_za64_f64_z28_2_w11:
+** mov (w1[2-5]), w11
+** mova {z28\.d - z31\.d}, za2v\.d\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_f64_z28_2_w11, svfloat64x4_t,
+ z28 = svread_ver_za64_f64_vg4 (2, w11),
+ z28 = svread_ver_za64_f64_vg4 (2, w11))
+
+/*
+** read_za64_s64_z0_3_w12:
+** mova {z0\.d - z3\.d}, za3v\.d\[w12, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_s64_z0_3_w12, svint64x4_t,
+ z0 = svread_ver_za64_s64_vg4 (3, w12),
+ z0 = svread_ver_za64_s64_vg4 (3, w12))
+
+/*
+** read_za64_u64_z18_4_w15:
+** mova {[^\n]+}, za4v\.d\[w15, 0:3\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_u64_z18_4_w15, svuint64x4_t,
+ z18 = svread_ver_za64_u64_vg4 (4, w15),
+ z18 = svread_ver_za64_u64_vg4 (4, w15))
+
+/*
+** read_za64_f64_z23_5_w12p4:
+** add (w[0-9]+), w12, #?4
+** mova {[^\n]+}, za5v\.d\[\1, 0:3\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_f64_z23_5_w12p4, svfloat64x4_t,
+ z23 = svread_ver_za64_f64_vg4 (5, w12 + 4),
+ z23 = svread_ver_za64_f64_vg4 (5, w12 + 4))
+
+/*
+** read_za64_u64_z4_6_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova {z4\.d - z7\.d}, za6v\.d\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_u64_z4_6_w12p1, svuint64x4_t,
+ z4 = svread_ver_za64_u64_vg4 (6, w12 + 1),
+ z4 = svread_ver_za64_u64_vg4 (6, w12 + 1))
+
+/*
+** read_za64_s64_z28_7_w12p2:
+** add (w[0-9]+), w12, #?2
+** mova {z28\.d - z31\.d}, za7v\.d\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_s64_z28_7_w12p2, svint64x4_t,
+ z28 = svread_ver_za64_s64_vg4 (7, w12 + 2),
+ z28 = svread_ver_za64_s64_vg4 (7, w12 + 2))
+
+/*
+** read_za64_f64_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova {z0\.d - z3\.d}, za0v\.d\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_f64_z0_0_w15p3, svfloat64x4_t,
+ z0 = svread_ver_za64_f64_vg4 (0, w15 + 3),
+ z0 = svread_ver_za64_f64_vg4 (0, w15 + 3))
+
+/*
+** read_za64_u64_z28_1_w12p4:
+** add (w[0-9]+), w12, #?4
+** mova {z28\.d - z31\.d}, za1v\.d\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_u64_z28_1_w12p4, svuint64x4_t,
+ z28 = svread_ver_za64_u64_vg4 (1, w12 + 4),
+ z28 = svread_ver_za64_u64_vg4 (1, w12 + 4))
+
+/*
+** read_za64_f64_z4_2_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova {z4\.d - z7\.d}, za2v\.d\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_f64_z4_2_w12m1, svfloat64x4_t,
+ z4 = svread_ver_za64_f64_vg4 (2, w12 - 1),
+ z4 = svread_ver_za64_f64_vg4 (2, w12 - 1))
+
+/*
+** read_za64_u64_z28_3_w16:
+** mov (w1[2-5]), w16
+** mova {z28\.d - z31\.d}, za3v\.d\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za64_u64_z28_3_w16, svuint64x4_t,
+ z28 = svread_ver_za64_u64_vg4 (3, w16),
+ z28 = svread_ver_za64_u64_vg4 (3, w16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za8_vg2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za8_vg2.c
new file mode 100644
index 0000000..5597061
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za8_vg2.c
@@ -0,0 +1,140 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_za8_s8_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova {z0\.b - z1\.b}, za0v\.b\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z0_0_0, svint8x2_t,
+ z0 = svread_ver_za8_s8_vg2 (0, 0),
+ z0 = svread_ver_za8_s8_vg2 (0, 0))
+
+/*
+** read_za8_u8_z4_0_1:
+** mov (w1[2-5]), #?1
+** mova {z4\.b - z5\.b}, za0v\.b\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z4_0_1, svuint8x2_t,
+ z4 = svread_ver_za8_u8_vg2 (0, 1),
+ z4 = svread_ver_za8_u8_vg2 (0, 1))
+
+/*
+** read_za8_s8_z28_0_w11:
+** mov (w1[2-5]), w11
+** mova {z28\.b - z29\.b}, za0v\.b\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z28_0_w11, svint8x2_t,
+ z28 = svread_ver_za8_s8_vg2 (0, w11),
+ z28 = svread_ver_za8_s8_vg2 (0, w11))
+
+/*
+** read_za8_s8_z0_0_w12:
+** mova {z0\.b - z1\.b}, za0v\.b\[w12, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z0_0_w12, svint8x2_t,
+ z0 = svread_ver_za8_s8_vg2 (0, w12),
+ z0 = svread_ver_za8_s8_vg2 (0, w12))
+
+/*
+** read_za8_u8_z18_0_w15:
+** mova {z18\.b - z19\.b}, za0v\.b\[w15, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z18_0_w15, svuint8x2_t,
+ z18 = svread_ver_za8_u8_vg2 (0, w15),
+ z18 = svread_ver_za8_u8_vg2 (0, w15))
+
+/*
+** read_za8_s8_z23_0_w12p14:
+** mova {[^\n]+}, za0v\.b\[w12, 14:15\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z23_0_w12p14, svint8x2_t,
+ z23 = svread_ver_za8_s8_vg2 (0, w12 + 14),
+ z23 = svread_ver_za8_s8_vg2 (0, w12 + 14))
+
+/*
+** read_za8_u8_z4_0_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova {z4\.b - z5\.b}, za0v\.b\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z4_0_w12p1, svuint8x2_t,
+ z4 = svread_ver_za8_u8_vg2 (0, w12 + 1),
+ z4 = svread_ver_za8_u8_vg2 (0, w12 + 1))
+
+/*
+** read_za8_s8_z28_0_w12p2:
+** mova {z28\.b - z29\.b}, za0v\.b\[w12, 2:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z28_0_w12p2, svint8x2_t,
+ z28 = svread_ver_za8_s8_vg2 (0, w12 + 2),
+ z28 = svread_ver_za8_s8_vg2 (0, w12 + 2))
+
+/*
+** read_za8_u8_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova {z0\.b - z1\.b}, za0v\.b\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z0_0_w15p3, svuint8x2_t,
+ z0 = svread_ver_za8_u8_vg2 (0, w15 + 3),
+ z0 = svread_ver_za8_u8_vg2 (0, w15 + 3))
+
+/*
+** read_za8_u8_z4_0_w15p12:
+** mova {z4\.b - z5\.b}, za0v\.b\[w15, 12:13\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z4_0_w15p12, svuint8x2_t,
+ z4 = svread_ver_za8_u8_vg2 (0, w15 + 12),
+ z4 = svread_ver_za8_u8_vg2 (0, w15 + 12))
+
+/*
+** read_za8_u8_z28_0_w12p15:
+** add (w[0-9]+), w12, #?15
+** mova {z28\.b - z29\.b}, za0v\.b\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z28_0_w12p15, svuint8x2_t,
+ z28 = svread_ver_za8_u8_vg2 (0, w12 + 15),
+ z28 = svread_ver_za8_u8_vg2 (0, w12 + 15))
+
+/*
+** read_za8_s8_z0_0_w15p16:
+** add (w[0-9]+), w15, #?16
+** mova {z0\.b - z1\.b}, za0v\.b\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z0_0_w15p16, svint8x2_t,
+ z0 = svread_ver_za8_s8_vg2 (0, w15 + 16),
+ z0 = svread_ver_za8_s8_vg2 (0, w15 + 16))
+
+/*
+** read_za8_u8_z4_0_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova {z4\.b - z5\.b}, za0v\.b\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z4_0_w12m1, svuint8x2_t,
+ z4 = svread_ver_za8_u8_vg2 (0, w12 - 1),
+ z4 = svread_ver_za8_u8_vg2 (0, w12 - 1))
+
+/*
+** read_za8_u8_z18_0_w16:
+** mov (w1[2-5]), w16
+** mova {z18\.b - z19\.b}, za0v\.b\[\1, 0:1\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z18_0_w16, svuint8x2_t,
+ z18 = svread_ver_za8_u8_vg2 (0, w16),
+ z18 = svread_ver_za8_u8_vg2 (0, w16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za8_vg4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za8_vg4.c
new file mode 100644
index 0000000..6fd8a97
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_ver_za8_vg4.c
@@ -0,0 +1,156 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_za8_s8_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova {z0\.b - z3\.b}, za0v\.b\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z0_0_0, svint8x4_t,
+ z0 = svread_ver_za8_s8_vg4 (0, 0),
+ z0 = svread_ver_za8_s8_vg4 (0, 0))
+
+/*
+** read_za8_u8_z4_0_1:
+** mov (w1[2-5]), #?1
+** mova {z4\.b - z7\.b}, za0v\.b\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z4_0_1, svuint8x4_t,
+ z4 = svread_ver_za8_u8_vg4 (0, 1),
+ z4 = svread_ver_za8_u8_vg4 (0, 1))
+
+/*
+** read_za8_s8_z28_0_w11:
+** mov (w1[2-5]), w11
+** mova {z28\.b - z31\.b}, za0v\.b\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z28_0_w11, svint8x4_t,
+ z28 = svread_ver_za8_s8_vg4 (0, w11),
+ z28 = svread_ver_za8_s8_vg4 (0, w11))
+
+/*
+** read_za8_s8_z0_0_w12:
+** mova {z0\.b - z3\.b}, za0v\.b\[w12, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z0_0_w12, svint8x4_t,
+ z0 = svread_ver_za8_s8_vg4 (0, w12),
+ z0 = svread_ver_za8_s8_vg4 (0, w12))
+
+/*
+** read_za8_u8_z18_0_w15:
+** mova {[^\n]+}, za0v\.b\[w15, 0:3\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z18_0_w15, svuint8x4_t,
+ z18 = svread_ver_za8_u8_vg4 (0, w15),
+ z18 = svread_ver_za8_u8_vg4 (0, w15))
+
+/*
+** read_za8_s8_z23_0_w12p12:
+** mova {[^\n]+}, za0v\.b\[w12, 12:15\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z23_0_w12p12, svint8x4_t,
+ z23 = svread_ver_za8_s8_vg4 (0, w12 + 12),
+ z23 = svread_ver_za8_s8_vg4 (0, w12 + 12))
+
+/*
+** read_za8_u8_z4_0_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova {z4\.b - z7\.b}, za0v\.b\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z4_0_w12p1, svuint8x4_t,
+ z4 = svread_ver_za8_u8_vg4 (0, w12 + 1),
+ z4 = svread_ver_za8_u8_vg4 (0, w12 + 1))
+
+/*
+** read_za8_s8_z28_0_w12p2:
+** add (w[0-9]+), w12, #?2
+** mova {z28\.b - z31\.b}, za0v\.b\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z28_0_w12p2, svint8x4_t,
+ z28 = svread_ver_za8_s8_vg4 (0, w12 + 2),
+ z28 = svread_ver_za8_s8_vg4 (0, w12 + 2))
+
+/*
+** read_za8_u8_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova {z0\.b - z3\.b}, za0v\.b\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z0_0_w15p3, svuint8x4_t,
+ z0 = svread_ver_za8_u8_vg4 (0, w15 + 3),
+ z0 = svread_ver_za8_u8_vg4 (0, w15 + 3))
+
+/*
+** read_za8_u8_z0_0_w12p4:
+** mova {z0\.b - z3\.b}, za0v\.b\[w12, 4:7\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z0_0_w12p4, svuint8x4_t,
+ z0 = svread_ver_za8_u8_vg4 (0, w12 + 4),
+ z0 = svread_ver_za8_u8_vg4 (0, w12 + 4))
+
+/*
+** read_za8_u8_z4_0_w15p12:
+** mova {z4\.b - z7\.b}, za0v\.b\[w15, 12:15\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z4_0_w15p12, svuint8x4_t,
+ z4 = svread_ver_za8_u8_vg4 (0, w15 + 12),
+ z4 = svread_ver_za8_u8_vg4 (0, w15 + 12))
+
+/*
+** read_za8_u8_z28_0_w12p14:
+** add (w[0-9]+), w12, #?14
+** mova {z28\.b - z31\.b}, za0v\.b\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z28_0_w12p14, svuint8x4_t,
+ z28 = svread_ver_za8_u8_vg4 (0, w12 + 14),
+ z28 = svread_ver_za8_u8_vg4 (0, w12 + 14))
+
+/*
+** read_za8_s8_z0_0_w15p16:
+** add (w[0-9]+), w15, #?16
+** mova {z0\.b - z3\.b}, za0v\.b\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_s8_z0_0_w15p16, svint8x4_t,
+ z0 = svread_ver_za8_s8_vg4 (0, w15 + 16),
+ z0 = svread_ver_za8_s8_vg4 (0, w15 + 16))
+
+/*
+** read_za8_u8_z4_0_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova {z4\.b - z7\.b}, za0v\.b\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z4_0_w12m1, svuint8x4_t,
+ z4 = svread_ver_za8_u8_vg4 (0, w12 - 1),
+ z4 = svread_ver_za8_u8_vg4 (0, w12 - 1))
+
+/*
+** read_za8_u8_z28_0_w16:
+** mov (w1[2-5]), w16
+** mova {z28\.b - z31\.b}, za0v\.b\[\1, 0:3\]
+** ret
+*/
+TEST_READ_ZA_XN (read_za8_u8_z28_0_w16, svuint8x4_t,
+ z28 = svread_ver_za8_u8_vg4 (0, w16),
+ z28 = svread_ver_za8_u8_vg4 (0, w16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za16_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za16_vg1x2.c
new file mode 100644
index 0000000..f8a9316
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za16_vg1x2.c
@@ -0,0 +1,122 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_0_z0, svfloat16x2_t,
+ z0 = svread_za16_f16_vg1x2 (0),
+ z0 = svread_za16_f16_vg1x2 (0))
+
+/*
+** read_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w0_z0, svint16x2_t,
+ z0 = svread_za16_s16_vg1x2 (w0),
+ z0 = svread_za16_s16_vg1x2 (w0))
+
+/*
+** read_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w7_z0, svuint16x2_t,
+ z0 = svread_za16_u16_vg1x2 (w7),
+ z0 = svread_za16_u16_vg1x2 (w7))
+
+/*
+** read_w8_z0:
+** mova {z0\.d - z1\.d}, za\.d\[w8, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z0, svbfloat16x2_t,
+ z0 = svread_za16_bf16_vg1x2 (w8),
+ z0 = svread_za16_bf16_vg1x2 (w8))
+
+/*
+** read_w11_z0:
+** mova {z0\.d - z1\.d}, za\.d\[w11, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w11_z0, svint16x2_t,
+ z0 = svread_za16_s16_vg1x2 (w11),
+ z0 = svread_za16_s16_vg1x2 (w11))
+
+
+/*
+** read_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w12_z0, svuint16x2_t,
+ z0 = svread_za16_u16_vg1x2 (w12),
+ z0 = svread_za16_u16_vg1x2 (w12))
+
+/*
+** read_w8p7_z0:
+** mova {z0\.d - z1\.d}, za\.d\[w8, 7, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8p7_z0, svfloat16x2_t,
+ z0 = svread_za16_f16_vg1x2 (w8 + 7),
+ z0 = svread_za16_f16_vg1x2 (w8 + 7))
+
+/*
+** read_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8p8_z0, svint16x2_t,
+ z0 = svread_za16_s16_vg1x2 (w8 + 8),
+ z0 = svread_za16_s16_vg1x2 (w8 + 8))
+
+/*
+** read_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8m1_z0, svuint16x2_t,
+ z0 = svread_za16_u16_vg1x2 (w8 - 1),
+ z0 = svread_za16_u16_vg1x2 (w8 - 1))
+
+/*
+** read_w8_z18:
+** mova {z18\.d - z19\.d}, za\.d\[w8, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z18, svfloat16x2_t,
+ z18 = svread_za16_f16_vg1x2 (w8),
+ z18 = svread_za16_f16_vg1x2 (w8))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** read_w8_z23:
+** mova [^\n]+, za\.d\[w8, 0, vgx2\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z23, svint16x2_t,
+ z23 = svread_za16_s16_vg1x2 (w8),
+ z23 = svread_za16_s16_vg1x2 (w8))
+
+/*
+** read_w8_z28:
+** mova {z28\.d - z29\.d}, za\.d\[w8, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z28, svbfloat16x2_t,
+ z28 = svread_za16_bf16_vg1x2 (w8),
+ z28 = svread_za16_bf16_vg1x2 (w8))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za16_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za16_vg1x4.c
new file mode 100644
index 0000000..c983f8b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za16_vg1x4.c
@@ -0,0 +1,137 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_0_z0, svint16x4_t,
+ z0 = svread_za16_s16_vg1x4 (0),
+ z0 = svread_za16_s16_vg1x4 (0))
+
+/*
+** read_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w0_z0, svuint16x4_t,
+ z0 = svread_za16_u16_vg1x4 (w0),
+ z0 = svread_za16_u16_vg1x4 (w0))
+
+/*
+** read_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w7_z0, svfloat16x4_t,
+ z0 = svread_za16_f16_vg1x4 (w7),
+ z0 = svread_za16_f16_vg1x4 (w7))
+
+/*
+** read_w8_z0:
+** mova {z0\.d - z3\.d}, za\.d\[w8, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z0, svint16x4_t,
+ z0 = svread_za16_s16_vg1x4 (w8),
+ z0 = svread_za16_s16_vg1x4 (w8))
+
+/*
+** read_w11_z0:
+** mova {z0\.d - z3\.d}, za\.d\[w11, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w11_z0, svuint16x4_t,
+ z0 = svread_za16_u16_vg1x4 (w11),
+ z0 = svread_za16_u16_vg1x4 (w11))
+
+
+/*
+** read_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w12_z0, svbfloat16x4_t,
+ z0 = svread_za16_bf16_vg1x4 (w12),
+ z0 = svread_za16_bf16_vg1x4 (w12))
+
+/*
+** read_w8p7_z0:
+** mova {z0\.d - z3\.d}, za\.d\[w8, 7, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8p7_z0, svint16x4_t,
+ z0 = svread_za16_s16_vg1x4 (w8 + 7),
+ z0 = svread_za16_s16_vg1x4 (w8 + 7))
+
+/*
+** read_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8p8_z0, svuint16x4_t,
+ z0 = svread_za16_u16_vg1x4 (w8 + 8),
+ z0 = svread_za16_u16_vg1x4 (w8 + 8))
+
+/*
+** read_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8m1_z0, svfloat16x4_t,
+ z0 = svread_za16_f16_vg1x4 (w8 - 1),
+ z0 = svread_za16_f16_vg1x4 (w8 - 1))
+
+/*
+** read_w8_z4:
+** mova {z4\.d - z7\.d}, za\.d\[w8, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z4, svint16x4_t,
+ z4 = svread_za16_s16_vg1x4 (w8),
+ z4 = svread_za16_s16_vg1x4 (w8))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** read_w8_z18:
+** mova [^\n]+, za\.d\[w8, 0, vgx4\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z18, svuint16x4_t,
+ z18 = svread_za16_u16_vg1x4 (w8),
+ z18 = svread_za16_u16_vg1x4 (w8))
+
+/*
+** read_w8_z23:
+** mova [^\n]+, za\.d\[w8, 0, vgx4\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z23, svbfloat16x4_t,
+ z23 = svread_za16_bf16_vg1x4 (w8),
+ z23 = svread_za16_bf16_vg1x4 (w8))
+
+/*
+** read_w8_z28:
+** mova {z28\.d - z31\.d}, za\.d\[w8, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z28, svint16x4_t,
+ z28 = svread_za16_s16_vg1x4 (w8),
+ z28 = svread_za16_s16_vg1x4 (w8))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za32_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za32_vg1x2.c
new file mode 100644
index 0000000..667f482
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za32_vg1x2.c
@@ -0,0 +1,122 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_0_z0, svfloat32x2_t,
+ z0 = svread_za32_f32_vg1x2 (0),
+ z0 = svread_za32_f32_vg1x2 (0))
+
+/*
+** read_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w0_z0, svint32x2_t,
+ z0 = svread_za32_s32_vg1x2 (w0),
+ z0 = svread_za32_s32_vg1x2 (w0))
+
+/*
+** read_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w7_z0, svuint32x2_t,
+ z0 = svread_za32_u32_vg1x2 (w7),
+ z0 = svread_za32_u32_vg1x2 (w7))
+
+/*
+** read_w8_z0:
+** mova {z0\.d - z1\.d}, za\.d\[w8, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z0, svfloat32x2_t,
+ z0 = svread_za32_f32_vg1x2 (w8),
+ z0 = svread_za32_f32_vg1x2 (w8))
+
+/*
+** read_w11_z0:
+** mova {z0\.d - z1\.d}, za\.d\[w11, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w11_z0, svint32x2_t,
+ z0 = svread_za32_s32_vg1x2 (w11),
+ z0 = svread_za32_s32_vg1x2 (w11))
+
+
+/*
+** read_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w12_z0, svuint32x2_t,
+ z0 = svread_za32_u32_vg1x2 (w12),
+ z0 = svread_za32_u32_vg1x2 (w12))
+
+/*
+** read_w8p7_z0:
+** mova {z0\.d - z1\.d}, za\.d\[w8, 7, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8p7_z0, svfloat32x2_t,
+ z0 = svread_za32_f32_vg1x2 (w8 + 7),
+ z0 = svread_za32_f32_vg1x2 (w8 + 7))
+
+/*
+** read_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8p8_z0, svint32x2_t,
+ z0 = svread_za32_s32_vg1x2 (w8 + 8),
+ z0 = svread_za32_s32_vg1x2 (w8 + 8))
+
+/*
+** read_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8m1_z0, svuint32x2_t,
+ z0 = svread_za32_u32_vg1x2 (w8 - 1),
+ z0 = svread_za32_u32_vg1x2 (w8 - 1))
+
+/*
+** read_w8_z18:
+** mova {z18\.d - z19\.d}, za\.d\[w8, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z18, svfloat32x2_t,
+ z18 = svread_za32_f32_vg1x2 (w8),
+ z18 = svread_za32_f32_vg1x2 (w8))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** read_w8_z23:
+** mova [^\n]+, za\.d\[w8, 0, vgx2\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z23, svint32x2_t,
+ z23 = svread_za32_s32_vg1x2 (w8),
+ z23 = svread_za32_s32_vg1x2 (w8))
+
+/*
+** read_w8_z28:
+** mova {z28\.d - z29\.d}, za\.d\[w8, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z28, svuint32x2_t,
+ z28 = svread_za32_u32_vg1x2 (w8),
+ z28 = svread_za32_u32_vg1x2 (w8))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za32_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za32_vg1x4.c
new file mode 100644
index 0000000..c2579e2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za32_vg1x4.c
@@ -0,0 +1,137 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_0_z0, svint32x4_t,
+ z0 = svread_za32_s32_vg1x4 (0),
+ z0 = svread_za32_s32_vg1x4 (0))
+
+/*
+** read_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w0_z0, svuint32x4_t,
+ z0 = svread_za32_u32_vg1x4 (w0),
+ z0 = svread_za32_u32_vg1x4 (w0))
+
+/*
+** read_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w7_z0, svfloat32x4_t,
+ z0 = svread_za32_f32_vg1x4 (w7),
+ z0 = svread_za32_f32_vg1x4 (w7))
+
+/*
+** read_w8_z0:
+** mova {z0\.d - z3\.d}, za\.d\[w8, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z0, svint32x4_t,
+ z0 = svread_za32_s32_vg1x4 (w8),
+ z0 = svread_za32_s32_vg1x4 (w8))
+
+/*
+** read_w11_z0:
+** mova {z0\.d - z3\.d}, za\.d\[w11, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w11_z0, svuint32x4_t,
+ z0 = svread_za32_u32_vg1x4 (w11),
+ z0 = svread_za32_u32_vg1x4 (w11))
+
+
+/*
+** read_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w12_z0, svfloat32x4_t,
+ z0 = svread_za32_f32_vg1x4 (w12),
+ z0 = svread_za32_f32_vg1x4 (w12))
+
+/*
+** read_w8p7_z0:
+** mova {z0\.d - z3\.d}, za\.d\[w8, 7, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8p7_z0, svint32x4_t,
+ z0 = svread_za32_s32_vg1x4 (w8 + 7),
+ z0 = svread_za32_s32_vg1x4 (w8 + 7))
+
+/*
+** read_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8p8_z0, svuint32x4_t,
+ z0 = svread_za32_u32_vg1x4 (w8 + 8),
+ z0 = svread_za32_u32_vg1x4 (w8 + 8))
+
+/*
+** read_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8m1_z0, svfloat32x4_t,
+ z0 = svread_za32_f32_vg1x4 (w8 - 1),
+ z0 = svread_za32_f32_vg1x4 (w8 - 1))
+
+/*
+** read_w8_z4:
+** mova {z4\.d - z7\.d}, za\.d\[w8, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z4, svint32x4_t,
+ z4 = svread_za32_s32_vg1x4 (w8),
+ z4 = svread_za32_s32_vg1x4 (w8))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** read_w8_z18:
+** mova [^\n]+, za\.d\[w8, 0, vgx4\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z18, svuint32x4_t,
+ z18 = svread_za32_u32_vg1x4 (w8),
+ z18 = svread_za32_u32_vg1x4 (w8))
+
+/*
+** read_w8_z23:
+** mova [^\n]+, za\.d\[w8, 0, vgx4\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z23, svfloat32x4_t,
+ z23 = svread_za32_f32_vg1x4 (w8),
+ z23 = svread_za32_f32_vg1x4 (w8))
+
+/*
+** read_w8_z28:
+** mova {z28\.d - z31\.d}, za\.d\[w8, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z28, svint32x4_t,
+ z28 = svread_za32_s32_vg1x4 (w8),
+ z28 = svread_za32_s32_vg1x4 (w8))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za64_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za64_vg1x2.c
new file mode 100644
index 0000000..15ca800
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za64_vg1x2.c
@@ -0,0 +1,122 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_0_z0, svfloat64x2_t,
+ z0 = svread_za64_f64_vg1x2 (0),
+ z0 = svread_za64_f64_vg1x2 (0))
+
+/*
+** read_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w0_z0, svint64x2_t,
+ z0 = svread_za64_s64_vg1x2 (w0),
+ z0 = svread_za64_s64_vg1x2 (w0))
+
+/*
+** read_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w7_z0, svuint64x2_t,
+ z0 = svread_za64_u64_vg1x2 (w7),
+ z0 = svread_za64_u64_vg1x2 (w7))
+
+/*
+** read_w8_z0:
+** mova {z0\.d - z1\.d}, za\.d\[w8, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z0, svfloat64x2_t,
+ z0 = svread_za64_f64_vg1x2 (w8),
+ z0 = svread_za64_f64_vg1x2 (w8))
+
+/*
+** read_w11_z0:
+** mova {z0\.d - z1\.d}, za\.d\[w11, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w11_z0, svint64x2_t,
+ z0 = svread_za64_s64_vg1x2 (w11),
+ z0 = svread_za64_s64_vg1x2 (w11))
+
+
+/*
+** read_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w12_z0, svuint64x2_t,
+ z0 = svread_za64_u64_vg1x2 (w12),
+ z0 = svread_za64_u64_vg1x2 (w12))
+
+/*
+** read_w8p7_z0:
+** mova {z0\.d - z1\.d}, za\.d\[w8, 7, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8p7_z0, svfloat64x2_t,
+ z0 = svread_za64_f64_vg1x2 (w8 + 7),
+ z0 = svread_za64_f64_vg1x2 (w8 + 7))
+
+/*
+** read_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8p8_z0, svint64x2_t,
+ z0 = svread_za64_s64_vg1x2 (w8 + 8),
+ z0 = svread_za64_s64_vg1x2 (w8 + 8))
+
+/*
+** read_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8m1_z0, svuint64x2_t,
+ z0 = svread_za64_u64_vg1x2 (w8 - 1),
+ z0 = svread_za64_u64_vg1x2 (w8 - 1))
+
+/*
+** read_w8_z18:
+** mova {z18\.d - z19\.d}, za\.d\[w8, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z18, svfloat64x2_t,
+ z18 = svread_za64_f64_vg1x2 (w8),
+ z18 = svread_za64_f64_vg1x2 (w8))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** read_w8_z23:
+** mova [^\n]+, za\.d\[w8, 0, vgx2\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z23, svint64x2_t,
+ z23 = svread_za64_s64_vg1x2 (w8),
+ z23 = svread_za64_s64_vg1x2 (w8))
+
+/*
+** read_w8_z28:
+** mova {z28\.d - z29\.d}, za\.d\[w8, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z28, svuint64x2_t,
+ z28 = svread_za64_u64_vg1x2 (w8),
+ z28 = svread_za64_u64_vg1x2 (w8))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za64_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za64_vg1x4.c
new file mode 100644
index 0000000..5b7684e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za64_vg1x4.c
@@ -0,0 +1,137 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_0_z0, svint64x4_t,
+ z0 = svread_za64_s64_vg1x4 (0),
+ z0 = svread_za64_s64_vg1x4 (0))
+
+/*
+** read_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w0_z0, svuint64x4_t,
+ z0 = svread_za64_u64_vg1x4 (w0),
+ z0 = svread_za64_u64_vg1x4 (w0))
+
+/*
+** read_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w7_z0, svfloat64x4_t,
+ z0 = svread_za64_f64_vg1x4 (w7),
+ z0 = svread_za64_f64_vg1x4 (w7))
+
+/*
+** read_w8_z0:
+** mova {z0\.d - z3\.d}, za\.d\[w8, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z0, svint64x4_t,
+ z0 = svread_za64_s64_vg1x4 (w8),
+ z0 = svread_za64_s64_vg1x4 (w8))
+
+/*
+** read_w11_z0:
+** mova {z0\.d - z3\.d}, za\.d\[w11, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w11_z0, svuint64x4_t,
+ z0 = svread_za64_u64_vg1x4 (w11),
+ z0 = svread_za64_u64_vg1x4 (w11))
+
+
+/*
+** read_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w12_z0, svfloat64x4_t,
+ z0 = svread_za64_f64_vg1x4 (w12),
+ z0 = svread_za64_f64_vg1x4 (w12))
+
+/*
+** read_w8p7_z0:
+** mova {z0\.d - z3\.d}, za\.d\[w8, 7, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8p7_z0, svint64x4_t,
+ z0 = svread_za64_s64_vg1x4 (w8 + 7),
+ z0 = svread_za64_s64_vg1x4 (w8 + 7))
+
+/*
+** read_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8p8_z0, svuint64x4_t,
+ z0 = svread_za64_u64_vg1x4 (w8 + 8),
+ z0 = svread_za64_u64_vg1x4 (w8 + 8))
+
+/*
+** read_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8m1_z0, svfloat64x4_t,
+ z0 = svread_za64_f64_vg1x4 (w8 - 1),
+ z0 = svread_za64_f64_vg1x4 (w8 - 1))
+
+/*
+** read_w8_z4:
+** mova {z4\.d - z7\.d}, za\.d\[w8, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z4, svint64x4_t,
+ z4 = svread_za64_s64_vg1x4 (w8),
+ z4 = svread_za64_s64_vg1x4 (w8))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** read_w8_z18:
+** mova [^\n]+, za\.d\[w8, 0, vgx4\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z18, svuint64x4_t,
+ z18 = svread_za64_u64_vg1x4 (w8),
+ z18 = svread_za64_u64_vg1x4 (w8))
+
+/*
+** read_w8_z23:
+** mova [^\n]+, za\.d\[w8, 0, vgx4\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z23, svfloat64x4_t,
+ z23 = svread_za64_f64_vg1x4 (w8),
+ z23 = svread_za64_f64_vg1x4 (w8))
+
+/*
+** read_w8_z28:
+** mova {z28\.d - z31\.d}, za\.d\[w8, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z28, svint64x4_t,
+ z28 = svread_za64_s64_vg1x4 (w8),
+ z28 = svread_za64_s64_vg1x4 (w8))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za8_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za8_vg1x2.c
new file mode 100644
index 0000000..9b151ab
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za8_vg1x2.c
@@ -0,0 +1,122 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_0_z0, svint8x2_t,
+ z0 = svread_za8_s8_vg1x2 (0),
+ z0 = svread_za8_s8_vg1x2 (0))
+
+/*
+** read_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w0_z0, svint8x2_t,
+ z0 = svread_za8_s8_vg1x2 (w0),
+ z0 = svread_za8_s8_vg1x2 (w0))
+
+/*
+** read_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w7_z0, svuint8x2_t,
+ z0 = svread_za8_u8_vg1x2 (w7),
+ z0 = svread_za8_u8_vg1x2 (w7))
+
+/*
+** read_w8_z0:
+** mova {z0\.d - z1\.d}, za\.d\[w8, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z0, svint8x2_t,
+ z0 = svread_za8_s8_vg1x2 (w8),
+ z0 = svread_za8_s8_vg1x2 (w8))
+
+/*
+** read_w11_z0:
+** mova {z0\.d - z1\.d}, za\.d\[w11, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w11_z0, svint8x2_t,
+ z0 = svread_za8_s8_vg1x2 (w11),
+ z0 = svread_za8_s8_vg1x2 (w11))
+
+
+/*
+** read_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w12_z0, svuint8x2_t,
+ z0 = svread_za8_u8_vg1x2 (w12),
+ z0 = svread_za8_u8_vg1x2 (w12))
+
+/*
+** read_w8p7_z0:
+** mova {z0\.d - z1\.d}, za\.d\[w8, 7, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8p7_z0, svint8x2_t,
+ z0 = svread_za8_s8_vg1x2 (w8 + 7),
+ z0 = svread_za8_s8_vg1x2 (w8 + 7))
+
+/*
+** read_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8p8_z0, svint8x2_t,
+ z0 = svread_za8_s8_vg1x2 (w8 + 8),
+ z0 = svread_za8_s8_vg1x2 (w8 + 8))
+
+/*
+** read_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** mova {z0\.d - z1\.d}, za\.d\[\1, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8m1_z0, svuint8x2_t,
+ z0 = svread_za8_u8_vg1x2 (w8 - 1),
+ z0 = svread_za8_u8_vg1x2 (w8 - 1))
+
+/*
+** read_w8_z18:
+** mova {z18\.d - z19\.d}, za\.d\[w8, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z18, svuint8x2_t,
+ z18 = svread_za8_u8_vg1x2 (w8),
+ z18 = svread_za8_u8_vg1x2 (w8))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** read_w8_z23:
+** mova [^\n]+, za\.d\[w8, 0, vgx2\]
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z23, svint8x2_t,
+ z23 = svread_za8_s8_vg1x2 (w8),
+ z23 = svread_za8_s8_vg1x2 (w8))
+
+/*
+** read_w8_z28:
+** mova {z28\.d - z29\.d}, za\.d\[w8, 0, vgx2\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z28, svuint8x2_t,
+ z28 = svread_za8_u8_vg1x2 (w8),
+ z28 = svread_za8_u8_vg1x2 (w8))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za8_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za8_vg1x4.c
new file mode 100644
index 0000000..80c81dd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/read_za8_vg1x4.c
@@ -0,0 +1,137 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** read_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_0_z0, svint8x4_t,
+ z0 = svread_za8_s8_vg1x4 (0),
+ z0 = svread_za8_s8_vg1x4 (0))
+
+/*
+** read_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w0_z0, svuint8x4_t,
+ z0 = svread_za8_u8_vg1x4 (w0),
+ z0 = svread_za8_u8_vg1x4 (w0))
+
+/*
+** read_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w7_z0, svint8x4_t,
+ z0 = svread_za8_s8_vg1x4 (w7),
+ z0 = svread_za8_s8_vg1x4 (w7))
+
+/*
+** read_w8_z0:
+** mova {z0\.d - z3\.d}, za\.d\[w8, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z0, svint8x4_t,
+ z0 = svread_za8_s8_vg1x4 (w8),
+ z0 = svread_za8_s8_vg1x4 (w8))
+
+/*
+** read_w11_z0:
+** mova {z0\.d - z3\.d}, za\.d\[w11, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w11_z0, svuint8x4_t,
+ z0 = svread_za8_u8_vg1x4 (w11),
+ z0 = svread_za8_u8_vg1x4 (w11))
+
+
+/*
+** read_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w12_z0, svint8x4_t,
+ z0 = svread_za8_s8_vg1x4 (w12),
+ z0 = svread_za8_s8_vg1x4 (w12))
+
+/*
+** read_w8p7_z0:
+** mova {z0\.d - z3\.d}, za\.d\[w8, 7, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8p7_z0, svint8x4_t,
+ z0 = svread_za8_s8_vg1x4 (w8 + 7),
+ z0 = svread_za8_s8_vg1x4 (w8 + 7))
+
+/*
+** read_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8p8_z0, svuint8x4_t,
+ z0 = svread_za8_u8_vg1x4 (w8 + 8),
+ z0 = svread_za8_u8_vg1x4 (w8 + 8))
+
+/*
+** read_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** mova {z0\.d - z3\.d}, za\.d\[\1, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8m1_z0, svint8x4_t,
+ z0 = svread_za8_s8_vg1x4 (w8 - 1),
+ z0 = svread_za8_s8_vg1x4 (w8 - 1))
+
+/*
+** read_w8_z4:
+** mova {z4\.d - z7\.d}, za\.d\[w8, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z4, svint8x4_t,
+ z4 = svread_za8_s8_vg1x4 (w8),
+ z4 = svread_za8_s8_vg1x4 (w8))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** read_w8_z18:
+** mova [^\n]+, za\.d\[w8, 0, vgx4\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z18, svuint8x4_t,
+ z18 = svread_za8_u8_vg1x4 (w8),
+ z18 = svread_za8_u8_vg1x4 (w8))
+
+/*
+** read_w8_z23:
+** mova [^\n]+, za\.d\[w8, 0, vgx4\]
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z23, svuint8x4_t,
+ z23 = svread_za8_u8_vg1x4 (w8),
+ z23 = svread_za8_u8_vg1x4 (w8))
+
+/*
+** read_w8_z28:
+** mova {z28\.d - z31\.d}, za\.d\[w8, 0, vgx4\]
+** ret
+*/
+TEST_READ_ZA_XN (read_w8_z28, svint8x4_t,
+ z28 = svread_za8_s8_vg1x4 (w8),
+ z28 = svread_za8_s8_vg1x4 (w8))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rinta_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rinta_s32_x2.c
new file mode 100644
index 0000000..48017f0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rinta_s32_x2.c
@@ -0,0 +1,61 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rinta_z0_z0:
+** frinta {z0\.s - z1\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_XN (rinta_z0_z0, svfloat32x2_t, z0,
+ svrinta_f32_x2 (z0),
+ svrinta (z0))
+
+/*
+** rinta_z0_z4:
+** frinta {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (rinta_z0_z4, svfloat32x2_t, z0,
+ svrinta_f32_x2 (z4),
+ svrinta (z4))
+
+/*
+** rinta_z4_z18:
+** frinta {z4\.s - z5\.s}, {z18\.s - z19\.s}
+** ret
+*/
+TEST_XN (rinta_z4_z18, svfloat32x2_t, z4,
+ svrinta_f32_x2 (z18),
+ svrinta (z18))
+
+/*
+** rinta_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** frinta {z18\.s - z19\.s}, [^\n]+
+** ret
+*/
+TEST_XN (rinta_z18_z23, svfloat32x2_t, z18,
+ svrinta_f32_x2 (z23),
+ svrinta (z23))
+
+/*
+** rinta_z23_z28:
+** frinta [^\n]+, {z28\.s - z29\.s}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rinta_z23_z28, svfloat32x2_t, z23,
+ svrinta_f32_x2 (z28),
+ svrinta (z28))
+
+/*
+** rinta_z28_z0:
+** frinta {z28\.s - z29\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_XN (rinta_z28_z0, svfloat32x2_t, z28,
+ svrinta_f32_x2 (z0),
+ svrinta (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rinta_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rinta_s32_x4.c
new file mode 100644
index 0000000..94e3a06
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rinta_s32_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rinta_z0_z0:
+** frinta {z0\.s - z3\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (rinta_z0_z0, svfloat32x4_t, z0,
+ svrinta_f32_x4 (z0),
+ svrinta (z0))
+
+/*
+** rinta_z0_z4:
+** frinta {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (rinta_z0_z4, svfloat32x4_t, z0,
+ svrinta_f32_x4 (z4),
+ svrinta (z4))
+
+/*
+** rinta_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** frinta {z4\.s - z7\.s}, [^\n]+
+** ret
+*/
+TEST_XN (rinta_z4_z18, svfloat32x4_t, z4,
+ svrinta_f32_x4 (z18),
+ svrinta (z18))
+
+/*
+** rinta_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** frinta {z[^\n]+}, {z.*}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rinta_z18_z23, svfloat32x4_t, z18,
+ svrinta_f32_x4 (z23),
+ svrinta (z23))
+
+/*
+** rinta_z23_z28:
+** frinta [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rinta_z23_z28, svfloat32x4_t, z23,
+ svrinta_f32_x4 (z28),
+ svrinta (z28))
+
+/*
+** rinta_z28_z0:
+** frinta {z28\.s - z31\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (rinta_z28_z0, svfloat32x4_t, z28,
+ svrinta_f32_x4 (z0),
+ svrinta (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintm_u32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintm_u32_x2.c
new file mode 100644
index 0000000..db41ef2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintm_u32_x2.c
@@ -0,0 +1,61 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rintm_z0_z0:
+** frintm {z0\.s - z1\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_XN (rintm_z0_z0, svfloat32x2_t, z0,
+ svrintm_f32_x2 (z0),
+ svrintm (z0))
+
+/*
+** rintm_z0_z4:
+** frintm {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (rintm_z0_z4, svfloat32x2_t, z0,
+ svrintm_f32_x2 (z4),
+ svrintm (z4))
+
+/*
+** rintm_z4_z18:
+** frintm {z4\.s - z5\.s}, {z18\.s - z19\.s}
+** ret
+*/
+TEST_XN (rintm_z4_z18, svfloat32x2_t, z4,
+ svrintm_f32_x2 (z18),
+ svrintm (z18))
+
+/*
+** rintm_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** frintm {z18\.s - z19\.s}, [^\n]+
+** ret
+*/
+TEST_XN (rintm_z18_z23, svfloat32x2_t, z18,
+ svrintm_f32_x2 (z23),
+ svrintm (z23))
+
+/*
+** rintm_z23_z28:
+** frintm [^\n]+, {z28\.s - z29\.s}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rintm_z23_z28, svfloat32x2_t, z23,
+ svrintm_f32_x2 (z28),
+ svrintm (z28))
+
+/*
+** rintm_z28_z0:
+** frintm {z28\.s - z29\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_XN (rintm_z28_z0, svfloat32x2_t, z28,
+ svrintm_f32_x2 (z0),
+ svrintm (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintm_u32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintm_u32_x4.c
new file mode 100644
index 0000000..be1efe3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintm_u32_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rintm_z0_z0:
+** frintm {z0\.s - z3\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (rintm_z0_z0, svfloat32x4_t, z0,
+ svrintm_f32_x4 (z0),
+ svrintm (z0))
+
+/*
+** rintm_z0_z4:
+** frintm {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (rintm_z0_z4, svfloat32x4_t, z0,
+ svrintm_f32_x4 (z4),
+ svrintm (z4))
+
+/*
+** rintm_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** frintm {z4\.s - z7\.s}, [^\n]+
+** ret
+*/
+TEST_XN (rintm_z4_z18, svfloat32x4_t, z4,
+ svrintm_f32_x4 (z18),
+ svrintm (z18))
+
+/*
+** rintm_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** frintm {z[^\n]+}, {z.*}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rintm_z18_z23, svfloat32x4_t, z18,
+ svrintm_f32_x4 (z23),
+ svrintm (z23))
+
+/*
+** rintm_z23_z28:
+** frintm [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rintm_z23_z28, svfloat32x4_t, z23,
+ svrintm_f32_x4 (z28),
+ svrintm (z28))
+
+/*
+** rintm_z28_z0:
+** frintm {z28\.s - z31\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (rintm_z28_z0, svfloat32x4_t, z28,
+ svrintm_f32_x4 (z0),
+ svrintm (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintn_u32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintn_u32_x2.c
new file mode 100644
index 0000000..c54541b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintn_u32_x2.c
@@ -0,0 +1,61 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rintn_z0_z0:
+** frintn {z0\.s - z1\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_XN (rintn_z0_z0, svfloat32x2_t, z0,
+ svrintn_f32_x2 (z0),
+ svrintn (z0))
+
+/*
+** rintn_z0_z4:
+** frintn {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (rintn_z0_z4, svfloat32x2_t, z0,
+ svrintn_f32_x2 (z4),
+ svrintn (z4))
+
+/*
+** rintn_z4_z18:
+** frintn {z4\.s - z5\.s}, {z18\.s - z19\.s}
+** ret
+*/
+TEST_XN (rintn_z4_z18, svfloat32x2_t, z4,
+ svrintn_f32_x2 (z18),
+ svrintn (z18))
+
+/*
+** rintn_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** frintn {z18\.s - z19\.s}, [^\n]+
+** ret
+*/
+TEST_XN (rintn_z18_z23, svfloat32x2_t, z18,
+ svrintn_f32_x2 (z23),
+ svrintn (z23))
+
+/*
+** rintn_z23_z28:
+** frintn [^\n]+, {z28\.s - z29\.s}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rintn_z23_z28, svfloat32x2_t, z23,
+ svrintn_f32_x2 (z28),
+ svrintn (z28))
+
+/*
+** rintn_z28_z0:
+** frintn {z28\.s - z29\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_XN (rintn_z28_z0, svfloat32x2_t, z28,
+ svrintn_f32_x2 (z0),
+ svrintn (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintn_u32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintn_u32_x4.c
new file mode 100644
index 0000000..ff6e55b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintn_u32_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rintn_z0_z0:
+** frintn {z0\.s - z3\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (rintn_z0_z0, svfloat32x4_t, z0,
+ svrintn_f32_x4 (z0),
+ svrintn (z0))
+
+/*
+** rintn_z0_z4:
+** frintn {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (rintn_z0_z4, svfloat32x4_t, z0,
+ svrintn_f32_x4 (z4),
+ svrintn (z4))
+
+/*
+** rintn_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** frintn {z4\.s - z7\.s}, [^\n]+
+** ret
+*/
+TEST_XN (rintn_z4_z18, svfloat32x4_t, z4,
+ svrintn_f32_x4 (z18),
+ svrintn (z18))
+
+/*
+** rintn_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** frintn {z[^\n]+}, {z.*}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rintn_z18_z23, svfloat32x4_t, z18,
+ svrintn_f32_x4 (z23),
+ svrintn (z23))
+
+/*
+** rintn_z23_z28:
+** frintn [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rintn_z23_z28, svfloat32x4_t, z23,
+ svrintn_f32_x4 (z28),
+ svrintn (z28))
+
+/*
+** rintn_z28_z0:
+** frintn {z28\.s - z31\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (rintn_z28_z0, svfloat32x4_t, z28,
+ svrintn_f32_x4 (z0),
+ svrintn (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintp_u32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintp_u32_x2.c
new file mode 100644
index 0000000..722c085
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintp_u32_x2.c
@@ -0,0 +1,61 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rintp_z0_z0:
+** frintp {z0\.s - z1\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_XN (rintp_z0_z0, svfloat32x2_t, z0,
+ svrintp_f32_x2 (z0),
+ svrintp (z0))
+
+/*
+** rintp_z0_z4:
+** frintp {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (rintp_z0_z4, svfloat32x2_t, z0,
+ svrintp_f32_x2 (z4),
+ svrintp (z4))
+
+/*
+** rintp_z4_z18:
+** frintp {z4\.s - z5\.s}, {z18\.s - z19\.s}
+** ret
+*/
+TEST_XN (rintp_z4_z18, svfloat32x2_t, z4,
+ svrintp_f32_x2 (z18),
+ svrintp (z18))
+
+/*
+** rintp_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** frintp {z18\.s - z19\.s}, [^\n]+
+** ret
+*/
+TEST_XN (rintp_z18_z23, svfloat32x2_t, z18,
+ svrintp_f32_x2 (z23),
+ svrintp (z23))
+
+/*
+** rintp_z23_z28:
+** frintp [^\n]+, {z28\.s - z29\.s}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rintp_z23_z28, svfloat32x2_t, z23,
+ svrintp_f32_x2 (z28),
+ svrintp (z28))
+
+/*
+** rintp_z28_z0:
+** frintp {z28\.s - z29\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_XN (rintp_z28_z0, svfloat32x2_t, z28,
+ svrintp_f32_x2 (z0),
+ svrintp (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintp_u32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintp_u32_x4.c
new file mode 100644
index 0000000..7c18257
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rintp_u32_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rintp_z0_z0:
+** frintp {z0\.s - z3\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (rintp_z0_z0, svfloat32x4_t, z0,
+ svrintp_f32_x4 (z0),
+ svrintp (z0))
+
+/*
+** rintp_z0_z4:
+** frintp {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (rintp_z0_z4, svfloat32x4_t, z0,
+ svrintp_f32_x4 (z4),
+ svrintp (z4))
+
+/*
+** rintp_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** frintp {z4\.s - z7\.s}, [^\n]+
+** ret
+*/
+TEST_XN (rintp_z4_z18, svfloat32x4_t, z4,
+ svrintp_f32_x4 (z18),
+ svrintp (z18))
+
+/*
+** rintp_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** frintp {z[^\n]+}, {z.*}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rintp_z18_z23, svfloat32x4_t, z18,
+ svrintp_f32_x4 (z23),
+ svrintp (z23))
+
+/*
+** rintp_z23_z28:
+** frintp [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rintp_z23_z28, svfloat32x4_t, z23,
+ svrintp_f32_x4 (z28),
+ svrintp (z28))
+
+/*
+** rintp_z28_z0:
+** frintp {z28\.s - z31\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (rintp_z28_z0, svfloat32x4_t, z28,
+ svrintp_f32_x4 (z0),
+ svrintp (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s16_x2.c
new file mode 100644
index 0000000..28fe768
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s16_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rshl_z0_z0_z4:
+** srshl {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (rshl_z0_z0_z4, svint16x2_t, z0,
+ svrshl_s16_x2 (z0, z4),
+ svrshl (z0, z4))
+
+/*
+** rshl_z0_z4_z0:
+** srshl {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (rshl_z0_z4_z0, svint16x2_t, z0,
+ svrshl_s16_x2 (z4, z0),
+ svrshl (z4, z0))
+
+/*
+** rshl_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, {z28\.h - z29\.h}
+** |
+** srshl [^\n]+, {z28\.h - z29\.h}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (rshl_z0_z4_z28, svint16x2_t, z0,
+ svrshl_s16_x2 (z4, z28),
+ svrshl (z4, z28))
+
+/*
+** rshl_z18_z18_z4:
+** srshl {z18\.h - z19\.h}, {z18\.h - z19\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (rshl_z18_z18_z4, svint16x2_t, z18,
+ svrshl_s16_x2 (z18, z4),
+ svrshl (z18, z4))
+
+/*
+** rshl_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, {z18\.h - z19\.h}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rshl_z23_z23_z18, svint16x2_t, z23,
+ svrshl_s16_x2 (z23, z18),
+ svrshl (z23, z18))
+
+/*
+** rshl_z28_z28_z0:
+** srshl {z28\.h - z29\.h}, {z28\.h - z29\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_XN (rshl_z28_z28_z0, svint16x2_t, z28,
+ svrshl_s16_x2 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_z0_z0_z18:
+** srshl {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_XN (rshl_z0_z0_z18, svint16x2_t, z0,
+ svrshl_s16_x2 (z0, z18),
+ svrshl (z0, z18))
+
+/*
+** rshl_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z4\.h - z5\.h}, {z4\.h - z5\.h}, [^\n]+
+** |
+** srshl {z4\.h - z5\.h}, {z4\.h - z5\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (rshl_z4_z4_z23, svint16x2_t, z4,
+ svrshl_s16_x2 (z4, z23),
+ svrshl (z4, z23))
+
+/*
+** rshl_single_z24_z24_z0:
+** srshl {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z0, svint16x2_t, svint16_t, z24,
+ svrshl_single_s16_x2 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** |
+** srshl {z28\.h - z29\.h}, {z28\.h - z29\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z28_z0, svint16x2_t, svint16_t, z24,
+ svrshl_single_s16_x2 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** srshl {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z1_z0, svint16x2_t, svint16_t, z24,
+ svrshl_single_s16_x2 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z1_z24_z0:
+** srshl {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z24_z0, svint16x2_t, svint16_t, z1,
+ svrshl_single_s16_x2 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** srshl ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z1_z0, svint16x2_t, svint16_t, z1,
+ svrshl_single_s16_x2 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z18_z18_z0:
+** srshl {z18\.h - z19\.h}, {z18\.h - z19\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z18_z18_z0, svint16x2_t, svint16_t, z18,
+ svrshl_single_s16_x2 (z18, z0),
+ svrshl (z18, z0))
+
+/*
+** rshl_single_awkward:
+** ...
+** srshl ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (rshl_single_awkward, svint16x2_t, svint16_t,
+ z0_res = svrshl_single_s16_x2 (z1, z0),
+ z0_res = svrshl (z1, z0))
+
+/*
+** rshl_single_z0_z0_z15:
+** ...
+** srshl {z0\.h - z1\.h}, {z0\.h - z1\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (rshl_single_z0_z0_z15, svint16x2_t, svint16_t,
+ z0 = svrshl_single_s16_x2 (z0, z15),
+ z0 = svrshl (z0, z15))
+
+/*
+** rshl_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** srshl {z24\.h - z25\.h}, {z24\.h - z25\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z16, svint16x2_t, svint16_t, z24,
+ svrshl_single_s16_x2 (z24, z16),
+ svrshl (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s16_x4.c
new file mode 100644
index 0000000..afa3ba9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s16_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rshl_z0_z0_z4:
+** srshl {z0\.h - z3\.h}, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (rshl_z0_z0_z4, svint16x4_t, z0,
+ svrshl_s16_x4 (z0, z4),
+ svrshl (z0, z4))
+
+/*
+** rshl_z0_z4_z0:
+** srshl {z0\.h - z3\.h}, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (rshl_z0_z4_z0, svint16x4_t, z0,
+ svrshl_s16_x4 (z4, z0),
+ svrshl (z4, z0))
+
+/*
+** rshl_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, {z28\.h - z31\.h}
+** |
+** srshl [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (rshl_z0_z4_z28, svint16x4_t, z0,
+ svrshl_s16_x4 (z4, z28),
+ svrshl (z4, z28))
+
+/*
+** rshl_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, {z4\.h - z7\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rshl_z18_z18_z4, svint16x4_t, z18,
+ svrshl_s16_x4 (z18, z4),
+ svrshl (z18, z4))
+
+/*
+** rshl_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rshl_z23_z23_z28, svint16x4_t, z23,
+ svrshl_s16_x4 (z23, z28),
+ svrshl (z23, z28))
+
+/*
+** rshl_z28_z28_z0:
+** srshl {z28\.h - z31\.h}, {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (rshl_z28_z28_z0, svint16x4_t, z28,
+ svrshl_s16_x4 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z0\.h - z3\.h}, {z0\.h - z3\.h}, [^\n]+
+** |
+** srshl {z0\.h - z3\.h}, {z0\.h - z3\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (rshl_z0_z0_z18, svint16x4_t, z0,
+ svrshl_s16_x4 (z0, z18),
+ svrshl (z0, z18))
+
+/*
+** rshl_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z4\.h - z7\.h}, {z4\.h - z7\.h}, [^\n]+
+** |
+** srshl {z4\.h - z7\.h}, {z4\.h - z7\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (rshl_z4_z4_z23, svint16x4_t, z4,
+ svrshl_s16_x4 (z4, z23),
+ svrshl (z4, z23))
+
+/*
+** rshl_single_z24_z24_z0:
+** srshl {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z0, svint16x4_t, svint16_t, z24,
+ svrshl_single_s16_x4 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** |
+** srshl {z28\.h - z31\.h}, {z28\.h - z31\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z28_z0, svint16x4_t, svint16_t, z24,
+ svrshl_single_s16_x4 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z1_z0, svint16x4_t, svint16_t, z24,
+ svrshl_single_s16_x4 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z1_z24_z0:
+** srshl {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z24_z0, svint16x4_t, svint16_t, z1,
+ svrshl_single_s16_x4 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z1_z0, svint16x4_t, svint16_t, z1,
+ svrshl_single_s16_x4 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z18_z18_z0, svint16x4_t, svint16_t, z18,
+ svrshl_single_s16_x4 (z18, z0),
+ svrshl (z18, z0))
+
+/*
+** rshl_single_awkward:
+** ...
+** srshl ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (rshl_single_awkward, svint16x4_t, svint16_t,
+ z0_res = svrshl_single_s16_x4 (z1, z0),
+ z0_res = svrshl (z1, z0))
+
+/*
+** rshl_single_z0_z0_z15:
+** ...
+** srshl {z0\.h - z3\.h}, {z0\.h - z3\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (rshl_single_z0_z0_z15, svint16x4_t, svint16_t,
+ z0 = svrshl_single_s16_x4 (z0, z15),
+ z0 = svrshl (z0, z15))
+
+/*
+** rshl_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** srshl {z24\.h - z27\.h}, {z24\.h - z27\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z16, svint16x4_t, svint16_t, z24,
+ svrshl_single_s16_x4 (z24, z16),
+ svrshl (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s32_x2.c
new file mode 100644
index 0000000..e67d14f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s32_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rshl_z0_z0_z4:
+** srshl {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (rshl_z0_z0_z4, svint32x2_t, z0,
+ svrshl_s32_x2 (z0, z4),
+ svrshl (z0, z4))
+
+/*
+** rshl_z0_z4_z0:
+** srshl {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (rshl_z0_z4_z0, svint32x2_t, z0,
+ svrshl_s32_x2 (z4, z0),
+ svrshl (z4, z0))
+
+/*
+** rshl_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, {z28\.s - z29\.s}
+** |
+** srshl [^\n]+, {z28\.s - z29\.s}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (rshl_z0_z4_z28, svint32x2_t, z0,
+ svrshl_s32_x2 (z4, z28),
+ svrshl (z4, z28))
+
+/*
+** rshl_z18_z18_z4:
+** srshl {z18\.s - z19\.s}, {z18\.s - z19\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (rshl_z18_z18_z4, svint32x2_t, z18,
+ svrshl_s32_x2 (z18, z4),
+ svrshl (z18, z4))
+
+/*
+** rshl_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, {z18\.s - z19\.s}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rshl_z23_z23_z18, svint32x2_t, z23,
+ svrshl_s32_x2 (z23, z18),
+ svrshl (z23, z18))
+
+/*
+** rshl_z28_z28_z0:
+** srshl {z28\.s - z29\.s}, {z28\.s - z29\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_XN (rshl_z28_z28_z0, svint32x2_t, z28,
+ svrshl_s32_x2 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_z0_z0_z18:
+** srshl {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z18\.s - z19\.s}
+** ret
+*/
+TEST_XN (rshl_z0_z0_z18, svint32x2_t, z0,
+ svrshl_s32_x2 (z0, z18),
+ svrshl (z0, z18))
+
+/*
+** rshl_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z4\.s - z5\.s}, {z4\.s - z5\.s}, [^\n]+
+** |
+** srshl {z4\.s - z5\.s}, {z4\.s - z5\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (rshl_z4_z4_z23, svint32x2_t, z4,
+ svrshl_s32_x2 (z4, z23),
+ svrshl (z4, z23))
+
+/*
+** rshl_single_z24_z24_z0:
+** srshl {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z0, svint32x2_t, svint32_t, z24,
+ svrshl_single_s32_x2 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** |
+** srshl {z28\.s - z29\.s}, {z28\.s - z29\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z28_z0, svint32x2_t, svint32_t, z24,
+ svrshl_single_s32_x2 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** srshl {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z1_z0, svint32x2_t, svint32_t, z24,
+ svrshl_single_s32_x2 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z1_z24_z0:
+** srshl {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z24_z0, svint32x2_t, svint32_t, z1,
+ svrshl_single_s32_x2 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** srshl ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z1_z0, svint32x2_t, svint32_t, z1,
+ svrshl_single_s32_x2 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z18_z18_z0:
+** srshl {z18\.s - z19\.s}, {z18\.s - z19\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z18_z18_z0, svint32x2_t, svint32_t, z18,
+ svrshl_single_s32_x2 (z18, z0),
+ svrshl (z18, z0))
+
+/*
+** rshl_single_awkward:
+** ...
+** srshl ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (rshl_single_awkward, svint32x2_t, svint32_t,
+ z0_res = svrshl_single_s32_x2 (z1, z0),
+ z0_res = svrshl (z1, z0))
+
+/*
+** rshl_single_z0_z0_z15:
+** ...
+** srshl {z0\.s - z1\.s}, {z0\.s - z1\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (rshl_single_z0_z0_z15, svint32x2_t, svint32_t,
+ z0 = svrshl_single_s32_x2 (z0, z15),
+ z0 = svrshl (z0, z15))
+
+/*
+** rshl_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** srshl {z24\.s - z25\.s}, {z24\.s - z25\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z16, svint32x2_t, svint32_t, z24,
+ svrshl_single_s32_x2 (z24, z16),
+ svrshl (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s32_x4.c
new file mode 100644
index 0000000..676d9eb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s32_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rshl_z0_z0_z4:
+** srshl {z0\.s - z3\.s}, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (rshl_z0_z0_z4, svint32x4_t, z0,
+ svrshl_s32_x4 (z0, z4),
+ svrshl (z0, z4))
+
+/*
+** rshl_z0_z4_z0:
+** srshl {z0\.s - z3\.s}, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (rshl_z0_z4_z0, svint32x4_t, z0,
+ svrshl_s32_x4 (z4, z0),
+ svrshl (z4, z0))
+
+/*
+** rshl_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, {z28\.s - z31\.s}
+** |
+** srshl [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (rshl_z0_z4_z28, svint32x4_t, z0,
+ svrshl_s32_x4 (z4, z28),
+ svrshl (z4, z28))
+
+/*
+** rshl_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, {z4\.s - z7\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rshl_z18_z18_z4, svint32x4_t, z18,
+ svrshl_s32_x4 (z18, z4),
+ svrshl (z18, z4))
+
+/*
+** rshl_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rshl_z23_z23_z28, svint32x4_t, z23,
+ svrshl_s32_x4 (z23, z28),
+ svrshl (z23, z28))
+
+/*
+** rshl_z28_z28_z0:
+** srshl {z28\.s - z31\.s}, {z28\.s - z31\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (rshl_z28_z28_z0, svint32x4_t, z28,
+ svrshl_s32_x4 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z0\.s - z3\.s}, {z0\.s - z3\.s}, [^\n]+
+** |
+** srshl {z0\.s - z3\.s}, {z0\.s - z3\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (rshl_z0_z0_z18, svint32x4_t, z0,
+ svrshl_s32_x4 (z0, z18),
+ svrshl (z0, z18))
+
+/*
+** rshl_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z4\.s - z7\.s}, {z4\.s - z7\.s}, [^\n]+
+** |
+** srshl {z4\.s - z7\.s}, {z4\.s - z7\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (rshl_z4_z4_z23, svint32x4_t, z4,
+ svrshl_s32_x4 (z4, z23),
+ svrshl (z4, z23))
+
+/*
+** rshl_single_z24_z24_z0:
+** srshl {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z0, svint32x4_t, svint32_t, z24,
+ svrshl_single_s32_x4 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** |
+** srshl {z28\.s - z31\.s}, {z28\.s - z31\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z28_z0, svint32x4_t, svint32_t, z24,
+ svrshl_single_s32_x4 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z1_z0, svint32x4_t, svint32_t, z24,
+ svrshl_single_s32_x4 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z1_z24_z0:
+** srshl {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z24_z0, svint32x4_t, svint32_t, z1,
+ svrshl_single_s32_x4 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z1_z0, svint32x4_t, svint32_t, z1,
+ svrshl_single_s32_x4 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z18_z18_z0, svint32x4_t, svint32_t, z18,
+ svrshl_single_s32_x4 (z18, z0),
+ svrshl (z18, z0))
+
+/*
+** rshl_single_awkward:
+** ...
+** srshl ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (rshl_single_awkward, svint32x4_t, svint32_t,
+ z0_res = svrshl_single_s32_x4 (z1, z0),
+ z0_res = svrshl (z1, z0))
+
+/*
+** rshl_single_z0_z0_z15:
+** ...
+** srshl {z0\.s - z3\.s}, {z0\.s - z3\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (rshl_single_z0_z0_z15, svint32x4_t, svint32_t,
+ z0 = svrshl_single_s32_x4 (z0, z15),
+ z0 = svrshl (z0, z15))
+
+/*
+** rshl_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** srshl {z24\.s - z27\.s}, {z24\.s - z27\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z16, svint32x4_t, svint32_t, z24,
+ svrshl_single_s32_x4 (z24, z16),
+ svrshl (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s64_x2.c
new file mode 100644
index 0000000..ce12ebd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s64_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rshl_z0_z0_z4:
+** srshl {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (rshl_z0_z0_z4, svint64x2_t, z0,
+ svrshl_s64_x2 (z0, z4),
+ svrshl (z0, z4))
+
+/*
+** rshl_z0_z4_z0:
+** srshl {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (rshl_z0_z4_z0, svint64x2_t, z0,
+ svrshl_s64_x2 (z4, z0),
+ svrshl (z4, z0))
+
+/*
+** rshl_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, {z28\.d - z29\.d}
+** |
+** srshl [^\n]+, {z28\.d - z29\.d}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (rshl_z0_z4_z28, svint64x2_t, z0,
+ svrshl_s64_x2 (z4, z28),
+ svrshl (z4, z28))
+
+/*
+** rshl_z18_z18_z4:
+** srshl {z18\.d - z19\.d}, {z18\.d - z19\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (rshl_z18_z18_z4, svint64x2_t, z18,
+ svrshl_s64_x2 (z18, z4),
+ svrshl (z18, z4))
+
+/*
+** rshl_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, {z18\.d - z19\.d}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rshl_z23_z23_z18, svint64x2_t, z23,
+ svrshl_s64_x2 (z23, z18),
+ svrshl (z23, z18))
+
+/*
+** rshl_z28_z28_z0:
+** srshl {z28\.d - z29\.d}, {z28\.d - z29\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_XN (rshl_z28_z28_z0, svint64x2_t, z28,
+ svrshl_s64_x2 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_z0_z0_z18:
+** srshl {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z18\.d - z19\.d}
+** ret
+*/
+TEST_XN (rshl_z0_z0_z18, svint64x2_t, z0,
+ svrshl_s64_x2 (z0, z18),
+ svrshl (z0, z18))
+
+/*
+** rshl_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z4\.d - z5\.d}, {z4\.d - z5\.d}, [^\n]+
+** |
+** srshl {z4\.d - z5\.d}, {z4\.d - z5\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (rshl_z4_z4_z23, svint64x2_t, z4,
+ svrshl_s64_x2 (z4, z23),
+ svrshl (z4, z23))
+
+/*
+** rshl_single_z24_z24_z0:
+** srshl {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z0, svint64x2_t, svint64_t, z24,
+ svrshl_single_s64_x2 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** |
+** srshl {z28\.d - z29\.d}, {z28\.d - z29\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z28_z0, svint64x2_t, svint64_t, z24,
+ svrshl_single_s64_x2 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** srshl {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z1_z0, svint64x2_t, svint64_t, z24,
+ svrshl_single_s64_x2 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z1_z24_z0:
+** srshl {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z24_z0, svint64x2_t, svint64_t, z1,
+ svrshl_single_s64_x2 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** srshl ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z1_z0, svint64x2_t, svint64_t, z1,
+ svrshl_single_s64_x2 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z18_z18_z0:
+** srshl {z18\.d - z19\.d}, {z18\.d - z19\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z18_z18_z0, svint64x2_t, svint64_t, z18,
+ svrshl_single_s64_x2 (z18, z0),
+ svrshl (z18, z0))
+
+/*
+** rshl_single_awkward:
+** ...
+** srshl ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (rshl_single_awkward, svint64x2_t, svint64_t,
+ z0_res = svrshl_single_s64_x2 (z1, z0),
+ z0_res = svrshl (z1, z0))
+
+/*
+** rshl_single_z0_z0_z15:
+** ...
+** srshl {z0\.d - z1\.d}, {z0\.d - z1\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (rshl_single_z0_z0_z15, svint64x2_t, svint64_t,
+ z0 = svrshl_single_s64_x2 (z0, z15),
+ z0 = svrshl (z0, z15))
+
+/*
+** rshl_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** srshl {z24\.d - z25\.d}, {z24\.d - z25\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z16, svint64x2_t, svint64_t, z24,
+ svrshl_single_s64_x2 (z24, z16),
+ svrshl (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s64_x4.c
new file mode 100644
index 0000000..9494302
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s64_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rshl_z0_z0_z4:
+** srshl {z0\.d - z3\.d}, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (rshl_z0_z0_z4, svint64x4_t, z0,
+ svrshl_s64_x4 (z0, z4),
+ svrshl (z0, z4))
+
+/*
+** rshl_z0_z4_z0:
+** srshl {z0\.d - z3\.d}, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (rshl_z0_z4_z0, svint64x4_t, z0,
+ svrshl_s64_x4 (z4, z0),
+ svrshl (z4, z0))
+
+/*
+** rshl_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, {z28\.d - z31\.d}
+** |
+** srshl [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (rshl_z0_z4_z28, svint64x4_t, z0,
+ svrshl_s64_x4 (z4, z28),
+ svrshl (z4, z28))
+
+/*
+** rshl_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, {z4\.d - z7\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rshl_z18_z18_z4, svint64x4_t, z18,
+ svrshl_s64_x4 (z18, z4),
+ svrshl (z18, z4))
+
+/*
+** rshl_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rshl_z23_z23_z28, svint64x4_t, z23,
+ svrshl_s64_x4 (z23, z28),
+ svrshl (z23, z28))
+
+/*
+** rshl_z28_z28_z0:
+** srshl {z28\.d - z31\.d}, {z28\.d - z31\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (rshl_z28_z28_z0, svint64x4_t, z28,
+ svrshl_s64_x4 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z0\.d - z3\.d}, {z0\.d - z3\.d}, [^\n]+
+** |
+** srshl {z0\.d - z3\.d}, {z0\.d - z3\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (rshl_z0_z0_z18, svint64x4_t, z0,
+ svrshl_s64_x4 (z0, z18),
+ svrshl (z0, z18))
+
+/*
+** rshl_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z4\.d - z7\.d}, {z4\.d - z7\.d}, [^\n]+
+** |
+** srshl {z4\.d - z7\.d}, {z4\.d - z7\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (rshl_z4_z4_z23, svint64x4_t, z4,
+ svrshl_s64_x4 (z4, z23),
+ svrshl (z4, z23))
+
+/*
+** rshl_single_z24_z24_z0:
+** srshl {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z0, svint64x4_t, svint64_t, z24,
+ svrshl_single_s64_x4 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** |
+** srshl {z28\.d - z31\.d}, {z28\.d - z31\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z28_z0, svint64x4_t, svint64_t, z24,
+ svrshl_single_s64_x4 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z1_z0, svint64x4_t, svint64_t, z24,
+ svrshl_single_s64_x4 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z1_z24_z0:
+** srshl {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z24_z0, svint64x4_t, svint64_t, z1,
+ svrshl_single_s64_x4 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z1_z0, svint64x4_t, svint64_t, z1,
+ svrshl_single_s64_x4 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z18_z18_z0, svint64x4_t, svint64_t, z18,
+ svrshl_single_s64_x4 (z18, z0),
+ svrshl (z18, z0))
+
+/*
+** rshl_single_awkward:
+** ...
+** srshl ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (rshl_single_awkward, svint64x4_t, svint64_t,
+ z0_res = svrshl_single_s64_x4 (z1, z0),
+ z0_res = svrshl (z1, z0))
+
+/*
+** rshl_single_z0_z0_z15:
+** ...
+** srshl {z0\.d - z3\.d}, {z0\.d - z3\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (rshl_single_z0_z0_z15, svint64x4_t, svint64_t,
+ z0 = svrshl_single_s64_x4 (z0, z15),
+ z0 = svrshl (z0, z15))
+
+/*
+** rshl_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** srshl {z24\.d - z27\.d}, {z24\.d - z27\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z16, svint64x4_t, svint64_t, z24,
+ svrshl_single_s64_x4 (z24, z16),
+ svrshl (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s8_x2.c
new file mode 100644
index 0000000..49e7b9f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s8_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rshl_z0_z0_z4:
+** srshl {z0\.b - z1\.b}, {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (rshl_z0_z0_z4, svint8x2_t, z0,
+ svrshl_s8_x2 (z0, z4),
+ svrshl (z0, z4))
+
+/*
+** rshl_z0_z4_z0:
+** srshl {z0\.b - z1\.b}, {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (rshl_z0_z4_z0, svint8x2_t, z0,
+ svrshl_s8_x2 (z4, z0),
+ svrshl (z4, z0))
+
+/*
+** rshl_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, {z28\.b - z29\.b}
+** |
+** srshl [^\n]+, {z28\.b - z29\.b}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (rshl_z0_z4_z28, svint8x2_t, z0,
+ svrshl_s8_x2 (z4, z28),
+ svrshl (z4, z28))
+
+/*
+** rshl_z18_z18_z4:
+** srshl {z18\.b - z19\.b}, {z18\.b - z19\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (rshl_z18_z18_z4, svint8x2_t, z18,
+ svrshl_s8_x2 (z18, z4),
+ svrshl (z18, z4))
+
+/*
+** rshl_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, {z18\.b - z19\.b}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rshl_z23_z23_z18, svint8x2_t, z23,
+ svrshl_s8_x2 (z23, z18),
+ svrshl (z23, z18))
+
+/*
+** rshl_z28_z28_z0:
+** srshl {z28\.b - z29\.b}, {z28\.b - z29\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_XN (rshl_z28_z28_z0, svint8x2_t, z28,
+ svrshl_s8_x2 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_z0_z0_z18:
+** srshl {z0\.b - z1\.b}, {z0\.b - z1\.b}, {z18\.b - z19\.b}
+** ret
+*/
+TEST_XN (rshl_z0_z0_z18, svint8x2_t, z0,
+ svrshl_s8_x2 (z0, z18),
+ svrshl (z0, z18))
+
+/*
+** rshl_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z4\.b - z5\.b}, {z4\.b - z5\.b}, [^\n]+
+** |
+** srshl {z4\.b - z5\.b}, {z4\.b - z5\.b}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (rshl_z4_z4_z23, svint8x2_t, z4,
+ svrshl_s8_x2 (z4, z23),
+ svrshl (z4, z23))
+
+/*
+** rshl_single_z24_z24_z0:
+** srshl {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z0, svint8x2_t, svint8_t, z24,
+ svrshl_single_s8_x2 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** |
+** srshl {z28\.b - z29\.b}, {z28\.b - z29\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z28_z0, svint8x2_t, svint8_t, z24,
+ svrshl_single_s8_x2 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** srshl {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z1_z0, svint8x2_t, svint8_t, z24,
+ svrshl_single_s8_x2 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z1_z24_z0:
+** srshl {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z24_z0, svint8x2_t, svint8_t, z1,
+ svrshl_single_s8_x2 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** srshl ({z[0-9]+\.b - z[0-9]+\.b}), \1, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z1_z0, svint8x2_t, svint8_t, z1,
+ svrshl_single_s8_x2 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z18_z18_z0:
+** srshl {z18\.b - z19\.b}, {z18\.b - z19\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z18_z18_z0, svint8x2_t, svint8_t, z18,
+ svrshl_single_s8_x2 (z18, z0),
+ svrshl (z18, z0))
+
+/*
+** rshl_single_awkward:
+** ...
+** srshl ({z[0-9]+\.b - z[0-9]+\.b}), \1, z[0-9]+\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (rshl_single_awkward, svint8x2_t, svint8_t,
+ z0_res = svrshl_single_s8_x2 (z1, z0),
+ z0_res = svrshl (z1, z0))
+
+/*
+** rshl_single_z0_z0_z15:
+** ...
+** srshl {z0\.b - z1\.b}, {z0\.b - z1\.b}, z15\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (rshl_single_z0_z0_z15, svint8x2_t, svint8_t,
+ z0 = svrshl_single_s8_x2 (z0, z15),
+ z0 = svrshl (z0, z15))
+
+/*
+** rshl_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** srshl {z24\.b - z25\.b}, {z24\.b - z25\.b}, \1\.b
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z16, svint8x2_t, svint8_t, z24,
+ svrshl_single_s8_x2 (z24, z16),
+ svrshl (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s8_x4.c
new file mode 100644
index 0000000..ee33999
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_s8_x4.c
@@ -0,0 +1,249 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rshl_z0_z0_z4:
+** srshl {z0\.b - z3\.b}, {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_XN (rshl_z0_z0_z4, svint8x4_t, z0,
+ svrshl_s8_x4 (z0, z4),
+ svrshl (z0, z4))
+
+/*
+** rshl_z0_z4_z0:
+** srshl {z0\.b - z3\.b}, {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_XN (rshl_z0_z4_z0, svint8x4_t, z0,
+ svrshl_s8_x4 (z4, z0),
+ svrshl (z4, z0))
+
+/*
+** rshl_z0_z4_z28:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, {z28\.b - z31\.b}
+** |
+** srshl [^\n]+, {z28\.b - z31\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (rshl_z0_z4_z28, svint8x4_t, z0,
+ svrshl_s8_x4 (z4, z28),
+ svrshl (z4, z28))
+
+/*
+** rshl_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, {z4\.b - z7\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rshl_z18_z18_z4, svint8x4_t, z18,
+ svrshl_s8_x4 (z18, z4),
+ svrshl (z18, z4))
+
+/*
+** rshl_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, {z28\.b - z31\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (rshl_z23_z23_z28, svint8x4_t, z23,
+ svrshl_s8_x4 (z23, z28),
+ svrshl (z23, z28))
+
+/*
+** rshl_z28_z28_z0:
+** srshl {z28\.b - z31\.b}, {z28\.b - z31\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_XN (rshl_z28_z28_z0, svint8x4_t, z28,
+ svrshl_s8_x4 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_z0_z0_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z0\.b - z3\.b}, {z0\.b - z3\.b}, [^\n]+
+** |
+** srshl {z0\.b - z3\.b}, {z0\.b - z3\.b}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (rshl_z0_z0_z18, svint8x4_t, z0,
+ svrshl_s8_x4 (z0, z18),
+ svrshl (z0, z18))
+
+/*
+** rshl_z4_z4_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z4\.b - z7\.b}, {z4\.b - z7\.b}, [^\n]+
+** |
+** srshl {z4\.b - z7\.b}, {z4\.b - z7\.b}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN (rshl_z4_z4_z23, svint8x4_t, z4,
+ svrshl_s8_x4 (z4, z23),
+ svrshl (z4, z23))
+
+/*
+** rshl_single_z24_z24_z0:
+** srshl {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z0, svint8x4_t, svint8_t, z24,
+ svrshl_single_s8_x4 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** |
+** srshl {z28\.b - z31\.b}, {z28\.b - z31\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z28_z0, svint8x4_t, svint8_t, z24,
+ svrshl_single_s8_x4 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z1_z0, svint8x4_t, svint8_t, z24,
+ svrshl_single_s8_x4 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z1_z24_z0:
+** srshl {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z24_z0, svint8x4_t, svint8_t, z1,
+ svrshl_single_s8_x4 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl ({z[0-9]+\.b - z[0-9]+\.b}), \1, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z1_z0, svint8x4_t, svint8_t, z1,
+ svrshl_single_s8_x4 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** srshl [^\n]+, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z18_z18_z0, svint8x4_t, svint8_t, z18,
+ svrshl_single_s8_x4 (z18, z0),
+ svrshl (z18, z0))
+
+/*
+** rshl_single_awkward:
+** ...
+** srshl ({z[0-9]+\.b - z[0-9]+\.b}), \1, z[0-9]+\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (rshl_single_awkward, svint8x4_t, svint8_t,
+ z0_res = svrshl_single_s8_x4 (z1, z0),
+ z0_res = svrshl (z1, z0))
+
+/*
+** rshl_single_z0_z0_z15:
+** ...
+** srshl {z0\.b - z3\.b}, {z0\.b - z3\.b}, z15\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (rshl_single_z0_z0_z15, svint8x4_t, svint8_t,
+ z0 = svrshl_single_s8_x4 (z0, z15),
+ z0 = svrshl (z0, z15))
+
+/*
+** rshl_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** srshl {z24\.b - z27\.b}, {z24\.b - z27\.b}, \1\.b
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z16, svint8x4_t, svint8_t, z24,
+ svrshl_single_s8_x4 (z24, z16),
+ svrshl (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u16_x2.c
new file mode 100644
index 0000000..89fc1ec
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u16_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rshl_z0_z0_z4:
+** urshl {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_DUAL_XN (rshl_z0_z0_z4, svuint16x2_t, svint16x2_t, z0,
+ svrshl_u16_x2 (z0, z4),
+ svrshl (z0, z4))
+
+/*
+** rshl_z4_z4_z0:
+** urshl {z4\.h - z5\.h}, {z4\.h - z5\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_DUAL_XN (rshl_z4_z4_z0, svint16x2_t, svuint16x2_t, z4,
+ svrshl_u16_x2 (z4, z0),
+ svrshl (z4, z0))
+
+/*
+** rshl_z0_z28_z4:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** urshl [^\n]+, {z4\.h - z5\.h}
+** |
+** urshl [^\n]+, {z4\.h - z5\.h}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_DUAL_XN (rshl_z0_z28_z4, svuint16x2_t, svint16x2_t, z0,
+ svrshl_u16_x2 (z28, z4),
+ svrshl (z28, z4))
+
+/*
+** rshl_z18_z18_z4:
+** urshl {z18\.h - z19\.h}, {z18\.h - z19\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_DUAL_XN (rshl_z18_z18_z4, svuint16x2_t, svint16x2_t, z18,
+ svrshl_u16_x2 (z18, z4),
+ svrshl (z18, z4))
+
+/*
+** rshl_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** urshl [^\n]+, {z18\.h - z19\.h}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (rshl_z23_z23_z18, svint16x2_t, svuint16x2_t, z23,
+ svrshl_u16_x2 (z23, z18),
+ svrshl (z23, z18))
+
+/*
+** rshl_z28_z28_z4:
+** urshl {z28\.h - z29\.h}, {z28\.h - z29\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_DUAL_XN (rshl_z28_z28_z4, svuint16x2_t, svint16x2_t, z28,
+ svrshl_u16_x2 (z28, z4),
+ svrshl (z28, z4))
+
+/*
+** rshl_z4_z4_z18:
+** urshl {z4\.h - z5\.h}, {z4\.h - z5\.h}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_DUAL_XN (rshl_z4_z4_z18, svint16x2_t, svuint16x2_t, z4,
+ svrshl_u16_x2 (z4, z18),
+ svrshl (z4, z18))
+
+/*
+** rshl_z28_z28_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z28\.h - z29\.h}, {z28\.h - z29\.h}, [^\n]+
+** |
+** urshl {z28\.h - z29\.h}, {z28\.h - z29\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_DUAL_XN (rshl_z28_z28_z23, svuint16x2_t, svint16x2_t, z28,
+ svrshl_u16_x2 (z28, z23),
+ svrshl (z28, z23))
+
+/*
+** rshl_single_z24_z24_z0:
+** urshl {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z0, svuint16x2_t, svint16_t, z24,
+ svrshl_single_u16_x2 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** |
+** urshl {z28\.h - z29\.h}, {z28\.h - z29\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z28_z0, svuint16x2_t, svint16_t, z24,
+ svrshl_single_u16_x2 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** urshl {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z1_z0, svuint16x2_t, svint16_t, z24,
+ svrshl_single_u16_x2 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z1_z24_z0:
+** urshl {z24\.h - z25\.h}, {z24\.h - z25\.h}, z0\.h
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z24_z0, svuint16x2_t, svint16_t, z1,
+ svrshl_single_u16_x2 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** urshl ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z1_z0, svuint16x2_t, svint16_t, z1,
+ svrshl_single_u16_x2 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z18_z18_z0:
+** urshl {z18\.h - z19\.h}, {z18\.h - z19\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z18_z18_z0, svuint16x2_t, svint16_t, z18,
+ svrshl_single_u16_x2 (z18, z0),
+ svrshl (z18, z0))
+
+/*
+** rshl_single_awkward:
+** ...
+** urshl ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (rshl_single_awkward, svuint16x2_t, svint16_t,
+ z0_res = svrshl_single_u16_x2 (z1, z0),
+ z0_res = svrshl (z1, z0))
+
+/*
+** rshl_single_z0_z0_z15:
+** ...
+** urshl {z0\.h - z1\.h}, {z0\.h - z1\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (rshl_single_z0_z0_z15, svuint16x2_t, svint16_t,
+ z0 = svrshl_single_u16_x2 (z0, z15),
+ z0 = svrshl (z0, z15))
+
+/*
+** rshl_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** urshl {z24\.h - z25\.h}, {z24\.h - z25\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z16, svuint16x2_t, svint16_t, z24,
+ svrshl_single_u16_x2 (z24, z16),
+ svrshl (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u16_x4.c
new file mode 100644
index 0000000..b9976b2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u16_x4.c
@@ -0,0 +1,228 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rshl_z0_z0_z4:
+** urshl {z0\.h - z3\.h}, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_DUAL_XN (rshl_z0_z0_z4, svuint16x4_t, svint16x4_t, z0,
+ svrshl_u16_x4 (z0, z4),
+ svrshl (z0, z4))
+
+/*
+** rshl_z4_z4_z0:
+** urshl {z4\.h - z7\.h}, {z4\.h - z7\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_DUAL_XN (rshl_z4_z4_z0, svint16x4_t, svuint16x4_t, z4,
+ svrshl_u16_x4 (z4, z0),
+ svrshl (z4, z0))
+
+/*
+** rshl_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl [^\n]+, {z4\.h - z7\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (rshl_z18_z18_z4, svuint16x4_t, svint16x4_t, z18,
+ svrshl_u16_x4 (z18, z4),
+ svrshl (z18, z4))
+
+/*
+** rshl_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (rshl_z23_z23_z28, svint16x4_t, svuint16x4_t, z23,
+ svrshl_u16_x4 (z23, z28),
+ svrshl (z23, z28))
+
+/*
+** rshl_z28_z28_z4:
+** urshl {z28\.h - z31\.h}, {z28\.h - z31\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_DUAL_XN (rshl_z28_z28_z4, svuint16x4_t, svint16x4_t, z28,
+ svrshl_u16_x4 (z28, z4),
+ svrshl (z28, z4))
+
+/*
+** rshl_z4_z4_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z4\.h - z7\.h}, {z4\.h - z7\.h}, [^\n]+
+** |
+** urshl {z4\.h - z7\.h}, {z4\.h - z7\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_DUAL_XN (rshl_z4_z4_z18, svint16x4_t, svuint16x4_t, z4,
+ svrshl_u16_x4 (z4, z18),
+ svrshl (z4, z18))
+
+/*
+** rshl_z0_z0_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z0\.h - z3\.h}, {z0\.h - z3\.h}, [^\n]+
+** |
+** urshl {z0\.h - z3\.h}, {z0\.h - z3\.h}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_DUAL_XN (rshl_z0_z0_z23, svuint16x4_t, svint16x4_t, z0,
+ svrshl_u16_x4 (z0, z23),
+ svrshl (z0, z23))
+
+/*
+** rshl_single_z24_z24_z0:
+** urshl {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z0, svuint16x4_t, svint16_t, z24,
+ svrshl_single_u16_x4 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** |
+** urshl {z28\.h - z31\.h}, {z28\.h - z31\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z28_z0, svuint16x4_t, svint16_t, z24,
+ svrshl_single_u16_x4 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z1_z0, svuint16x4_t, svint16_t, z24,
+ svrshl_single_u16_x4 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z1_z24_z0:
+** urshl {z24\.h - z27\.h}, {z24\.h - z27\.h}, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z24_z0, svuint16x4_t, svint16_t, z1,
+ svrshl_single_u16_x4 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl ({z[0-9]+\.h - z[0-9]+\.h}), \1, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z1_z0, svuint16x4_t, svint16_t, z1,
+ svrshl_single_u16_x4 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl [^\n]+, z0\.h
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z18_z18_z0, svuint16x4_t, svint16_t, z18,
+ svrshl_single_u16_x4 (z18, z0),
+ svrshl (z18, z0))
+
+/*
+** rshl_single_awkward:
+** ...
+** urshl ({z[0-9]+\.h - z[0-9]+\.h}), \1, z[0-9]+\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (rshl_single_awkward, svuint16x4_t, svint16_t,
+ z0_res = svrshl_single_u16_x4 (z1, z0),
+ z0_res = svrshl (z1, z0))
+
+/*
+** rshl_single_z0_z0_z15:
+** ...
+** urshl {z0\.h - z3\.h}, {z0\.h - z3\.h}, z15\.h
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (rshl_single_z0_z0_z15, svuint16x4_t, svint16_t,
+ z0 = svrshl_single_u16_x4 (z0, z15),
+ z0 = svrshl (z0, z15))
+
+/*
+** rshl_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** urshl {z24\.h - z27\.h}, {z24\.h - z27\.h}, \1\.h
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z16, svuint16x4_t, svint16_t, z24,
+ svrshl_single_u16_x4 (z24, z16),
+ svrshl (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u32_x2.c
new file mode 100644
index 0000000..431e608
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u32_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rshl_z0_z0_z4:
+** urshl {z0\.s - z1\.s}, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_DUAL_XN (rshl_z0_z0_z4, svuint32x2_t, svint32x2_t, z0,
+ svrshl_u32_x2 (z0, z4),
+ svrshl (z0, z4))
+
+/*
+** rshl_z4_z4_z0:
+** urshl {z4\.s - z5\.s}, {z4\.s - z5\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_DUAL_XN (rshl_z4_z4_z0, svint32x2_t, svuint32x2_t, z4,
+ svrshl_u32_x2 (z4, z0),
+ svrshl (z4, z0))
+
+/*
+** rshl_z0_z28_z4:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** urshl [^\n]+, {z4\.s - z5\.s}
+** |
+** urshl [^\n]+, {z4\.s - z5\.s}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_DUAL_XN (rshl_z0_z28_z4, svuint32x2_t, svint32x2_t, z0,
+ svrshl_u32_x2 (z28, z4),
+ svrshl (z28, z4))
+
+/*
+** rshl_z18_z18_z4:
+** urshl {z18\.s - z19\.s}, {z18\.s - z19\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_DUAL_XN (rshl_z18_z18_z4, svuint32x2_t, svint32x2_t, z18,
+ svrshl_u32_x2 (z18, z4),
+ svrshl (z18, z4))
+
+/*
+** rshl_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** urshl [^\n]+, {z18\.s - z19\.s}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (rshl_z23_z23_z18, svint32x2_t, svuint32x2_t, z23,
+ svrshl_u32_x2 (z23, z18),
+ svrshl (z23, z18))
+
+/*
+** rshl_z28_z28_z4:
+** urshl {z28\.s - z29\.s}, {z28\.s - z29\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_DUAL_XN (rshl_z28_z28_z4, svuint32x2_t, svint32x2_t, z28,
+ svrshl_u32_x2 (z28, z4),
+ svrshl (z28, z4))
+
+/*
+** rshl_z4_z4_z18:
+** urshl {z4\.s - z5\.s}, {z4\.s - z5\.s}, {z18\.s - z19\.s}
+** ret
+*/
+TEST_DUAL_XN (rshl_z4_z4_z18, svint32x2_t, svuint32x2_t, z4,
+ svrshl_u32_x2 (z4, z18),
+ svrshl (z4, z18))
+
+/*
+** rshl_z28_z28_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z28\.s - z29\.s}, {z28\.s - z29\.s}, [^\n]+
+** |
+** urshl {z28\.s - z29\.s}, {z28\.s - z29\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_DUAL_XN (rshl_z28_z28_z23, svuint32x2_t, svint32x2_t, z28,
+ svrshl_u32_x2 (z28, z23),
+ svrshl (z28, z23))
+
+/*
+** rshl_single_z24_z24_z0:
+** urshl {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z0, svuint32x2_t, svint32_t, z24,
+ svrshl_single_u32_x2 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** |
+** urshl {z28\.s - z29\.s}, {z28\.s - z29\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z28_z0, svuint32x2_t, svint32_t, z24,
+ svrshl_single_u32_x2 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** urshl {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z1_z0, svuint32x2_t, svint32_t, z24,
+ svrshl_single_u32_x2 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z1_z24_z0:
+** urshl {z24\.s - z25\.s}, {z24\.s - z25\.s}, z0\.s
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z24_z0, svuint32x2_t, svint32_t, z1,
+ svrshl_single_u32_x2 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** urshl ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z1_z0, svuint32x2_t, svint32_t, z1,
+ svrshl_single_u32_x2 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z18_z18_z0:
+** urshl {z18\.s - z19\.s}, {z18\.s - z19\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z18_z18_z0, svuint32x2_t, svint32_t, z18,
+ svrshl_single_u32_x2 (z18, z0),
+ svrshl (z18, z0))
+
+/*
+** rshl_single_awkward:
+** ...
+** urshl ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (rshl_single_awkward, svuint32x2_t, svint32_t,
+ z0_res = svrshl_single_u32_x2 (z1, z0),
+ z0_res = svrshl (z1, z0))
+
+/*
+** rshl_single_z0_z0_z15:
+** ...
+** urshl {z0\.s - z1\.s}, {z0\.s - z1\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (rshl_single_z0_z0_z15, svuint32x2_t, svint32_t,
+ z0 = svrshl_single_u32_x2 (z0, z15),
+ z0 = svrshl (z0, z15))
+
+/*
+** rshl_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** urshl {z24\.s - z25\.s}, {z24\.s - z25\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z16, svuint32x2_t, svint32_t, z24,
+ svrshl_single_u32_x2 (z24, z16),
+ svrshl (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u32_x4.c
new file mode 100644
index 0000000..6f06973
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u32_x4.c
@@ -0,0 +1,228 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rshl_z0_z0_z4:
+** urshl {z0\.s - z3\.s}, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_DUAL_XN (rshl_z0_z0_z4, svuint32x4_t, svint32x4_t, z0,
+ svrshl_u32_x4 (z0, z4),
+ svrshl (z0, z4))
+
+/*
+** rshl_z4_z4_z0:
+** urshl {z4\.s - z7\.s}, {z4\.s - z7\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_DUAL_XN (rshl_z4_z4_z0, svint32x4_t, svuint32x4_t, z4,
+ svrshl_u32_x4 (z4, z0),
+ svrshl (z4, z0))
+
+/*
+** rshl_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl [^\n]+, {z4\.s - z7\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (rshl_z18_z18_z4, svuint32x4_t, svint32x4_t, z18,
+ svrshl_u32_x4 (z18, z4),
+ svrshl (z18, z4))
+
+/*
+** rshl_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (rshl_z23_z23_z28, svint32x4_t, svuint32x4_t, z23,
+ svrshl_u32_x4 (z23, z28),
+ svrshl (z23, z28))
+
+/*
+** rshl_z28_z28_z4:
+** urshl {z28\.s - z31\.s}, {z28\.s - z31\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_DUAL_XN (rshl_z28_z28_z4, svuint32x4_t, svint32x4_t, z28,
+ svrshl_u32_x4 (z28, z4),
+ svrshl (z28, z4))
+
+/*
+** rshl_z4_z4_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z4\.s - z7\.s}, {z4\.s - z7\.s}, [^\n]+
+** |
+** urshl {z4\.s - z7\.s}, {z4\.s - z7\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_DUAL_XN (rshl_z4_z4_z18, svint32x4_t, svuint32x4_t, z4,
+ svrshl_u32_x4 (z4, z18),
+ svrshl (z4, z18))
+
+/*
+** rshl_z0_z0_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z0\.s - z3\.s}, {z0\.s - z3\.s}, [^\n]+
+** |
+** urshl {z0\.s - z3\.s}, {z0\.s - z3\.s}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_DUAL_XN (rshl_z0_z0_z23, svuint32x4_t, svint32x4_t, z0,
+ svrshl_u32_x4 (z0, z23),
+ svrshl (z0, z23))
+
+/*
+** rshl_single_z24_z24_z0:
+** urshl {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z0, svuint32x4_t, svint32_t, z24,
+ svrshl_single_u32_x4 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** |
+** urshl {z28\.s - z31\.s}, {z28\.s - z31\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z28_z0, svuint32x4_t, svint32_t, z24,
+ svrshl_single_u32_x4 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z1_z0, svuint32x4_t, svint32_t, z24,
+ svrshl_single_u32_x4 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z1_z24_z0:
+** urshl {z24\.s - z27\.s}, {z24\.s - z27\.s}, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z24_z0, svuint32x4_t, svint32_t, z1,
+ svrshl_single_u32_x4 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl ({z[0-9]+\.s - z[0-9]+\.s}), \1, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z1_z0, svuint32x4_t, svint32_t, z1,
+ svrshl_single_u32_x4 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl [^\n]+, z0\.s
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z18_z18_z0, svuint32x4_t, svint32_t, z18,
+ svrshl_single_u32_x4 (z18, z0),
+ svrshl (z18, z0))
+
+/*
+** rshl_single_awkward:
+** ...
+** urshl ({z[0-9]+\.s - z[0-9]+\.s}), \1, z[0-9]+\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (rshl_single_awkward, svuint32x4_t, svint32_t,
+ z0_res = svrshl_single_u32_x4 (z1, z0),
+ z0_res = svrshl (z1, z0))
+
+/*
+** rshl_single_z0_z0_z15:
+** ...
+** urshl {z0\.s - z3\.s}, {z0\.s - z3\.s}, z15\.s
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (rshl_single_z0_z0_z15, svuint32x4_t, svint32_t,
+ z0 = svrshl_single_u32_x4 (z0, z15),
+ z0 = svrshl (z0, z15))
+
+/*
+** rshl_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** urshl {z24\.s - z27\.s}, {z24\.s - z27\.s}, \1\.s
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z16, svuint32x4_t, svint32_t, z24,
+ svrshl_single_u32_x4 (z24, z16),
+ svrshl (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u64_x2.c
new file mode 100644
index 0000000..b901416
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u64_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rshl_z0_z0_z4:
+** urshl {z0\.d - z1\.d}, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_DUAL_XN (rshl_z0_z0_z4, svuint64x2_t, svint64x2_t, z0,
+ svrshl_u64_x2 (z0, z4),
+ svrshl (z0, z4))
+
+/*
+** rshl_z4_z4_z0:
+** urshl {z4\.d - z5\.d}, {z4\.d - z5\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_DUAL_XN (rshl_z4_z4_z0, svint64x2_t, svuint64x2_t, z4,
+ svrshl_u64_x2 (z4, z0),
+ svrshl (z4, z0))
+
+/*
+** rshl_z0_z28_z4:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** urshl [^\n]+, {z4\.d - z5\.d}
+** |
+** urshl [^\n]+, {z4\.d - z5\.d}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_DUAL_XN (rshl_z0_z28_z4, svuint64x2_t, svint64x2_t, z0,
+ svrshl_u64_x2 (z28, z4),
+ svrshl (z28, z4))
+
+/*
+** rshl_z18_z18_z4:
+** urshl {z18\.d - z19\.d}, {z18\.d - z19\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_DUAL_XN (rshl_z18_z18_z4, svuint64x2_t, svint64x2_t, z18,
+ svrshl_u64_x2 (z18, z4),
+ svrshl (z18, z4))
+
+/*
+** rshl_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** urshl [^\n]+, {z18\.d - z19\.d}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (rshl_z23_z23_z18, svint64x2_t, svuint64x2_t, z23,
+ svrshl_u64_x2 (z23, z18),
+ svrshl (z23, z18))
+
+/*
+** rshl_z28_z28_z4:
+** urshl {z28\.d - z29\.d}, {z28\.d - z29\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_DUAL_XN (rshl_z28_z28_z4, svuint64x2_t, svint64x2_t, z28,
+ svrshl_u64_x2 (z28, z4),
+ svrshl (z28, z4))
+
+/*
+** rshl_z4_z4_z18:
+** urshl {z4\.d - z5\.d}, {z4\.d - z5\.d}, {z18\.d - z19\.d}
+** ret
+*/
+TEST_DUAL_XN (rshl_z4_z4_z18, svint64x2_t, svuint64x2_t, z4,
+ svrshl_u64_x2 (z4, z18),
+ svrshl (z4, z18))
+
+/*
+** rshl_z28_z28_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z28\.d - z29\.d}, {z28\.d - z29\.d}, [^\n]+
+** |
+** urshl {z28\.d - z29\.d}, {z28\.d - z29\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_DUAL_XN (rshl_z28_z28_z23, svuint64x2_t, svint64x2_t, z28,
+ svrshl_u64_x2 (z28, z23),
+ svrshl (z28, z23))
+
+/*
+** rshl_single_z24_z24_z0:
+** urshl {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z0, svuint64x2_t, svint64_t, z24,
+ svrshl_single_u64_x2 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** |
+** urshl {z28\.d - z29\.d}, {z28\.d - z29\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z28_z0, svuint64x2_t, svint64_t, z24,
+ svrshl_single_u64_x2 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** urshl {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z1_z0, svuint64x2_t, svint64_t, z24,
+ svrshl_single_u64_x2 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z1_z24_z0:
+** urshl {z24\.d - z25\.d}, {z24\.d - z25\.d}, z0\.d
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z24_z0, svuint64x2_t, svint64_t, z1,
+ svrshl_single_u64_x2 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** urshl ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z1_z0, svuint64x2_t, svint64_t, z1,
+ svrshl_single_u64_x2 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z18_z18_z0:
+** urshl {z18\.d - z19\.d}, {z18\.d - z19\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z18_z18_z0, svuint64x2_t, svint64_t, z18,
+ svrshl_single_u64_x2 (z18, z0),
+ svrshl (z18, z0))
+
+/*
+** rshl_single_awkward:
+** ...
+** urshl ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (rshl_single_awkward, svuint64x2_t, svint64_t,
+ z0_res = svrshl_single_u64_x2 (z1, z0),
+ z0_res = svrshl (z1, z0))
+
+/*
+** rshl_single_z0_z0_z15:
+** ...
+** urshl {z0\.d - z1\.d}, {z0\.d - z1\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (rshl_single_z0_z0_z15, svuint64x2_t, svint64_t,
+ z0 = svrshl_single_u64_x2 (z0, z15),
+ z0 = svrshl (z0, z15))
+
+/*
+** rshl_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** urshl {z24\.d - z25\.d}, {z24\.d - z25\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z16, svuint64x2_t, svint64_t, z24,
+ svrshl_single_u64_x2 (z24, z16),
+ svrshl (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u64_x4.c
new file mode 100644
index 0000000..7bb99a4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u64_x4.c
@@ -0,0 +1,228 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rshl_z0_z0_z4:
+** urshl {z0\.d - z3\.d}, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_DUAL_XN (rshl_z0_z0_z4, svuint64x4_t, svint64x4_t, z0,
+ svrshl_u64_x4 (z0, z4),
+ svrshl (z0, z4))
+
+/*
+** rshl_z4_z4_z0:
+** urshl {z4\.d - z7\.d}, {z4\.d - z7\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_DUAL_XN (rshl_z4_z4_z0, svint64x4_t, svuint64x4_t, z4,
+ svrshl_u64_x4 (z4, z0),
+ svrshl (z4, z0))
+
+/*
+** rshl_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl [^\n]+, {z4\.d - z7\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (rshl_z18_z18_z4, svuint64x4_t, svint64x4_t, z18,
+ svrshl_u64_x4 (z18, z4),
+ svrshl (z18, z4))
+
+/*
+** rshl_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (rshl_z23_z23_z28, svint64x4_t, svuint64x4_t, z23,
+ svrshl_u64_x4 (z23, z28),
+ svrshl (z23, z28))
+
+/*
+** rshl_z28_z28_z4:
+** urshl {z28\.d - z31\.d}, {z28\.d - z31\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_DUAL_XN (rshl_z28_z28_z4, svuint64x4_t, svint64x4_t, z28,
+ svrshl_u64_x4 (z28, z4),
+ svrshl (z28, z4))
+
+/*
+** rshl_z4_z4_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z4\.d - z7\.d}, {z4\.d - z7\.d}, [^\n]+
+** |
+** urshl {z4\.d - z7\.d}, {z4\.d - z7\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_DUAL_XN (rshl_z4_z4_z18, svint64x4_t, svuint64x4_t, z4,
+ svrshl_u64_x4 (z4, z18),
+ svrshl (z4, z18))
+
+/*
+** rshl_z0_z0_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z0\.d - z3\.d}, {z0\.d - z3\.d}, [^\n]+
+** |
+** urshl {z0\.d - z3\.d}, {z0\.d - z3\.d}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_DUAL_XN (rshl_z0_z0_z23, svuint64x4_t, svint64x4_t, z0,
+ svrshl_u64_x4 (z0, z23),
+ svrshl (z0, z23))
+
+/*
+** rshl_single_z24_z24_z0:
+** urshl {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z0, svuint64x4_t, svint64_t, z24,
+ svrshl_single_u64_x4 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** |
+** urshl {z28\.d - z31\.d}, {z28\.d - z31\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z28_z0, svuint64x4_t, svint64_t, z24,
+ svrshl_single_u64_x4 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z1_z0, svuint64x4_t, svint64_t, z24,
+ svrshl_single_u64_x4 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z1_z24_z0:
+** urshl {z24\.d - z27\.d}, {z24\.d - z27\.d}, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z24_z0, svuint64x4_t, svint64_t, z1,
+ svrshl_single_u64_x4 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl ({z[0-9]+\.d - z[0-9]+\.d}), \1, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z1_z0, svuint64x4_t, svint64_t, z1,
+ svrshl_single_u64_x4 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl [^\n]+, z0\.d
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z18_z18_z0, svuint64x4_t, svint64_t, z18,
+ svrshl_single_u64_x4 (z18, z0),
+ svrshl (z18, z0))
+
+/*
+** rshl_single_awkward:
+** ...
+** urshl ({z[0-9]+\.d - z[0-9]+\.d}), \1, z[0-9]+\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (rshl_single_awkward, svuint64x4_t, svint64_t,
+ z0_res = svrshl_single_u64_x4 (z1, z0),
+ z0_res = svrshl (z1, z0))
+
+/*
+** rshl_single_z0_z0_z15:
+** ...
+** urshl {z0\.d - z3\.d}, {z0\.d - z3\.d}, z15\.d
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (rshl_single_z0_z0_z15, svuint64x4_t, svint64_t,
+ z0 = svrshl_single_u64_x4 (z0, z15),
+ z0 = svrshl (z0, z15))
+
+/*
+** rshl_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** urshl {z24\.d - z27\.d}, {z24\.d - z27\.d}, \1\.d
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z16, svuint64x4_t, svint64_t, z24,
+ svrshl_single_u64_x4 (z24, z16),
+ svrshl (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u8_x2.c
new file mode 100644
index 0000000..e96c41f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u8_x2.c
@@ -0,0 +1,207 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rshl_z0_z0_z4:
+** urshl {z0\.b - z1\.b}, {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_DUAL_XN (rshl_z0_z0_z4, svuint8x2_t, svint8x2_t, z0,
+ svrshl_u8_x2 (z0, z4),
+ svrshl (z0, z4))
+
+/*
+** rshl_z4_z4_z0:
+** urshl {z4\.b - z5\.b}, {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_DUAL_XN (rshl_z4_z4_z0, svint8x2_t, svuint8x2_t, z4,
+ svrshl_u8_x2 (z4, z0),
+ svrshl (z4, z0))
+
+/*
+** rshl_z0_z28_z4:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** urshl [^\n]+, {z4\.b - z5\.b}
+** |
+** urshl [^\n]+, {z4\.b - z5\.b}
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_DUAL_XN (rshl_z0_z28_z4, svuint8x2_t, svint8x2_t, z0,
+ svrshl_u8_x2 (z28, z4),
+ svrshl (z28, z4))
+
+/*
+** rshl_z18_z18_z4:
+** urshl {z18\.b - z19\.b}, {z18\.b - z19\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_DUAL_XN (rshl_z18_z18_z4, svuint8x2_t, svint8x2_t, z18,
+ svrshl_u8_x2 (z18, z4),
+ svrshl (z18, z4))
+
+/*
+** rshl_z23_z23_z18:
+** mov [^\n]+
+** mov [^\n]+
+** urshl [^\n]+, {z18\.b - z19\.b}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (rshl_z23_z23_z18, svint8x2_t, svuint8x2_t, z23,
+ svrshl_u8_x2 (z23, z18),
+ svrshl (z23, z18))
+
+/*
+** rshl_z28_z28_z4:
+** urshl {z28\.b - z29\.b}, {z28\.b - z29\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_DUAL_XN (rshl_z28_z28_z4, svuint8x2_t, svint8x2_t, z28,
+ svrshl_u8_x2 (z28, z4),
+ svrshl (z28, z4))
+
+/*
+** rshl_z4_z4_z18:
+** urshl {z4\.b - z5\.b}, {z4\.b - z5\.b}, {z18\.b - z19\.b}
+** ret
+*/
+TEST_DUAL_XN (rshl_z4_z4_z18, svint8x2_t, svuint8x2_t, z4,
+ svrshl_u8_x2 (z4, z18),
+ svrshl (z4, z18))
+
+/*
+** rshl_z28_z28_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z28\.b - z29\.b}, {z28\.b - z29\.b}, [^\n]+
+** |
+** urshl {z28\.b - z29\.b}, {z28\.b - z29\.b}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_DUAL_XN (rshl_z28_z28_z23, svuint8x2_t, svint8x2_t, z28,
+ svrshl_u8_x2 (z28, z23),
+ svrshl (z28, z23))
+
+/*
+** rshl_single_z24_z24_z0:
+** urshl {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z0, svuint8x2_t, svint8_t, z24,
+ svrshl_single_u8_x2 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** |
+** urshl {z28\.b - z29\.b}, {z28\.b - z29\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z28_z0, svuint8x2_t, svint8_t, z24,
+ svrshl_single_u8_x2 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_single_z24_z1_z0:
+** (
+** mov z24\.d, z1\.d
+** mov z25\.d, z2\.d
+** |
+** mov z25\.d, z2\.d
+** mov z24\.d, z1\.d
+** )
+** urshl {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z1_z0, svuint8x2_t, svint8_t, z24,
+ svrshl_single_u8_x2 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z1_z24_z0:
+** urshl {z24\.b - z25\.b}, {z24\.b - z25\.b}, z0\.b
+** (
+** mov z1\.d, z24\.d
+** mov z2\.d, z25\.d
+** |
+** mov z2\.d, z25\.d
+** mov z1\.d, z24\.d
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z24_z0, svuint8x2_t, svint8_t, z1,
+ svrshl_single_u8_x2 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** urshl ({z[0-9]+\.b - z[0-9]+\.b}), \1, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z1_z0, svuint8x2_t, svint8_t, z1,
+ svrshl_single_u8_x2 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z18_z18_z0:
+** urshl {z18\.b - z19\.b}, {z18\.b - z19\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z18_z18_z0, svuint8x2_t, svint8_t, z18,
+ svrshl_single_u8_x2 (z18, z0),
+ svrshl (z18, z0))
+
+/*
+** rshl_single_awkward:
+** ...
+** urshl ({z[0-9]+\.b - z[0-9]+\.b}), \1, z[0-9]+\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (rshl_single_awkward, svuint8x2_t, svint8_t,
+ z0_res = svrshl_single_u8_x2 (z1, z0),
+ z0_res = svrshl (z1, z0))
+
+/*
+** rshl_single_z0_z0_z15:
+** ...
+** urshl {z0\.b - z1\.b}, {z0\.b - z1\.b}, z15\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (rshl_single_z0_z0_z15, svuint8x2_t, svint8_t,
+ z0 = svrshl_single_u8_x2 (z0, z15),
+ z0 = svrshl (z0, z15))
+
+/*
+** rshl_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** urshl {z24\.b - z25\.b}, {z24\.b - z25\.b}, \1\.b
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z16, svuint8x2_t, svint8_t, z24,
+ svrshl_single_u8_x2 (z24, z16),
+ svrshl (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u8_x4.c
new file mode 100644
index 0000000..b0968f6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/rshl_u8_x4.c
@@ -0,0 +1,228 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** rshl_z0_z0_z4:
+** urshl {z0\.b - z3\.b}, {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_DUAL_XN (rshl_z0_z0_z4, svuint8x4_t, svint8x4_t, z0,
+ svrshl_u8_x4 (z0, z4),
+ svrshl (z0, z4))
+
+/*
+** rshl_z4_z4_z0:
+** urshl {z4\.b - z7\.b}, {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_DUAL_XN (rshl_z4_z4_z0, svint8x4_t, svuint8x4_t, z4,
+ svrshl_u8_x4 (z4, z0),
+ svrshl (z4, z0))
+
+/*
+** rshl_z18_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl [^\n]+, {z4\.b - z7\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (rshl_z18_z18_z4, svuint8x4_t, svint8x4_t, z18,
+ svrshl_u8_x4 (z18, z4),
+ svrshl (z18, z4))
+
+/*
+** rshl_z23_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl [^\n]+, {z28\.b - z31\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (rshl_z23_z23_z28, svint8x4_t, svuint8x4_t, z23,
+ svrshl_u8_x4 (z23, z28),
+ svrshl (z23, z28))
+
+/*
+** rshl_z28_z28_z4:
+** urshl {z28\.b - z31\.b}, {z28\.b - z31\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_DUAL_XN (rshl_z28_z28_z4, svuint8x4_t, svint8x4_t, z28,
+ svrshl_u8_x4 (z28, z4),
+ svrshl (z28, z4))
+
+/*
+** rshl_z4_z4_z18:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z4\.b - z7\.b}, {z4\.b - z7\.b}, [^\n]+
+** |
+** urshl {z4\.b - z7\.b}, {z4\.b - z7\.b}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_DUAL_XN (rshl_z4_z4_z18, svint8x4_t, svuint8x4_t, z4,
+ svrshl_u8_x4 (z4, z18),
+ svrshl (z4, z18))
+
+/*
+** rshl_z0_z0_z23:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z0\.b - z3\.b}, {z0\.b - z3\.b}, [^\n]+
+** |
+** urshl {z0\.b - z3\.b}, {z0\.b - z3\.b}, [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_DUAL_XN (rshl_z0_z0_z23, svuint8x4_t, svint8x4_t, z0,
+ svrshl_u8_x4 (z0, z23),
+ svrshl (z0, z23))
+
+/*
+** rshl_single_z24_z24_z0:
+** urshl {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z0, svuint8x4_t, svint8_t, z24,
+ svrshl_single_u8_x4 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z24_z28_z0:
+** (
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** |
+** urshl {z28\.b - z31\.b}, {z28\.b - z31\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** )
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z28_z0, svuint8x4_t, svint8_t, z24,
+ svrshl_single_u8_x4 (z28, z0),
+ svrshl (z28, z0))
+
+/*
+** rshl_single_z24_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z1_z0, svuint8x4_t, svint8_t, z24,
+ svrshl_single_u8_x4 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z1_z24_z0:
+** urshl {z24\.b - z27\.b}, {z24\.b - z27\.b}, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z24_z0, svuint8x4_t, svint8_t, z1,
+ svrshl_single_u8_x4 (z24, z0),
+ svrshl (z24, z0))
+
+/*
+** rshl_single_z1_z1_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl ({z[0-9]+\.b - z[0-9]+\.b}), \1, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z1_z1_z0, svuint8x4_t, svint8_t, z1,
+ svrshl_single_u8_x4 (z1, z0),
+ svrshl (z1, z0))
+
+/*
+** rshl_single_z18_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** urshl [^\n]+, z0\.b
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z18_z18_z0, svuint8x4_t, svint8_t, z18,
+ svrshl_single_u8_x4 (z18, z0),
+ svrshl (z18, z0))
+
+/*
+** rshl_single_awkward:
+** ...
+** urshl ({z[0-9]+\.b - z[0-9]+\.b}), \1, z[0-9]+\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_AWKWARD (rshl_single_awkward, svuint8x4_t, svint8_t,
+ z0_res = svrshl_single_u8_x4 (z1, z0),
+ z0_res = svrshl (z1, z0))
+
+/*
+** rshl_single_z0_z0_z15:
+** ...
+** urshl {z0\.b - z3\.b}, {z0\.b - z3\.b}, z15\.b
+** ...
+** ret
+*/
+TEST_XN_SINGLE_Z15 (rshl_single_z0_z0_z15, svuint8x4_t, svint8_t,
+ z0 = svrshl_single_u8_x4 (z0, z15),
+ z0 = svrshl (z0, z15))
+
+/*
+** rshl_single_z24_z24_z16:
+** mov (z[0-7])\.d, z16\.d
+** urshl {z24\.b - z27\.b}, {z24\.b - z27\.b}, \1\.b
+** ret
+*/
+TEST_XN_SINGLE (rshl_single_z24_z24_z16, svuint8x4_t, svint8_t, z24,
+ svrshl_single_u8_x4 (z24, z16),
+ svrshl (z24, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_bf16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_bf16_x2.c
new file mode 100644
index 0000000..055bee2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_bf16_x2.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.h - z1\.h}, pn\1, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svbfloat16x2_t, z0,
+ svsel_bf16_x2 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.h - z1\.h}, pn\1, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svbfloat16x2_t, z0,
+ svsel_bf16_x2 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.h - z1\.h}, pn8, {z4\.h - z5\.h}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svbfloat16x2_t, z0,
+ svsel_bf16_x2 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** sel {z4\.h - z5\.h}, pn8, {z18\.h - z19\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svbfloat16x2_t, z4,
+ svsel_bf16_x2 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {z18\.h - z19\.h}, pn15, {z28\.h - z29\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svbfloat16x2_t, z18,
+ svsel_bf16_x2 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** sel {z18\.h - z19\.h}, pn8, {z18\.h - z19\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svbfloat16x2_t, z18,
+ svsel_bf16_x2 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** sel [^\n]+, pn15, {z0\.h - z1\.h}, {z18\.h - z19\.h}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svbfloat16x2_t, z23,
+ svsel_bf16_x2 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
+
+/*
+** sel_z0_pn15_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.h - z1\.h}, pn15, {[^}]+}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn15_z23_z28, svbfloat16x2_t, z0,
+ svsel_bf16_x2 (pn15, z23, z28),
+ svsel (pn15, z23, z28))
+
+/*
+** sel_z0_pn8_z28_z23:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.h - z1\.h}, pn8, {z28\.h - z29\.h}, {[^}]+}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z28_z23, svbfloat16x2_t, z0,
+ svsel_bf16_x2 (pn8, z28, z23),
+ svsel (pn8, z28, z23))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_bf16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_bf16_x4.c
new file mode 100644
index 0000000..8c0d53a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_bf16_x4.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.h - z3\.h}, pn\1, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svbfloat16x4_t, z0,
+ svsel_bf16_x4 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.h - z3\.h}, pn\1, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svbfloat16x4_t, z0,
+ svsel_bf16_x4 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.h - z3\.h}, pn8, {z4\.h - z7\.h}, {z28\.h - z31\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svbfloat16x4_t, z0,
+ svsel_bf16_x4 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {z4\.h - z7\.h}, pn8, {[^}]+}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svbfloat16x4_t, z4,
+ svsel_bf16_x4 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {[^}]+}, pn15, {z28\.h - z31\.h}, {z4\.h - z7\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svbfloat16x4_t, z18,
+ svsel_bf16_x4 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {[^}]+}, pn8, {[^}]+}, {z4\.h - z7\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svbfloat16x4_t, z18,
+ svsel_bf16_x4 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel [^\n]+, pn15, {z0\.h - z3\.h}, {[^}]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svbfloat16x4_t, z23,
+ svsel_bf16_x4 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f16_x2.c
new file mode 100644
index 0000000..617f8bb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f16_x2.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.h - z1\.h}, pn\1, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svfloat16x2_t, z0,
+ svsel_f16_x2 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.h - z1\.h}, pn\1, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svfloat16x2_t, z0,
+ svsel_f16_x2 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.h - z1\.h}, pn8, {z4\.h - z5\.h}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svfloat16x2_t, z0,
+ svsel_f16_x2 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** sel {z4\.h - z5\.h}, pn8, {z18\.h - z19\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svfloat16x2_t, z4,
+ svsel_f16_x2 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {z18\.h - z19\.h}, pn15, {z28\.h - z29\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svfloat16x2_t, z18,
+ svsel_f16_x2 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** sel {z18\.h - z19\.h}, pn8, {z18\.h - z19\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svfloat16x2_t, z18,
+ svsel_f16_x2 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** sel [^\n]+, pn15, {z0\.h - z1\.h}, {z18\.h - z19\.h}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svfloat16x2_t, z23,
+ svsel_f16_x2 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
+
+/*
+** sel_z0_pn15_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.h - z1\.h}, pn15, {[^}]+}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn15_z23_z28, svfloat16x2_t, z0,
+ svsel_f16_x2 (pn15, z23, z28),
+ svsel (pn15, z23, z28))
+
+/*
+** sel_z0_pn8_z28_z23:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.h - z1\.h}, pn8, {z28\.h - z29\.h}, {[^}]+}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z28_z23, svfloat16x2_t, z0,
+ svsel_f16_x2 (pn8, z28, z23),
+ svsel (pn8, z28, z23))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f16_x4.c
new file mode 100644
index 0000000..5daa8ca
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f16_x4.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.h - z3\.h}, pn\1, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svfloat16x4_t, z0,
+ svsel_f16_x4 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.h - z3\.h}, pn\1, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svfloat16x4_t, z0,
+ svsel_f16_x4 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.h - z3\.h}, pn8, {z4\.h - z7\.h}, {z28\.h - z31\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svfloat16x4_t, z0,
+ svsel_f16_x4 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {z4\.h - z7\.h}, pn8, {[^}]+}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svfloat16x4_t, z4,
+ svsel_f16_x4 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {[^}]+}, pn15, {z28\.h - z31\.h}, {z4\.h - z7\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svfloat16x4_t, z18,
+ svsel_f16_x4 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {[^}]+}, pn8, {[^}]+}, {z4\.h - z7\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svfloat16x4_t, z18,
+ svsel_f16_x4 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel [^\n]+, pn15, {z0\.h - z3\.h}, {[^}]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svfloat16x4_t, z23,
+ svsel_f16_x4 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f32_x2.c
new file mode 100644
index 0000000..a3bbb4f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f32_x2.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.s - z1\.s}, pn\1, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svfloat32x2_t, z0,
+ svsel_f32_x2 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.s - z1\.s}, pn\1, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svfloat32x2_t, z0,
+ svsel_f32_x2 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.s - z1\.s}, pn8, {z4\.s - z5\.s}, {z28\.s - z29\.s}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svfloat32x2_t, z0,
+ svsel_f32_x2 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** sel {z4\.s - z5\.s}, pn8, {z18\.s - z19\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svfloat32x2_t, z4,
+ svsel_f32_x2 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {z18\.s - z19\.s}, pn15, {z28\.s - z29\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svfloat32x2_t, z18,
+ svsel_f32_x2 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** sel {z18\.s - z19\.s}, pn8, {z18\.s - z19\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svfloat32x2_t, z18,
+ svsel_f32_x2 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** sel [^\n]+, pn15, {z0\.s - z1\.s}, {z18\.s - z19\.s}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svfloat32x2_t, z23,
+ svsel_f32_x2 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
+
+/*
+** sel_z0_pn15_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.s - z1\.s}, pn15, {[^}]+}, {z28\.s - z29\.s}
+** ret
+*/
+TEST_XN (sel_z0_pn15_z23_z28, svfloat32x2_t, z0,
+ svsel_f32_x2 (pn15, z23, z28),
+ svsel (pn15, z23, z28))
+
+/*
+** sel_z0_pn8_z28_z23:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.s - z1\.s}, pn8, {z28\.s - z29\.s}, {[^}]+}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z28_z23, svfloat32x2_t, z0,
+ svsel_f32_x2 (pn8, z28, z23),
+ svsel (pn8, z28, z23))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f32_x4.c
new file mode 100644
index 0000000..d0b4e45
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f32_x4.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.s - z3\.s}, pn\1, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svfloat32x4_t, z0,
+ svsel_f32_x4 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.s - z3\.s}, pn\1, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svfloat32x4_t, z0,
+ svsel_f32_x4 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.s - z3\.s}, pn8, {z4\.s - z7\.s}, {z28\.s - z31\.s}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svfloat32x4_t, z0,
+ svsel_f32_x4 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {z4\.s - z7\.s}, pn8, {[^}]+}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svfloat32x4_t, z4,
+ svsel_f32_x4 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {[^}]+}, pn15, {z28\.s - z31\.s}, {z4\.s - z7\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svfloat32x4_t, z18,
+ svsel_f32_x4 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {[^}]+}, pn8, {[^}]+}, {z4\.s - z7\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svfloat32x4_t, z18,
+ svsel_f32_x4 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel [^\n]+, pn15, {z0\.s - z3\.s}, {[^}]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svfloat32x4_t, z23,
+ svsel_f32_x4 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f64_x2.c
new file mode 100644
index 0000000..678a410
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f64_x2.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.d - z1\.d}, pn\1, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svfloat64x2_t, z0,
+ svsel_f64_x2 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.d - z1\.d}, pn\1, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svfloat64x2_t, z0,
+ svsel_f64_x2 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.d - z1\.d}, pn8, {z4\.d - z5\.d}, {z28\.d - z29\.d}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svfloat64x2_t, z0,
+ svsel_f64_x2 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** sel {z4\.d - z5\.d}, pn8, {z18\.d - z19\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svfloat64x2_t, z4,
+ svsel_f64_x2 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {z18\.d - z19\.d}, pn15, {z28\.d - z29\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svfloat64x2_t, z18,
+ svsel_f64_x2 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** sel {z18\.d - z19\.d}, pn8, {z18\.d - z19\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svfloat64x2_t, z18,
+ svsel_f64_x2 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** sel [^\n]+, pn15, {z0\.d - z1\.d}, {z18\.d - z19\.d}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svfloat64x2_t, z23,
+ svsel_f64_x2 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
+
+/*
+** sel_z0_pn15_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.d - z1\.d}, pn15, {[^}]+}, {z28\.d - z29\.d}
+** ret
+*/
+TEST_XN (sel_z0_pn15_z23_z28, svfloat64x2_t, z0,
+ svsel_f64_x2 (pn15, z23, z28),
+ svsel (pn15, z23, z28))
+
+/*
+** sel_z0_pn8_z28_z23:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.d - z1\.d}, pn8, {z28\.d - z29\.d}, {[^}]+}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z28_z23, svfloat64x2_t, z0,
+ svsel_f64_x2 (pn8, z28, z23),
+ svsel (pn8, z28, z23))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f64_x4.c
new file mode 100644
index 0000000..08352f0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_f64_x4.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.d - z3\.d}, pn\1, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svfloat64x4_t, z0,
+ svsel_f64_x4 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.d - z3\.d}, pn\1, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svfloat64x4_t, z0,
+ svsel_f64_x4 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.d - z3\.d}, pn8, {z4\.d - z7\.d}, {z28\.d - z31\.d}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svfloat64x4_t, z0,
+ svsel_f64_x4 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {z4\.d - z7\.d}, pn8, {[^}]+}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svfloat64x4_t, z4,
+ svsel_f64_x4 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {[^}]+}, pn15, {z28\.d - z31\.d}, {z4\.d - z7\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svfloat64x4_t, z18,
+ svsel_f64_x4 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {[^}]+}, pn8, {[^}]+}, {z4\.d - z7\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svfloat64x4_t, z18,
+ svsel_f64_x4 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel [^\n]+, pn15, {z0\.d - z3\.d}, {[^}]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svfloat64x4_t, z23,
+ svsel_f64_x4 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s16_x2.c
new file mode 100644
index 0000000..14f5066
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s16_x2.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.h - z1\.h}, pn\1, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svint16x2_t, z0,
+ svsel_s16_x2 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.h - z1\.h}, pn\1, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svint16x2_t, z0,
+ svsel_s16_x2 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.h - z1\.h}, pn8, {z4\.h - z5\.h}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svint16x2_t, z0,
+ svsel_s16_x2 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** sel {z4\.h - z5\.h}, pn8, {z18\.h - z19\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svint16x2_t, z4,
+ svsel_s16_x2 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {z18\.h - z19\.h}, pn15, {z28\.h - z29\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svint16x2_t, z18,
+ svsel_s16_x2 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** sel {z18\.h - z19\.h}, pn8, {z18\.h - z19\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svint16x2_t, z18,
+ svsel_s16_x2 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** sel [^\n]+, pn15, {z0\.h - z1\.h}, {z18\.h - z19\.h}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svint16x2_t, z23,
+ svsel_s16_x2 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
+
+/*
+** sel_z0_pn15_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.h - z1\.h}, pn15, {[^}]+}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn15_z23_z28, svint16x2_t, z0,
+ svsel_s16_x2 (pn15, z23, z28),
+ svsel (pn15, z23, z28))
+
+/*
+** sel_z0_pn8_z28_z23:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.h - z1\.h}, pn8, {z28\.h - z29\.h}, {[^}]+}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z28_z23, svint16x2_t, z0,
+ svsel_s16_x2 (pn8, z28, z23),
+ svsel (pn8, z28, z23))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s16_x4.c
new file mode 100644
index 0000000..1aa89e3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s16_x4.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.h - z3\.h}, pn\1, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svint16x4_t, z0,
+ svsel_s16_x4 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.h - z3\.h}, pn\1, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svint16x4_t, z0,
+ svsel_s16_x4 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.h - z3\.h}, pn8, {z4\.h - z7\.h}, {z28\.h - z31\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svint16x4_t, z0,
+ svsel_s16_x4 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {z4\.h - z7\.h}, pn8, {[^}]+}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svint16x4_t, z4,
+ svsel_s16_x4 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {[^}]+}, pn15, {z28\.h - z31\.h}, {z4\.h - z7\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svint16x4_t, z18,
+ svsel_s16_x4 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {[^}]+}, pn8, {[^}]+}, {z4\.h - z7\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svint16x4_t, z18,
+ svsel_s16_x4 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel [^\n]+, pn15, {z0\.h - z3\.h}, {[^}]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svint16x4_t, z23,
+ svsel_s16_x4 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s32_x2.c
new file mode 100644
index 0000000..dd36c30
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s32_x2.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.s - z1\.s}, pn\1, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svint32x2_t, z0,
+ svsel_s32_x2 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.s - z1\.s}, pn\1, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svint32x2_t, z0,
+ svsel_s32_x2 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.s - z1\.s}, pn8, {z4\.s - z5\.s}, {z28\.s - z29\.s}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svint32x2_t, z0,
+ svsel_s32_x2 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** sel {z4\.s - z5\.s}, pn8, {z18\.s - z19\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svint32x2_t, z4,
+ svsel_s32_x2 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {z18\.s - z19\.s}, pn15, {z28\.s - z29\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svint32x2_t, z18,
+ svsel_s32_x2 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** sel {z18\.s - z19\.s}, pn8, {z18\.s - z19\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svint32x2_t, z18,
+ svsel_s32_x2 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** sel [^\n]+, pn15, {z0\.s - z1\.s}, {z18\.s - z19\.s}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svint32x2_t, z23,
+ svsel_s32_x2 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
+
+/*
+** sel_z0_pn15_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.s - z1\.s}, pn15, {[^}]+}, {z28\.s - z29\.s}
+** ret
+*/
+TEST_XN (sel_z0_pn15_z23_z28, svint32x2_t, z0,
+ svsel_s32_x2 (pn15, z23, z28),
+ svsel (pn15, z23, z28))
+
+/*
+** sel_z0_pn8_z28_z23:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.s - z1\.s}, pn8, {z28\.s - z29\.s}, {[^}]+}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z28_z23, svint32x2_t, z0,
+ svsel_s32_x2 (pn8, z28, z23),
+ svsel (pn8, z28, z23))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s32_x4.c
new file mode 100644
index 0000000..343a081
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s32_x4.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.s - z3\.s}, pn\1, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svint32x4_t, z0,
+ svsel_s32_x4 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.s - z3\.s}, pn\1, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svint32x4_t, z0,
+ svsel_s32_x4 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.s - z3\.s}, pn8, {z4\.s - z7\.s}, {z28\.s - z31\.s}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svint32x4_t, z0,
+ svsel_s32_x4 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {z4\.s - z7\.s}, pn8, {[^}]+}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svint32x4_t, z4,
+ svsel_s32_x4 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {[^}]+}, pn15, {z28\.s - z31\.s}, {z4\.s - z7\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svint32x4_t, z18,
+ svsel_s32_x4 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {[^}]+}, pn8, {[^}]+}, {z4\.s - z7\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svint32x4_t, z18,
+ svsel_s32_x4 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel [^\n]+, pn15, {z0\.s - z3\.s}, {[^}]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svint32x4_t, z23,
+ svsel_s32_x4 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s64_x2.c
new file mode 100644
index 0000000..f286b0e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s64_x2.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.d - z1\.d}, pn\1, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svint64x2_t, z0,
+ svsel_s64_x2 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.d - z1\.d}, pn\1, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svint64x2_t, z0,
+ svsel_s64_x2 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.d - z1\.d}, pn8, {z4\.d - z5\.d}, {z28\.d - z29\.d}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svint64x2_t, z0,
+ svsel_s64_x2 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** sel {z4\.d - z5\.d}, pn8, {z18\.d - z19\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svint64x2_t, z4,
+ svsel_s64_x2 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {z18\.d - z19\.d}, pn15, {z28\.d - z29\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svint64x2_t, z18,
+ svsel_s64_x2 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** sel {z18\.d - z19\.d}, pn8, {z18\.d - z19\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svint64x2_t, z18,
+ svsel_s64_x2 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** sel [^\n]+, pn15, {z0\.d - z1\.d}, {z18\.d - z19\.d}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svint64x2_t, z23,
+ svsel_s64_x2 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
+
+/*
+** sel_z0_pn15_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.d - z1\.d}, pn15, {[^}]+}, {z28\.d - z29\.d}
+** ret
+*/
+TEST_XN (sel_z0_pn15_z23_z28, svint64x2_t, z0,
+ svsel_s64_x2 (pn15, z23, z28),
+ svsel (pn15, z23, z28))
+
+/*
+** sel_z0_pn8_z28_z23:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.d - z1\.d}, pn8, {z28\.d - z29\.d}, {[^}]+}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z28_z23, svint64x2_t, z0,
+ svsel_s64_x2 (pn8, z28, z23),
+ svsel (pn8, z28, z23))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s64_x4.c
new file mode 100644
index 0000000..fbf554e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s64_x4.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.d - z3\.d}, pn\1, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svint64x4_t, z0,
+ svsel_s64_x4 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.d - z3\.d}, pn\1, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svint64x4_t, z0,
+ svsel_s64_x4 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.d - z3\.d}, pn8, {z4\.d - z7\.d}, {z28\.d - z31\.d}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svint64x4_t, z0,
+ svsel_s64_x4 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {z4\.d - z7\.d}, pn8, {[^}]+}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svint64x4_t, z4,
+ svsel_s64_x4 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {[^}]+}, pn15, {z28\.d - z31\.d}, {z4\.d - z7\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svint64x4_t, z18,
+ svsel_s64_x4 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {[^}]+}, pn8, {[^}]+}, {z4\.d - z7\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svint64x4_t, z18,
+ svsel_s64_x4 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel [^\n]+, pn15, {z0\.d - z3\.d}, {[^}]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svint64x4_t, z23,
+ svsel_s64_x4 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s8_x2.c
new file mode 100644
index 0000000..42d89ff
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s8_x2.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.b - z1\.b}, pn\1, {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svint8x2_t, z0,
+ svsel_s8_x2 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.b - z1\.b}, pn\1, {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svint8x2_t, z0,
+ svsel_s8_x2 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.b - z1\.b}, pn8, {z4\.b - z5\.b}, {z28\.b - z29\.b}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svint8x2_t, z0,
+ svsel_s8_x2 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** sel {z4\.b - z5\.b}, pn8, {z18\.b - z19\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svint8x2_t, z4,
+ svsel_s8_x2 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {z18\.b - z19\.b}, pn15, {z28\.b - z29\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svint8x2_t, z18,
+ svsel_s8_x2 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** sel {z18\.b - z19\.b}, pn8, {z18\.b - z19\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svint8x2_t, z18,
+ svsel_s8_x2 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** sel [^\n]+, pn15, {z0\.b - z1\.b}, {z18\.b - z19\.b}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svint8x2_t, z23,
+ svsel_s8_x2 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
+
+/*
+** sel_z0_pn15_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.b - z1\.b}, pn15, {[^}]+}, {z28\.b - z29\.b}
+** ret
+*/
+TEST_XN (sel_z0_pn15_z23_z28, svint8x2_t, z0,
+ svsel_s8_x2 (pn15, z23, z28),
+ svsel (pn15, z23, z28))
+
+/*
+** sel_z0_pn8_z28_z23:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.b - z1\.b}, pn8, {z28\.b - z29\.b}, {[^}]+}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z28_z23, svint8x2_t, z0,
+ svsel_s8_x2 (pn8, z28, z23),
+ svsel (pn8, z28, z23))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s8_x4.c
new file mode 100644
index 0000000..c6635f1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_s8_x4.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.b - z3\.b}, pn\1, {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svint8x4_t, z0,
+ svsel_s8_x4 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.b - z3\.b}, pn\1, {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svint8x4_t, z0,
+ svsel_s8_x4 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.b - z3\.b}, pn8, {z4\.b - z7\.b}, {z28\.b - z31\.b}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svint8x4_t, z0,
+ svsel_s8_x4 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {z4\.b - z7\.b}, pn8, {[^}]+}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svint8x4_t, z4,
+ svsel_s8_x4 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {[^}]+}, pn15, {z28\.b - z31\.b}, {z4\.b - z7\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svint8x4_t, z18,
+ svsel_s8_x4 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {[^}]+}, pn8, {[^}]+}, {z4\.b - z7\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svint8x4_t, z18,
+ svsel_s8_x4 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel [^\n]+, pn15, {z0\.b - z3\.b}, {[^}]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svint8x4_t, z23,
+ svsel_s8_x4 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u16_x2.c
new file mode 100644
index 0000000..20cd848
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u16_x2.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.h - z1\.h}, pn\1, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svuint16x2_t, z0,
+ svsel_u16_x2 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.h - z1\.h}, pn\1, {z0\.h - z1\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svuint16x2_t, z0,
+ svsel_u16_x2 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.h - z1\.h}, pn8, {z4\.h - z5\.h}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svuint16x2_t, z0,
+ svsel_u16_x2 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** sel {z4\.h - z5\.h}, pn8, {z18\.h - z19\.h}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svuint16x2_t, z4,
+ svsel_u16_x2 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {z18\.h - z19\.h}, pn15, {z28\.h - z29\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svuint16x2_t, z18,
+ svsel_u16_x2 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** sel {z18\.h - z19\.h}, pn8, {z18\.h - z19\.h}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svuint16x2_t, z18,
+ svsel_u16_x2 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** sel [^\n]+, pn15, {z0\.h - z1\.h}, {z18\.h - z19\.h}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svuint16x2_t, z23,
+ svsel_u16_x2 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
+
+/*
+** sel_z0_pn15_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.h - z1\.h}, pn15, {[^}]+}, {z28\.h - z29\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn15_z23_z28, svuint16x2_t, z0,
+ svsel_u16_x2 (pn15, z23, z28),
+ svsel (pn15, z23, z28))
+
+/*
+** sel_z0_pn8_z28_z23:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.h - z1\.h}, pn8, {z28\.h - z29\.h}, {[^}]+}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z28_z23, svuint16x2_t, z0,
+ svsel_u16_x2 (pn8, z28, z23),
+ svsel (pn8, z28, z23))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u16_x4.c
new file mode 100644
index 0000000..f5ba4fd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u16_x4.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.h - z3\.h}, pn\1, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svuint16x4_t, z0,
+ svsel_u16_x4 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.h - z3\.h}, pn\1, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svuint16x4_t, z0,
+ svsel_u16_x4 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.h - z3\.h}, pn8, {z4\.h - z7\.h}, {z28\.h - z31\.h}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svuint16x4_t, z0,
+ svsel_u16_x4 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {z4\.h - z7\.h}, pn8, {[^}]+}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svuint16x4_t, z4,
+ svsel_u16_x4 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {[^}]+}, pn15, {z28\.h - z31\.h}, {z4\.h - z7\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svuint16x4_t, z18,
+ svsel_u16_x4 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {[^}]+}, pn8, {[^}]+}, {z4\.h - z7\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svuint16x4_t, z18,
+ svsel_u16_x4 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel [^\n]+, pn15, {z0\.h - z3\.h}, {[^}]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svuint16x4_t, z23,
+ svsel_u16_x4 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u32_x2.c
new file mode 100644
index 0000000..2b736e6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u32_x2.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.s - z1\.s}, pn\1, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svuint32x2_t, z0,
+ svsel_u32_x2 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.s - z1\.s}, pn\1, {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svuint32x2_t, z0,
+ svsel_u32_x2 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.s - z1\.s}, pn8, {z4\.s - z5\.s}, {z28\.s - z29\.s}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svuint32x2_t, z0,
+ svsel_u32_x2 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** sel {z4\.s - z5\.s}, pn8, {z18\.s - z19\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svuint32x2_t, z4,
+ svsel_u32_x2 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {z18\.s - z19\.s}, pn15, {z28\.s - z29\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svuint32x2_t, z18,
+ svsel_u32_x2 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** sel {z18\.s - z19\.s}, pn8, {z18\.s - z19\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svuint32x2_t, z18,
+ svsel_u32_x2 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** sel [^\n]+, pn15, {z0\.s - z1\.s}, {z18\.s - z19\.s}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svuint32x2_t, z23,
+ svsel_u32_x2 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
+
+/*
+** sel_z0_pn15_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.s - z1\.s}, pn15, {[^}]+}, {z28\.s - z29\.s}
+** ret
+*/
+TEST_XN (sel_z0_pn15_z23_z28, svuint32x2_t, z0,
+ svsel_u32_x2 (pn15, z23, z28),
+ svsel (pn15, z23, z28))
+
+/*
+** sel_z0_pn8_z28_z23:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.s - z1\.s}, pn8, {z28\.s - z29\.s}, {[^}]+}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z28_z23, svuint32x2_t, z0,
+ svsel_u32_x2 (pn8, z28, z23),
+ svsel (pn8, z28, z23))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u32_x4.c
new file mode 100644
index 0000000..4783cf7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u32_x4.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.s - z3\.s}, pn\1, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svuint32x4_t, z0,
+ svsel_u32_x4 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.s - z3\.s}, pn\1, {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svuint32x4_t, z0,
+ svsel_u32_x4 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.s - z3\.s}, pn8, {z4\.s - z7\.s}, {z28\.s - z31\.s}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svuint32x4_t, z0,
+ svsel_u32_x4 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {z4\.s - z7\.s}, pn8, {[^}]+}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svuint32x4_t, z4,
+ svsel_u32_x4 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {[^}]+}, pn15, {z28\.s - z31\.s}, {z4\.s - z7\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svuint32x4_t, z18,
+ svsel_u32_x4 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {[^}]+}, pn8, {[^}]+}, {z4\.s - z7\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svuint32x4_t, z18,
+ svsel_u32_x4 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel [^\n]+, pn15, {z0\.s - z3\.s}, {[^}]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svuint32x4_t, z23,
+ svsel_u32_x4 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u64_x2.c
new file mode 100644
index 0000000..2e039e2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u64_x2.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.d - z1\.d}, pn\1, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svuint64x2_t, z0,
+ svsel_u64_x2 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.d - z1\.d}, pn\1, {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svuint64x2_t, z0,
+ svsel_u64_x2 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.d - z1\.d}, pn8, {z4\.d - z5\.d}, {z28\.d - z29\.d}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svuint64x2_t, z0,
+ svsel_u64_x2 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** sel {z4\.d - z5\.d}, pn8, {z18\.d - z19\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svuint64x2_t, z4,
+ svsel_u64_x2 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {z18\.d - z19\.d}, pn15, {z28\.d - z29\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svuint64x2_t, z18,
+ svsel_u64_x2 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** sel {z18\.d - z19\.d}, pn8, {z18\.d - z19\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svuint64x2_t, z18,
+ svsel_u64_x2 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** sel [^\n]+, pn15, {z0\.d - z1\.d}, {z18\.d - z19\.d}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svuint64x2_t, z23,
+ svsel_u64_x2 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
+
+/*
+** sel_z0_pn15_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.d - z1\.d}, pn15, {[^}]+}, {z28\.d - z29\.d}
+** ret
+*/
+TEST_XN (sel_z0_pn15_z23_z28, svuint64x2_t, z0,
+ svsel_u64_x2 (pn15, z23, z28),
+ svsel (pn15, z23, z28))
+
+/*
+** sel_z0_pn8_z28_z23:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.d - z1\.d}, pn8, {z28\.d - z29\.d}, {[^}]+}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z28_z23, svuint64x2_t, z0,
+ svsel_u64_x2 (pn8, z28, z23),
+ svsel (pn8, z28, z23))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u64_x4.c
new file mode 100644
index 0000000..28a92d4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u64_x4.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.d - z3\.d}, pn\1, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svuint64x4_t, z0,
+ svsel_u64_x4 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.d - z3\.d}, pn\1, {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svuint64x4_t, z0,
+ svsel_u64_x4 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.d - z3\.d}, pn8, {z4\.d - z7\.d}, {z28\.d - z31\.d}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svuint64x4_t, z0,
+ svsel_u64_x4 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {z4\.d - z7\.d}, pn8, {[^}]+}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svuint64x4_t, z4,
+ svsel_u64_x4 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {[^}]+}, pn15, {z28\.d - z31\.d}, {z4\.d - z7\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svuint64x4_t, z18,
+ svsel_u64_x4 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {[^}]+}, pn8, {[^}]+}, {z4\.d - z7\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svuint64x4_t, z18,
+ svsel_u64_x4 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel [^\n]+, pn15, {z0\.d - z3\.d}, {[^}]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svuint64x4_t, z23,
+ svsel_u64_x4 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u8_x2.c
new file mode 100644
index 0000000..edcd1e9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u8_x2.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.b - z1\.b}, pn\1, {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svuint8x2_t, z0,
+ svsel_u8_x2 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.b - z1\.b}, pn\1, {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svuint8x2_t, z0,
+ svsel_u8_x2 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.b - z1\.b}, pn8, {z4\.b - z5\.b}, {z28\.b - z29\.b}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svuint8x2_t, z0,
+ svsel_u8_x2 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** sel {z4\.b - z5\.b}, pn8, {z18\.b - z19\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svuint8x2_t, z4,
+ svsel_u8_x2 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {z18\.b - z19\.b}, pn15, {z28\.b - z29\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svuint8x2_t, z18,
+ svsel_u8_x2 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** sel {z18\.b - z19\.b}, pn8, {z18\.b - z19\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svuint8x2_t, z18,
+ svsel_u8_x2 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** sel [^\n]+, pn15, {z0\.b - z1\.b}, {z18\.b - z19\.b}
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svuint8x2_t, z23,
+ svsel_u8_x2 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
+
+/*
+** sel_z0_pn15_z23_z28:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.b - z1\.b}, pn15, {[^}]+}, {z28\.b - z29\.b}
+** ret
+*/
+TEST_XN (sel_z0_pn15_z23_z28, svuint8x2_t, z0,
+ svsel_u8_x2 (pn15, z23, z28),
+ svsel (pn15, z23, z28))
+
+/*
+** sel_z0_pn8_z28_z23:
+** mov [^\n]+
+** mov [^\n]+
+** sel {z0\.b - z1\.b}, pn8, {z28\.b - z29\.b}, {[^}]+}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z28_z23, svuint8x2_t, z0,
+ svsel_u8_x2 (pn8, z28, z23),
+ svsel (pn8, z28, z23))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u8_x4.c
new file mode 100644
index 0000000..3a45574
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sel_u8_x4.c
@@ -0,0 +1,92 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sel_z0_pn0_z0_z4:
+** mov p([0-9]+)\.b, p0\.b
+** sel {z0\.b - z3\.b}, pn\1, {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_XN (sel_z0_pn0_z0_z4, svuint8x4_t, z0,
+ svsel_u8_x4 (pn0, z0, z4),
+ svsel (pn0, z0, z4))
+
+/*
+** sel_z0_pn7_z0_z4:
+** mov p([0-9]+)\.b, p7\.b
+** sel {z0\.b - z3\.b}, pn\1, {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_XN (sel_z0_pn7_z0_z4, svuint8x4_t, z0,
+ svsel_u8_x4 (pn7, z0, z4),
+ svsel (pn7, z0, z4))
+
+/*
+** sel_z0_pn8_z4_z28:
+** sel {z0\.b - z3\.b}, pn8, {z4\.b - z7\.b}, {z28\.b - z31\.b}
+** ret
+*/
+TEST_XN (sel_z0_pn8_z4_z28, svuint8x4_t, z0,
+ svsel_u8_x4 (pn8, z4, z28),
+ svsel (pn8, z4, z28))
+
+/*
+** sel_z4_pn8_z18_z0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {z4\.b - z7\.b}, pn8, {[^}]+}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_XN (sel_z4_pn8_z18_z0, svuint8x4_t, z4,
+ svsel_u8_x4 (pn8, z18, z0),
+ svsel (pn8, z18, z0))
+
+/*
+** sel_z18_pn15_z28_z4:
+** sel {[^}]+}, pn15, {z28\.b - z31\.b}, {z4\.b - z7\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn15_z28_z4, svuint8x4_t, z18,
+ svsel_u8_x4 (pn15, z28, z4),
+ svsel (pn15, z28, z4))
+
+/*
+** sel_z18_pn8_z18_z4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel {[^}]+}, pn8, {[^}]+}, {z4\.b - z7\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z18_pn8_z18_z4, svuint8x4_t, z18,
+ svsel_u8_x4 (pn8, z18, z4),
+ svsel (pn8, z18, z4))
+
+/*
+** sel_z23_pn15_z0_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sel [^\n]+, pn15, {z0\.b - z3\.b}, {[^}]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (sel_z23_pn15_z0_z18, svuint8x4_t, z23,
+ svsel_u8_x4 (pn15, z0, z18),
+ svsel (pn15, z0, z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_bf16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_bf16_x2.c
new file mode 100644
index 0000000..904135b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_bf16_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_bf16_base:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_base, svbfloat16x2_t, bfloat16_t,
+ svst1_bf16_x2 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_bf16_index:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_index, svbfloat16x2_t, bfloat16_t,
+ svst1_bf16_x2 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_bf16_1:
+** incb x0
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_1, svbfloat16x2_t, bfloat16_t,
+ svst1_bf16_x2 (pn8, x0 + svcnth (), z0),
+ svst1 (pn8, x0 + svcnth (), z0))
+
+/*
+** st1_bf16_2:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_2, svbfloat16x2_t, bfloat16_t,
+ svst1_bf16_x2 (pn8, x0 + svcnth () * 2, z0),
+ svst1 (pn8, x0 + svcnth () * 2, z0))
+
+/*
+** st1_bf16_14:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_14, svbfloat16x2_t, bfloat16_t,
+ svst1_bf16_x2 (pn8, x0 + svcnth () * 14, z0),
+ svst1 (pn8, x0 + svcnth () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_bf16_16:
+** incb x0, all, mul #16
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_16, svbfloat16x2_t, bfloat16_t,
+ svst1_bf16_x2 (pn8, x0 + svcnth () * 16, z0),
+ svst1 (pn8, x0 + svcnth () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_bf16_m1:
+** decb x0
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_m1, svbfloat16x2_t, bfloat16_t,
+ svst1_bf16_x2 (pn8, x0 - svcnth (), z0),
+ svst1 (pn8, x0 - svcnth (), z0))
+
+/*
+** st1_bf16_m2:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_m2, svbfloat16x2_t, bfloat16_t,
+ svst1_bf16_x2 (pn8, x0 - svcnth () * 2, z0),
+ svst1 (pn8, x0 - svcnth () * 2, z0))
+
+/*
+** st1_bf16_m16:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_m16, svbfloat16x2_t, bfloat16_t,
+ svst1_bf16_x2 (pn8, x0 - svcnth () * 16, z0),
+ svst1 (pn8, x0 - svcnth () * 16, z0))
+
+/*
+** st1_bf16_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_m18, svbfloat16x2_t, bfloat16_t,
+ svst1_bf16_x2 (pn8, x0 - svcnth () * 18, z0),
+ svst1 (pn8, x0 - svcnth () * 18, z0))
+
+/*
+** st1_bf16_z17:
+** mov [^\n]+
+** mov [^\n]+
+** st1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_z17, svbfloat16x2_t, bfloat16_t,
+ svst1_bf16_x2 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_bf16_z22:
+** st1h {z22\.h(?: - |, )z23\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_z22, svbfloat16x2_t, bfloat16_t,
+ svst1_bf16_x2 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_bf16_z28:
+** st1h {z28\.h(?: - |, )z29\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_z28, svbfloat16x2_t, bfloat16_t,
+ svst1_bf16_x2 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_bf16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1h {z0\.h(?: - |, )z1\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_pn0, svbfloat16x2_t, bfloat16_t,
+ svst1_bf16_x2 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_bf16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1h {z0\.h(?: - |, )z1\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_pn7, svbfloat16x2_t, bfloat16_t,
+ svst1_bf16_x2 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_bf16_pn15:
+** st1h {z0\.h(?: - |, )z1\.h}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_pn15, svbfloat16x2_t, bfloat16_t,
+ svst1_bf16_x2 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_bf16_0:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_0, svbfloat16x2_t, bfloat16_t,
+ svst1_vnum_bf16_x2 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_bf16_1:
+** incb x0
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_1, svbfloat16x2_t, bfloat16_t,
+ svst1_vnum_bf16_x2 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/*
+** st1_vnum_bf16_2:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_2, svbfloat16x2_t, bfloat16_t,
+ svst1_vnum_bf16_x2 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/*
+** st1_vnum_bf16_14:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_14, svbfloat16x2_t, bfloat16_t,
+ svst1_vnum_bf16_x2 (pn8, x0, 14, z0),
+ svst1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_bf16_16:
+** incb x0, all, mul #16
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_16, svbfloat16x2_t, bfloat16_t,
+ svst1_vnum_bf16_x2 (pn8, x0, 16, z0),
+ svst1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_bf16_m1:
+** decb x0
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_m1, svbfloat16x2_t, bfloat16_t,
+ svst1_vnum_bf16_x2 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/*
+** st1_vnum_bf16_m2:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_m2, svbfloat16x2_t, bfloat16_t,
+ svst1_vnum_bf16_x2 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/*
+** st1_vnum_bf16_m16:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_m16, svbfloat16x2_t, bfloat16_t,
+ svst1_vnum_bf16_x2 (pn8, x0, -16, z0),
+ svst1_vnum (pn8, x0, -16, z0))
+
+/*
+** st1_vnum_bf16_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_m18, svbfloat16x2_t, bfloat16_t,
+ svst1_vnum_bf16_x2 (pn8, x0, -18, z0),
+ svst1_vnum (pn8, x0, -18, z0))
+
+/*
+** st1_vnum_bf16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_x1, svbfloat16x2_t, bfloat16_t,
+ svst1_vnum_bf16_x2 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_bf16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_bf16_x4.c
new file mode 100644
index 0000000..2563b72
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_bf16_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_bf16_base:
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_base, svbfloat16x4_t, bfloat16_t,
+ svst1_bf16_x4 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_bf16_index:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_index, svbfloat16x4_t, bfloat16_t,
+ svst1_bf16_x4 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_bf16_1:
+** incb x0
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_1, svbfloat16x4_t, bfloat16_t,
+ svst1_bf16_x4 (pn8, x0 + svcnth (), z0),
+ svst1 (pn8, x0 + svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_bf16_2:
+** incb x0, all, mul #2
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_2, svbfloat16x4_t, bfloat16_t,
+ svst1_bf16_x4 (pn8, x0 + svcnth () * 2, z0),
+ svst1 (pn8, x0 + svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_bf16_3:
+** incb x0, all, mul #3
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_3, svbfloat16x4_t, bfloat16_t,
+ svst1_bf16_x4 (pn8, x0 + svcnth () * 3, z0),
+ svst1 (pn8, x0 + svcnth () * 3, z0))
+
+/*
+** st1_bf16_4:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_4, svbfloat16x4_t, bfloat16_t,
+ svst1_bf16_x4 (pn8, x0 + svcnth () * 4, z0),
+ svst1 (pn8, x0 + svcnth () * 4, z0))
+
+/*
+** st1_bf16_28:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_28, svbfloat16x4_t, bfloat16_t,
+ svst1_bf16_x4 (pn8, x0 + svcnth () * 28, z0),
+ svst1 (pn8, x0 + svcnth () * 28, z0))
+
+/*
+** st1_bf16_32:
+** [^{]*
+** st1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_32, svbfloat16x4_t, bfloat16_t,
+ svst1_bf16_x4 (pn8, x0 + svcnth () * 32, z0),
+ svst1 (pn8, x0 + svcnth () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_bf16_m1:
+** decb x0
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_m1, svbfloat16x4_t, bfloat16_t,
+ svst1_bf16_x4 (pn8, x0 - svcnth (), z0),
+ svst1 (pn8, x0 - svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_bf16_m2:
+** decb x0, all, mul #2
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_m2, svbfloat16x4_t, bfloat16_t,
+ svst1_bf16_x4 (pn8, x0 - svcnth () * 2, z0),
+ svst1 (pn8, x0 - svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_bf16_m3:
+** decb x0, all, mul #3
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_m3, svbfloat16x4_t, bfloat16_t,
+ svst1_bf16_x4 (pn8, x0 - svcnth () * 3, z0),
+ svst1 (pn8, x0 - svcnth () * 3, z0))
+
+/*
+** st1_bf16_m4:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_m4, svbfloat16x4_t, bfloat16_t,
+ svst1_bf16_x4 (pn8, x0 - svcnth () * 4, z0),
+ svst1 (pn8, x0 - svcnth () * 4, z0))
+
+/*
+** st1_bf16_m32:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_m32, svbfloat16x4_t, bfloat16_t,
+ svst1_bf16_x4 (pn8, x0 - svcnth () * 32, z0),
+ svst1 (pn8, x0 - svcnth () * 32, z0))
+
+/*
+** st1_bf16_m36:
+** [^{]*
+** st1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_m36, svbfloat16x4_t, bfloat16_t,
+ svst1_bf16_x4 (pn8, x0 - svcnth () * 36, z0),
+ svst1 (pn8, x0 - svcnth () * 36, z0))
+
+/*
+** st1_bf16_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_z17, svbfloat16x4_t, bfloat16_t,
+ svst1_bf16_x4 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_bf16_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_z22, svbfloat16x4_t, bfloat16_t,
+ svst1_bf16_x4 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_bf16_z28:
+** st1h {z28\.h - z31\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_z28, svbfloat16x4_t, bfloat16_t,
+ svst1_bf16_x4 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_bf16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1h {z0\.h - z3\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_pn0, svbfloat16x4_t, bfloat16_t,
+ svst1_bf16_x4 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_bf16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1h {z0\.h - z3\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_pn7, svbfloat16x4_t, bfloat16_t,
+ svst1_bf16_x4 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_bf16_pn15:
+** st1h {z0\.h - z3\.h}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_bf16_pn15, svbfloat16x4_t, bfloat16_t,
+ svst1_bf16_x4 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_bf16_0:
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_0, svbfloat16x4_t, bfloat16_t,
+ svst1_vnum_bf16_x4 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_bf16_1:
+** incb x0
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_1, svbfloat16x4_t, bfloat16_t,
+ svst1_vnum_bf16_x4 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_bf16_2:
+** incb x0, all, mul #2
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_2, svbfloat16x4_t, bfloat16_t,
+ svst1_vnum_bf16_x4 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_bf16_3:
+** incb x0, all, mul #3
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_3, svbfloat16x4_t, bfloat16_t,
+ svst1_vnum_bf16_x4 (pn8, x0, 3, z0),
+ svst1_vnum (pn8, x0, 3, z0))
+
+/*
+** st1_vnum_bf16_4:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_4, svbfloat16x4_t, bfloat16_t,
+ svst1_vnum_bf16_x4 (pn8, x0, 4, z0),
+ svst1_vnum (pn8, x0, 4, z0))
+
+/*
+** st1_vnum_bf16_28:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_28, svbfloat16x4_t, bfloat16_t,
+ svst1_vnum_bf16_x4 (pn8, x0, 28, z0),
+ svst1_vnum (pn8, x0, 28, z0))
+
+/*
+** st1_vnum_bf16_32:
+** [^{]*
+** st1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_32, svbfloat16x4_t, bfloat16_t,
+ svst1_vnum_bf16_x4 (pn8, x0, 32, z0),
+ svst1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_bf16_m1:
+** decb x0
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_m1, svbfloat16x4_t, bfloat16_t,
+ svst1_vnum_bf16_x4 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_bf16_m2:
+** decb x0, all, mul #2
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_m2, svbfloat16x4_t, bfloat16_t,
+ svst1_vnum_bf16_x4 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_bf16_m3:
+** decb x0, all, mul #3
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_m3, svbfloat16x4_t, bfloat16_t,
+ svst1_vnum_bf16_x4 (pn8, x0, -3, z0),
+ svst1_vnum (pn8, x0, -3, z0))
+
+/*
+** st1_vnum_bf16_m4:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_m4, svbfloat16x4_t, bfloat16_t,
+ svst1_vnum_bf16_x4 (pn8, x0, -4, z0),
+ svst1_vnum (pn8, x0, -4, z0))
+
+/*
+** st1_vnum_bf16_m32:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_m32, svbfloat16x4_t, bfloat16_t,
+ svst1_vnum_bf16_x4 (pn8, x0, -32, z0),
+ svst1_vnum (pn8, x0, -32, z0))
+
+/*
+** st1_vnum_bf16_m36:
+** [^{]*
+** st1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_m36, svbfloat16x4_t, bfloat16_t,
+ svst1_vnum_bf16_x4 (pn8, x0, -36, z0),
+ svst1_vnum (pn8, x0, -36, z0))
+
+/*
+** st1_vnum_bf16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1h {z0\.h - z3\.h}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1h {z0\.h - z3\.h}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_bf16_x1, svbfloat16x4_t, bfloat16_t,
+ svst1_vnum_bf16_x4 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f16_x2.c
new file mode 100644
index 0000000..5b0c12b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f16_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_f16_base:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_base, svfloat16x2_t, float16_t,
+ svst1_f16_x2 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_f16_index:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_index, svfloat16x2_t, float16_t,
+ svst1_f16_x2 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f16_1:
+** incb x0
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_1, svfloat16x2_t, float16_t,
+ svst1_f16_x2 (pn8, x0 + svcnth (), z0),
+ svst1 (pn8, x0 + svcnth (), z0))
+
+/*
+** st1_f16_2:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_2, svfloat16x2_t, float16_t,
+ svst1_f16_x2 (pn8, x0 + svcnth () * 2, z0),
+ svst1 (pn8, x0 + svcnth () * 2, z0))
+
+/*
+** st1_f16_14:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_14, svfloat16x2_t, float16_t,
+ svst1_f16_x2 (pn8, x0 + svcnth () * 14, z0),
+ svst1 (pn8, x0 + svcnth () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f16_16:
+** incb x0, all, mul #16
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_16, svfloat16x2_t, float16_t,
+ svst1_f16_x2 (pn8, x0 + svcnth () * 16, z0),
+ svst1 (pn8, x0 + svcnth () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f16_m1:
+** decb x0
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_m1, svfloat16x2_t, float16_t,
+ svst1_f16_x2 (pn8, x0 - svcnth (), z0),
+ svst1 (pn8, x0 - svcnth (), z0))
+
+/*
+** st1_f16_m2:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_m2, svfloat16x2_t, float16_t,
+ svst1_f16_x2 (pn8, x0 - svcnth () * 2, z0),
+ svst1 (pn8, x0 - svcnth () * 2, z0))
+
+/*
+** st1_f16_m16:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_m16, svfloat16x2_t, float16_t,
+ svst1_f16_x2 (pn8, x0 - svcnth () * 16, z0),
+ svst1 (pn8, x0 - svcnth () * 16, z0))
+
+/*
+** st1_f16_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_m18, svfloat16x2_t, float16_t,
+ svst1_f16_x2 (pn8, x0 - svcnth () * 18, z0),
+ svst1 (pn8, x0 - svcnth () * 18, z0))
+
+/*
+** st1_f16_z17:
+** mov [^\n]+
+** mov [^\n]+
+** st1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_z17, svfloat16x2_t, float16_t,
+ svst1_f16_x2 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_f16_z22:
+** st1h {z22\.h(?: - |, )z23\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_z22, svfloat16x2_t, float16_t,
+ svst1_f16_x2 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_f16_z28:
+** st1h {z28\.h(?: - |, )z29\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_z28, svfloat16x2_t, float16_t,
+ svst1_f16_x2 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_f16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1h {z0\.h(?: - |, )z1\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_pn0, svfloat16x2_t, float16_t,
+ svst1_f16_x2 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_f16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1h {z0\.h(?: - |, )z1\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_pn7, svfloat16x2_t, float16_t,
+ svst1_f16_x2 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_f16_pn15:
+** st1h {z0\.h(?: - |, )z1\.h}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_pn15, svfloat16x2_t, float16_t,
+ svst1_f16_x2 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_f16_0:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_0, svfloat16x2_t, float16_t,
+ svst1_vnum_f16_x2 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f16_1:
+** incb x0
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_1, svfloat16x2_t, float16_t,
+ svst1_vnum_f16_x2 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/*
+** st1_vnum_f16_2:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_2, svfloat16x2_t, float16_t,
+ svst1_vnum_f16_x2 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/*
+** st1_vnum_f16_14:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_14, svfloat16x2_t, float16_t,
+ svst1_vnum_f16_x2 (pn8, x0, 14, z0),
+ svst1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f16_16:
+** incb x0, all, mul #16
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_16, svfloat16x2_t, float16_t,
+ svst1_vnum_f16_x2 (pn8, x0, 16, z0),
+ svst1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f16_m1:
+** decb x0
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_m1, svfloat16x2_t, float16_t,
+ svst1_vnum_f16_x2 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/*
+** st1_vnum_f16_m2:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_m2, svfloat16x2_t, float16_t,
+ svst1_vnum_f16_x2 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/*
+** st1_vnum_f16_m16:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_m16, svfloat16x2_t, float16_t,
+ svst1_vnum_f16_x2 (pn8, x0, -16, z0),
+ svst1_vnum (pn8, x0, -16, z0))
+
+/*
+** st1_vnum_f16_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_m18, svfloat16x2_t, float16_t,
+ svst1_vnum_f16_x2 (pn8, x0, -18, z0),
+ svst1_vnum (pn8, x0, -18, z0))
+
+/*
+** st1_vnum_f16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_x1, svfloat16x2_t, float16_t,
+ svst1_vnum_f16_x2 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f16_x4.c
new file mode 100644
index 0000000..da0617c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f16_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_f16_base:
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_base, svfloat16x4_t, float16_t,
+ svst1_f16_x4 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_f16_index:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_index, svfloat16x4_t, float16_t,
+ svst1_f16_x4 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f16_1:
+** incb x0
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_1, svfloat16x4_t, float16_t,
+ svst1_f16_x4 (pn8, x0 + svcnth (), z0),
+ svst1 (pn8, x0 + svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f16_2:
+** incb x0, all, mul #2
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_2, svfloat16x4_t, float16_t,
+ svst1_f16_x4 (pn8, x0 + svcnth () * 2, z0),
+ svst1 (pn8, x0 + svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f16_3:
+** incb x0, all, mul #3
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_3, svfloat16x4_t, float16_t,
+ svst1_f16_x4 (pn8, x0 + svcnth () * 3, z0),
+ svst1 (pn8, x0 + svcnth () * 3, z0))
+
+/*
+** st1_f16_4:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_4, svfloat16x4_t, float16_t,
+ svst1_f16_x4 (pn8, x0 + svcnth () * 4, z0),
+ svst1 (pn8, x0 + svcnth () * 4, z0))
+
+/*
+** st1_f16_28:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_28, svfloat16x4_t, float16_t,
+ svst1_f16_x4 (pn8, x0 + svcnth () * 28, z0),
+ svst1 (pn8, x0 + svcnth () * 28, z0))
+
+/*
+** st1_f16_32:
+** [^{]*
+** st1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_32, svfloat16x4_t, float16_t,
+ svst1_f16_x4 (pn8, x0 + svcnth () * 32, z0),
+ svst1 (pn8, x0 + svcnth () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f16_m1:
+** decb x0
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_m1, svfloat16x4_t, float16_t,
+ svst1_f16_x4 (pn8, x0 - svcnth (), z0),
+ svst1 (pn8, x0 - svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f16_m2:
+** decb x0, all, mul #2
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_m2, svfloat16x4_t, float16_t,
+ svst1_f16_x4 (pn8, x0 - svcnth () * 2, z0),
+ svst1 (pn8, x0 - svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f16_m3:
+** decb x0, all, mul #3
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_m3, svfloat16x4_t, float16_t,
+ svst1_f16_x4 (pn8, x0 - svcnth () * 3, z0),
+ svst1 (pn8, x0 - svcnth () * 3, z0))
+
+/*
+** st1_f16_m4:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_m4, svfloat16x4_t, float16_t,
+ svst1_f16_x4 (pn8, x0 - svcnth () * 4, z0),
+ svst1 (pn8, x0 - svcnth () * 4, z0))
+
+/*
+** st1_f16_m32:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_m32, svfloat16x4_t, float16_t,
+ svst1_f16_x4 (pn8, x0 - svcnth () * 32, z0),
+ svst1 (pn8, x0 - svcnth () * 32, z0))
+
+/*
+** st1_f16_m36:
+** [^{]*
+** st1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_m36, svfloat16x4_t, float16_t,
+ svst1_f16_x4 (pn8, x0 - svcnth () * 36, z0),
+ svst1 (pn8, x0 - svcnth () * 36, z0))
+
+/*
+** st1_f16_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_z17, svfloat16x4_t, float16_t,
+ svst1_f16_x4 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_f16_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_z22, svfloat16x4_t, float16_t,
+ svst1_f16_x4 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_f16_z28:
+** st1h {z28\.h - z31\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_z28, svfloat16x4_t, float16_t,
+ svst1_f16_x4 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_f16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1h {z0\.h - z3\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_pn0, svfloat16x4_t, float16_t,
+ svst1_f16_x4 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_f16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1h {z0\.h - z3\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_pn7, svfloat16x4_t, float16_t,
+ svst1_f16_x4 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_f16_pn15:
+** st1h {z0\.h - z3\.h}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f16_pn15, svfloat16x4_t, float16_t,
+ svst1_f16_x4 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_f16_0:
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_0, svfloat16x4_t, float16_t,
+ svst1_vnum_f16_x4 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f16_1:
+** incb x0
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_1, svfloat16x4_t, float16_t,
+ svst1_vnum_f16_x4 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f16_2:
+** incb x0, all, mul #2
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_2, svfloat16x4_t, float16_t,
+ svst1_vnum_f16_x4 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f16_3:
+** incb x0, all, mul #3
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_3, svfloat16x4_t, float16_t,
+ svst1_vnum_f16_x4 (pn8, x0, 3, z0),
+ svst1_vnum (pn8, x0, 3, z0))
+
+/*
+** st1_vnum_f16_4:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_4, svfloat16x4_t, float16_t,
+ svst1_vnum_f16_x4 (pn8, x0, 4, z0),
+ svst1_vnum (pn8, x0, 4, z0))
+
+/*
+** st1_vnum_f16_28:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_28, svfloat16x4_t, float16_t,
+ svst1_vnum_f16_x4 (pn8, x0, 28, z0),
+ svst1_vnum (pn8, x0, 28, z0))
+
+/*
+** st1_vnum_f16_32:
+** [^{]*
+** st1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_32, svfloat16x4_t, float16_t,
+ svst1_vnum_f16_x4 (pn8, x0, 32, z0),
+ svst1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f16_m1:
+** decb x0
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_m1, svfloat16x4_t, float16_t,
+ svst1_vnum_f16_x4 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f16_m2:
+** decb x0, all, mul #2
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_m2, svfloat16x4_t, float16_t,
+ svst1_vnum_f16_x4 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f16_m3:
+** decb x0, all, mul #3
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_m3, svfloat16x4_t, float16_t,
+ svst1_vnum_f16_x4 (pn8, x0, -3, z0),
+ svst1_vnum (pn8, x0, -3, z0))
+
+/*
+** st1_vnum_f16_m4:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_m4, svfloat16x4_t, float16_t,
+ svst1_vnum_f16_x4 (pn8, x0, -4, z0),
+ svst1_vnum (pn8, x0, -4, z0))
+
+/*
+** st1_vnum_f16_m32:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_m32, svfloat16x4_t, float16_t,
+ svst1_vnum_f16_x4 (pn8, x0, -32, z0),
+ svst1_vnum (pn8, x0, -32, z0))
+
+/*
+** st1_vnum_f16_m36:
+** [^{]*
+** st1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_m36, svfloat16x4_t, float16_t,
+ svst1_vnum_f16_x4 (pn8, x0, -36, z0),
+ svst1_vnum (pn8, x0, -36, z0))
+
+/*
+** st1_vnum_f16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1h {z0\.h - z3\.h}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1h {z0\.h - z3\.h}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f16_x1, svfloat16x4_t, float16_t,
+ svst1_vnum_f16_x4 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f32_x2.c
new file mode 100644
index 0000000..f0114b6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f32_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_f32_base:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_base, svfloat32x2_t, float32_t,
+ svst1_f32_x2 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_f32_index:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_index, svfloat32x2_t, float32_t,
+ svst1_f32_x2 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f32_1:
+** incb x0
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_1, svfloat32x2_t, float32_t,
+ svst1_f32_x2 (pn8, x0 + svcntw (), z0),
+ svst1 (pn8, x0 + svcntw (), z0))
+
+/*
+** st1_f32_2:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_2, svfloat32x2_t, float32_t,
+ svst1_f32_x2 (pn8, x0 + svcntw () * 2, z0),
+ svst1 (pn8, x0 + svcntw () * 2, z0))
+
+/*
+** st1_f32_14:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_14, svfloat32x2_t, float32_t,
+ svst1_f32_x2 (pn8, x0 + svcntw () * 14, z0),
+ svst1 (pn8, x0 + svcntw () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f32_16:
+** incb x0, all, mul #16
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_16, svfloat32x2_t, float32_t,
+ svst1_f32_x2 (pn8, x0 + svcntw () * 16, z0),
+ svst1 (pn8, x0 + svcntw () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f32_m1:
+** decb x0
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_m1, svfloat32x2_t, float32_t,
+ svst1_f32_x2 (pn8, x0 - svcntw (), z0),
+ svst1 (pn8, x0 - svcntw (), z0))
+
+/*
+** st1_f32_m2:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_m2, svfloat32x2_t, float32_t,
+ svst1_f32_x2 (pn8, x0 - svcntw () * 2, z0),
+ svst1 (pn8, x0 - svcntw () * 2, z0))
+
+/*
+** st1_f32_m16:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_m16, svfloat32x2_t, float32_t,
+ svst1_f32_x2 (pn8, x0 - svcntw () * 16, z0),
+ svst1 (pn8, x0 - svcntw () * 16, z0))
+
+/*
+** st1_f32_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_m18, svfloat32x2_t, float32_t,
+ svst1_f32_x2 (pn8, x0 - svcntw () * 18, z0),
+ svst1 (pn8, x0 - svcntw () * 18, z0))
+
+/*
+** st1_f32_z17:
+** mov [^\n]+
+** mov [^\n]+
+** st1w {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_z17, svfloat32x2_t, float32_t,
+ svst1_f32_x2 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_f32_z22:
+** st1w {z22\.s(?: - |, )z23\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_z22, svfloat32x2_t, float32_t,
+ svst1_f32_x2 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_f32_z28:
+** st1w {z28\.s(?: - |, )z29\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_z28, svfloat32x2_t, float32_t,
+ svst1_f32_x2 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_f32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1w {z0\.s(?: - |, )z1\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_pn0, svfloat32x2_t, float32_t,
+ svst1_f32_x2 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_f32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1w {z0\.s(?: - |, )z1\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_pn7, svfloat32x2_t, float32_t,
+ svst1_f32_x2 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_f32_pn15:
+** st1w {z0\.s(?: - |, )z1\.s}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_pn15, svfloat32x2_t, float32_t,
+ svst1_f32_x2 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_f32_0:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_0, svfloat32x2_t, float32_t,
+ svst1_vnum_f32_x2 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f32_1:
+** incb x0
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_1, svfloat32x2_t, float32_t,
+ svst1_vnum_f32_x2 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/*
+** st1_vnum_f32_2:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_2, svfloat32x2_t, float32_t,
+ svst1_vnum_f32_x2 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/*
+** st1_vnum_f32_14:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_14, svfloat32x2_t, float32_t,
+ svst1_vnum_f32_x2 (pn8, x0, 14, z0),
+ svst1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f32_16:
+** incb x0, all, mul #16
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_16, svfloat32x2_t, float32_t,
+ svst1_vnum_f32_x2 (pn8, x0, 16, z0),
+ svst1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f32_m1:
+** decb x0
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_m1, svfloat32x2_t, float32_t,
+ svst1_vnum_f32_x2 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/*
+** st1_vnum_f32_m2:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_m2, svfloat32x2_t, float32_t,
+ svst1_vnum_f32_x2 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/*
+** st1_vnum_f32_m16:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_m16, svfloat32x2_t, float32_t,
+ svst1_vnum_f32_x2 (pn8, x0, -16, z0),
+ svst1_vnum (pn8, x0, -16, z0))
+
+/*
+** st1_vnum_f32_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_m18, svfloat32x2_t, float32_t,
+ svst1_vnum_f32_x2 (pn8, x0, -18, z0),
+ svst1_vnum (pn8, x0, -18, z0))
+
+/*
+** st1_vnum_f32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_x1, svfloat32x2_t, float32_t,
+ svst1_vnum_f32_x2 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f32_x4.c
new file mode 100644
index 0000000..f861a9a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f32_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_f32_base:
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_base, svfloat32x4_t, float32_t,
+ svst1_f32_x4 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_f32_index:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_index, svfloat32x4_t, float32_t,
+ svst1_f32_x4 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f32_1:
+** incb x0
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_1, svfloat32x4_t, float32_t,
+ svst1_f32_x4 (pn8, x0 + svcntw (), z0),
+ svst1 (pn8, x0 + svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f32_2:
+** incb x0, all, mul #2
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_2, svfloat32x4_t, float32_t,
+ svst1_f32_x4 (pn8, x0 + svcntw () * 2, z0),
+ svst1 (pn8, x0 + svcntw () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f32_3:
+** incb x0, all, mul #3
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_3, svfloat32x4_t, float32_t,
+ svst1_f32_x4 (pn8, x0 + svcntw () * 3, z0),
+ svst1 (pn8, x0 + svcntw () * 3, z0))
+
+/*
+** st1_f32_4:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_4, svfloat32x4_t, float32_t,
+ svst1_f32_x4 (pn8, x0 + svcntw () * 4, z0),
+ svst1 (pn8, x0 + svcntw () * 4, z0))
+
+/*
+** st1_f32_28:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_28, svfloat32x4_t, float32_t,
+ svst1_f32_x4 (pn8, x0 + svcntw () * 28, z0),
+ svst1 (pn8, x0 + svcntw () * 28, z0))
+
+/*
+** st1_f32_32:
+** [^{]*
+** st1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_32, svfloat32x4_t, float32_t,
+ svst1_f32_x4 (pn8, x0 + svcntw () * 32, z0),
+ svst1 (pn8, x0 + svcntw () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f32_m1:
+** decb x0
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_m1, svfloat32x4_t, float32_t,
+ svst1_f32_x4 (pn8, x0 - svcntw (), z0),
+ svst1 (pn8, x0 - svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f32_m2:
+** decb x0, all, mul #2
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_m2, svfloat32x4_t, float32_t,
+ svst1_f32_x4 (pn8, x0 - svcntw () * 2, z0),
+ svst1 (pn8, x0 - svcntw () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f32_m3:
+** decb x0, all, mul #3
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_m3, svfloat32x4_t, float32_t,
+ svst1_f32_x4 (pn8, x0 - svcntw () * 3, z0),
+ svst1 (pn8, x0 - svcntw () * 3, z0))
+
+/*
+** st1_f32_m4:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_m4, svfloat32x4_t, float32_t,
+ svst1_f32_x4 (pn8, x0 - svcntw () * 4, z0),
+ svst1 (pn8, x0 - svcntw () * 4, z0))
+
+/*
+** st1_f32_m32:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_m32, svfloat32x4_t, float32_t,
+ svst1_f32_x4 (pn8, x0 - svcntw () * 32, z0),
+ svst1 (pn8, x0 - svcntw () * 32, z0))
+
+/*
+** st1_f32_m36:
+** [^{]*
+** st1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_m36, svfloat32x4_t, float32_t,
+ svst1_f32_x4 (pn8, x0 - svcntw () * 36, z0),
+ svst1 (pn8, x0 - svcntw () * 36, z0))
+
+/*
+** st1_f32_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1w {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_z17, svfloat32x4_t, float32_t,
+ svst1_f32_x4 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_f32_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1w {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_z22, svfloat32x4_t, float32_t,
+ svst1_f32_x4 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_f32_z28:
+** st1w {z28\.s - z31\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_z28, svfloat32x4_t, float32_t,
+ svst1_f32_x4 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_f32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1w {z0\.s - z3\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_pn0, svfloat32x4_t, float32_t,
+ svst1_f32_x4 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_f32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1w {z0\.s - z3\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_pn7, svfloat32x4_t, float32_t,
+ svst1_f32_x4 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_f32_pn15:
+** st1w {z0\.s - z3\.s}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f32_pn15, svfloat32x4_t, float32_t,
+ svst1_f32_x4 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_f32_0:
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_0, svfloat32x4_t, float32_t,
+ svst1_vnum_f32_x4 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f32_1:
+** incb x0
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_1, svfloat32x4_t, float32_t,
+ svst1_vnum_f32_x4 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f32_2:
+** incb x0, all, mul #2
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_2, svfloat32x4_t, float32_t,
+ svst1_vnum_f32_x4 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f32_3:
+** incb x0, all, mul #3
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_3, svfloat32x4_t, float32_t,
+ svst1_vnum_f32_x4 (pn8, x0, 3, z0),
+ svst1_vnum (pn8, x0, 3, z0))
+
+/*
+** st1_vnum_f32_4:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_4, svfloat32x4_t, float32_t,
+ svst1_vnum_f32_x4 (pn8, x0, 4, z0),
+ svst1_vnum (pn8, x0, 4, z0))
+
+/*
+** st1_vnum_f32_28:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_28, svfloat32x4_t, float32_t,
+ svst1_vnum_f32_x4 (pn8, x0, 28, z0),
+ svst1_vnum (pn8, x0, 28, z0))
+
+/*
+** st1_vnum_f32_32:
+** [^{]*
+** st1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_32, svfloat32x4_t, float32_t,
+ svst1_vnum_f32_x4 (pn8, x0, 32, z0),
+ svst1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f32_m1:
+** decb x0
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_m1, svfloat32x4_t, float32_t,
+ svst1_vnum_f32_x4 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f32_m2:
+** decb x0, all, mul #2
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_m2, svfloat32x4_t, float32_t,
+ svst1_vnum_f32_x4 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f32_m3:
+** decb x0, all, mul #3
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_m3, svfloat32x4_t, float32_t,
+ svst1_vnum_f32_x4 (pn8, x0, -3, z0),
+ svst1_vnum (pn8, x0, -3, z0))
+
+/*
+** st1_vnum_f32_m4:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_m4, svfloat32x4_t, float32_t,
+ svst1_vnum_f32_x4 (pn8, x0, -4, z0),
+ svst1_vnum (pn8, x0, -4, z0))
+
+/*
+** st1_vnum_f32_m32:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_m32, svfloat32x4_t, float32_t,
+ svst1_vnum_f32_x4 (pn8, x0, -32, z0),
+ svst1_vnum (pn8, x0, -32, z0))
+
+/*
+** st1_vnum_f32_m36:
+** [^{]*
+** st1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_m36, svfloat32x4_t, float32_t,
+ svst1_vnum_f32_x4 (pn8, x0, -36, z0),
+ svst1_vnum (pn8, x0, -36, z0))
+
+/*
+** st1_vnum_f32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1w {z0\.s - z3\.s}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1w {z0\.s - z3\.s}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f32_x1, svfloat32x4_t, float32_t,
+ svst1_vnum_f32_x4 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f64_x2.c
new file mode 100644
index 0000000..e373ea5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f64_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_f64_base:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_base, svfloat64x2_t, float64_t,
+ svst1_f64_x2 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_f64_index:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_index, svfloat64x2_t, float64_t,
+ svst1_f64_x2 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f64_1:
+** incb x0
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_1, svfloat64x2_t, float64_t,
+ svst1_f64_x2 (pn8, x0 + svcntd (), z0),
+ svst1 (pn8, x0 + svcntd (), z0))
+
+/*
+** st1_f64_2:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_2, svfloat64x2_t, float64_t,
+ svst1_f64_x2 (pn8, x0 + svcntd () * 2, z0),
+ svst1 (pn8, x0 + svcntd () * 2, z0))
+
+/*
+** st1_f64_14:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_14, svfloat64x2_t, float64_t,
+ svst1_f64_x2 (pn8, x0 + svcntd () * 14, z0),
+ svst1 (pn8, x0 + svcntd () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f64_16:
+** incb x0, all, mul #16
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_16, svfloat64x2_t, float64_t,
+ svst1_f64_x2 (pn8, x0 + svcntd () * 16, z0),
+ svst1 (pn8, x0 + svcntd () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f64_m1:
+** decb x0
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_m1, svfloat64x2_t, float64_t,
+ svst1_f64_x2 (pn8, x0 - svcntd (), z0),
+ svst1 (pn8, x0 - svcntd (), z0))
+
+/*
+** st1_f64_m2:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_m2, svfloat64x2_t, float64_t,
+ svst1_f64_x2 (pn8, x0 - svcntd () * 2, z0),
+ svst1 (pn8, x0 - svcntd () * 2, z0))
+
+/*
+** st1_f64_m16:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_m16, svfloat64x2_t, float64_t,
+ svst1_f64_x2 (pn8, x0 - svcntd () * 16, z0),
+ svst1 (pn8, x0 - svcntd () * 16, z0))
+
+/*
+** st1_f64_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_m18, svfloat64x2_t, float64_t,
+ svst1_f64_x2 (pn8, x0 - svcntd () * 18, z0),
+ svst1 (pn8, x0 - svcntd () * 18, z0))
+
+/*
+** st1_f64_z17:
+** mov [^\n]+
+** mov [^\n]+
+** st1d {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_z17, svfloat64x2_t, float64_t,
+ svst1_f64_x2 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_f64_z22:
+** st1d {z22\.d(?: - |, )z23\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_z22, svfloat64x2_t, float64_t,
+ svst1_f64_x2 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_f64_z28:
+** st1d {z28\.d(?: - |, )z29\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_z28, svfloat64x2_t, float64_t,
+ svst1_f64_x2 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_f64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1d {z0\.d(?: - |, )z1\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_pn0, svfloat64x2_t, float64_t,
+ svst1_f64_x2 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_f64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1d {z0\.d(?: - |, )z1\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_pn7, svfloat64x2_t, float64_t,
+ svst1_f64_x2 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_f64_pn15:
+** st1d {z0\.d(?: - |, )z1\.d}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_pn15, svfloat64x2_t, float64_t,
+ svst1_f64_x2 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_f64_0:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_0, svfloat64x2_t, float64_t,
+ svst1_vnum_f64_x2 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f64_1:
+** incb x0
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_1, svfloat64x2_t, float64_t,
+ svst1_vnum_f64_x2 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/*
+** st1_vnum_f64_2:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_2, svfloat64x2_t, float64_t,
+ svst1_vnum_f64_x2 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/*
+** st1_vnum_f64_14:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_14, svfloat64x2_t, float64_t,
+ svst1_vnum_f64_x2 (pn8, x0, 14, z0),
+ svst1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f64_16:
+** incb x0, all, mul #16
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_16, svfloat64x2_t, float64_t,
+ svst1_vnum_f64_x2 (pn8, x0, 16, z0),
+ svst1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f64_m1:
+** decb x0
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_m1, svfloat64x2_t, float64_t,
+ svst1_vnum_f64_x2 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/*
+** st1_vnum_f64_m2:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_m2, svfloat64x2_t, float64_t,
+ svst1_vnum_f64_x2 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/*
+** st1_vnum_f64_m16:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_m16, svfloat64x2_t, float64_t,
+ svst1_vnum_f64_x2 (pn8, x0, -16, z0),
+ svst1_vnum (pn8, x0, -16, z0))
+
+/*
+** st1_vnum_f64_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_m18, svfloat64x2_t, float64_t,
+ svst1_vnum_f64_x2 (pn8, x0, -18, z0),
+ svst1_vnum (pn8, x0, -18, z0))
+
+/*
+** st1_vnum_f64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_x1, svfloat64x2_t, float64_t,
+ svst1_vnum_f64_x2 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f64_x4.c
new file mode 100644
index 0000000..d7b2182
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_f64_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_f64_base:
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_base, svfloat64x4_t, float64_t,
+ svst1_f64_x4 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_f64_index:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_index, svfloat64x4_t, float64_t,
+ svst1_f64_x4 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f64_1:
+** incb x0
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_1, svfloat64x4_t, float64_t,
+ svst1_f64_x4 (pn8, x0 + svcntd (), z0),
+ svst1 (pn8, x0 + svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f64_2:
+** incb x0, all, mul #2
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_2, svfloat64x4_t, float64_t,
+ svst1_f64_x4 (pn8, x0 + svcntd () * 2, z0),
+ svst1 (pn8, x0 + svcntd () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f64_3:
+** incb x0, all, mul #3
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_3, svfloat64x4_t, float64_t,
+ svst1_f64_x4 (pn8, x0 + svcntd () * 3, z0),
+ svst1 (pn8, x0 + svcntd () * 3, z0))
+
+/*
+** st1_f64_4:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_4, svfloat64x4_t, float64_t,
+ svst1_f64_x4 (pn8, x0 + svcntd () * 4, z0),
+ svst1 (pn8, x0 + svcntd () * 4, z0))
+
+/*
+** st1_f64_28:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_28, svfloat64x4_t, float64_t,
+ svst1_f64_x4 (pn8, x0 + svcntd () * 28, z0),
+ svst1 (pn8, x0 + svcntd () * 28, z0))
+
+/*
+** st1_f64_32:
+** [^{]*
+** st1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_32, svfloat64x4_t, float64_t,
+ svst1_f64_x4 (pn8, x0 + svcntd () * 32, z0),
+ svst1 (pn8, x0 + svcntd () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f64_m1:
+** decb x0
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_m1, svfloat64x4_t, float64_t,
+ svst1_f64_x4 (pn8, x0 - svcntd (), z0),
+ svst1 (pn8, x0 - svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f64_m2:
+** decb x0, all, mul #2
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_m2, svfloat64x4_t, float64_t,
+ svst1_f64_x4 (pn8, x0 - svcntd () * 2, z0),
+ svst1 (pn8, x0 - svcntd () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_f64_m3:
+** decb x0, all, mul #3
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_m3, svfloat64x4_t, float64_t,
+ svst1_f64_x4 (pn8, x0 - svcntd () * 3, z0),
+ svst1 (pn8, x0 - svcntd () * 3, z0))
+
+/*
+** st1_f64_m4:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_m4, svfloat64x4_t, float64_t,
+ svst1_f64_x4 (pn8, x0 - svcntd () * 4, z0),
+ svst1 (pn8, x0 - svcntd () * 4, z0))
+
+/*
+** st1_f64_m32:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_m32, svfloat64x4_t, float64_t,
+ svst1_f64_x4 (pn8, x0 - svcntd () * 32, z0),
+ svst1 (pn8, x0 - svcntd () * 32, z0))
+
+/*
+** st1_f64_m36:
+** [^{]*
+** st1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_m36, svfloat64x4_t, float64_t,
+ svst1_f64_x4 (pn8, x0 - svcntd () * 36, z0),
+ svst1 (pn8, x0 - svcntd () * 36, z0))
+
+/*
+** st1_f64_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1d {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_z17, svfloat64x4_t, float64_t,
+ svst1_f64_x4 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_f64_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1d {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_z22, svfloat64x4_t, float64_t,
+ svst1_f64_x4 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_f64_z28:
+** st1d {z28\.d - z31\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_z28, svfloat64x4_t, float64_t,
+ svst1_f64_x4 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_f64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1d {z0\.d - z3\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_pn0, svfloat64x4_t, float64_t,
+ svst1_f64_x4 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_f64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1d {z0\.d - z3\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_pn7, svfloat64x4_t, float64_t,
+ svst1_f64_x4 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_f64_pn15:
+** st1d {z0\.d - z3\.d}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_f64_pn15, svfloat64x4_t, float64_t,
+ svst1_f64_x4 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_f64_0:
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_0, svfloat64x4_t, float64_t,
+ svst1_vnum_f64_x4 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f64_1:
+** incb x0
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_1, svfloat64x4_t, float64_t,
+ svst1_vnum_f64_x4 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f64_2:
+** incb x0, all, mul #2
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_2, svfloat64x4_t, float64_t,
+ svst1_vnum_f64_x4 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f64_3:
+** incb x0, all, mul #3
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_3, svfloat64x4_t, float64_t,
+ svst1_vnum_f64_x4 (pn8, x0, 3, z0),
+ svst1_vnum (pn8, x0, 3, z0))
+
+/*
+** st1_vnum_f64_4:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_4, svfloat64x4_t, float64_t,
+ svst1_vnum_f64_x4 (pn8, x0, 4, z0),
+ svst1_vnum (pn8, x0, 4, z0))
+
+/*
+** st1_vnum_f64_28:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_28, svfloat64x4_t, float64_t,
+ svst1_vnum_f64_x4 (pn8, x0, 28, z0),
+ svst1_vnum (pn8, x0, 28, z0))
+
+/*
+** st1_vnum_f64_32:
+** [^{]*
+** st1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_32, svfloat64x4_t, float64_t,
+ svst1_vnum_f64_x4 (pn8, x0, 32, z0),
+ svst1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f64_m1:
+** decb x0
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_m1, svfloat64x4_t, float64_t,
+ svst1_vnum_f64_x4 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f64_m2:
+** decb x0, all, mul #2
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_m2, svfloat64x4_t, float64_t,
+ svst1_vnum_f64_x4 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_f64_m3:
+** decb x0, all, mul #3
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_m3, svfloat64x4_t, float64_t,
+ svst1_vnum_f64_x4 (pn8, x0, -3, z0),
+ svst1_vnum (pn8, x0, -3, z0))
+
+/*
+** st1_vnum_f64_m4:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_m4, svfloat64x4_t, float64_t,
+ svst1_vnum_f64_x4 (pn8, x0, -4, z0),
+ svst1_vnum (pn8, x0, -4, z0))
+
+/*
+** st1_vnum_f64_m32:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_m32, svfloat64x4_t, float64_t,
+ svst1_vnum_f64_x4 (pn8, x0, -32, z0),
+ svst1_vnum (pn8, x0, -32, z0))
+
+/*
+** st1_vnum_f64_m36:
+** [^{]*
+** st1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_m36, svfloat64x4_t, float64_t,
+ svst1_vnum_f64_x4 (pn8, x0, -36, z0),
+ svst1_vnum (pn8, x0, -36, z0))
+
+/*
+** st1_vnum_f64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1d {z0\.d - z3\.d}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1d {z0\.d - z3\.d}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_f64_x1, svfloat64x4_t, float64_t,
+ svst1_vnum_f64_x4 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s16_x2.c
new file mode 100644
index 0000000..623b09b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s16_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_s16_base:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_base, svint16x2_t, int16_t,
+ svst1_s16_x2 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_s16_index:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_index, svint16x2_t, int16_t,
+ svst1_s16_x2 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s16_1:
+** incb x0
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_1, svint16x2_t, int16_t,
+ svst1_s16_x2 (pn8, x0 + svcnth (), z0),
+ svst1 (pn8, x0 + svcnth (), z0))
+
+/*
+** st1_s16_2:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_2, svint16x2_t, int16_t,
+ svst1_s16_x2 (pn8, x0 + svcnth () * 2, z0),
+ svst1 (pn8, x0 + svcnth () * 2, z0))
+
+/*
+** st1_s16_14:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_14, svint16x2_t, int16_t,
+ svst1_s16_x2 (pn8, x0 + svcnth () * 14, z0),
+ svst1 (pn8, x0 + svcnth () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s16_16:
+** incb x0, all, mul #16
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_16, svint16x2_t, int16_t,
+ svst1_s16_x2 (pn8, x0 + svcnth () * 16, z0),
+ svst1 (pn8, x0 + svcnth () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s16_m1:
+** decb x0
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_m1, svint16x2_t, int16_t,
+ svst1_s16_x2 (pn8, x0 - svcnth (), z0),
+ svst1 (pn8, x0 - svcnth (), z0))
+
+/*
+** st1_s16_m2:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_m2, svint16x2_t, int16_t,
+ svst1_s16_x2 (pn8, x0 - svcnth () * 2, z0),
+ svst1 (pn8, x0 - svcnth () * 2, z0))
+
+/*
+** st1_s16_m16:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_m16, svint16x2_t, int16_t,
+ svst1_s16_x2 (pn8, x0 - svcnth () * 16, z0),
+ svst1 (pn8, x0 - svcnth () * 16, z0))
+
+/*
+** st1_s16_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_m18, svint16x2_t, int16_t,
+ svst1_s16_x2 (pn8, x0 - svcnth () * 18, z0),
+ svst1 (pn8, x0 - svcnth () * 18, z0))
+
+/*
+** st1_s16_z17:
+** mov [^\n]+
+** mov [^\n]+
+** st1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_z17, svint16x2_t, int16_t,
+ svst1_s16_x2 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_s16_z22:
+** st1h {z22\.h(?: - |, )z23\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_z22, svint16x2_t, int16_t,
+ svst1_s16_x2 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_s16_z28:
+** st1h {z28\.h(?: - |, )z29\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_z28, svint16x2_t, int16_t,
+ svst1_s16_x2 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_s16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1h {z0\.h(?: - |, )z1\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_pn0, svint16x2_t, int16_t,
+ svst1_s16_x2 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_s16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1h {z0\.h(?: - |, )z1\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_pn7, svint16x2_t, int16_t,
+ svst1_s16_x2 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_s16_pn15:
+** st1h {z0\.h(?: - |, )z1\.h}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_pn15, svint16x2_t, int16_t,
+ svst1_s16_x2 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_s16_0:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_0, svint16x2_t, int16_t,
+ svst1_vnum_s16_x2 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s16_1:
+** incb x0
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_1, svint16x2_t, int16_t,
+ svst1_vnum_s16_x2 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/*
+** st1_vnum_s16_2:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_2, svint16x2_t, int16_t,
+ svst1_vnum_s16_x2 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/*
+** st1_vnum_s16_14:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_14, svint16x2_t, int16_t,
+ svst1_vnum_s16_x2 (pn8, x0, 14, z0),
+ svst1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s16_16:
+** incb x0, all, mul #16
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_16, svint16x2_t, int16_t,
+ svst1_vnum_s16_x2 (pn8, x0, 16, z0),
+ svst1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s16_m1:
+** decb x0
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_m1, svint16x2_t, int16_t,
+ svst1_vnum_s16_x2 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/*
+** st1_vnum_s16_m2:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_m2, svint16x2_t, int16_t,
+ svst1_vnum_s16_x2 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/*
+** st1_vnum_s16_m16:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_m16, svint16x2_t, int16_t,
+ svst1_vnum_s16_x2 (pn8, x0, -16, z0),
+ svst1_vnum (pn8, x0, -16, z0))
+
+/*
+** st1_vnum_s16_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_m18, svint16x2_t, int16_t,
+ svst1_vnum_s16_x2 (pn8, x0, -18, z0),
+ svst1_vnum (pn8, x0, -18, z0))
+
+/*
+** st1_vnum_s16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_x1, svint16x2_t, int16_t,
+ svst1_vnum_s16_x2 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s16_x4.c
new file mode 100644
index 0000000..bf1611a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s16_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_s16_base:
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_base, svint16x4_t, int16_t,
+ svst1_s16_x4 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_s16_index:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_index, svint16x4_t, int16_t,
+ svst1_s16_x4 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s16_1:
+** incb x0
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_1, svint16x4_t, int16_t,
+ svst1_s16_x4 (pn8, x0 + svcnth (), z0),
+ svst1 (pn8, x0 + svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s16_2:
+** incb x0, all, mul #2
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_2, svint16x4_t, int16_t,
+ svst1_s16_x4 (pn8, x0 + svcnth () * 2, z0),
+ svst1 (pn8, x0 + svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s16_3:
+** incb x0, all, mul #3
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_3, svint16x4_t, int16_t,
+ svst1_s16_x4 (pn8, x0 + svcnth () * 3, z0),
+ svst1 (pn8, x0 + svcnth () * 3, z0))
+
+/*
+** st1_s16_4:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_4, svint16x4_t, int16_t,
+ svst1_s16_x4 (pn8, x0 + svcnth () * 4, z0),
+ svst1 (pn8, x0 + svcnth () * 4, z0))
+
+/*
+** st1_s16_28:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_28, svint16x4_t, int16_t,
+ svst1_s16_x4 (pn8, x0 + svcnth () * 28, z0),
+ svst1 (pn8, x0 + svcnth () * 28, z0))
+
+/*
+** st1_s16_32:
+** [^{]*
+** st1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_32, svint16x4_t, int16_t,
+ svst1_s16_x4 (pn8, x0 + svcnth () * 32, z0),
+ svst1 (pn8, x0 + svcnth () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s16_m1:
+** decb x0
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_m1, svint16x4_t, int16_t,
+ svst1_s16_x4 (pn8, x0 - svcnth (), z0),
+ svst1 (pn8, x0 - svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s16_m2:
+** decb x0, all, mul #2
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_m2, svint16x4_t, int16_t,
+ svst1_s16_x4 (pn8, x0 - svcnth () * 2, z0),
+ svst1 (pn8, x0 - svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s16_m3:
+** decb x0, all, mul #3
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_m3, svint16x4_t, int16_t,
+ svst1_s16_x4 (pn8, x0 - svcnth () * 3, z0),
+ svst1 (pn8, x0 - svcnth () * 3, z0))
+
+/*
+** st1_s16_m4:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_m4, svint16x4_t, int16_t,
+ svst1_s16_x4 (pn8, x0 - svcnth () * 4, z0),
+ svst1 (pn8, x0 - svcnth () * 4, z0))
+
+/*
+** st1_s16_m32:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_m32, svint16x4_t, int16_t,
+ svst1_s16_x4 (pn8, x0 - svcnth () * 32, z0),
+ svst1 (pn8, x0 - svcnth () * 32, z0))
+
+/*
+** st1_s16_m36:
+** [^{]*
+** st1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_m36, svint16x4_t, int16_t,
+ svst1_s16_x4 (pn8, x0 - svcnth () * 36, z0),
+ svst1 (pn8, x0 - svcnth () * 36, z0))
+
+/*
+** st1_s16_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_z17, svint16x4_t, int16_t,
+ svst1_s16_x4 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_s16_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_z22, svint16x4_t, int16_t,
+ svst1_s16_x4 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_s16_z28:
+** st1h {z28\.h - z31\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_z28, svint16x4_t, int16_t,
+ svst1_s16_x4 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_s16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1h {z0\.h - z3\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_pn0, svint16x4_t, int16_t,
+ svst1_s16_x4 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_s16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1h {z0\.h - z3\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_pn7, svint16x4_t, int16_t,
+ svst1_s16_x4 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_s16_pn15:
+** st1h {z0\.h - z3\.h}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s16_pn15, svint16x4_t, int16_t,
+ svst1_s16_x4 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_s16_0:
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_0, svint16x4_t, int16_t,
+ svst1_vnum_s16_x4 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s16_1:
+** incb x0
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_1, svint16x4_t, int16_t,
+ svst1_vnum_s16_x4 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s16_2:
+** incb x0, all, mul #2
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_2, svint16x4_t, int16_t,
+ svst1_vnum_s16_x4 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s16_3:
+** incb x0, all, mul #3
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_3, svint16x4_t, int16_t,
+ svst1_vnum_s16_x4 (pn8, x0, 3, z0),
+ svst1_vnum (pn8, x0, 3, z0))
+
+/*
+** st1_vnum_s16_4:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_4, svint16x4_t, int16_t,
+ svst1_vnum_s16_x4 (pn8, x0, 4, z0),
+ svst1_vnum (pn8, x0, 4, z0))
+
+/*
+** st1_vnum_s16_28:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_28, svint16x4_t, int16_t,
+ svst1_vnum_s16_x4 (pn8, x0, 28, z0),
+ svst1_vnum (pn8, x0, 28, z0))
+
+/*
+** st1_vnum_s16_32:
+** [^{]*
+** st1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_32, svint16x4_t, int16_t,
+ svst1_vnum_s16_x4 (pn8, x0, 32, z0),
+ svst1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s16_m1:
+** decb x0
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_m1, svint16x4_t, int16_t,
+ svst1_vnum_s16_x4 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s16_m2:
+** decb x0, all, mul #2
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_m2, svint16x4_t, int16_t,
+ svst1_vnum_s16_x4 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s16_m3:
+** decb x0, all, mul #3
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_m3, svint16x4_t, int16_t,
+ svst1_vnum_s16_x4 (pn8, x0, -3, z0),
+ svst1_vnum (pn8, x0, -3, z0))
+
+/*
+** st1_vnum_s16_m4:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_m4, svint16x4_t, int16_t,
+ svst1_vnum_s16_x4 (pn8, x0, -4, z0),
+ svst1_vnum (pn8, x0, -4, z0))
+
+/*
+** st1_vnum_s16_m32:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_m32, svint16x4_t, int16_t,
+ svst1_vnum_s16_x4 (pn8, x0, -32, z0),
+ svst1_vnum (pn8, x0, -32, z0))
+
+/*
+** st1_vnum_s16_m36:
+** [^{]*
+** st1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_m36, svint16x4_t, int16_t,
+ svst1_vnum_s16_x4 (pn8, x0, -36, z0),
+ svst1_vnum (pn8, x0, -36, z0))
+
+/*
+** st1_vnum_s16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1h {z0\.h - z3\.h}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1h {z0\.h - z3\.h}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s16_x1, svint16x4_t, int16_t,
+ svst1_vnum_s16_x4 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s32_x2.c
new file mode 100644
index 0000000..2bb8174
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s32_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_s32_base:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_base, svint32x2_t, int32_t,
+ svst1_s32_x2 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_s32_index:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_index, svint32x2_t, int32_t,
+ svst1_s32_x2 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s32_1:
+** incb x0
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_1, svint32x2_t, int32_t,
+ svst1_s32_x2 (pn8, x0 + svcntw (), z0),
+ svst1 (pn8, x0 + svcntw (), z0))
+
+/*
+** st1_s32_2:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_2, svint32x2_t, int32_t,
+ svst1_s32_x2 (pn8, x0 + svcntw () * 2, z0),
+ svst1 (pn8, x0 + svcntw () * 2, z0))
+
+/*
+** st1_s32_14:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_14, svint32x2_t, int32_t,
+ svst1_s32_x2 (pn8, x0 + svcntw () * 14, z0),
+ svst1 (pn8, x0 + svcntw () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s32_16:
+** incb x0, all, mul #16
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_16, svint32x2_t, int32_t,
+ svst1_s32_x2 (pn8, x0 + svcntw () * 16, z0),
+ svst1 (pn8, x0 + svcntw () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s32_m1:
+** decb x0
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_m1, svint32x2_t, int32_t,
+ svst1_s32_x2 (pn8, x0 - svcntw (), z0),
+ svst1 (pn8, x0 - svcntw (), z0))
+
+/*
+** st1_s32_m2:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_m2, svint32x2_t, int32_t,
+ svst1_s32_x2 (pn8, x0 - svcntw () * 2, z0),
+ svst1 (pn8, x0 - svcntw () * 2, z0))
+
+/*
+** st1_s32_m16:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_m16, svint32x2_t, int32_t,
+ svst1_s32_x2 (pn8, x0 - svcntw () * 16, z0),
+ svst1 (pn8, x0 - svcntw () * 16, z0))
+
+/*
+** st1_s32_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_m18, svint32x2_t, int32_t,
+ svst1_s32_x2 (pn8, x0 - svcntw () * 18, z0),
+ svst1 (pn8, x0 - svcntw () * 18, z0))
+
+/*
+** st1_s32_z17:
+** mov [^\n]+
+** mov [^\n]+
+** st1w {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_z17, svint32x2_t, int32_t,
+ svst1_s32_x2 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_s32_z22:
+** st1w {z22\.s(?: - |, )z23\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_z22, svint32x2_t, int32_t,
+ svst1_s32_x2 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_s32_z28:
+** st1w {z28\.s(?: - |, )z29\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_z28, svint32x2_t, int32_t,
+ svst1_s32_x2 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_s32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1w {z0\.s(?: - |, )z1\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_pn0, svint32x2_t, int32_t,
+ svst1_s32_x2 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_s32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1w {z0\.s(?: - |, )z1\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_pn7, svint32x2_t, int32_t,
+ svst1_s32_x2 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_s32_pn15:
+** st1w {z0\.s(?: - |, )z1\.s}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_pn15, svint32x2_t, int32_t,
+ svst1_s32_x2 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_s32_0:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_0, svint32x2_t, int32_t,
+ svst1_vnum_s32_x2 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s32_1:
+** incb x0
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_1, svint32x2_t, int32_t,
+ svst1_vnum_s32_x2 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/*
+** st1_vnum_s32_2:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_2, svint32x2_t, int32_t,
+ svst1_vnum_s32_x2 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/*
+** st1_vnum_s32_14:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_14, svint32x2_t, int32_t,
+ svst1_vnum_s32_x2 (pn8, x0, 14, z0),
+ svst1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s32_16:
+** incb x0, all, mul #16
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_16, svint32x2_t, int32_t,
+ svst1_vnum_s32_x2 (pn8, x0, 16, z0),
+ svst1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s32_m1:
+** decb x0
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_m1, svint32x2_t, int32_t,
+ svst1_vnum_s32_x2 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/*
+** st1_vnum_s32_m2:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_m2, svint32x2_t, int32_t,
+ svst1_vnum_s32_x2 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/*
+** st1_vnum_s32_m16:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_m16, svint32x2_t, int32_t,
+ svst1_vnum_s32_x2 (pn8, x0, -16, z0),
+ svst1_vnum (pn8, x0, -16, z0))
+
+/*
+** st1_vnum_s32_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_m18, svint32x2_t, int32_t,
+ svst1_vnum_s32_x2 (pn8, x0, -18, z0),
+ svst1_vnum (pn8, x0, -18, z0))
+
+/*
+** st1_vnum_s32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_x1, svint32x2_t, int32_t,
+ svst1_vnum_s32_x2 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s32_x4.c
new file mode 100644
index 0000000..35e63a8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s32_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_s32_base:
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_base, svint32x4_t, int32_t,
+ svst1_s32_x4 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_s32_index:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_index, svint32x4_t, int32_t,
+ svst1_s32_x4 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s32_1:
+** incb x0
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_1, svint32x4_t, int32_t,
+ svst1_s32_x4 (pn8, x0 + svcntw (), z0),
+ svst1 (pn8, x0 + svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s32_2:
+** incb x0, all, mul #2
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_2, svint32x4_t, int32_t,
+ svst1_s32_x4 (pn8, x0 + svcntw () * 2, z0),
+ svst1 (pn8, x0 + svcntw () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s32_3:
+** incb x0, all, mul #3
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_3, svint32x4_t, int32_t,
+ svst1_s32_x4 (pn8, x0 + svcntw () * 3, z0),
+ svst1 (pn8, x0 + svcntw () * 3, z0))
+
+/*
+** st1_s32_4:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_4, svint32x4_t, int32_t,
+ svst1_s32_x4 (pn8, x0 + svcntw () * 4, z0),
+ svst1 (pn8, x0 + svcntw () * 4, z0))
+
+/*
+** st1_s32_28:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_28, svint32x4_t, int32_t,
+ svst1_s32_x4 (pn8, x0 + svcntw () * 28, z0),
+ svst1 (pn8, x0 + svcntw () * 28, z0))
+
+/*
+** st1_s32_32:
+** [^{]*
+** st1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_32, svint32x4_t, int32_t,
+ svst1_s32_x4 (pn8, x0 + svcntw () * 32, z0),
+ svst1 (pn8, x0 + svcntw () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s32_m1:
+** decb x0
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_m1, svint32x4_t, int32_t,
+ svst1_s32_x4 (pn8, x0 - svcntw (), z0),
+ svst1 (pn8, x0 - svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s32_m2:
+** decb x0, all, mul #2
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_m2, svint32x4_t, int32_t,
+ svst1_s32_x4 (pn8, x0 - svcntw () * 2, z0),
+ svst1 (pn8, x0 - svcntw () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s32_m3:
+** decb x0, all, mul #3
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_m3, svint32x4_t, int32_t,
+ svst1_s32_x4 (pn8, x0 - svcntw () * 3, z0),
+ svst1 (pn8, x0 - svcntw () * 3, z0))
+
+/*
+** st1_s32_m4:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_m4, svint32x4_t, int32_t,
+ svst1_s32_x4 (pn8, x0 - svcntw () * 4, z0),
+ svst1 (pn8, x0 - svcntw () * 4, z0))
+
+/*
+** st1_s32_m32:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_m32, svint32x4_t, int32_t,
+ svst1_s32_x4 (pn8, x0 - svcntw () * 32, z0),
+ svst1 (pn8, x0 - svcntw () * 32, z0))
+
+/*
+** st1_s32_m36:
+** [^{]*
+** st1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_m36, svint32x4_t, int32_t,
+ svst1_s32_x4 (pn8, x0 - svcntw () * 36, z0),
+ svst1 (pn8, x0 - svcntw () * 36, z0))
+
+/*
+** st1_s32_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1w {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_z17, svint32x4_t, int32_t,
+ svst1_s32_x4 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_s32_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1w {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_z22, svint32x4_t, int32_t,
+ svst1_s32_x4 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_s32_z28:
+** st1w {z28\.s - z31\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_z28, svint32x4_t, int32_t,
+ svst1_s32_x4 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_s32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1w {z0\.s - z3\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_pn0, svint32x4_t, int32_t,
+ svst1_s32_x4 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_s32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1w {z0\.s - z3\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_pn7, svint32x4_t, int32_t,
+ svst1_s32_x4 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_s32_pn15:
+** st1w {z0\.s - z3\.s}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s32_pn15, svint32x4_t, int32_t,
+ svst1_s32_x4 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_s32_0:
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_0, svint32x4_t, int32_t,
+ svst1_vnum_s32_x4 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s32_1:
+** incb x0
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_1, svint32x4_t, int32_t,
+ svst1_vnum_s32_x4 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s32_2:
+** incb x0, all, mul #2
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_2, svint32x4_t, int32_t,
+ svst1_vnum_s32_x4 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s32_3:
+** incb x0, all, mul #3
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_3, svint32x4_t, int32_t,
+ svst1_vnum_s32_x4 (pn8, x0, 3, z0),
+ svst1_vnum (pn8, x0, 3, z0))
+
+/*
+** st1_vnum_s32_4:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_4, svint32x4_t, int32_t,
+ svst1_vnum_s32_x4 (pn8, x0, 4, z0),
+ svst1_vnum (pn8, x0, 4, z0))
+
+/*
+** st1_vnum_s32_28:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_28, svint32x4_t, int32_t,
+ svst1_vnum_s32_x4 (pn8, x0, 28, z0),
+ svst1_vnum (pn8, x0, 28, z0))
+
+/*
+** st1_vnum_s32_32:
+** [^{]*
+** st1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_32, svint32x4_t, int32_t,
+ svst1_vnum_s32_x4 (pn8, x0, 32, z0),
+ svst1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s32_m1:
+** decb x0
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_m1, svint32x4_t, int32_t,
+ svst1_vnum_s32_x4 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s32_m2:
+** decb x0, all, mul #2
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_m2, svint32x4_t, int32_t,
+ svst1_vnum_s32_x4 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s32_m3:
+** decb x0, all, mul #3
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_m3, svint32x4_t, int32_t,
+ svst1_vnum_s32_x4 (pn8, x0, -3, z0),
+ svst1_vnum (pn8, x0, -3, z0))
+
+/*
+** st1_vnum_s32_m4:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_m4, svint32x4_t, int32_t,
+ svst1_vnum_s32_x4 (pn8, x0, -4, z0),
+ svst1_vnum (pn8, x0, -4, z0))
+
+/*
+** st1_vnum_s32_m32:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_m32, svint32x4_t, int32_t,
+ svst1_vnum_s32_x4 (pn8, x0, -32, z0),
+ svst1_vnum (pn8, x0, -32, z0))
+
+/*
+** st1_vnum_s32_m36:
+** [^{]*
+** st1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_m36, svint32x4_t, int32_t,
+ svst1_vnum_s32_x4 (pn8, x0, -36, z0),
+ svst1_vnum (pn8, x0, -36, z0))
+
+/*
+** st1_vnum_s32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1w {z0\.s - z3\.s}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1w {z0\.s - z3\.s}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s32_x1, svint32x4_t, int32_t,
+ svst1_vnum_s32_x4 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s64_x2.c
new file mode 100644
index 0000000..b379679
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s64_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_s64_base:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_base, svint64x2_t, int64_t,
+ svst1_s64_x2 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_s64_index:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_index, svint64x2_t, int64_t,
+ svst1_s64_x2 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s64_1:
+** incb x0
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_1, svint64x2_t, int64_t,
+ svst1_s64_x2 (pn8, x0 + svcntd (), z0),
+ svst1 (pn8, x0 + svcntd (), z0))
+
+/*
+** st1_s64_2:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_2, svint64x2_t, int64_t,
+ svst1_s64_x2 (pn8, x0 + svcntd () * 2, z0),
+ svst1 (pn8, x0 + svcntd () * 2, z0))
+
+/*
+** st1_s64_14:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_14, svint64x2_t, int64_t,
+ svst1_s64_x2 (pn8, x0 + svcntd () * 14, z0),
+ svst1 (pn8, x0 + svcntd () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s64_16:
+** incb x0, all, mul #16
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_16, svint64x2_t, int64_t,
+ svst1_s64_x2 (pn8, x0 + svcntd () * 16, z0),
+ svst1 (pn8, x0 + svcntd () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s64_m1:
+** decb x0
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_m1, svint64x2_t, int64_t,
+ svst1_s64_x2 (pn8, x0 - svcntd (), z0),
+ svst1 (pn8, x0 - svcntd (), z0))
+
+/*
+** st1_s64_m2:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_m2, svint64x2_t, int64_t,
+ svst1_s64_x2 (pn8, x0 - svcntd () * 2, z0),
+ svst1 (pn8, x0 - svcntd () * 2, z0))
+
+/*
+** st1_s64_m16:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_m16, svint64x2_t, int64_t,
+ svst1_s64_x2 (pn8, x0 - svcntd () * 16, z0),
+ svst1 (pn8, x0 - svcntd () * 16, z0))
+
+/*
+** st1_s64_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_m18, svint64x2_t, int64_t,
+ svst1_s64_x2 (pn8, x0 - svcntd () * 18, z0),
+ svst1 (pn8, x0 - svcntd () * 18, z0))
+
+/*
+** st1_s64_z17:
+** mov [^\n]+
+** mov [^\n]+
+** st1d {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_z17, svint64x2_t, int64_t,
+ svst1_s64_x2 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_s64_z22:
+** st1d {z22\.d(?: - |, )z23\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_z22, svint64x2_t, int64_t,
+ svst1_s64_x2 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_s64_z28:
+** st1d {z28\.d(?: - |, )z29\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_z28, svint64x2_t, int64_t,
+ svst1_s64_x2 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_s64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1d {z0\.d(?: - |, )z1\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_pn0, svint64x2_t, int64_t,
+ svst1_s64_x2 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_s64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1d {z0\.d(?: - |, )z1\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_pn7, svint64x2_t, int64_t,
+ svst1_s64_x2 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_s64_pn15:
+** st1d {z0\.d(?: - |, )z1\.d}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_pn15, svint64x2_t, int64_t,
+ svst1_s64_x2 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_s64_0:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_0, svint64x2_t, int64_t,
+ svst1_vnum_s64_x2 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s64_1:
+** incb x0
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_1, svint64x2_t, int64_t,
+ svst1_vnum_s64_x2 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/*
+** st1_vnum_s64_2:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_2, svint64x2_t, int64_t,
+ svst1_vnum_s64_x2 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/*
+** st1_vnum_s64_14:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_14, svint64x2_t, int64_t,
+ svst1_vnum_s64_x2 (pn8, x0, 14, z0),
+ svst1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s64_16:
+** incb x0, all, mul #16
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_16, svint64x2_t, int64_t,
+ svst1_vnum_s64_x2 (pn8, x0, 16, z0),
+ svst1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s64_m1:
+** decb x0
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_m1, svint64x2_t, int64_t,
+ svst1_vnum_s64_x2 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/*
+** st1_vnum_s64_m2:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_m2, svint64x2_t, int64_t,
+ svst1_vnum_s64_x2 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/*
+** st1_vnum_s64_m16:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_m16, svint64x2_t, int64_t,
+ svst1_vnum_s64_x2 (pn8, x0, -16, z0),
+ svst1_vnum (pn8, x0, -16, z0))
+
+/*
+** st1_vnum_s64_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_m18, svint64x2_t, int64_t,
+ svst1_vnum_s64_x2 (pn8, x0, -18, z0),
+ svst1_vnum (pn8, x0, -18, z0))
+
+/*
+** st1_vnum_s64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_x1, svint64x2_t, int64_t,
+ svst1_vnum_s64_x2 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s64_x4.c
new file mode 100644
index 0000000..359edf6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s64_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_s64_base:
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_base, svint64x4_t, int64_t,
+ svst1_s64_x4 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_s64_index:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_index, svint64x4_t, int64_t,
+ svst1_s64_x4 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s64_1:
+** incb x0
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_1, svint64x4_t, int64_t,
+ svst1_s64_x4 (pn8, x0 + svcntd (), z0),
+ svst1 (pn8, x0 + svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s64_2:
+** incb x0, all, mul #2
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_2, svint64x4_t, int64_t,
+ svst1_s64_x4 (pn8, x0 + svcntd () * 2, z0),
+ svst1 (pn8, x0 + svcntd () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s64_3:
+** incb x0, all, mul #3
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_3, svint64x4_t, int64_t,
+ svst1_s64_x4 (pn8, x0 + svcntd () * 3, z0),
+ svst1 (pn8, x0 + svcntd () * 3, z0))
+
+/*
+** st1_s64_4:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_4, svint64x4_t, int64_t,
+ svst1_s64_x4 (pn8, x0 + svcntd () * 4, z0),
+ svst1 (pn8, x0 + svcntd () * 4, z0))
+
+/*
+** st1_s64_28:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_28, svint64x4_t, int64_t,
+ svst1_s64_x4 (pn8, x0 + svcntd () * 28, z0),
+ svst1 (pn8, x0 + svcntd () * 28, z0))
+
+/*
+** st1_s64_32:
+** [^{]*
+** st1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_32, svint64x4_t, int64_t,
+ svst1_s64_x4 (pn8, x0 + svcntd () * 32, z0),
+ svst1 (pn8, x0 + svcntd () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s64_m1:
+** decb x0
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_m1, svint64x4_t, int64_t,
+ svst1_s64_x4 (pn8, x0 - svcntd (), z0),
+ svst1 (pn8, x0 - svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s64_m2:
+** decb x0, all, mul #2
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_m2, svint64x4_t, int64_t,
+ svst1_s64_x4 (pn8, x0 - svcntd () * 2, z0),
+ svst1 (pn8, x0 - svcntd () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s64_m3:
+** decb x0, all, mul #3
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_m3, svint64x4_t, int64_t,
+ svst1_s64_x4 (pn8, x0 - svcntd () * 3, z0),
+ svst1 (pn8, x0 - svcntd () * 3, z0))
+
+/*
+** st1_s64_m4:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_m4, svint64x4_t, int64_t,
+ svst1_s64_x4 (pn8, x0 - svcntd () * 4, z0),
+ svst1 (pn8, x0 - svcntd () * 4, z0))
+
+/*
+** st1_s64_m32:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_m32, svint64x4_t, int64_t,
+ svst1_s64_x4 (pn8, x0 - svcntd () * 32, z0),
+ svst1 (pn8, x0 - svcntd () * 32, z0))
+
+/*
+** st1_s64_m36:
+** [^{]*
+** st1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_m36, svint64x4_t, int64_t,
+ svst1_s64_x4 (pn8, x0 - svcntd () * 36, z0),
+ svst1 (pn8, x0 - svcntd () * 36, z0))
+
+/*
+** st1_s64_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1d {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_z17, svint64x4_t, int64_t,
+ svst1_s64_x4 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_s64_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1d {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_z22, svint64x4_t, int64_t,
+ svst1_s64_x4 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_s64_z28:
+** st1d {z28\.d - z31\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_z28, svint64x4_t, int64_t,
+ svst1_s64_x4 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_s64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1d {z0\.d - z3\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_pn0, svint64x4_t, int64_t,
+ svst1_s64_x4 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_s64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1d {z0\.d - z3\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_pn7, svint64x4_t, int64_t,
+ svst1_s64_x4 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_s64_pn15:
+** st1d {z0\.d - z3\.d}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s64_pn15, svint64x4_t, int64_t,
+ svst1_s64_x4 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_s64_0:
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_0, svint64x4_t, int64_t,
+ svst1_vnum_s64_x4 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s64_1:
+** incb x0
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_1, svint64x4_t, int64_t,
+ svst1_vnum_s64_x4 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s64_2:
+** incb x0, all, mul #2
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_2, svint64x4_t, int64_t,
+ svst1_vnum_s64_x4 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s64_3:
+** incb x0, all, mul #3
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_3, svint64x4_t, int64_t,
+ svst1_vnum_s64_x4 (pn8, x0, 3, z0),
+ svst1_vnum (pn8, x0, 3, z0))
+
+/*
+** st1_vnum_s64_4:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_4, svint64x4_t, int64_t,
+ svst1_vnum_s64_x4 (pn8, x0, 4, z0),
+ svst1_vnum (pn8, x0, 4, z0))
+
+/*
+** st1_vnum_s64_28:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_28, svint64x4_t, int64_t,
+ svst1_vnum_s64_x4 (pn8, x0, 28, z0),
+ svst1_vnum (pn8, x0, 28, z0))
+
+/*
+** st1_vnum_s64_32:
+** [^{]*
+** st1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_32, svint64x4_t, int64_t,
+ svst1_vnum_s64_x4 (pn8, x0, 32, z0),
+ svst1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s64_m1:
+** decb x0
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_m1, svint64x4_t, int64_t,
+ svst1_vnum_s64_x4 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s64_m2:
+** decb x0, all, mul #2
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_m2, svint64x4_t, int64_t,
+ svst1_vnum_s64_x4 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s64_m3:
+** decb x0, all, mul #3
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_m3, svint64x4_t, int64_t,
+ svst1_vnum_s64_x4 (pn8, x0, -3, z0),
+ svst1_vnum (pn8, x0, -3, z0))
+
+/*
+** st1_vnum_s64_m4:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_m4, svint64x4_t, int64_t,
+ svst1_vnum_s64_x4 (pn8, x0, -4, z0),
+ svst1_vnum (pn8, x0, -4, z0))
+
+/*
+** st1_vnum_s64_m32:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_m32, svint64x4_t, int64_t,
+ svst1_vnum_s64_x4 (pn8, x0, -32, z0),
+ svst1_vnum (pn8, x0, -32, z0))
+
+/*
+** st1_vnum_s64_m36:
+** [^{]*
+** st1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_m36, svint64x4_t, int64_t,
+ svst1_vnum_s64_x4 (pn8, x0, -36, z0),
+ svst1_vnum (pn8, x0, -36, z0))
+
+/*
+** st1_vnum_s64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1d {z0\.d - z3\.d}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1d {z0\.d - z3\.d}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s64_x1, svint64x4_t, int64_t,
+ svst1_vnum_s64_x4 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s8_x2.c
new file mode 100644
index 0000000..8da1e5f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s8_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_s8_base:
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_base, svint8x2_t, int8_t,
+ svst1_s8_x2 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_s8_index:
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, x1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_index, svint8x2_t, int8_t,
+ svst1_s8_x2 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s8_1:
+** incb x0
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_1, svint8x2_t, int8_t,
+ svst1_s8_x2 (pn8, x0 + svcntb (), z0),
+ svst1 (pn8, x0 + svcntb (), z0))
+
+/*
+** st1_s8_2:
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_2, svint8x2_t, int8_t,
+ svst1_s8_x2 (pn8, x0 + svcntb () * 2, z0),
+ svst1 (pn8, x0 + svcntb () * 2, z0))
+
+/*
+** st1_s8_14:
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_14, svint8x2_t, int8_t,
+ svst1_s8_x2 (pn8, x0 + svcntb () * 14, z0),
+ svst1 (pn8, x0 + svcntb () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s8_16:
+** incb x0, all, mul #16
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_16, svint8x2_t, int8_t,
+ svst1_s8_x2 (pn8, x0 + svcntb () * 16, z0),
+ svst1 (pn8, x0 + svcntb () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s8_m1:
+** decb x0
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_m1, svint8x2_t, int8_t,
+ svst1_s8_x2 (pn8, x0 - svcntb (), z0),
+ svst1 (pn8, x0 - svcntb (), z0))
+
+/*
+** st1_s8_m2:
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_m2, svint8x2_t, int8_t,
+ svst1_s8_x2 (pn8, x0 - svcntb () * 2, z0),
+ svst1 (pn8, x0 - svcntb () * 2, z0))
+
+/*
+** st1_s8_m16:
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_m16, svint8x2_t, int8_t,
+ svst1_s8_x2 (pn8, x0 - svcntb () * 16, z0),
+ svst1 (pn8, x0 - svcntb () * 16, z0))
+
+/*
+** st1_s8_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_m18, svint8x2_t, int8_t,
+ svst1_s8_x2 (pn8, x0 - svcntb () * 18, z0),
+ svst1 (pn8, x0 - svcntb () * 18, z0))
+
+/*
+** st1_s8_z17:
+** mov [^\n]+
+** mov [^\n]+
+** st1b {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_z17, svint8x2_t, int8_t,
+ svst1_s8_x2 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_s8_z22:
+** st1b {z22\.b(?: - |, )z23\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_z22, svint8x2_t, int8_t,
+ svst1_s8_x2 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_s8_z28:
+** st1b {z28\.b(?: - |, )z29\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_z28, svint8x2_t, int8_t,
+ svst1_s8_x2 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_s8_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1b {z0\.b(?: - |, )z1\.b}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_pn0, svint8x2_t, int8_t,
+ svst1_s8_x2 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_s8_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1b {z0\.b(?: - |, )z1\.b}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_pn7, svint8x2_t, int8_t,
+ svst1_s8_x2 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_s8_pn15:
+** st1b {z0\.b(?: - |, )z1\.b}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_pn15, svint8x2_t, int8_t,
+ svst1_s8_x2 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_s8_0:
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_0, svint8x2_t, int8_t,
+ svst1_vnum_s8_x2 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s8_1:
+** incb x0
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_1, svint8x2_t, int8_t,
+ svst1_vnum_s8_x2 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/*
+** st1_vnum_s8_2:
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_2, svint8x2_t, int8_t,
+ svst1_vnum_s8_x2 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/*
+** st1_vnum_s8_14:
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_14, svint8x2_t, int8_t,
+ svst1_vnum_s8_x2 (pn8, x0, 14, z0),
+ svst1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s8_16:
+** incb x0, all, mul #16
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_16, svint8x2_t, int8_t,
+ svst1_vnum_s8_x2 (pn8, x0, 16, z0),
+ svst1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s8_m1:
+** decb x0
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_m1, svint8x2_t, int8_t,
+ svst1_vnum_s8_x2 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/*
+** st1_vnum_s8_m2:
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_m2, svint8x2_t, int8_t,
+ svst1_vnum_s8_x2 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/*
+** st1_vnum_s8_m16:
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_m16, svint8x2_t, int8_t,
+ svst1_vnum_s8_x2 (pn8, x0, -16, z0),
+ svst1_vnum (pn8, x0, -16, z0))
+
+/*
+** st1_vnum_s8_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_m18, svint8x2_t, int8_t,
+ svst1_vnum_s8_x2 (pn8, x0, -18, z0),
+ svst1_vnum (pn8, x0, -18, z0))
+
+/*
+** st1_vnum_s8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_x1, svint8x2_t, int8_t,
+ svst1_vnum_s8_x2 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s8_x4.c
new file mode 100644
index 0000000..66b8934
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_s8_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_s8_base:
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_base, svint8x4_t, int8_t,
+ svst1_s8_x4 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_s8_index:
+** st1b {z0\.b - z3\.b}, pn8, \[x0, x1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_index, svint8x4_t, int8_t,
+ svst1_s8_x4 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s8_1:
+** incb x0
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_1, svint8x4_t, int8_t,
+ svst1_s8_x4 (pn8, x0 + svcntb (), z0),
+ svst1 (pn8, x0 + svcntb (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s8_2:
+** incb x0, all, mul #2
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_2, svint8x4_t, int8_t,
+ svst1_s8_x4 (pn8, x0 + svcntb () * 2, z0),
+ svst1 (pn8, x0 + svcntb () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s8_3:
+** incb x0, all, mul #3
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_3, svint8x4_t, int8_t,
+ svst1_s8_x4 (pn8, x0 + svcntb () * 3, z0),
+ svst1 (pn8, x0 + svcntb () * 3, z0))
+
+/*
+** st1_s8_4:
+** st1b {z0\.b - z3\.b}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_4, svint8x4_t, int8_t,
+ svst1_s8_x4 (pn8, x0 + svcntb () * 4, z0),
+ svst1 (pn8, x0 + svcntb () * 4, z0))
+
+/*
+** st1_s8_28:
+** st1b {z0\.b - z3\.b}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_28, svint8x4_t, int8_t,
+ svst1_s8_x4 (pn8, x0 + svcntb () * 28, z0),
+ svst1 (pn8, x0 + svcntb () * 28, z0))
+
+/*
+** st1_s8_32:
+** [^{]*
+** st1b {z0\.b - z3\.b}, pn8, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_32, svint8x4_t, int8_t,
+ svst1_s8_x4 (pn8, x0 + svcntb () * 32, z0),
+ svst1 (pn8, x0 + svcntb () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s8_m1:
+** decb x0
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_m1, svint8x4_t, int8_t,
+ svst1_s8_x4 (pn8, x0 - svcntb (), z0),
+ svst1 (pn8, x0 - svcntb (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s8_m2:
+** decb x0, all, mul #2
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_m2, svint8x4_t, int8_t,
+ svst1_s8_x4 (pn8, x0 - svcntb () * 2, z0),
+ svst1 (pn8, x0 - svcntb () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_s8_m3:
+** decb x0, all, mul #3
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_m3, svint8x4_t, int8_t,
+ svst1_s8_x4 (pn8, x0 - svcntb () * 3, z0),
+ svst1 (pn8, x0 - svcntb () * 3, z0))
+
+/*
+** st1_s8_m4:
+** st1b {z0\.b - z3\.b}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_m4, svint8x4_t, int8_t,
+ svst1_s8_x4 (pn8, x0 - svcntb () * 4, z0),
+ svst1 (pn8, x0 - svcntb () * 4, z0))
+
+/*
+** st1_s8_m32:
+** st1b {z0\.b - z3\.b}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_m32, svint8x4_t, int8_t,
+ svst1_s8_x4 (pn8, x0 - svcntb () * 32, z0),
+ svst1 (pn8, x0 - svcntb () * 32, z0))
+
+/*
+** st1_s8_m36:
+** [^{]*
+** st1b {z0\.b - z3\.b}, pn8, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_m36, svint8x4_t, int8_t,
+ svst1_s8_x4 (pn8, x0 - svcntb () * 36, z0),
+ svst1 (pn8, x0 - svcntb () * 36, z0))
+
+/*
+** st1_s8_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1b {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_z17, svint8x4_t, int8_t,
+ svst1_s8_x4 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_s8_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1b {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_z22, svint8x4_t, int8_t,
+ svst1_s8_x4 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_s8_z28:
+** st1b {z28\.b - z31\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_z28, svint8x4_t, int8_t,
+ svst1_s8_x4 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_s8_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1b {z0\.b - z3\.b}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_pn0, svint8x4_t, int8_t,
+ svst1_s8_x4 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_s8_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1b {z0\.b - z3\.b}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_pn7, svint8x4_t, int8_t,
+ svst1_s8_x4 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_s8_pn15:
+** st1b {z0\.b - z3\.b}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_s8_pn15, svint8x4_t, int8_t,
+ svst1_s8_x4 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_s8_0:
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_0, svint8x4_t, int8_t,
+ svst1_vnum_s8_x4 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s8_1:
+** incb x0
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_1, svint8x4_t, int8_t,
+ svst1_vnum_s8_x4 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s8_2:
+** incb x0, all, mul #2
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_2, svint8x4_t, int8_t,
+ svst1_vnum_s8_x4 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s8_3:
+** incb x0, all, mul #3
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_3, svint8x4_t, int8_t,
+ svst1_vnum_s8_x4 (pn8, x0, 3, z0),
+ svst1_vnum (pn8, x0, 3, z0))
+
+/*
+** st1_vnum_s8_4:
+** st1b {z0\.b - z3\.b}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_4, svint8x4_t, int8_t,
+ svst1_vnum_s8_x4 (pn8, x0, 4, z0),
+ svst1_vnum (pn8, x0, 4, z0))
+
+/*
+** st1_vnum_s8_28:
+** st1b {z0\.b - z3\.b}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_28, svint8x4_t, int8_t,
+ svst1_vnum_s8_x4 (pn8, x0, 28, z0),
+ svst1_vnum (pn8, x0, 28, z0))
+
+/*
+** st1_vnum_s8_32:
+** [^{]*
+** st1b {z0\.b - z3\.b}, pn8, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_32, svint8x4_t, int8_t,
+ svst1_vnum_s8_x4 (pn8, x0, 32, z0),
+ svst1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s8_m1:
+** decb x0
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_m1, svint8x4_t, int8_t,
+ svst1_vnum_s8_x4 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s8_m2:
+** decb x0, all, mul #2
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_m2, svint8x4_t, int8_t,
+ svst1_vnum_s8_x4 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_s8_m3:
+** decb x0, all, mul #3
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_m3, svint8x4_t, int8_t,
+ svst1_vnum_s8_x4 (pn8, x0, -3, z0),
+ svst1_vnum (pn8, x0, -3, z0))
+
+/*
+** st1_vnum_s8_m4:
+** st1b {z0\.b - z3\.b}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_m4, svint8x4_t, int8_t,
+ svst1_vnum_s8_x4 (pn8, x0, -4, z0),
+ svst1_vnum (pn8, x0, -4, z0))
+
+/*
+** st1_vnum_s8_m32:
+** st1b {z0\.b - z3\.b}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_m32, svint8x4_t, int8_t,
+ svst1_vnum_s8_x4 (pn8, x0, -32, z0),
+ svst1_vnum (pn8, x0, -32, z0))
+
+/*
+** st1_vnum_s8_m36:
+** [^{]*
+** st1b {z0\.b - z3\.b}, pn8, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_m36, svint8x4_t, int8_t,
+ svst1_vnum_s8_x4 (pn8, x0, -36, z0),
+ svst1_vnum (pn8, x0, -36, z0))
+
+/*
+** st1_vnum_s8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1b {z0\.b - z3\.b}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1b {z0\.b - z3\.b}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_s8_x1, svint8x4_t, int8_t,
+ svst1_vnum_s8_x4 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u16_x2.c
new file mode 100644
index 0000000..f9cfe02
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u16_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_u16_base:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_base, svuint16x2_t, uint16_t,
+ svst1_u16_x2 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_u16_index:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_index, svuint16x2_t, uint16_t,
+ svst1_u16_x2 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u16_1:
+** incb x0
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_1, svuint16x2_t, uint16_t,
+ svst1_u16_x2 (pn8, x0 + svcnth (), z0),
+ svst1 (pn8, x0 + svcnth (), z0))
+
+/*
+** st1_u16_2:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_2, svuint16x2_t, uint16_t,
+ svst1_u16_x2 (pn8, x0 + svcnth () * 2, z0),
+ svst1 (pn8, x0 + svcnth () * 2, z0))
+
+/*
+** st1_u16_14:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_14, svuint16x2_t, uint16_t,
+ svst1_u16_x2 (pn8, x0 + svcnth () * 14, z0),
+ svst1 (pn8, x0 + svcnth () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u16_16:
+** incb x0, all, mul #16
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_16, svuint16x2_t, uint16_t,
+ svst1_u16_x2 (pn8, x0 + svcnth () * 16, z0),
+ svst1 (pn8, x0 + svcnth () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u16_m1:
+** decb x0
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_m1, svuint16x2_t, uint16_t,
+ svst1_u16_x2 (pn8, x0 - svcnth (), z0),
+ svst1 (pn8, x0 - svcnth (), z0))
+
+/*
+** st1_u16_m2:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_m2, svuint16x2_t, uint16_t,
+ svst1_u16_x2 (pn8, x0 - svcnth () * 2, z0),
+ svst1 (pn8, x0 - svcnth () * 2, z0))
+
+/*
+** st1_u16_m16:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_m16, svuint16x2_t, uint16_t,
+ svst1_u16_x2 (pn8, x0 - svcnth () * 16, z0),
+ svst1 (pn8, x0 - svcnth () * 16, z0))
+
+/*
+** st1_u16_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_m18, svuint16x2_t, uint16_t,
+ svst1_u16_x2 (pn8, x0 - svcnth () * 18, z0),
+ svst1 (pn8, x0 - svcnth () * 18, z0))
+
+/*
+** st1_u16_z17:
+** mov [^\n]+
+** mov [^\n]+
+** st1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_z17, svuint16x2_t, uint16_t,
+ svst1_u16_x2 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_u16_z22:
+** st1h {z22\.h(?: - |, )z23\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_z22, svuint16x2_t, uint16_t,
+ svst1_u16_x2 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_u16_z28:
+** st1h {z28\.h(?: - |, )z29\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_z28, svuint16x2_t, uint16_t,
+ svst1_u16_x2 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_u16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1h {z0\.h(?: - |, )z1\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_pn0, svuint16x2_t, uint16_t,
+ svst1_u16_x2 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_u16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1h {z0\.h(?: - |, )z1\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_pn7, svuint16x2_t, uint16_t,
+ svst1_u16_x2 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_u16_pn15:
+** st1h {z0\.h(?: - |, )z1\.h}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_pn15, svuint16x2_t, uint16_t,
+ svst1_u16_x2 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_u16_0:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_0, svuint16x2_t, uint16_t,
+ svst1_vnum_u16_x2 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u16_1:
+** incb x0
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_1, svuint16x2_t, uint16_t,
+ svst1_vnum_u16_x2 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/*
+** st1_vnum_u16_2:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_2, svuint16x2_t, uint16_t,
+ svst1_vnum_u16_x2 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/*
+** st1_vnum_u16_14:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_14, svuint16x2_t, uint16_t,
+ svst1_vnum_u16_x2 (pn8, x0, 14, z0),
+ svst1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u16_16:
+** incb x0, all, mul #16
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_16, svuint16x2_t, uint16_t,
+ svst1_vnum_u16_x2 (pn8, x0, 16, z0),
+ svst1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u16_m1:
+** decb x0
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_m1, svuint16x2_t, uint16_t,
+ svst1_vnum_u16_x2 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/*
+** st1_vnum_u16_m2:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_m2, svuint16x2_t, uint16_t,
+ svst1_vnum_u16_x2 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/*
+** st1_vnum_u16_m16:
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_m16, svuint16x2_t, uint16_t,
+ svst1_vnum_u16_x2 (pn8, x0, -16, z0),
+ svst1_vnum (pn8, x0, -16, z0))
+
+/*
+** st1_vnum_u16_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_m18, svuint16x2_t, uint16_t,
+ svst1_vnum_u16_x2 (pn8, x0, -18, z0),
+ svst1_vnum (pn8, x0, -18, z0))
+
+/*
+** st1_vnum_u16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_x1, svuint16x2_t, uint16_t,
+ svst1_vnum_u16_x2 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u16_x4.c
new file mode 100644
index 0000000..e0f6dd8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u16_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_u16_base:
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_base, svuint16x4_t, uint16_t,
+ svst1_u16_x4 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_u16_index:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_index, svuint16x4_t, uint16_t,
+ svst1_u16_x4 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u16_1:
+** incb x0
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_1, svuint16x4_t, uint16_t,
+ svst1_u16_x4 (pn8, x0 + svcnth (), z0),
+ svst1 (pn8, x0 + svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u16_2:
+** incb x0, all, mul #2
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_2, svuint16x4_t, uint16_t,
+ svst1_u16_x4 (pn8, x0 + svcnth () * 2, z0),
+ svst1 (pn8, x0 + svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u16_3:
+** incb x0, all, mul #3
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_3, svuint16x4_t, uint16_t,
+ svst1_u16_x4 (pn8, x0 + svcnth () * 3, z0),
+ svst1 (pn8, x0 + svcnth () * 3, z0))
+
+/*
+** st1_u16_4:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_4, svuint16x4_t, uint16_t,
+ svst1_u16_x4 (pn8, x0 + svcnth () * 4, z0),
+ svst1 (pn8, x0 + svcnth () * 4, z0))
+
+/*
+** st1_u16_28:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_28, svuint16x4_t, uint16_t,
+ svst1_u16_x4 (pn8, x0 + svcnth () * 28, z0),
+ svst1 (pn8, x0 + svcnth () * 28, z0))
+
+/*
+** st1_u16_32:
+** [^{]*
+** st1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_32, svuint16x4_t, uint16_t,
+ svst1_u16_x4 (pn8, x0 + svcnth () * 32, z0),
+ svst1 (pn8, x0 + svcnth () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u16_m1:
+** decb x0
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_m1, svuint16x4_t, uint16_t,
+ svst1_u16_x4 (pn8, x0 - svcnth (), z0),
+ svst1 (pn8, x0 - svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u16_m2:
+** decb x0, all, mul #2
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_m2, svuint16x4_t, uint16_t,
+ svst1_u16_x4 (pn8, x0 - svcnth () * 2, z0),
+ svst1 (pn8, x0 - svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u16_m3:
+** decb x0, all, mul #3
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_m3, svuint16x4_t, uint16_t,
+ svst1_u16_x4 (pn8, x0 - svcnth () * 3, z0),
+ svst1 (pn8, x0 - svcnth () * 3, z0))
+
+/*
+** st1_u16_m4:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_m4, svuint16x4_t, uint16_t,
+ svst1_u16_x4 (pn8, x0 - svcnth () * 4, z0),
+ svst1 (pn8, x0 - svcnth () * 4, z0))
+
+/*
+** st1_u16_m32:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_m32, svuint16x4_t, uint16_t,
+ svst1_u16_x4 (pn8, x0 - svcnth () * 32, z0),
+ svst1 (pn8, x0 - svcnth () * 32, z0))
+
+/*
+** st1_u16_m36:
+** [^{]*
+** st1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_m36, svuint16x4_t, uint16_t,
+ svst1_u16_x4 (pn8, x0 - svcnth () * 36, z0),
+ svst1 (pn8, x0 - svcnth () * 36, z0))
+
+/*
+** st1_u16_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_z17, svuint16x4_t, uint16_t,
+ svst1_u16_x4 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_u16_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_z22, svuint16x4_t, uint16_t,
+ svst1_u16_x4 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_u16_z28:
+** st1h {z28\.h - z31\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_z28, svuint16x4_t, uint16_t,
+ svst1_u16_x4 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_u16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1h {z0\.h - z3\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_pn0, svuint16x4_t, uint16_t,
+ svst1_u16_x4 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_u16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1h {z0\.h - z3\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_pn7, svuint16x4_t, uint16_t,
+ svst1_u16_x4 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_u16_pn15:
+** st1h {z0\.h - z3\.h}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u16_pn15, svuint16x4_t, uint16_t,
+ svst1_u16_x4 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_u16_0:
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_0, svuint16x4_t, uint16_t,
+ svst1_vnum_u16_x4 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u16_1:
+** incb x0
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_1, svuint16x4_t, uint16_t,
+ svst1_vnum_u16_x4 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u16_2:
+** incb x0, all, mul #2
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_2, svuint16x4_t, uint16_t,
+ svst1_vnum_u16_x4 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u16_3:
+** incb x0, all, mul #3
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_3, svuint16x4_t, uint16_t,
+ svst1_vnum_u16_x4 (pn8, x0, 3, z0),
+ svst1_vnum (pn8, x0, 3, z0))
+
+/*
+** st1_vnum_u16_4:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_4, svuint16x4_t, uint16_t,
+ svst1_vnum_u16_x4 (pn8, x0, 4, z0),
+ svst1_vnum (pn8, x0, 4, z0))
+
+/*
+** st1_vnum_u16_28:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_28, svuint16x4_t, uint16_t,
+ svst1_vnum_u16_x4 (pn8, x0, 28, z0),
+ svst1_vnum (pn8, x0, 28, z0))
+
+/*
+** st1_vnum_u16_32:
+** [^{]*
+** st1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_32, svuint16x4_t, uint16_t,
+ svst1_vnum_u16_x4 (pn8, x0, 32, z0),
+ svst1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u16_m1:
+** decb x0
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_m1, svuint16x4_t, uint16_t,
+ svst1_vnum_u16_x4 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u16_m2:
+** decb x0, all, mul #2
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_m2, svuint16x4_t, uint16_t,
+ svst1_vnum_u16_x4 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u16_m3:
+** decb x0, all, mul #3
+** st1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_m3, svuint16x4_t, uint16_t,
+ svst1_vnum_u16_x4 (pn8, x0, -3, z0),
+ svst1_vnum (pn8, x0, -3, z0))
+
+/*
+** st1_vnum_u16_m4:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_m4, svuint16x4_t, uint16_t,
+ svst1_vnum_u16_x4 (pn8, x0, -4, z0),
+ svst1_vnum (pn8, x0, -4, z0))
+
+/*
+** st1_vnum_u16_m32:
+** st1h {z0\.h - z3\.h}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_m32, svuint16x4_t, uint16_t,
+ svst1_vnum_u16_x4 (pn8, x0, -32, z0),
+ svst1_vnum (pn8, x0, -32, z0))
+
+/*
+** st1_vnum_u16_m36:
+** [^{]*
+** st1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_m36, svuint16x4_t, uint16_t,
+ svst1_vnum_u16_x4 (pn8, x0, -36, z0),
+ svst1_vnum (pn8, x0, -36, z0))
+
+/*
+** st1_vnum_u16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1h {z0\.h - z3\.h}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1h {z0\.h - z3\.h}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u16_x1, svuint16x4_t, uint16_t,
+ svst1_vnum_u16_x4 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u32_x2.c
new file mode 100644
index 0000000..327ad66
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u32_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_u32_base:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_base, svuint32x2_t, uint32_t,
+ svst1_u32_x2 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_u32_index:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_index, svuint32x2_t, uint32_t,
+ svst1_u32_x2 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u32_1:
+** incb x0
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_1, svuint32x2_t, uint32_t,
+ svst1_u32_x2 (pn8, x0 + svcntw (), z0),
+ svst1 (pn8, x0 + svcntw (), z0))
+
+/*
+** st1_u32_2:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_2, svuint32x2_t, uint32_t,
+ svst1_u32_x2 (pn8, x0 + svcntw () * 2, z0),
+ svst1 (pn8, x0 + svcntw () * 2, z0))
+
+/*
+** st1_u32_14:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_14, svuint32x2_t, uint32_t,
+ svst1_u32_x2 (pn8, x0 + svcntw () * 14, z0),
+ svst1 (pn8, x0 + svcntw () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u32_16:
+** incb x0, all, mul #16
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_16, svuint32x2_t, uint32_t,
+ svst1_u32_x2 (pn8, x0 + svcntw () * 16, z0),
+ svst1 (pn8, x0 + svcntw () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u32_m1:
+** decb x0
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_m1, svuint32x2_t, uint32_t,
+ svst1_u32_x2 (pn8, x0 - svcntw (), z0),
+ svst1 (pn8, x0 - svcntw (), z0))
+
+/*
+** st1_u32_m2:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_m2, svuint32x2_t, uint32_t,
+ svst1_u32_x2 (pn8, x0 - svcntw () * 2, z0),
+ svst1 (pn8, x0 - svcntw () * 2, z0))
+
+/*
+** st1_u32_m16:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_m16, svuint32x2_t, uint32_t,
+ svst1_u32_x2 (pn8, x0 - svcntw () * 16, z0),
+ svst1 (pn8, x0 - svcntw () * 16, z0))
+
+/*
+** st1_u32_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_m18, svuint32x2_t, uint32_t,
+ svst1_u32_x2 (pn8, x0 - svcntw () * 18, z0),
+ svst1 (pn8, x0 - svcntw () * 18, z0))
+
+/*
+** st1_u32_z17:
+** mov [^\n]+
+** mov [^\n]+
+** st1w {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_z17, svuint32x2_t, uint32_t,
+ svst1_u32_x2 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_u32_z22:
+** st1w {z22\.s(?: - |, )z23\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_z22, svuint32x2_t, uint32_t,
+ svst1_u32_x2 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_u32_z28:
+** st1w {z28\.s(?: - |, )z29\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_z28, svuint32x2_t, uint32_t,
+ svst1_u32_x2 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_u32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1w {z0\.s(?: - |, )z1\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_pn0, svuint32x2_t, uint32_t,
+ svst1_u32_x2 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_u32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1w {z0\.s(?: - |, )z1\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_pn7, svuint32x2_t, uint32_t,
+ svst1_u32_x2 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_u32_pn15:
+** st1w {z0\.s(?: - |, )z1\.s}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_pn15, svuint32x2_t, uint32_t,
+ svst1_u32_x2 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_u32_0:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_0, svuint32x2_t, uint32_t,
+ svst1_vnum_u32_x2 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u32_1:
+** incb x0
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_1, svuint32x2_t, uint32_t,
+ svst1_vnum_u32_x2 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/*
+** st1_vnum_u32_2:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_2, svuint32x2_t, uint32_t,
+ svst1_vnum_u32_x2 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/*
+** st1_vnum_u32_14:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_14, svuint32x2_t, uint32_t,
+ svst1_vnum_u32_x2 (pn8, x0, 14, z0),
+ svst1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u32_16:
+** incb x0, all, mul #16
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_16, svuint32x2_t, uint32_t,
+ svst1_vnum_u32_x2 (pn8, x0, 16, z0),
+ svst1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u32_m1:
+** decb x0
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_m1, svuint32x2_t, uint32_t,
+ svst1_vnum_u32_x2 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/*
+** st1_vnum_u32_m2:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_m2, svuint32x2_t, uint32_t,
+ svst1_vnum_u32_x2 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/*
+** st1_vnum_u32_m16:
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_m16, svuint32x2_t, uint32_t,
+ svst1_vnum_u32_x2 (pn8, x0, -16, z0),
+ svst1_vnum (pn8, x0, -16, z0))
+
+/*
+** st1_vnum_u32_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_m18, svuint32x2_t, uint32_t,
+ svst1_vnum_u32_x2 (pn8, x0, -18, z0),
+ svst1_vnum (pn8, x0, -18, z0))
+
+/*
+** st1_vnum_u32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_x1, svuint32x2_t, uint32_t,
+ svst1_vnum_u32_x2 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u32_x4.c
new file mode 100644
index 0000000..cdd276f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u32_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_u32_base:
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_base, svuint32x4_t, uint32_t,
+ svst1_u32_x4 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_u32_index:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_index, svuint32x4_t, uint32_t,
+ svst1_u32_x4 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u32_1:
+** incb x0
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_1, svuint32x4_t, uint32_t,
+ svst1_u32_x4 (pn8, x0 + svcntw (), z0),
+ svst1 (pn8, x0 + svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u32_2:
+** incb x0, all, mul #2
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_2, svuint32x4_t, uint32_t,
+ svst1_u32_x4 (pn8, x0 + svcntw () * 2, z0),
+ svst1 (pn8, x0 + svcntw () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u32_3:
+** incb x0, all, mul #3
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_3, svuint32x4_t, uint32_t,
+ svst1_u32_x4 (pn8, x0 + svcntw () * 3, z0),
+ svst1 (pn8, x0 + svcntw () * 3, z0))
+
+/*
+** st1_u32_4:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_4, svuint32x4_t, uint32_t,
+ svst1_u32_x4 (pn8, x0 + svcntw () * 4, z0),
+ svst1 (pn8, x0 + svcntw () * 4, z0))
+
+/*
+** st1_u32_28:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_28, svuint32x4_t, uint32_t,
+ svst1_u32_x4 (pn8, x0 + svcntw () * 28, z0),
+ svst1 (pn8, x0 + svcntw () * 28, z0))
+
+/*
+** st1_u32_32:
+** [^{]*
+** st1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_32, svuint32x4_t, uint32_t,
+ svst1_u32_x4 (pn8, x0 + svcntw () * 32, z0),
+ svst1 (pn8, x0 + svcntw () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u32_m1:
+** decb x0
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_m1, svuint32x4_t, uint32_t,
+ svst1_u32_x4 (pn8, x0 - svcntw (), z0),
+ svst1 (pn8, x0 - svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u32_m2:
+** decb x0, all, mul #2
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_m2, svuint32x4_t, uint32_t,
+ svst1_u32_x4 (pn8, x0 - svcntw () * 2, z0),
+ svst1 (pn8, x0 - svcntw () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u32_m3:
+** decb x0, all, mul #3
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_m3, svuint32x4_t, uint32_t,
+ svst1_u32_x4 (pn8, x0 - svcntw () * 3, z0),
+ svst1 (pn8, x0 - svcntw () * 3, z0))
+
+/*
+** st1_u32_m4:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_m4, svuint32x4_t, uint32_t,
+ svst1_u32_x4 (pn8, x0 - svcntw () * 4, z0),
+ svst1 (pn8, x0 - svcntw () * 4, z0))
+
+/*
+** st1_u32_m32:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_m32, svuint32x4_t, uint32_t,
+ svst1_u32_x4 (pn8, x0 - svcntw () * 32, z0),
+ svst1 (pn8, x0 - svcntw () * 32, z0))
+
+/*
+** st1_u32_m36:
+** [^{]*
+** st1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_m36, svuint32x4_t, uint32_t,
+ svst1_u32_x4 (pn8, x0 - svcntw () * 36, z0),
+ svst1 (pn8, x0 - svcntw () * 36, z0))
+
+/*
+** st1_u32_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1w {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_z17, svuint32x4_t, uint32_t,
+ svst1_u32_x4 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_u32_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1w {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_z22, svuint32x4_t, uint32_t,
+ svst1_u32_x4 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_u32_z28:
+** st1w {z28\.s - z31\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_z28, svuint32x4_t, uint32_t,
+ svst1_u32_x4 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_u32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1w {z0\.s - z3\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_pn0, svuint32x4_t, uint32_t,
+ svst1_u32_x4 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_u32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1w {z0\.s - z3\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_pn7, svuint32x4_t, uint32_t,
+ svst1_u32_x4 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_u32_pn15:
+** st1w {z0\.s - z3\.s}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u32_pn15, svuint32x4_t, uint32_t,
+ svst1_u32_x4 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_u32_0:
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_0, svuint32x4_t, uint32_t,
+ svst1_vnum_u32_x4 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u32_1:
+** incb x0
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_1, svuint32x4_t, uint32_t,
+ svst1_vnum_u32_x4 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u32_2:
+** incb x0, all, mul #2
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_2, svuint32x4_t, uint32_t,
+ svst1_vnum_u32_x4 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u32_3:
+** incb x0, all, mul #3
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_3, svuint32x4_t, uint32_t,
+ svst1_vnum_u32_x4 (pn8, x0, 3, z0),
+ svst1_vnum (pn8, x0, 3, z0))
+
+/*
+** st1_vnum_u32_4:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_4, svuint32x4_t, uint32_t,
+ svst1_vnum_u32_x4 (pn8, x0, 4, z0),
+ svst1_vnum (pn8, x0, 4, z0))
+
+/*
+** st1_vnum_u32_28:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_28, svuint32x4_t, uint32_t,
+ svst1_vnum_u32_x4 (pn8, x0, 28, z0),
+ svst1_vnum (pn8, x0, 28, z0))
+
+/*
+** st1_vnum_u32_32:
+** [^{]*
+** st1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_32, svuint32x4_t, uint32_t,
+ svst1_vnum_u32_x4 (pn8, x0, 32, z0),
+ svst1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u32_m1:
+** decb x0
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_m1, svuint32x4_t, uint32_t,
+ svst1_vnum_u32_x4 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u32_m2:
+** decb x0, all, mul #2
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_m2, svuint32x4_t, uint32_t,
+ svst1_vnum_u32_x4 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u32_m3:
+** decb x0, all, mul #3
+** st1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_m3, svuint32x4_t, uint32_t,
+ svst1_vnum_u32_x4 (pn8, x0, -3, z0),
+ svst1_vnum (pn8, x0, -3, z0))
+
+/*
+** st1_vnum_u32_m4:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_m4, svuint32x4_t, uint32_t,
+ svst1_vnum_u32_x4 (pn8, x0, -4, z0),
+ svst1_vnum (pn8, x0, -4, z0))
+
+/*
+** st1_vnum_u32_m32:
+** st1w {z0\.s - z3\.s}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_m32, svuint32x4_t, uint32_t,
+ svst1_vnum_u32_x4 (pn8, x0, -32, z0),
+ svst1_vnum (pn8, x0, -32, z0))
+
+/*
+** st1_vnum_u32_m36:
+** [^{]*
+** st1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_m36, svuint32x4_t, uint32_t,
+ svst1_vnum_u32_x4 (pn8, x0, -36, z0),
+ svst1_vnum (pn8, x0, -36, z0))
+
+/*
+** st1_vnum_u32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1w {z0\.s - z3\.s}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1w {z0\.s - z3\.s}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u32_x1, svuint32x4_t, uint32_t,
+ svst1_vnum_u32_x4 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u64_x2.c
new file mode 100644
index 0000000..9186626
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u64_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_u64_base:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_base, svuint64x2_t, uint64_t,
+ svst1_u64_x2 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_u64_index:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_index, svuint64x2_t, uint64_t,
+ svst1_u64_x2 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u64_1:
+** incb x0
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_1, svuint64x2_t, uint64_t,
+ svst1_u64_x2 (pn8, x0 + svcntd (), z0),
+ svst1 (pn8, x0 + svcntd (), z0))
+
+/*
+** st1_u64_2:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_2, svuint64x2_t, uint64_t,
+ svst1_u64_x2 (pn8, x0 + svcntd () * 2, z0),
+ svst1 (pn8, x0 + svcntd () * 2, z0))
+
+/*
+** st1_u64_14:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_14, svuint64x2_t, uint64_t,
+ svst1_u64_x2 (pn8, x0 + svcntd () * 14, z0),
+ svst1 (pn8, x0 + svcntd () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u64_16:
+** incb x0, all, mul #16
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_16, svuint64x2_t, uint64_t,
+ svst1_u64_x2 (pn8, x0 + svcntd () * 16, z0),
+ svst1 (pn8, x0 + svcntd () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u64_m1:
+** decb x0
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_m1, svuint64x2_t, uint64_t,
+ svst1_u64_x2 (pn8, x0 - svcntd (), z0),
+ svst1 (pn8, x0 - svcntd (), z0))
+
+/*
+** st1_u64_m2:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_m2, svuint64x2_t, uint64_t,
+ svst1_u64_x2 (pn8, x0 - svcntd () * 2, z0),
+ svst1 (pn8, x0 - svcntd () * 2, z0))
+
+/*
+** st1_u64_m16:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_m16, svuint64x2_t, uint64_t,
+ svst1_u64_x2 (pn8, x0 - svcntd () * 16, z0),
+ svst1 (pn8, x0 - svcntd () * 16, z0))
+
+/*
+** st1_u64_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_m18, svuint64x2_t, uint64_t,
+ svst1_u64_x2 (pn8, x0 - svcntd () * 18, z0),
+ svst1 (pn8, x0 - svcntd () * 18, z0))
+
+/*
+** st1_u64_z17:
+** mov [^\n]+
+** mov [^\n]+
+** st1d {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_z17, svuint64x2_t, uint64_t,
+ svst1_u64_x2 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_u64_z22:
+** st1d {z22\.d(?: - |, )z23\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_z22, svuint64x2_t, uint64_t,
+ svst1_u64_x2 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_u64_z28:
+** st1d {z28\.d(?: - |, )z29\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_z28, svuint64x2_t, uint64_t,
+ svst1_u64_x2 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_u64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1d {z0\.d(?: - |, )z1\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_pn0, svuint64x2_t, uint64_t,
+ svst1_u64_x2 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_u64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1d {z0\.d(?: - |, )z1\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_pn7, svuint64x2_t, uint64_t,
+ svst1_u64_x2 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_u64_pn15:
+** st1d {z0\.d(?: - |, )z1\.d}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_pn15, svuint64x2_t, uint64_t,
+ svst1_u64_x2 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_u64_0:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_0, svuint64x2_t, uint64_t,
+ svst1_vnum_u64_x2 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u64_1:
+** incb x0
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_1, svuint64x2_t, uint64_t,
+ svst1_vnum_u64_x2 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/*
+** st1_vnum_u64_2:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_2, svuint64x2_t, uint64_t,
+ svst1_vnum_u64_x2 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/*
+** st1_vnum_u64_14:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_14, svuint64x2_t, uint64_t,
+ svst1_vnum_u64_x2 (pn8, x0, 14, z0),
+ svst1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u64_16:
+** incb x0, all, mul #16
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_16, svuint64x2_t, uint64_t,
+ svst1_vnum_u64_x2 (pn8, x0, 16, z0),
+ svst1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u64_m1:
+** decb x0
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_m1, svuint64x2_t, uint64_t,
+ svst1_vnum_u64_x2 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/*
+** st1_vnum_u64_m2:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_m2, svuint64x2_t, uint64_t,
+ svst1_vnum_u64_x2 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/*
+** st1_vnum_u64_m16:
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_m16, svuint64x2_t, uint64_t,
+ svst1_vnum_u64_x2 (pn8, x0, -16, z0),
+ svst1_vnum (pn8, x0, -16, z0))
+
+/*
+** st1_vnum_u64_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_m18, svuint64x2_t, uint64_t,
+ svst1_vnum_u64_x2 (pn8, x0, -18, z0),
+ svst1_vnum (pn8, x0, -18, z0))
+
+/*
+** st1_vnum_u64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_x1, svuint64x2_t, uint64_t,
+ svst1_vnum_u64_x2 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u64_x4.c
new file mode 100644
index 0000000..829a5b5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u64_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_u64_base:
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_base, svuint64x4_t, uint64_t,
+ svst1_u64_x4 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_u64_index:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_index, svuint64x4_t, uint64_t,
+ svst1_u64_x4 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u64_1:
+** incb x0
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_1, svuint64x4_t, uint64_t,
+ svst1_u64_x4 (pn8, x0 + svcntd (), z0),
+ svst1 (pn8, x0 + svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u64_2:
+** incb x0, all, mul #2
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_2, svuint64x4_t, uint64_t,
+ svst1_u64_x4 (pn8, x0 + svcntd () * 2, z0),
+ svst1 (pn8, x0 + svcntd () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u64_3:
+** incb x0, all, mul #3
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_3, svuint64x4_t, uint64_t,
+ svst1_u64_x4 (pn8, x0 + svcntd () * 3, z0),
+ svst1 (pn8, x0 + svcntd () * 3, z0))
+
+/*
+** st1_u64_4:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_4, svuint64x4_t, uint64_t,
+ svst1_u64_x4 (pn8, x0 + svcntd () * 4, z0),
+ svst1 (pn8, x0 + svcntd () * 4, z0))
+
+/*
+** st1_u64_28:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_28, svuint64x4_t, uint64_t,
+ svst1_u64_x4 (pn8, x0 + svcntd () * 28, z0),
+ svst1 (pn8, x0 + svcntd () * 28, z0))
+
+/*
+** st1_u64_32:
+** [^{]*
+** st1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_32, svuint64x4_t, uint64_t,
+ svst1_u64_x4 (pn8, x0 + svcntd () * 32, z0),
+ svst1 (pn8, x0 + svcntd () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u64_m1:
+** decb x0
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_m1, svuint64x4_t, uint64_t,
+ svst1_u64_x4 (pn8, x0 - svcntd (), z0),
+ svst1 (pn8, x0 - svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u64_m2:
+** decb x0, all, mul #2
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_m2, svuint64x4_t, uint64_t,
+ svst1_u64_x4 (pn8, x0 - svcntd () * 2, z0),
+ svst1 (pn8, x0 - svcntd () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u64_m3:
+** decb x0, all, mul #3
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_m3, svuint64x4_t, uint64_t,
+ svst1_u64_x4 (pn8, x0 - svcntd () * 3, z0),
+ svst1 (pn8, x0 - svcntd () * 3, z0))
+
+/*
+** st1_u64_m4:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_m4, svuint64x4_t, uint64_t,
+ svst1_u64_x4 (pn8, x0 - svcntd () * 4, z0),
+ svst1 (pn8, x0 - svcntd () * 4, z0))
+
+/*
+** st1_u64_m32:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_m32, svuint64x4_t, uint64_t,
+ svst1_u64_x4 (pn8, x0 - svcntd () * 32, z0),
+ svst1 (pn8, x0 - svcntd () * 32, z0))
+
+/*
+** st1_u64_m36:
+** [^{]*
+** st1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_m36, svuint64x4_t, uint64_t,
+ svst1_u64_x4 (pn8, x0 - svcntd () * 36, z0),
+ svst1 (pn8, x0 - svcntd () * 36, z0))
+
+/*
+** st1_u64_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1d {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_z17, svuint64x4_t, uint64_t,
+ svst1_u64_x4 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_u64_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1d {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_z22, svuint64x4_t, uint64_t,
+ svst1_u64_x4 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_u64_z28:
+** st1d {z28\.d - z31\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_z28, svuint64x4_t, uint64_t,
+ svst1_u64_x4 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_u64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1d {z0\.d - z3\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_pn0, svuint64x4_t, uint64_t,
+ svst1_u64_x4 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_u64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1d {z0\.d - z3\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_pn7, svuint64x4_t, uint64_t,
+ svst1_u64_x4 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_u64_pn15:
+** st1d {z0\.d - z3\.d}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u64_pn15, svuint64x4_t, uint64_t,
+ svst1_u64_x4 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_u64_0:
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_0, svuint64x4_t, uint64_t,
+ svst1_vnum_u64_x4 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u64_1:
+** incb x0
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_1, svuint64x4_t, uint64_t,
+ svst1_vnum_u64_x4 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u64_2:
+** incb x0, all, mul #2
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_2, svuint64x4_t, uint64_t,
+ svst1_vnum_u64_x4 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u64_3:
+** incb x0, all, mul #3
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_3, svuint64x4_t, uint64_t,
+ svst1_vnum_u64_x4 (pn8, x0, 3, z0),
+ svst1_vnum (pn8, x0, 3, z0))
+
+/*
+** st1_vnum_u64_4:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_4, svuint64x4_t, uint64_t,
+ svst1_vnum_u64_x4 (pn8, x0, 4, z0),
+ svst1_vnum (pn8, x0, 4, z0))
+
+/*
+** st1_vnum_u64_28:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_28, svuint64x4_t, uint64_t,
+ svst1_vnum_u64_x4 (pn8, x0, 28, z0),
+ svst1_vnum (pn8, x0, 28, z0))
+
+/*
+** st1_vnum_u64_32:
+** [^{]*
+** st1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_32, svuint64x4_t, uint64_t,
+ svst1_vnum_u64_x4 (pn8, x0, 32, z0),
+ svst1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u64_m1:
+** decb x0
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_m1, svuint64x4_t, uint64_t,
+ svst1_vnum_u64_x4 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u64_m2:
+** decb x0, all, mul #2
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_m2, svuint64x4_t, uint64_t,
+ svst1_vnum_u64_x4 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u64_m3:
+** decb x0, all, mul #3
+** st1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_m3, svuint64x4_t, uint64_t,
+ svst1_vnum_u64_x4 (pn8, x0, -3, z0),
+ svst1_vnum (pn8, x0, -3, z0))
+
+/*
+** st1_vnum_u64_m4:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_m4, svuint64x4_t, uint64_t,
+ svst1_vnum_u64_x4 (pn8, x0, -4, z0),
+ svst1_vnum (pn8, x0, -4, z0))
+
+/*
+** st1_vnum_u64_m32:
+** st1d {z0\.d - z3\.d}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_m32, svuint64x4_t, uint64_t,
+ svst1_vnum_u64_x4 (pn8, x0, -32, z0),
+ svst1_vnum (pn8, x0, -32, z0))
+
+/*
+** st1_vnum_u64_m36:
+** [^{]*
+** st1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_m36, svuint64x4_t, uint64_t,
+ svst1_vnum_u64_x4 (pn8, x0, -36, z0),
+ svst1_vnum (pn8, x0, -36, z0))
+
+/*
+** st1_vnum_u64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1d {z0\.d - z3\.d}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1d {z0\.d - z3\.d}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u64_x1, svuint64x4_t, uint64_t,
+ svst1_vnum_u64_x4 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u8_x2.c
new file mode 100644
index 0000000..74dae18
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u8_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_u8_base:
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_base, svuint8x2_t, uint8_t,
+ svst1_u8_x2 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_u8_index:
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, x1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_index, svuint8x2_t, uint8_t,
+ svst1_u8_x2 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u8_1:
+** incb x0
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_1, svuint8x2_t, uint8_t,
+ svst1_u8_x2 (pn8, x0 + svcntb (), z0),
+ svst1 (pn8, x0 + svcntb (), z0))
+
+/*
+** st1_u8_2:
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_2, svuint8x2_t, uint8_t,
+ svst1_u8_x2 (pn8, x0 + svcntb () * 2, z0),
+ svst1 (pn8, x0 + svcntb () * 2, z0))
+
+/*
+** st1_u8_14:
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_14, svuint8x2_t, uint8_t,
+ svst1_u8_x2 (pn8, x0 + svcntb () * 14, z0),
+ svst1 (pn8, x0 + svcntb () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u8_16:
+** incb x0, all, mul #16
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_16, svuint8x2_t, uint8_t,
+ svst1_u8_x2 (pn8, x0 + svcntb () * 16, z0),
+ svst1 (pn8, x0 + svcntb () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u8_m1:
+** decb x0
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_m1, svuint8x2_t, uint8_t,
+ svst1_u8_x2 (pn8, x0 - svcntb (), z0),
+ svst1 (pn8, x0 - svcntb (), z0))
+
+/*
+** st1_u8_m2:
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_m2, svuint8x2_t, uint8_t,
+ svst1_u8_x2 (pn8, x0 - svcntb () * 2, z0),
+ svst1 (pn8, x0 - svcntb () * 2, z0))
+
+/*
+** st1_u8_m16:
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_m16, svuint8x2_t, uint8_t,
+ svst1_u8_x2 (pn8, x0 - svcntb () * 16, z0),
+ svst1 (pn8, x0 - svcntb () * 16, z0))
+
+/*
+** st1_u8_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_m18, svuint8x2_t, uint8_t,
+ svst1_u8_x2 (pn8, x0 - svcntb () * 18, z0),
+ svst1 (pn8, x0 - svcntb () * 18, z0))
+
+/*
+** st1_u8_z17:
+** mov [^\n]+
+** mov [^\n]+
+** st1b {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_z17, svuint8x2_t, uint8_t,
+ svst1_u8_x2 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_u8_z22:
+** st1b {z22\.b(?: - |, )z23\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_z22, svuint8x2_t, uint8_t,
+ svst1_u8_x2 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_u8_z28:
+** st1b {z28\.b(?: - |, )z29\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_z28, svuint8x2_t, uint8_t,
+ svst1_u8_x2 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_u8_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1b {z0\.b(?: - |, )z1\.b}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_pn0, svuint8x2_t, uint8_t,
+ svst1_u8_x2 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_u8_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1b {z0\.b(?: - |, )z1\.b}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_pn7, svuint8x2_t, uint8_t,
+ svst1_u8_x2 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_u8_pn15:
+** st1b {z0\.b(?: - |, )z1\.b}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_pn15, svuint8x2_t, uint8_t,
+ svst1_u8_x2 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_u8_0:
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_0, svuint8x2_t, uint8_t,
+ svst1_vnum_u8_x2 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u8_1:
+** incb x0
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_1, svuint8x2_t, uint8_t,
+ svst1_vnum_u8_x2 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/*
+** st1_vnum_u8_2:
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_2, svuint8x2_t, uint8_t,
+ svst1_vnum_u8_x2 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/*
+** st1_vnum_u8_14:
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_14, svuint8x2_t, uint8_t,
+ svst1_vnum_u8_x2 (pn8, x0, 14, z0),
+ svst1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u8_16:
+** incb x0, all, mul #16
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_16, svuint8x2_t, uint8_t,
+ svst1_vnum_u8_x2 (pn8, x0, 16, z0),
+ svst1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u8_m1:
+** decb x0
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_m1, svuint8x2_t, uint8_t,
+ svst1_vnum_u8_x2 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/*
+** st1_vnum_u8_m2:
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_m2, svuint8x2_t, uint8_t,
+ svst1_vnum_u8_x2 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/*
+** st1_vnum_u8_m16:
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_m16, svuint8x2_t, uint8_t,
+ svst1_vnum_u8_x2 (pn8, x0, -16, z0),
+ svst1_vnum (pn8, x0, -16, z0))
+
+/*
+** st1_vnum_u8_m18:
+** addvl (x[0-9]+), x0, #-18
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_m18, svuint8x2_t, uint8_t,
+ svst1_vnum_u8_x2 (pn8, x0, -18, z0),
+ svst1_vnum (pn8, x0, -18, z0))
+
+/*
+** st1_vnum_u8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_x1, svuint8x2_t, uint8_t,
+ svst1_vnum_u8_x2 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u8_x4.c
new file mode 100644
index 0000000..7d22d18
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/st1_u8_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** st1_u8_base:
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_base, svuint8x4_t, uint8_t,
+ svst1_u8_x4 (pn8, x0, z0),
+ svst1 (pn8, x0, z0))
+
+/*
+** st1_u8_index:
+** st1b {z0\.b - z3\.b}, pn8, \[x0, x1\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_index, svuint8x4_t, uint8_t,
+ svst1_u8_x4 (pn8, x0 + x1, z0),
+ svst1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u8_1:
+** incb x0
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_1, svuint8x4_t, uint8_t,
+ svst1_u8_x4 (pn8, x0 + svcntb (), z0),
+ svst1 (pn8, x0 + svcntb (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u8_2:
+** incb x0, all, mul #2
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_2, svuint8x4_t, uint8_t,
+ svst1_u8_x4 (pn8, x0 + svcntb () * 2, z0),
+ svst1 (pn8, x0 + svcntb () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u8_3:
+** incb x0, all, mul #3
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_3, svuint8x4_t, uint8_t,
+ svst1_u8_x4 (pn8, x0 + svcntb () * 3, z0),
+ svst1 (pn8, x0 + svcntb () * 3, z0))
+
+/*
+** st1_u8_4:
+** st1b {z0\.b - z3\.b}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_4, svuint8x4_t, uint8_t,
+ svst1_u8_x4 (pn8, x0 + svcntb () * 4, z0),
+ svst1 (pn8, x0 + svcntb () * 4, z0))
+
+/*
+** st1_u8_28:
+** st1b {z0\.b - z3\.b}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_28, svuint8x4_t, uint8_t,
+ svst1_u8_x4 (pn8, x0 + svcntb () * 28, z0),
+ svst1 (pn8, x0 + svcntb () * 28, z0))
+
+/*
+** st1_u8_32:
+** [^{]*
+** st1b {z0\.b - z3\.b}, pn8, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_32, svuint8x4_t, uint8_t,
+ svst1_u8_x4 (pn8, x0 + svcntb () * 32, z0),
+ svst1 (pn8, x0 + svcntb () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u8_m1:
+** decb x0
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_m1, svuint8x4_t, uint8_t,
+ svst1_u8_x4 (pn8, x0 - svcntb (), z0),
+ svst1 (pn8, x0 - svcntb (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u8_m2:
+** decb x0, all, mul #2
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_m2, svuint8x4_t, uint8_t,
+ svst1_u8_x4 (pn8, x0 - svcntb () * 2, z0),
+ svst1 (pn8, x0 - svcntb () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_u8_m3:
+** decb x0, all, mul #3
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_m3, svuint8x4_t, uint8_t,
+ svst1_u8_x4 (pn8, x0 - svcntb () * 3, z0),
+ svst1 (pn8, x0 - svcntb () * 3, z0))
+
+/*
+** st1_u8_m4:
+** st1b {z0\.b - z3\.b}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_m4, svuint8x4_t, uint8_t,
+ svst1_u8_x4 (pn8, x0 - svcntb () * 4, z0),
+ svst1 (pn8, x0 - svcntb () * 4, z0))
+
+/*
+** st1_u8_m32:
+** st1b {z0\.b - z3\.b}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_m32, svuint8x4_t, uint8_t,
+ svst1_u8_x4 (pn8, x0 - svcntb () * 32, z0),
+ svst1 (pn8, x0 - svcntb () * 32, z0))
+
+/*
+** st1_u8_m36:
+** [^{]*
+** st1b {z0\.b - z3\.b}, pn8, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_m36, svuint8x4_t, uint8_t,
+ svst1_u8_x4 (pn8, x0 - svcntb () * 36, z0),
+ svst1 (pn8, x0 - svcntb () * 36, z0))
+
+/*
+** st1_u8_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1b {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_z17, svuint8x4_t, uint8_t,
+ svst1_u8_x4 (pn8, x0, z17),
+ svst1 (pn8, x0, z17))
+
+/*
+** st1_u8_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** st1b {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_z22, svuint8x4_t, uint8_t,
+ svst1_u8_x4 (pn8, x0, z22),
+ svst1 (pn8, x0, z22))
+
+/*
+** st1_u8_z28:
+** st1b {z28\.b - z31\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_z28, svuint8x4_t, uint8_t,
+ svst1_u8_x4 (pn8, x0, z28),
+ svst1 (pn8, x0, z28))
+
+/*
+** st1_u8_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** st1b {z0\.b - z3\.b}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_pn0, svuint8x4_t, uint8_t,
+ svst1_u8_x4 (pn0, x0, z0),
+ svst1 (pn0, x0, z0))
+
+/*
+** st1_u8_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** st1b {z0\.b - z3\.b}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_pn7, svuint8x4_t, uint8_t,
+ svst1_u8_x4 (pn7, x0, z0),
+ svst1 (pn7, x0, z0))
+
+/*
+** st1_u8_pn15:
+** st1b {z0\.b - z3\.b}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_u8_pn15, svuint8x4_t, uint8_t,
+ svst1_u8_x4 (pn15, x0, z0),
+ svst1 (pn15, x0, z0))
+
+/*
+** st1_vnum_u8_0:
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_0, svuint8x4_t, uint8_t,
+ svst1_vnum_u8_x4 (pn8, x0, 0, z0),
+ svst1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u8_1:
+** incb x0
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_1, svuint8x4_t, uint8_t,
+ svst1_vnum_u8_x4 (pn8, x0, 1, z0),
+ svst1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u8_2:
+** incb x0, all, mul #2
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_2, svuint8x4_t, uint8_t,
+ svst1_vnum_u8_x4 (pn8, x0, 2, z0),
+ svst1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u8_3:
+** incb x0, all, mul #3
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_3, svuint8x4_t, uint8_t,
+ svst1_vnum_u8_x4 (pn8, x0, 3, z0),
+ svst1_vnum (pn8, x0, 3, z0))
+
+/*
+** st1_vnum_u8_4:
+** st1b {z0\.b - z3\.b}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_4, svuint8x4_t, uint8_t,
+ svst1_vnum_u8_x4 (pn8, x0, 4, z0),
+ svst1_vnum (pn8, x0, 4, z0))
+
+/*
+** st1_vnum_u8_28:
+** st1b {z0\.b - z3\.b}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_28, svuint8x4_t, uint8_t,
+ svst1_vnum_u8_x4 (pn8, x0, 28, z0),
+ svst1_vnum (pn8, x0, 28, z0))
+
+/*
+** st1_vnum_u8_32:
+** [^{]*
+** st1b {z0\.b - z3\.b}, pn8, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_32, svuint8x4_t, uint8_t,
+ svst1_vnum_u8_x4 (pn8, x0, 32, z0),
+ svst1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u8_m1:
+** decb x0
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_m1, svuint8x4_t, uint8_t,
+ svst1_vnum_u8_x4 (pn8, x0, -1, z0),
+ svst1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u8_m2:
+** decb x0, all, mul #2
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_m2, svuint8x4_t, uint8_t,
+ svst1_vnum_u8_x4 (pn8, x0, -2, z0),
+ svst1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1_vnum_u8_m3:
+** decb x0, all, mul #3
+** st1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_m3, svuint8x4_t, uint8_t,
+ svst1_vnum_u8_x4 (pn8, x0, -3, z0),
+ svst1_vnum (pn8, x0, -3, z0))
+
+/*
+** st1_vnum_u8_m4:
+** st1b {z0\.b - z3\.b}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_m4, svuint8x4_t, uint8_t,
+ svst1_vnum_u8_x4 (pn8, x0, -4, z0),
+ svst1_vnum (pn8, x0, -4, z0))
+
+/*
+** st1_vnum_u8_m32:
+** st1b {z0\.b - z3\.b}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_m32, svuint8x4_t, uint8_t,
+ svst1_vnum_u8_x4 (pn8, x0, -32, z0),
+ svst1_vnum (pn8, x0, -32, z0))
+
+/*
+** st1_vnum_u8_m36:
+** [^{]*
+** st1b {z0\.b - z3\.b}, pn8, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_m36, svuint8x4_t, uint8_t,
+ svst1_vnum_u8_x4 (pn8, x0, -36, z0),
+ svst1_vnum (pn8, x0, -36, z0))
+
+/*
+** st1_vnum_u8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st1b {z0\.b - z3\.b}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st1b {z0\.b - z3\.b}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (st1_vnum_u8_x1, svuint8x4_t, uint8_t,
+ svst1_vnum_u8_x4 (pn8, x0, x1, z0),
+ svst1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_bf16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_bf16_x2.c
new file mode 100644
index 0000000..d2a3718
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_bf16_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_bf16_base:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_base, svbfloat16x2_t, bfloat16_t,
+ svstnt1_bf16_x2 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_bf16_index:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_index, svbfloat16x2_t, bfloat16_t,
+ svstnt1_bf16_x2 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_bf16_1:
+** incb x0
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_1, svbfloat16x2_t, bfloat16_t,
+ svstnt1_bf16_x2 (pn8, x0 + svcnth (), z0),
+ svstnt1 (pn8, x0 + svcnth (), z0))
+
+/*
+** stnt1_bf16_2:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_2, svbfloat16x2_t, bfloat16_t,
+ svstnt1_bf16_x2 (pn8, x0 + svcnth () * 2, z0),
+ svstnt1 (pn8, x0 + svcnth () * 2, z0))
+
+/*
+** stnt1_bf16_14:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_14, svbfloat16x2_t, bfloat16_t,
+ svstnt1_bf16_x2 (pn8, x0 + svcnth () * 14, z0),
+ svstnt1 (pn8, x0 + svcnth () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_bf16_16:
+** incb x0, all, mul #16
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_16, svbfloat16x2_t, bfloat16_t,
+ svstnt1_bf16_x2 (pn8, x0 + svcnth () * 16, z0),
+ svstnt1 (pn8, x0 + svcnth () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_bf16_m1:
+** decb x0
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_m1, svbfloat16x2_t, bfloat16_t,
+ svstnt1_bf16_x2 (pn8, x0 - svcnth (), z0),
+ svstnt1 (pn8, x0 - svcnth (), z0))
+
+/*
+** stnt1_bf16_m2:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_m2, svbfloat16x2_t, bfloat16_t,
+ svstnt1_bf16_x2 (pn8, x0 - svcnth () * 2, z0),
+ svstnt1 (pn8, x0 - svcnth () * 2, z0))
+
+/*
+** stnt1_bf16_m16:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_m16, svbfloat16x2_t, bfloat16_t,
+ svstnt1_bf16_x2 (pn8, x0 - svcnth () * 16, z0),
+ svstnt1 (pn8, x0 - svcnth () * 16, z0))
+
+/*
+** stnt1_bf16_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_m18, svbfloat16x2_t, bfloat16_t,
+ svstnt1_bf16_x2 (pn8, x0 - svcnth () * 18, z0),
+ svstnt1 (pn8, x0 - svcnth () * 18, z0))
+
+/*
+** stnt1_bf16_z17:
+** mov [^\n]+
+** mov [^\n]+
+** stnt1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_z17, svbfloat16x2_t, bfloat16_t,
+ svstnt1_bf16_x2 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_bf16_z22:
+** stnt1h {z22\.h(?: - |, )z23\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_z22, svbfloat16x2_t, bfloat16_t,
+ svstnt1_bf16_x2 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_bf16_z28:
+** stnt1h {z28\.h(?: - |, )z29\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_z28, svbfloat16x2_t, bfloat16_t,
+ svstnt1_bf16_x2 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_bf16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_pn0, svbfloat16x2_t, bfloat16_t,
+ svstnt1_bf16_x2 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_bf16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_pn7, svbfloat16x2_t, bfloat16_t,
+ svstnt1_bf16_x2 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_bf16_pn15:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_pn15, svbfloat16x2_t, bfloat16_t,
+ svstnt1_bf16_x2 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_bf16_0:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_0, svbfloat16x2_t, bfloat16_t,
+ svstnt1_vnum_bf16_x2 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_bf16_1:
+** incb x0
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_1, svbfloat16x2_t, bfloat16_t,
+ svstnt1_vnum_bf16_x2 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/*
+** stnt1_vnum_bf16_2:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_2, svbfloat16x2_t, bfloat16_t,
+ svstnt1_vnum_bf16_x2 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/*
+** stnt1_vnum_bf16_14:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_14, svbfloat16x2_t, bfloat16_t,
+ svstnt1_vnum_bf16_x2 (pn8, x0, 14, z0),
+ svstnt1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_bf16_16:
+** incb x0, all, mul #16
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_16, svbfloat16x2_t, bfloat16_t,
+ svstnt1_vnum_bf16_x2 (pn8, x0, 16, z0),
+ svstnt1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_bf16_m1:
+** decb x0
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_m1, svbfloat16x2_t, bfloat16_t,
+ svstnt1_vnum_bf16_x2 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/*
+** stnt1_vnum_bf16_m2:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_m2, svbfloat16x2_t, bfloat16_t,
+ svstnt1_vnum_bf16_x2 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/*
+** stnt1_vnum_bf16_m16:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_m16, svbfloat16x2_t, bfloat16_t,
+ svstnt1_vnum_bf16_x2 (pn8, x0, -16, z0),
+ svstnt1_vnum (pn8, x0, -16, z0))
+
+/*
+** stnt1_vnum_bf16_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_m18, svbfloat16x2_t, bfloat16_t,
+ svstnt1_vnum_bf16_x2 (pn8, x0, -18, z0),
+ svstnt1_vnum (pn8, x0, -18, z0))
+
+/*
+** stnt1_vnum_bf16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_x1, svbfloat16x2_t, bfloat16_t,
+ svstnt1_vnum_bf16_x2 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_bf16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_bf16_x4.c
new file mode 100644
index 0000000..4db1e8f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_bf16_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_bf16_base:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_base, svbfloat16x4_t, bfloat16_t,
+ svstnt1_bf16_x4 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_bf16_index:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_index, svbfloat16x4_t, bfloat16_t,
+ svstnt1_bf16_x4 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_bf16_1:
+** incb x0
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_1, svbfloat16x4_t, bfloat16_t,
+ svstnt1_bf16_x4 (pn8, x0 + svcnth (), z0),
+ svstnt1 (pn8, x0 + svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_bf16_2:
+** incb x0, all, mul #2
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_2, svbfloat16x4_t, bfloat16_t,
+ svstnt1_bf16_x4 (pn8, x0 + svcnth () * 2, z0),
+ svstnt1 (pn8, x0 + svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_bf16_3:
+** incb x0, all, mul #3
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_3, svbfloat16x4_t, bfloat16_t,
+ svstnt1_bf16_x4 (pn8, x0 + svcnth () * 3, z0),
+ svstnt1 (pn8, x0 + svcnth () * 3, z0))
+
+/*
+** stnt1_bf16_4:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_4, svbfloat16x4_t, bfloat16_t,
+ svstnt1_bf16_x4 (pn8, x0 + svcnth () * 4, z0),
+ svstnt1 (pn8, x0 + svcnth () * 4, z0))
+
+/*
+** stnt1_bf16_28:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_28, svbfloat16x4_t, bfloat16_t,
+ svstnt1_bf16_x4 (pn8, x0 + svcnth () * 28, z0),
+ svstnt1 (pn8, x0 + svcnth () * 28, z0))
+
+/*
+** stnt1_bf16_32:
+** [^{]*
+** stnt1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_32, svbfloat16x4_t, bfloat16_t,
+ svstnt1_bf16_x4 (pn8, x0 + svcnth () * 32, z0),
+ svstnt1 (pn8, x0 + svcnth () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_bf16_m1:
+** decb x0
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_m1, svbfloat16x4_t, bfloat16_t,
+ svstnt1_bf16_x4 (pn8, x0 - svcnth (), z0),
+ svstnt1 (pn8, x0 - svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_bf16_m2:
+** decb x0, all, mul #2
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_m2, svbfloat16x4_t, bfloat16_t,
+ svstnt1_bf16_x4 (pn8, x0 - svcnth () * 2, z0),
+ svstnt1 (pn8, x0 - svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_bf16_m3:
+** decb x0, all, mul #3
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_m3, svbfloat16x4_t, bfloat16_t,
+ svstnt1_bf16_x4 (pn8, x0 - svcnth () * 3, z0),
+ svstnt1 (pn8, x0 - svcnth () * 3, z0))
+
+/*
+** stnt1_bf16_m4:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_m4, svbfloat16x4_t, bfloat16_t,
+ svstnt1_bf16_x4 (pn8, x0 - svcnth () * 4, z0),
+ svstnt1 (pn8, x0 - svcnth () * 4, z0))
+
+/*
+** stnt1_bf16_m32:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_m32, svbfloat16x4_t, bfloat16_t,
+ svstnt1_bf16_x4 (pn8, x0 - svcnth () * 32, z0),
+ svstnt1 (pn8, x0 - svcnth () * 32, z0))
+
+/*
+** stnt1_bf16_m36:
+** [^{]*
+** stnt1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_m36, svbfloat16x4_t, bfloat16_t,
+ svstnt1_bf16_x4 (pn8, x0 - svcnth () * 36, z0),
+ svstnt1 (pn8, x0 - svcnth () * 36, z0))
+
+/*
+** stnt1_bf16_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_z17, svbfloat16x4_t, bfloat16_t,
+ svstnt1_bf16_x4 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_bf16_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_z22, svbfloat16x4_t, bfloat16_t,
+ svstnt1_bf16_x4 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_bf16_z28:
+** stnt1h {z28\.h - z31\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_z28, svbfloat16x4_t, bfloat16_t,
+ svstnt1_bf16_x4 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_bf16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1h {z0\.h - z3\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_pn0, svbfloat16x4_t, bfloat16_t,
+ svstnt1_bf16_x4 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_bf16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1h {z0\.h - z3\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_pn7, svbfloat16x4_t, bfloat16_t,
+ svstnt1_bf16_x4 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_bf16_pn15:
+** stnt1h {z0\.h - z3\.h}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_bf16_pn15, svbfloat16x4_t, bfloat16_t,
+ svstnt1_bf16_x4 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_bf16_0:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_0, svbfloat16x4_t, bfloat16_t,
+ svstnt1_vnum_bf16_x4 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_bf16_1:
+** incb x0
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_1, svbfloat16x4_t, bfloat16_t,
+ svstnt1_vnum_bf16_x4 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_bf16_2:
+** incb x0, all, mul #2
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_2, svbfloat16x4_t, bfloat16_t,
+ svstnt1_vnum_bf16_x4 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_bf16_3:
+** incb x0, all, mul #3
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_3, svbfloat16x4_t, bfloat16_t,
+ svstnt1_vnum_bf16_x4 (pn8, x0, 3, z0),
+ svstnt1_vnum (pn8, x0, 3, z0))
+
+/*
+** stnt1_vnum_bf16_4:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_4, svbfloat16x4_t, bfloat16_t,
+ svstnt1_vnum_bf16_x4 (pn8, x0, 4, z0),
+ svstnt1_vnum (pn8, x0, 4, z0))
+
+/*
+** stnt1_vnum_bf16_28:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_28, svbfloat16x4_t, bfloat16_t,
+ svstnt1_vnum_bf16_x4 (pn8, x0, 28, z0),
+ svstnt1_vnum (pn8, x0, 28, z0))
+
+/*
+** stnt1_vnum_bf16_32:
+** [^{]*
+** stnt1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_32, svbfloat16x4_t, bfloat16_t,
+ svstnt1_vnum_bf16_x4 (pn8, x0, 32, z0),
+ svstnt1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_bf16_m1:
+** decb x0
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_m1, svbfloat16x4_t, bfloat16_t,
+ svstnt1_vnum_bf16_x4 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_bf16_m2:
+** decb x0, all, mul #2
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_m2, svbfloat16x4_t, bfloat16_t,
+ svstnt1_vnum_bf16_x4 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_bf16_m3:
+** decb x0, all, mul #3
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_m3, svbfloat16x4_t, bfloat16_t,
+ svstnt1_vnum_bf16_x4 (pn8, x0, -3, z0),
+ svstnt1_vnum (pn8, x0, -3, z0))
+
+/*
+** stnt1_vnum_bf16_m4:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_m4, svbfloat16x4_t, bfloat16_t,
+ svstnt1_vnum_bf16_x4 (pn8, x0, -4, z0),
+ svstnt1_vnum (pn8, x0, -4, z0))
+
+/*
+** stnt1_vnum_bf16_m32:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_m32, svbfloat16x4_t, bfloat16_t,
+ svstnt1_vnum_bf16_x4 (pn8, x0, -32, z0),
+ svstnt1_vnum (pn8, x0, -32, z0))
+
+/*
+** stnt1_vnum_bf16_m36:
+** [^{]*
+** stnt1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_m36, svbfloat16x4_t, bfloat16_t,
+ svstnt1_vnum_bf16_x4 (pn8, x0, -36, z0),
+ svstnt1_vnum (pn8, x0, -36, z0))
+
+/*
+** stnt1_vnum_bf16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1h {z0\.h - z3\.h}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_bf16_x1, svbfloat16x4_t, bfloat16_t,
+ svstnt1_vnum_bf16_x4 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f16_x2.c
new file mode 100644
index 0000000..c3bfab7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f16_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_f16_base:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_base, svfloat16x2_t, float16_t,
+ svstnt1_f16_x2 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_f16_index:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_index, svfloat16x2_t, float16_t,
+ svstnt1_f16_x2 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f16_1:
+** incb x0
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_1, svfloat16x2_t, float16_t,
+ svstnt1_f16_x2 (pn8, x0 + svcnth (), z0),
+ svstnt1 (pn8, x0 + svcnth (), z0))
+
+/*
+** stnt1_f16_2:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_2, svfloat16x2_t, float16_t,
+ svstnt1_f16_x2 (pn8, x0 + svcnth () * 2, z0),
+ svstnt1 (pn8, x0 + svcnth () * 2, z0))
+
+/*
+** stnt1_f16_14:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_14, svfloat16x2_t, float16_t,
+ svstnt1_f16_x2 (pn8, x0 + svcnth () * 14, z0),
+ svstnt1 (pn8, x0 + svcnth () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f16_16:
+** incb x0, all, mul #16
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_16, svfloat16x2_t, float16_t,
+ svstnt1_f16_x2 (pn8, x0 + svcnth () * 16, z0),
+ svstnt1 (pn8, x0 + svcnth () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f16_m1:
+** decb x0
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_m1, svfloat16x2_t, float16_t,
+ svstnt1_f16_x2 (pn8, x0 - svcnth (), z0),
+ svstnt1 (pn8, x0 - svcnth (), z0))
+
+/*
+** stnt1_f16_m2:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_m2, svfloat16x2_t, float16_t,
+ svstnt1_f16_x2 (pn8, x0 - svcnth () * 2, z0),
+ svstnt1 (pn8, x0 - svcnth () * 2, z0))
+
+/*
+** stnt1_f16_m16:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_m16, svfloat16x2_t, float16_t,
+ svstnt1_f16_x2 (pn8, x0 - svcnth () * 16, z0),
+ svstnt1 (pn8, x0 - svcnth () * 16, z0))
+
+/*
+** stnt1_f16_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_m18, svfloat16x2_t, float16_t,
+ svstnt1_f16_x2 (pn8, x0 - svcnth () * 18, z0),
+ svstnt1 (pn8, x0 - svcnth () * 18, z0))
+
+/*
+** stnt1_f16_z17:
+** mov [^\n]+
+** mov [^\n]+
+** stnt1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_z17, svfloat16x2_t, float16_t,
+ svstnt1_f16_x2 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_f16_z22:
+** stnt1h {z22\.h(?: - |, )z23\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_z22, svfloat16x2_t, float16_t,
+ svstnt1_f16_x2 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_f16_z28:
+** stnt1h {z28\.h(?: - |, )z29\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_z28, svfloat16x2_t, float16_t,
+ svstnt1_f16_x2 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_f16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_pn0, svfloat16x2_t, float16_t,
+ svstnt1_f16_x2 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_f16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_pn7, svfloat16x2_t, float16_t,
+ svstnt1_f16_x2 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_f16_pn15:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_pn15, svfloat16x2_t, float16_t,
+ svstnt1_f16_x2 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_f16_0:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_0, svfloat16x2_t, float16_t,
+ svstnt1_vnum_f16_x2 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f16_1:
+** incb x0
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_1, svfloat16x2_t, float16_t,
+ svstnt1_vnum_f16_x2 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/*
+** stnt1_vnum_f16_2:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_2, svfloat16x2_t, float16_t,
+ svstnt1_vnum_f16_x2 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/*
+** stnt1_vnum_f16_14:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_14, svfloat16x2_t, float16_t,
+ svstnt1_vnum_f16_x2 (pn8, x0, 14, z0),
+ svstnt1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f16_16:
+** incb x0, all, mul #16
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_16, svfloat16x2_t, float16_t,
+ svstnt1_vnum_f16_x2 (pn8, x0, 16, z0),
+ svstnt1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f16_m1:
+** decb x0
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_m1, svfloat16x2_t, float16_t,
+ svstnt1_vnum_f16_x2 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/*
+** stnt1_vnum_f16_m2:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_m2, svfloat16x2_t, float16_t,
+ svstnt1_vnum_f16_x2 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/*
+** stnt1_vnum_f16_m16:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_m16, svfloat16x2_t, float16_t,
+ svstnt1_vnum_f16_x2 (pn8, x0, -16, z0),
+ svstnt1_vnum (pn8, x0, -16, z0))
+
+/*
+** stnt1_vnum_f16_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_m18, svfloat16x2_t, float16_t,
+ svstnt1_vnum_f16_x2 (pn8, x0, -18, z0),
+ svstnt1_vnum (pn8, x0, -18, z0))
+
+/*
+** stnt1_vnum_f16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_x1, svfloat16x2_t, float16_t,
+ svstnt1_vnum_f16_x2 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f16_x4.c
new file mode 100644
index 0000000..6ccdece
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f16_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_f16_base:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_base, svfloat16x4_t, float16_t,
+ svstnt1_f16_x4 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_f16_index:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_index, svfloat16x4_t, float16_t,
+ svstnt1_f16_x4 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f16_1:
+** incb x0
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_1, svfloat16x4_t, float16_t,
+ svstnt1_f16_x4 (pn8, x0 + svcnth (), z0),
+ svstnt1 (pn8, x0 + svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f16_2:
+** incb x0, all, mul #2
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_2, svfloat16x4_t, float16_t,
+ svstnt1_f16_x4 (pn8, x0 + svcnth () * 2, z0),
+ svstnt1 (pn8, x0 + svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f16_3:
+** incb x0, all, mul #3
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_3, svfloat16x4_t, float16_t,
+ svstnt1_f16_x4 (pn8, x0 + svcnth () * 3, z0),
+ svstnt1 (pn8, x0 + svcnth () * 3, z0))
+
+/*
+** stnt1_f16_4:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_4, svfloat16x4_t, float16_t,
+ svstnt1_f16_x4 (pn8, x0 + svcnth () * 4, z0),
+ svstnt1 (pn8, x0 + svcnth () * 4, z0))
+
+/*
+** stnt1_f16_28:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_28, svfloat16x4_t, float16_t,
+ svstnt1_f16_x4 (pn8, x0 + svcnth () * 28, z0),
+ svstnt1 (pn8, x0 + svcnth () * 28, z0))
+
+/*
+** stnt1_f16_32:
+** [^{]*
+** stnt1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_32, svfloat16x4_t, float16_t,
+ svstnt1_f16_x4 (pn8, x0 + svcnth () * 32, z0),
+ svstnt1 (pn8, x0 + svcnth () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f16_m1:
+** decb x0
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_m1, svfloat16x4_t, float16_t,
+ svstnt1_f16_x4 (pn8, x0 - svcnth (), z0),
+ svstnt1 (pn8, x0 - svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f16_m2:
+** decb x0, all, mul #2
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_m2, svfloat16x4_t, float16_t,
+ svstnt1_f16_x4 (pn8, x0 - svcnth () * 2, z0),
+ svstnt1 (pn8, x0 - svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f16_m3:
+** decb x0, all, mul #3
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_m3, svfloat16x4_t, float16_t,
+ svstnt1_f16_x4 (pn8, x0 - svcnth () * 3, z0),
+ svstnt1 (pn8, x0 - svcnth () * 3, z0))
+
+/*
+** stnt1_f16_m4:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_m4, svfloat16x4_t, float16_t,
+ svstnt1_f16_x4 (pn8, x0 - svcnth () * 4, z0),
+ svstnt1 (pn8, x0 - svcnth () * 4, z0))
+
+/*
+** stnt1_f16_m32:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_m32, svfloat16x4_t, float16_t,
+ svstnt1_f16_x4 (pn8, x0 - svcnth () * 32, z0),
+ svstnt1 (pn8, x0 - svcnth () * 32, z0))
+
+/*
+** stnt1_f16_m36:
+** [^{]*
+** stnt1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_m36, svfloat16x4_t, float16_t,
+ svstnt1_f16_x4 (pn8, x0 - svcnth () * 36, z0),
+ svstnt1 (pn8, x0 - svcnth () * 36, z0))
+
+/*
+** stnt1_f16_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_z17, svfloat16x4_t, float16_t,
+ svstnt1_f16_x4 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_f16_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_z22, svfloat16x4_t, float16_t,
+ svstnt1_f16_x4 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_f16_z28:
+** stnt1h {z28\.h - z31\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_z28, svfloat16x4_t, float16_t,
+ svstnt1_f16_x4 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_f16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1h {z0\.h - z3\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_pn0, svfloat16x4_t, float16_t,
+ svstnt1_f16_x4 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_f16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1h {z0\.h - z3\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_pn7, svfloat16x4_t, float16_t,
+ svstnt1_f16_x4 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_f16_pn15:
+** stnt1h {z0\.h - z3\.h}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f16_pn15, svfloat16x4_t, float16_t,
+ svstnt1_f16_x4 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_f16_0:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_0, svfloat16x4_t, float16_t,
+ svstnt1_vnum_f16_x4 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f16_1:
+** incb x0
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_1, svfloat16x4_t, float16_t,
+ svstnt1_vnum_f16_x4 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f16_2:
+** incb x0, all, mul #2
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_2, svfloat16x4_t, float16_t,
+ svstnt1_vnum_f16_x4 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f16_3:
+** incb x0, all, mul #3
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_3, svfloat16x4_t, float16_t,
+ svstnt1_vnum_f16_x4 (pn8, x0, 3, z0),
+ svstnt1_vnum (pn8, x0, 3, z0))
+
+/*
+** stnt1_vnum_f16_4:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_4, svfloat16x4_t, float16_t,
+ svstnt1_vnum_f16_x4 (pn8, x0, 4, z0),
+ svstnt1_vnum (pn8, x0, 4, z0))
+
+/*
+** stnt1_vnum_f16_28:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_28, svfloat16x4_t, float16_t,
+ svstnt1_vnum_f16_x4 (pn8, x0, 28, z0),
+ svstnt1_vnum (pn8, x0, 28, z0))
+
+/*
+** stnt1_vnum_f16_32:
+** [^{]*
+** stnt1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_32, svfloat16x4_t, float16_t,
+ svstnt1_vnum_f16_x4 (pn8, x0, 32, z0),
+ svstnt1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f16_m1:
+** decb x0
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_m1, svfloat16x4_t, float16_t,
+ svstnt1_vnum_f16_x4 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f16_m2:
+** decb x0, all, mul #2
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_m2, svfloat16x4_t, float16_t,
+ svstnt1_vnum_f16_x4 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f16_m3:
+** decb x0, all, mul #3
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_m3, svfloat16x4_t, float16_t,
+ svstnt1_vnum_f16_x4 (pn8, x0, -3, z0),
+ svstnt1_vnum (pn8, x0, -3, z0))
+
+/*
+** stnt1_vnum_f16_m4:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_m4, svfloat16x4_t, float16_t,
+ svstnt1_vnum_f16_x4 (pn8, x0, -4, z0),
+ svstnt1_vnum (pn8, x0, -4, z0))
+
+/*
+** stnt1_vnum_f16_m32:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_m32, svfloat16x4_t, float16_t,
+ svstnt1_vnum_f16_x4 (pn8, x0, -32, z0),
+ svstnt1_vnum (pn8, x0, -32, z0))
+
+/*
+** stnt1_vnum_f16_m36:
+** [^{]*
+** stnt1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_m36, svfloat16x4_t, float16_t,
+ svstnt1_vnum_f16_x4 (pn8, x0, -36, z0),
+ svstnt1_vnum (pn8, x0, -36, z0))
+
+/*
+** stnt1_vnum_f16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1h {z0\.h - z3\.h}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f16_x1, svfloat16x4_t, float16_t,
+ svstnt1_vnum_f16_x4 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f32_x2.c
new file mode 100644
index 0000000..f9c5636
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f32_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_f32_base:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_base, svfloat32x2_t, float32_t,
+ svstnt1_f32_x2 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_f32_index:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_index, svfloat32x2_t, float32_t,
+ svstnt1_f32_x2 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f32_1:
+** incb x0
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_1, svfloat32x2_t, float32_t,
+ svstnt1_f32_x2 (pn8, x0 + svcntw (), z0),
+ svstnt1 (pn8, x0 + svcntw (), z0))
+
+/*
+** stnt1_f32_2:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_2, svfloat32x2_t, float32_t,
+ svstnt1_f32_x2 (pn8, x0 + svcntw () * 2, z0),
+ svstnt1 (pn8, x0 + svcntw () * 2, z0))
+
+/*
+** stnt1_f32_14:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_14, svfloat32x2_t, float32_t,
+ svstnt1_f32_x2 (pn8, x0 + svcntw () * 14, z0),
+ svstnt1 (pn8, x0 + svcntw () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f32_16:
+** incb x0, all, mul #16
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_16, svfloat32x2_t, float32_t,
+ svstnt1_f32_x2 (pn8, x0 + svcntw () * 16, z0),
+ svstnt1 (pn8, x0 + svcntw () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f32_m1:
+** decb x0
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_m1, svfloat32x2_t, float32_t,
+ svstnt1_f32_x2 (pn8, x0 - svcntw (), z0),
+ svstnt1 (pn8, x0 - svcntw (), z0))
+
+/*
+** stnt1_f32_m2:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_m2, svfloat32x2_t, float32_t,
+ svstnt1_f32_x2 (pn8, x0 - svcntw () * 2, z0),
+ svstnt1 (pn8, x0 - svcntw () * 2, z0))
+
+/*
+** stnt1_f32_m16:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_m16, svfloat32x2_t, float32_t,
+ svstnt1_f32_x2 (pn8, x0 - svcntw () * 16, z0),
+ svstnt1 (pn8, x0 - svcntw () * 16, z0))
+
+/*
+** stnt1_f32_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_m18, svfloat32x2_t, float32_t,
+ svstnt1_f32_x2 (pn8, x0 - svcntw () * 18, z0),
+ svstnt1 (pn8, x0 - svcntw () * 18, z0))
+
+/*
+** stnt1_f32_z17:
+** mov [^\n]+
+** mov [^\n]+
+** stnt1w {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_z17, svfloat32x2_t, float32_t,
+ svstnt1_f32_x2 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_f32_z22:
+** stnt1w {z22\.s(?: - |, )z23\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_z22, svfloat32x2_t, float32_t,
+ svstnt1_f32_x2 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_f32_z28:
+** stnt1w {z28\.s(?: - |, )z29\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_z28, svfloat32x2_t, float32_t,
+ svstnt1_f32_x2 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_f32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_pn0, svfloat32x2_t, float32_t,
+ svstnt1_f32_x2 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_f32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_pn7, svfloat32x2_t, float32_t,
+ svstnt1_f32_x2 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_f32_pn15:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_pn15, svfloat32x2_t, float32_t,
+ svstnt1_f32_x2 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_f32_0:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_0, svfloat32x2_t, float32_t,
+ svstnt1_vnum_f32_x2 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f32_1:
+** incb x0
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_1, svfloat32x2_t, float32_t,
+ svstnt1_vnum_f32_x2 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/*
+** stnt1_vnum_f32_2:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_2, svfloat32x2_t, float32_t,
+ svstnt1_vnum_f32_x2 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/*
+** stnt1_vnum_f32_14:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_14, svfloat32x2_t, float32_t,
+ svstnt1_vnum_f32_x2 (pn8, x0, 14, z0),
+ svstnt1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f32_16:
+** incb x0, all, mul #16
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_16, svfloat32x2_t, float32_t,
+ svstnt1_vnum_f32_x2 (pn8, x0, 16, z0),
+ svstnt1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f32_m1:
+** decb x0
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_m1, svfloat32x2_t, float32_t,
+ svstnt1_vnum_f32_x2 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/*
+** stnt1_vnum_f32_m2:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_m2, svfloat32x2_t, float32_t,
+ svstnt1_vnum_f32_x2 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/*
+** stnt1_vnum_f32_m16:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_m16, svfloat32x2_t, float32_t,
+ svstnt1_vnum_f32_x2 (pn8, x0, -16, z0),
+ svstnt1_vnum (pn8, x0, -16, z0))
+
+/*
+** stnt1_vnum_f32_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_m18, svfloat32x2_t, float32_t,
+ svstnt1_vnum_f32_x2 (pn8, x0, -18, z0),
+ svstnt1_vnum (pn8, x0, -18, z0))
+
+/*
+** stnt1_vnum_f32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_x1, svfloat32x2_t, float32_t,
+ svstnt1_vnum_f32_x2 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f32_x4.c
new file mode 100644
index 0000000..30a5ce4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f32_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_f32_base:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_base, svfloat32x4_t, float32_t,
+ svstnt1_f32_x4 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_f32_index:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_index, svfloat32x4_t, float32_t,
+ svstnt1_f32_x4 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f32_1:
+** incb x0
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_1, svfloat32x4_t, float32_t,
+ svstnt1_f32_x4 (pn8, x0 + svcntw (), z0),
+ svstnt1 (pn8, x0 + svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f32_2:
+** incb x0, all, mul #2
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_2, svfloat32x4_t, float32_t,
+ svstnt1_f32_x4 (pn8, x0 + svcntw () * 2, z0),
+ svstnt1 (pn8, x0 + svcntw () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f32_3:
+** incb x0, all, mul #3
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_3, svfloat32x4_t, float32_t,
+ svstnt1_f32_x4 (pn8, x0 + svcntw () * 3, z0),
+ svstnt1 (pn8, x0 + svcntw () * 3, z0))
+
+/*
+** stnt1_f32_4:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_4, svfloat32x4_t, float32_t,
+ svstnt1_f32_x4 (pn8, x0 + svcntw () * 4, z0),
+ svstnt1 (pn8, x0 + svcntw () * 4, z0))
+
+/*
+** stnt1_f32_28:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_28, svfloat32x4_t, float32_t,
+ svstnt1_f32_x4 (pn8, x0 + svcntw () * 28, z0),
+ svstnt1 (pn8, x0 + svcntw () * 28, z0))
+
+/*
+** stnt1_f32_32:
+** [^{]*
+** stnt1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_32, svfloat32x4_t, float32_t,
+ svstnt1_f32_x4 (pn8, x0 + svcntw () * 32, z0),
+ svstnt1 (pn8, x0 + svcntw () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f32_m1:
+** decb x0
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_m1, svfloat32x4_t, float32_t,
+ svstnt1_f32_x4 (pn8, x0 - svcntw (), z0),
+ svstnt1 (pn8, x0 - svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f32_m2:
+** decb x0, all, mul #2
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_m2, svfloat32x4_t, float32_t,
+ svstnt1_f32_x4 (pn8, x0 - svcntw () * 2, z0),
+ svstnt1 (pn8, x0 - svcntw () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f32_m3:
+** decb x0, all, mul #3
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_m3, svfloat32x4_t, float32_t,
+ svstnt1_f32_x4 (pn8, x0 - svcntw () * 3, z0),
+ svstnt1 (pn8, x0 - svcntw () * 3, z0))
+
+/*
+** stnt1_f32_m4:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_m4, svfloat32x4_t, float32_t,
+ svstnt1_f32_x4 (pn8, x0 - svcntw () * 4, z0),
+ svstnt1 (pn8, x0 - svcntw () * 4, z0))
+
+/*
+** stnt1_f32_m32:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_m32, svfloat32x4_t, float32_t,
+ svstnt1_f32_x4 (pn8, x0 - svcntw () * 32, z0),
+ svstnt1 (pn8, x0 - svcntw () * 32, z0))
+
+/*
+** stnt1_f32_m36:
+** [^{]*
+** stnt1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_m36, svfloat32x4_t, float32_t,
+ svstnt1_f32_x4 (pn8, x0 - svcntw () * 36, z0),
+ svstnt1 (pn8, x0 - svcntw () * 36, z0))
+
+/*
+** stnt1_f32_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1w {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_z17, svfloat32x4_t, float32_t,
+ svstnt1_f32_x4 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_f32_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1w {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_z22, svfloat32x4_t, float32_t,
+ svstnt1_f32_x4 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_f32_z28:
+** stnt1w {z28\.s - z31\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_z28, svfloat32x4_t, float32_t,
+ svstnt1_f32_x4 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_f32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1w {z0\.s - z3\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_pn0, svfloat32x4_t, float32_t,
+ svstnt1_f32_x4 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_f32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1w {z0\.s - z3\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_pn7, svfloat32x4_t, float32_t,
+ svstnt1_f32_x4 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_f32_pn15:
+** stnt1w {z0\.s - z3\.s}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f32_pn15, svfloat32x4_t, float32_t,
+ svstnt1_f32_x4 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_f32_0:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_0, svfloat32x4_t, float32_t,
+ svstnt1_vnum_f32_x4 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f32_1:
+** incb x0
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_1, svfloat32x4_t, float32_t,
+ svstnt1_vnum_f32_x4 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f32_2:
+** incb x0, all, mul #2
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_2, svfloat32x4_t, float32_t,
+ svstnt1_vnum_f32_x4 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f32_3:
+** incb x0, all, mul #3
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_3, svfloat32x4_t, float32_t,
+ svstnt1_vnum_f32_x4 (pn8, x0, 3, z0),
+ svstnt1_vnum (pn8, x0, 3, z0))
+
+/*
+** stnt1_vnum_f32_4:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_4, svfloat32x4_t, float32_t,
+ svstnt1_vnum_f32_x4 (pn8, x0, 4, z0),
+ svstnt1_vnum (pn8, x0, 4, z0))
+
+/*
+** stnt1_vnum_f32_28:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_28, svfloat32x4_t, float32_t,
+ svstnt1_vnum_f32_x4 (pn8, x0, 28, z0),
+ svstnt1_vnum (pn8, x0, 28, z0))
+
+/*
+** stnt1_vnum_f32_32:
+** [^{]*
+** stnt1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_32, svfloat32x4_t, float32_t,
+ svstnt1_vnum_f32_x4 (pn8, x0, 32, z0),
+ svstnt1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f32_m1:
+** decb x0
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_m1, svfloat32x4_t, float32_t,
+ svstnt1_vnum_f32_x4 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f32_m2:
+** decb x0, all, mul #2
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_m2, svfloat32x4_t, float32_t,
+ svstnt1_vnum_f32_x4 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f32_m3:
+** decb x0, all, mul #3
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_m3, svfloat32x4_t, float32_t,
+ svstnt1_vnum_f32_x4 (pn8, x0, -3, z0),
+ svstnt1_vnum (pn8, x0, -3, z0))
+
+/*
+** stnt1_vnum_f32_m4:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_m4, svfloat32x4_t, float32_t,
+ svstnt1_vnum_f32_x4 (pn8, x0, -4, z0),
+ svstnt1_vnum (pn8, x0, -4, z0))
+
+/*
+** stnt1_vnum_f32_m32:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_m32, svfloat32x4_t, float32_t,
+ svstnt1_vnum_f32_x4 (pn8, x0, -32, z0),
+ svstnt1_vnum (pn8, x0, -32, z0))
+
+/*
+** stnt1_vnum_f32_m36:
+** [^{]*
+** stnt1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_m36, svfloat32x4_t, float32_t,
+ svstnt1_vnum_f32_x4 (pn8, x0, -36, z0),
+ svstnt1_vnum (pn8, x0, -36, z0))
+
+/*
+** stnt1_vnum_f32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1w {z0\.s - z3\.s}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f32_x1, svfloat32x4_t, float32_t,
+ svstnt1_vnum_f32_x4 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f64_x2.c
new file mode 100644
index 0000000..a587502
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f64_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_f64_base:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_base, svfloat64x2_t, float64_t,
+ svstnt1_f64_x2 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_f64_index:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_index, svfloat64x2_t, float64_t,
+ svstnt1_f64_x2 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f64_1:
+** incb x0
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_1, svfloat64x2_t, float64_t,
+ svstnt1_f64_x2 (pn8, x0 + svcntd (), z0),
+ svstnt1 (pn8, x0 + svcntd (), z0))
+
+/*
+** stnt1_f64_2:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_2, svfloat64x2_t, float64_t,
+ svstnt1_f64_x2 (pn8, x0 + svcntd () * 2, z0),
+ svstnt1 (pn8, x0 + svcntd () * 2, z0))
+
+/*
+** stnt1_f64_14:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_14, svfloat64x2_t, float64_t,
+ svstnt1_f64_x2 (pn8, x0 + svcntd () * 14, z0),
+ svstnt1 (pn8, x0 + svcntd () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f64_16:
+** incb x0, all, mul #16
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_16, svfloat64x2_t, float64_t,
+ svstnt1_f64_x2 (pn8, x0 + svcntd () * 16, z0),
+ svstnt1 (pn8, x0 + svcntd () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f64_m1:
+** decb x0
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_m1, svfloat64x2_t, float64_t,
+ svstnt1_f64_x2 (pn8, x0 - svcntd (), z0),
+ svstnt1 (pn8, x0 - svcntd (), z0))
+
+/*
+** stnt1_f64_m2:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_m2, svfloat64x2_t, float64_t,
+ svstnt1_f64_x2 (pn8, x0 - svcntd () * 2, z0),
+ svstnt1 (pn8, x0 - svcntd () * 2, z0))
+
+/*
+** stnt1_f64_m16:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_m16, svfloat64x2_t, float64_t,
+ svstnt1_f64_x2 (pn8, x0 - svcntd () * 16, z0),
+ svstnt1 (pn8, x0 - svcntd () * 16, z0))
+
+/*
+** stnt1_f64_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_m18, svfloat64x2_t, float64_t,
+ svstnt1_f64_x2 (pn8, x0 - svcntd () * 18, z0),
+ svstnt1 (pn8, x0 - svcntd () * 18, z0))
+
+/*
+** stnt1_f64_z17:
+** mov [^\n]+
+** mov [^\n]+
+** stnt1d {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_z17, svfloat64x2_t, float64_t,
+ svstnt1_f64_x2 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_f64_z22:
+** stnt1d {z22\.d(?: - |, )z23\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_z22, svfloat64x2_t, float64_t,
+ svstnt1_f64_x2 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_f64_z28:
+** stnt1d {z28\.d(?: - |, )z29\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_z28, svfloat64x2_t, float64_t,
+ svstnt1_f64_x2 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_f64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_pn0, svfloat64x2_t, float64_t,
+ svstnt1_f64_x2 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_f64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_pn7, svfloat64x2_t, float64_t,
+ svstnt1_f64_x2 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_f64_pn15:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_pn15, svfloat64x2_t, float64_t,
+ svstnt1_f64_x2 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_f64_0:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_0, svfloat64x2_t, float64_t,
+ svstnt1_vnum_f64_x2 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f64_1:
+** incb x0
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_1, svfloat64x2_t, float64_t,
+ svstnt1_vnum_f64_x2 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/*
+** stnt1_vnum_f64_2:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_2, svfloat64x2_t, float64_t,
+ svstnt1_vnum_f64_x2 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/*
+** stnt1_vnum_f64_14:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_14, svfloat64x2_t, float64_t,
+ svstnt1_vnum_f64_x2 (pn8, x0, 14, z0),
+ svstnt1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f64_16:
+** incb x0, all, mul #16
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_16, svfloat64x2_t, float64_t,
+ svstnt1_vnum_f64_x2 (pn8, x0, 16, z0),
+ svstnt1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f64_m1:
+** decb x0
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_m1, svfloat64x2_t, float64_t,
+ svstnt1_vnum_f64_x2 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/*
+** stnt1_vnum_f64_m2:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_m2, svfloat64x2_t, float64_t,
+ svstnt1_vnum_f64_x2 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/*
+** stnt1_vnum_f64_m16:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_m16, svfloat64x2_t, float64_t,
+ svstnt1_vnum_f64_x2 (pn8, x0, -16, z0),
+ svstnt1_vnum (pn8, x0, -16, z0))
+
+/*
+** stnt1_vnum_f64_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_m18, svfloat64x2_t, float64_t,
+ svstnt1_vnum_f64_x2 (pn8, x0, -18, z0),
+ svstnt1_vnum (pn8, x0, -18, z0))
+
+/*
+** stnt1_vnum_f64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_x1, svfloat64x2_t, float64_t,
+ svstnt1_vnum_f64_x2 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f64_x4.c
new file mode 100644
index 0000000..42bfc51
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_f64_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_f64_base:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_base, svfloat64x4_t, float64_t,
+ svstnt1_f64_x4 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_f64_index:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_index, svfloat64x4_t, float64_t,
+ svstnt1_f64_x4 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f64_1:
+** incb x0
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_1, svfloat64x4_t, float64_t,
+ svstnt1_f64_x4 (pn8, x0 + svcntd (), z0),
+ svstnt1 (pn8, x0 + svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f64_2:
+** incb x0, all, mul #2
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_2, svfloat64x4_t, float64_t,
+ svstnt1_f64_x4 (pn8, x0 + svcntd () * 2, z0),
+ svstnt1 (pn8, x0 + svcntd () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f64_3:
+** incb x0, all, mul #3
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_3, svfloat64x4_t, float64_t,
+ svstnt1_f64_x4 (pn8, x0 + svcntd () * 3, z0),
+ svstnt1 (pn8, x0 + svcntd () * 3, z0))
+
+/*
+** stnt1_f64_4:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_4, svfloat64x4_t, float64_t,
+ svstnt1_f64_x4 (pn8, x0 + svcntd () * 4, z0),
+ svstnt1 (pn8, x0 + svcntd () * 4, z0))
+
+/*
+** stnt1_f64_28:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_28, svfloat64x4_t, float64_t,
+ svstnt1_f64_x4 (pn8, x0 + svcntd () * 28, z0),
+ svstnt1 (pn8, x0 + svcntd () * 28, z0))
+
+/*
+** stnt1_f64_32:
+** [^{]*
+** stnt1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_32, svfloat64x4_t, float64_t,
+ svstnt1_f64_x4 (pn8, x0 + svcntd () * 32, z0),
+ svstnt1 (pn8, x0 + svcntd () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f64_m1:
+** decb x0
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_m1, svfloat64x4_t, float64_t,
+ svstnt1_f64_x4 (pn8, x0 - svcntd (), z0),
+ svstnt1 (pn8, x0 - svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f64_m2:
+** decb x0, all, mul #2
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_m2, svfloat64x4_t, float64_t,
+ svstnt1_f64_x4 (pn8, x0 - svcntd () * 2, z0),
+ svstnt1 (pn8, x0 - svcntd () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_f64_m3:
+** decb x0, all, mul #3
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_m3, svfloat64x4_t, float64_t,
+ svstnt1_f64_x4 (pn8, x0 - svcntd () * 3, z0),
+ svstnt1 (pn8, x0 - svcntd () * 3, z0))
+
+/*
+** stnt1_f64_m4:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_m4, svfloat64x4_t, float64_t,
+ svstnt1_f64_x4 (pn8, x0 - svcntd () * 4, z0),
+ svstnt1 (pn8, x0 - svcntd () * 4, z0))
+
+/*
+** stnt1_f64_m32:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_m32, svfloat64x4_t, float64_t,
+ svstnt1_f64_x4 (pn8, x0 - svcntd () * 32, z0),
+ svstnt1 (pn8, x0 - svcntd () * 32, z0))
+
+/*
+** stnt1_f64_m36:
+** [^{]*
+** stnt1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_m36, svfloat64x4_t, float64_t,
+ svstnt1_f64_x4 (pn8, x0 - svcntd () * 36, z0),
+ svstnt1 (pn8, x0 - svcntd () * 36, z0))
+
+/*
+** stnt1_f64_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1d {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_z17, svfloat64x4_t, float64_t,
+ svstnt1_f64_x4 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_f64_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1d {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_z22, svfloat64x4_t, float64_t,
+ svstnt1_f64_x4 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_f64_z28:
+** stnt1d {z28\.d - z31\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_z28, svfloat64x4_t, float64_t,
+ svstnt1_f64_x4 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_f64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1d {z0\.d - z3\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_pn0, svfloat64x4_t, float64_t,
+ svstnt1_f64_x4 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_f64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1d {z0\.d - z3\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_pn7, svfloat64x4_t, float64_t,
+ svstnt1_f64_x4 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_f64_pn15:
+** stnt1d {z0\.d - z3\.d}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_f64_pn15, svfloat64x4_t, float64_t,
+ svstnt1_f64_x4 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_f64_0:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_0, svfloat64x4_t, float64_t,
+ svstnt1_vnum_f64_x4 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f64_1:
+** incb x0
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_1, svfloat64x4_t, float64_t,
+ svstnt1_vnum_f64_x4 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f64_2:
+** incb x0, all, mul #2
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_2, svfloat64x4_t, float64_t,
+ svstnt1_vnum_f64_x4 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f64_3:
+** incb x0, all, mul #3
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_3, svfloat64x4_t, float64_t,
+ svstnt1_vnum_f64_x4 (pn8, x0, 3, z0),
+ svstnt1_vnum (pn8, x0, 3, z0))
+
+/*
+** stnt1_vnum_f64_4:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_4, svfloat64x4_t, float64_t,
+ svstnt1_vnum_f64_x4 (pn8, x0, 4, z0),
+ svstnt1_vnum (pn8, x0, 4, z0))
+
+/*
+** stnt1_vnum_f64_28:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_28, svfloat64x4_t, float64_t,
+ svstnt1_vnum_f64_x4 (pn8, x0, 28, z0),
+ svstnt1_vnum (pn8, x0, 28, z0))
+
+/*
+** stnt1_vnum_f64_32:
+** [^{]*
+** stnt1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_32, svfloat64x4_t, float64_t,
+ svstnt1_vnum_f64_x4 (pn8, x0, 32, z0),
+ svstnt1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f64_m1:
+** decb x0
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_m1, svfloat64x4_t, float64_t,
+ svstnt1_vnum_f64_x4 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f64_m2:
+** decb x0, all, mul #2
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_m2, svfloat64x4_t, float64_t,
+ svstnt1_vnum_f64_x4 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_f64_m3:
+** decb x0, all, mul #3
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_m3, svfloat64x4_t, float64_t,
+ svstnt1_vnum_f64_x4 (pn8, x0, -3, z0),
+ svstnt1_vnum (pn8, x0, -3, z0))
+
+/*
+** stnt1_vnum_f64_m4:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_m4, svfloat64x4_t, float64_t,
+ svstnt1_vnum_f64_x4 (pn8, x0, -4, z0),
+ svstnt1_vnum (pn8, x0, -4, z0))
+
+/*
+** stnt1_vnum_f64_m32:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_m32, svfloat64x4_t, float64_t,
+ svstnt1_vnum_f64_x4 (pn8, x0, -32, z0),
+ svstnt1_vnum (pn8, x0, -32, z0))
+
+/*
+** stnt1_vnum_f64_m36:
+** [^{]*
+** stnt1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_m36, svfloat64x4_t, float64_t,
+ svstnt1_vnum_f64_x4 (pn8, x0, -36, z0),
+ svstnt1_vnum (pn8, x0, -36, z0))
+
+/*
+** stnt1_vnum_f64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1d {z0\.d - z3\.d}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_f64_x1, svfloat64x4_t, float64_t,
+ svstnt1_vnum_f64_x4 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s16_x2.c
new file mode 100644
index 0000000..dcb8f89
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s16_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_s16_base:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_base, svint16x2_t, int16_t,
+ svstnt1_s16_x2 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_s16_index:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_index, svint16x2_t, int16_t,
+ svstnt1_s16_x2 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s16_1:
+** incb x0
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_1, svint16x2_t, int16_t,
+ svstnt1_s16_x2 (pn8, x0 + svcnth (), z0),
+ svstnt1 (pn8, x0 + svcnth (), z0))
+
+/*
+** stnt1_s16_2:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_2, svint16x2_t, int16_t,
+ svstnt1_s16_x2 (pn8, x0 + svcnth () * 2, z0),
+ svstnt1 (pn8, x0 + svcnth () * 2, z0))
+
+/*
+** stnt1_s16_14:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_14, svint16x2_t, int16_t,
+ svstnt1_s16_x2 (pn8, x0 + svcnth () * 14, z0),
+ svstnt1 (pn8, x0 + svcnth () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s16_16:
+** incb x0, all, mul #16
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_16, svint16x2_t, int16_t,
+ svstnt1_s16_x2 (pn8, x0 + svcnth () * 16, z0),
+ svstnt1 (pn8, x0 + svcnth () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s16_m1:
+** decb x0
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_m1, svint16x2_t, int16_t,
+ svstnt1_s16_x2 (pn8, x0 - svcnth (), z0),
+ svstnt1 (pn8, x0 - svcnth (), z0))
+
+/*
+** stnt1_s16_m2:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_m2, svint16x2_t, int16_t,
+ svstnt1_s16_x2 (pn8, x0 - svcnth () * 2, z0),
+ svstnt1 (pn8, x0 - svcnth () * 2, z0))
+
+/*
+** stnt1_s16_m16:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_m16, svint16x2_t, int16_t,
+ svstnt1_s16_x2 (pn8, x0 - svcnth () * 16, z0),
+ svstnt1 (pn8, x0 - svcnth () * 16, z0))
+
+/*
+** stnt1_s16_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_m18, svint16x2_t, int16_t,
+ svstnt1_s16_x2 (pn8, x0 - svcnth () * 18, z0),
+ svstnt1 (pn8, x0 - svcnth () * 18, z0))
+
+/*
+** stnt1_s16_z17:
+** mov [^\n]+
+** mov [^\n]+
+** stnt1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_z17, svint16x2_t, int16_t,
+ svstnt1_s16_x2 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_s16_z22:
+** stnt1h {z22\.h(?: - |, )z23\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_z22, svint16x2_t, int16_t,
+ svstnt1_s16_x2 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_s16_z28:
+** stnt1h {z28\.h(?: - |, )z29\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_z28, svint16x2_t, int16_t,
+ svstnt1_s16_x2 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_s16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_pn0, svint16x2_t, int16_t,
+ svstnt1_s16_x2 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_s16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_pn7, svint16x2_t, int16_t,
+ svstnt1_s16_x2 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_s16_pn15:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_pn15, svint16x2_t, int16_t,
+ svstnt1_s16_x2 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_s16_0:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_0, svint16x2_t, int16_t,
+ svstnt1_vnum_s16_x2 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s16_1:
+** incb x0
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_1, svint16x2_t, int16_t,
+ svstnt1_vnum_s16_x2 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/*
+** stnt1_vnum_s16_2:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_2, svint16x2_t, int16_t,
+ svstnt1_vnum_s16_x2 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/*
+** stnt1_vnum_s16_14:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_14, svint16x2_t, int16_t,
+ svstnt1_vnum_s16_x2 (pn8, x0, 14, z0),
+ svstnt1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s16_16:
+** incb x0, all, mul #16
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_16, svint16x2_t, int16_t,
+ svstnt1_vnum_s16_x2 (pn8, x0, 16, z0),
+ svstnt1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s16_m1:
+** decb x0
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_m1, svint16x2_t, int16_t,
+ svstnt1_vnum_s16_x2 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/*
+** stnt1_vnum_s16_m2:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_m2, svint16x2_t, int16_t,
+ svstnt1_vnum_s16_x2 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/*
+** stnt1_vnum_s16_m16:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_m16, svint16x2_t, int16_t,
+ svstnt1_vnum_s16_x2 (pn8, x0, -16, z0),
+ svstnt1_vnum (pn8, x0, -16, z0))
+
+/*
+** stnt1_vnum_s16_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_m18, svint16x2_t, int16_t,
+ svstnt1_vnum_s16_x2 (pn8, x0, -18, z0),
+ svstnt1_vnum (pn8, x0, -18, z0))
+
+/*
+** stnt1_vnum_s16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_x1, svint16x2_t, int16_t,
+ svstnt1_vnum_s16_x2 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s16_x4.c
new file mode 100644
index 0000000..87290a6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s16_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_s16_base:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_base, svint16x4_t, int16_t,
+ svstnt1_s16_x4 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_s16_index:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_index, svint16x4_t, int16_t,
+ svstnt1_s16_x4 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s16_1:
+** incb x0
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_1, svint16x4_t, int16_t,
+ svstnt1_s16_x4 (pn8, x0 + svcnth (), z0),
+ svstnt1 (pn8, x0 + svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s16_2:
+** incb x0, all, mul #2
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_2, svint16x4_t, int16_t,
+ svstnt1_s16_x4 (pn8, x0 + svcnth () * 2, z0),
+ svstnt1 (pn8, x0 + svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s16_3:
+** incb x0, all, mul #3
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_3, svint16x4_t, int16_t,
+ svstnt1_s16_x4 (pn8, x0 + svcnth () * 3, z0),
+ svstnt1 (pn8, x0 + svcnth () * 3, z0))
+
+/*
+** stnt1_s16_4:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_4, svint16x4_t, int16_t,
+ svstnt1_s16_x4 (pn8, x0 + svcnth () * 4, z0),
+ svstnt1 (pn8, x0 + svcnth () * 4, z0))
+
+/*
+** stnt1_s16_28:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_28, svint16x4_t, int16_t,
+ svstnt1_s16_x4 (pn8, x0 + svcnth () * 28, z0),
+ svstnt1 (pn8, x0 + svcnth () * 28, z0))
+
+/*
+** stnt1_s16_32:
+** [^{]*
+** stnt1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_32, svint16x4_t, int16_t,
+ svstnt1_s16_x4 (pn8, x0 + svcnth () * 32, z0),
+ svstnt1 (pn8, x0 + svcnth () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s16_m1:
+** decb x0
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_m1, svint16x4_t, int16_t,
+ svstnt1_s16_x4 (pn8, x0 - svcnth (), z0),
+ svstnt1 (pn8, x0 - svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s16_m2:
+** decb x0, all, mul #2
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_m2, svint16x4_t, int16_t,
+ svstnt1_s16_x4 (pn8, x0 - svcnth () * 2, z0),
+ svstnt1 (pn8, x0 - svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s16_m3:
+** decb x0, all, mul #3
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_m3, svint16x4_t, int16_t,
+ svstnt1_s16_x4 (pn8, x0 - svcnth () * 3, z0),
+ svstnt1 (pn8, x0 - svcnth () * 3, z0))
+
+/*
+** stnt1_s16_m4:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_m4, svint16x4_t, int16_t,
+ svstnt1_s16_x4 (pn8, x0 - svcnth () * 4, z0),
+ svstnt1 (pn8, x0 - svcnth () * 4, z0))
+
+/*
+** stnt1_s16_m32:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_m32, svint16x4_t, int16_t,
+ svstnt1_s16_x4 (pn8, x0 - svcnth () * 32, z0),
+ svstnt1 (pn8, x0 - svcnth () * 32, z0))
+
+/*
+** stnt1_s16_m36:
+** [^{]*
+** stnt1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_m36, svint16x4_t, int16_t,
+ svstnt1_s16_x4 (pn8, x0 - svcnth () * 36, z0),
+ svstnt1 (pn8, x0 - svcnth () * 36, z0))
+
+/*
+** stnt1_s16_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_z17, svint16x4_t, int16_t,
+ svstnt1_s16_x4 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_s16_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_z22, svint16x4_t, int16_t,
+ svstnt1_s16_x4 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_s16_z28:
+** stnt1h {z28\.h - z31\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_z28, svint16x4_t, int16_t,
+ svstnt1_s16_x4 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_s16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1h {z0\.h - z3\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_pn0, svint16x4_t, int16_t,
+ svstnt1_s16_x4 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_s16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1h {z0\.h - z3\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_pn7, svint16x4_t, int16_t,
+ svstnt1_s16_x4 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_s16_pn15:
+** stnt1h {z0\.h - z3\.h}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s16_pn15, svint16x4_t, int16_t,
+ svstnt1_s16_x4 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_s16_0:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_0, svint16x4_t, int16_t,
+ svstnt1_vnum_s16_x4 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s16_1:
+** incb x0
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_1, svint16x4_t, int16_t,
+ svstnt1_vnum_s16_x4 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s16_2:
+** incb x0, all, mul #2
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_2, svint16x4_t, int16_t,
+ svstnt1_vnum_s16_x4 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s16_3:
+** incb x0, all, mul #3
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_3, svint16x4_t, int16_t,
+ svstnt1_vnum_s16_x4 (pn8, x0, 3, z0),
+ svstnt1_vnum (pn8, x0, 3, z0))
+
+/*
+** stnt1_vnum_s16_4:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_4, svint16x4_t, int16_t,
+ svstnt1_vnum_s16_x4 (pn8, x0, 4, z0),
+ svstnt1_vnum (pn8, x0, 4, z0))
+
+/*
+** stnt1_vnum_s16_28:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_28, svint16x4_t, int16_t,
+ svstnt1_vnum_s16_x4 (pn8, x0, 28, z0),
+ svstnt1_vnum (pn8, x0, 28, z0))
+
+/*
+** stnt1_vnum_s16_32:
+** [^{]*
+** stnt1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_32, svint16x4_t, int16_t,
+ svstnt1_vnum_s16_x4 (pn8, x0, 32, z0),
+ svstnt1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s16_m1:
+** decb x0
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_m1, svint16x4_t, int16_t,
+ svstnt1_vnum_s16_x4 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s16_m2:
+** decb x0, all, mul #2
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_m2, svint16x4_t, int16_t,
+ svstnt1_vnum_s16_x4 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s16_m3:
+** decb x0, all, mul #3
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_m3, svint16x4_t, int16_t,
+ svstnt1_vnum_s16_x4 (pn8, x0, -3, z0),
+ svstnt1_vnum (pn8, x0, -3, z0))
+
+/*
+** stnt1_vnum_s16_m4:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_m4, svint16x4_t, int16_t,
+ svstnt1_vnum_s16_x4 (pn8, x0, -4, z0),
+ svstnt1_vnum (pn8, x0, -4, z0))
+
+/*
+** stnt1_vnum_s16_m32:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_m32, svint16x4_t, int16_t,
+ svstnt1_vnum_s16_x4 (pn8, x0, -32, z0),
+ svstnt1_vnum (pn8, x0, -32, z0))
+
+/*
+** stnt1_vnum_s16_m36:
+** [^{]*
+** stnt1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_m36, svint16x4_t, int16_t,
+ svstnt1_vnum_s16_x4 (pn8, x0, -36, z0),
+ svstnt1_vnum (pn8, x0, -36, z0))
+
+/*
+** stnt1_vnum_s16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1h {z0\.h - z3\.h}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s16_x1, svint16x4_t, int16_t,
+ svstnt1_vnum_s16_x4 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s32_x2.c
new file mode 100644
index 0000000..3c1a038
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s32_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_s32_base:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_base, svint32x2_t, int32_t,
+ svstnt1_s32_x2 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_s32_index:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_index, svint32x2_t, int32_t,
+ svstnt1_s32_x2 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s32_1:
+** incb x0
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_1, svint32x2_t, int32_t,
+ svstnt1_s32_x2 (pn8, x0 + svcntw (), z0),
+ svstnt1 (pn8, x0 + svcntw (), z0))
+
+/*
+** stnt1_s32_2:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_2, svint32x2_t, int32_t,
+ svstnt1_s32_x2 (pn8, x0 + svcntw () * 2, z0),
+ svstnt1 (pn8, x0 + svcntw () * 2, z0))
+
+/*
+** stnt1_s32_14:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_14, svint32x2_t, int32_t,
+ svstnt1_s32_x2 (pn8, x0 + svcntw () * 14, z0),
+ svstnt1 (pn8, x0 + svcntw () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s32_16:
+** incb x0, all, mul #16
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_16, svint32x2_t, int32_t,
+ svstnt1_s32_x2 (pn8, x0 + svcntw () * 16, z0),
+ svstnt1 (pn8, x0 + svcntw () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s32_m1:
+** decb x0
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_m1, svint32x2_t, int32_t,
+ svstnt1_s32_x2 (pn8, x0 - svcntw (), z0),
+ svstnt1 (pn8, x0 - svcntw (), z0))
+
+/*
+** stnt1_s32_m2:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_m2, svint32x2_t, int32_t,
+ svstnt1_s32_x2 (pn8, x0 - svcntw () * 2, z0),
+ svstnt1 (pn8, x0 - svcntw () * 2, z0))
+
+/*
+** stnt1_s32_m16:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_m16, svint32x2_t, int32_t,
+ svstnt1_s32_x2 (pn8, x0 - svcntw () * 16, z0),
+ svstnt1 (pn8, x0 - svcntw () * 16, z0))
+
+/*
+** stnt1_s32_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_m18, svint32x2_t, int32_t,
+ svstnt1_s32_x2 (pn8, x0 - svcntw () * 18, z0),
+ svstnt1 (pn8, x0 - svcntw () * 18, z0))
+
+/*
+** stnt1_s32_z17:
+** mov [^\n]+
+** mov [^\n]+
+** stnt1w {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_z17, svint32x2_t, int32_t,
+ svstnt1_s32_x2 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_s32_z22:
+** stnt1w {z22\.s(?: - |, )z23\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_z22, svint32x2_t, int32_t,
+ svstnt1_s32_x2 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_s32_z28:
+** stnt1w {z28\.s(?: - |, )z29\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_z28, svint32x2_t, int32_t,
+ svstnt1_s32_x2 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_s32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_pn0, svint32x2_t, int32_t,
+ svstnt1_s32_x2 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_s32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_pn7, svint32x2_t, int32_t,
+ svstnt1_s32_x2 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_s32_pn15:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_pn15, svint32x2_t, int32_t,
+ svstnt1_s32_x2 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_s32_0:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_0, svint32x2_t, int32_t,
+ svstnt1_vnum_s32_x2 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s32_1:
+** incb x0
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_1, svint32x2_t, int32_t,
+ svstnt1_vnum_s32_x2 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/*
+** stnt1_vnum_s32_2:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_2, svint32x2_t, int32_t,
+ svstnt1_vnum_s32_x2 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/*
+** stnt1_vnum_s32_14:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_14, svint32x2_t, int32_t,
+ svstnt1_vnum_s32_x2 (pn8, x0, 14, z0),
+ svstnt1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s32_16:
+** incb x0, all, mul #16
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_16, svint32x2_t, int32_t,
+ svstnt1_vnum_s32_x2 (pn8, x0, 16, z0),
+ svstnt1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s32_m1:
+** decb x0
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_m1, svint32x2_t, int32_t,
+ svstnt1_vnum_s32_x2 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/*
+** stnt1_vnum_s32_m2:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_m2, svint32x2_t, int32_t,
+ svstnt1_vnum_s32_x2 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/*
+** stnt1_vnum_s32_m16:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_m16, svint32x2_t, int32_t,
+ svstnt1_vnum_s32_x2 (pn8, x0, -16, z0),
+ svstnt1_vnum (pn8, x0, -16, z0))
+
+/*
+** stnt1_vnum_s32_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_m18, svint32x2_t, int32_t,
+ svstnt1_vnum_s32_x2 (pn8, x0, -18, z0),
+ svstnt1_vnum (pn8, x0, -18, z0))
+
+/*
+** stnt1_vnum_s32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_x1, svint32x2_t, int32_t,
+ svstnt1_vnum_s32_x2 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s32_x4.c
new file mode 100644
index 0000000..d9a08b0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s32_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_s32_base:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_base, svint32x4_t, int32_t,
+ svstnt1_s32_x4 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_s32_index:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_index, svint32x4_t, int32_t,
+ svstnt1_s32_x4 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s32_1:
+** incb x0
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_1, svint32x4_t, int32_t,
+ svstnt1_s32_x4 (pn8, x0 + svcntw (), z0),
+ svstnt1 (pn8, x0 + svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s32_2:
+** incb x0, all, mul #2
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_2, svint32x4_t, int32_t,
+ svstnt1_s32_x4 (pn8, x0 + svcntw () * 2, z0),
+ svstnt1 (pn8, x0 + svcntw () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s32_3:
+** incb x0, all, mul #3
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_3, svint32x4_t, int32_t,
+ svstnt1_s32_x4 (pn8, x0 + svcntw () * 3, z0),
+ svstnt1 (pn8, x0 + svcntw () * 3, z0))
+
+/*
+** stnt1_s32_4:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_4, svint32x4_t, int32_t,
+ svstnt1_s32_x4 (pn8, x0 + svcntw () * 4, z0),
+ svstnt1 (pn8, x0 + svcntw () * 4, z0))
+
+/*
+** stnt1_s32_28:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_28, svint32x4_t, int32_t,
+ svstnt1_s32_x4 (pn8, x0 + svcntw () * 28, z0),
+ svstnt1 (pn8, x0 + svcntw () * 28, z0))
+
+/*
+** stnt1_s32_32:
+** [^{]*
+** stnt1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_32, svint32x4_t, int32_t,
+ svstnt1_s32_x4 (pn8, x0 + svcntw () * 32, z0),
+ svstnt1 (pn8, x0 + svcntw () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s32_m1:
+** decb x0
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_m1, svint32x4_t, int32_t,
+ svstnt1_s32_x4 (pn8, x0 - svcntw (), z0),
+ svstnt1 (pn8, x0 - svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s32_m2:
+** decb x0, all, mul #2
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_m2, svint32x4_t, int32_t,
+ svstnt1_s32_x4 (pn8, x0 - svcntw () * 2, z0),
+ svstnt1 (pn8, x0 - svcntw () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s32_m3:
+** decb x0, all, mul #3
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_m3, svint32x4_t, int32_t,
+ svstnt1_s32_x4 (pn8, x0 - svcntw () * 3, z0),
+ svstnt1 (pn8, x0 - svcntw () * 3, z0))
+
+/*
+** stnt1_s32_m4:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_m4, svint32x4_t, int32_t,
+ svstnt1_s32_x4 (pn8, x0 - svcntw () * 4, z0),
+ svstnt1 (pn8, x0 - svcntw () * 4, z0))
+
+/*
+** stnt1_s32_m32:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_m32, svint32x4_t, int32_t,
+ svstnt1_s32_x4 (pn8, x0 - svcntw () * 32, z0),
+ svstnt1 (pn8, x0 - svcntw () * 32, z0))
+
+/*
+** stnt1_s32_m36:
+** [^{]*
+** stnt1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_m36, svint32x4_t, int32_t,
+ svstnt1_s32_x4 (pn8, x0 - svcntw () * 36, z0),
+ svstnt1 (pn8, x0 - svcntw () * 36, z0))
+
+/*
+** stnt1_s32_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1w {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_z17, svint32x4_t, int32_t,
+ svstnt1_s32_x4 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_s32_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1w {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_z22, svint32x4_t, int32_t,
+ svstnt1_s32_x4 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_s32_z28:
+** stnt1w {z28\.s - z31\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_z28, svint32x4_t, int32_t,
+ svstnt1_s32_x4 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_s32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1w {z0\.s - z3\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_pn0, svint32x4_t, int32_t,
+ svstnt1_s32_x4 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_s32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1w {z0\.s - z3\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_pn7, svint32x4_t, int32_t,
+ svstnt1_s32_x4 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_s32_pn15:
+** stnt1w {z0\.s - z3\.s}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s32_pn15, svint32x4_t, int32_t,
+ svstnt1_s32_x4 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_s32_0:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_0, svint32x4_t, int32_t,
+ svstnt1_vnum_s32_x4 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s32_1:
+** incb x0
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_1, svint32x4_t, int32_t,
+ svstnt1_vnum_s32_x4 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s32_2:
+** incb x0, all, mul #2
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_2, svint32x4_t, int32_t,
+ svstnt1_vnum_s32_x4 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s32_3:
+** incb x0, all, mul #3
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_3, svint32x4_t, int32_t,
+ svstnt1_vnum_s32_x4 (pn8, x0, 3, z0),
+ svstnt1_vnum (pn8, x0, 3, z0))
+
+/*
+** stnt1_vnum_s32_4:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_4, svint32x4_t, int32_t,
+ svstnt1_vnum_s32_x4 (pn8, x0, 4, z0),
+ svstnt1_vnum (pn8, x0, 4, z0))
+
+/*
+** stnt1_vnum_s32_28:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_28, svint32x4_t, int32_t,
+ svstnt1_vnum_s32_x4 (pn8, x0, 28, z0),
+ svstnt1_vnum (pn8, x0, 28, z0))
+
+/*
+** stnt1_vnum_s32_32:
+** [^{]*
+** stnt1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_32, svint32x4_t, int32_t,
+ svstnt1_vnum_s32_x4 (pn8, x0, 32, z0),
+ svstnt1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s32_m1:
+** decb x0
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_m1, svint32x4_t, int32_t,
+ svstnt1_vnum_s32_x4 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s32_m2:
+** decb x0, all, mul #2
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_m2, svint32x4_t, int32_t,
+ svstnt1_vnum_s32_x4 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s32_m3:
+** decb x0, all, mul #3
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_m3, svint32x4_t, int32_t,
+ svstnt1_vnum_s32_x4 (pn8, x0, -3, z0),
+ svstnt1_vnum (pn8, x0, -3, z0))
+
+/*
+** stnt1_vnum_s32_m4:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_m4, svint32x4_t, int32_t,
+ svstnt1_vnum_s32_x4 (pn8, x0, -4, z0),
+ svstnt1_vnum (pn8, x0, -4, z0))
+
+/*
+** stnt1_vnum_s32_m32:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_m32, svint32x4_t, int32_t,
+ svstnt1_vnum_s32_x4 (pn8, x0, -32, z0),
+ svstnt1_vnum (pn8, x0, -32, z0))
+
+/*
+** stnt1_vnum_s32_m36:
+** [^{]*
+** stnt1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_m36, svint32x4_t, int32_t,
+ svstnt1_vnum_s32_x4 (pn8, x0, -36, z0),
+ svstnt1_vnum (pn8, x0, -36, z0))
+
+/*
+** stnt1_vnum_s32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1w {z0\.s - z3\.s}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s32_x1, svint32x4_t, int32_t,
+ svstnt1_vnum_s32_x4 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s64_x2.c
new file mode 100644
index 0000000..79668a4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s64_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_s64_base:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_base, svint64x2_t, int64_t,
+ svstnt1_s64_x2 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_s64_index:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_index, svint64x2_t, int64_t,
+ svstnt1_s64_x2 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s64_1:
+** incb x0
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_1, svint64x2_t, int64_t,
+ svstnt1_s64_x2 (pn8, x0 + svcntd (), z0),
+ svstnt1 (pn8, x0 + svcntd (), z0))
+
+/*
+** stnt1_s64_2:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_2, svint64x2_t, int64_t,
+ svstnt1_s64_x2 (pn8, x0 + svcntd () * 2, z0),
+ svstnt1 (pn8, x0 + svcntd () * 2, z0))
+
+/*
+** stnt1_s64_14:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_14, svint64x2_t, int64_t,
+ svstnt1_s64_x2 (pn8, x0 + svcntd () * 14, z0),
+ svstnt1 (pn8, x0 + svcntd () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s64_16:
+** incb x0, all, mul #16
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_16, svint64x2_t, int64_t,
+ svstnt1_s64_x2 (pn8, x0 + svcntd () * 16, z0),
+ svstnt1 (pn8, x0 + svcntd () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s64_m1:
+** decb x0
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_m1, svint64x2_t, int64_t,
+ svstnt1_s64_x2 (pn8, x0 - svcntd (), z0),
+ svstnt1 (pn8, x0 - svcntd (), z0))
+
+/*
+** stnt1_s64_m2:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_m2, svint64x2_t, int64_t,
+ svstnt1_s64_x2 (pn8, x0 - svcntd () * 2, z0),
+ svstnt1 (pn8, x0 - svcntd () * 2, z0))
+
+/*
+** stnt1_s64_m16:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_m16, svint64x2_t, int64_t,
+ svstnt1_s64_x2 (pn8, x0 - svcntd () * 16, z0),
+ svstnt1 (pn8, x0 - svcntd () * 16, z0))
+
+/*
+** stnt1_s64_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_m18, svint64x2_t, int64_t,
+ svstnt1_s64_x2 (pn8, x0 - svcntd () * 18, z0),
+ svstnt1 (pn8, x0 - svcntd () * 18, z0))
+
+/*
+** stnt1_s64_z17:
+** mov [^\n]+
+** mov [^\n]+
+** stnt1d {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_z17, svint64x2_t, int64_t,
+ svstnt1_s64_x2 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_s64_z22:
+** stnt1d {z22\.d(?: - |, )z23\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_z22, svint64x2_t, int64_t,
+ svstnt1_s64_x2 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_s64_z28:
+** stnt1d {z28\.d(?: - |, )z29\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_z28, svint64x2_t, int64_t,
+ svstnt1_s64_x2 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_s64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_pn0, svint64x2_t, int64_t,
+ svstnt1_s64_x2 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_s64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_pn7, svint64x2_t, int64_t,
+ svstnt1_s64_x2 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_s64_pn15:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_pn15, svint64x2_t, int64_t,
+ svstnt1_s64_x2 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_s64_0:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_0, svint64x2_t, int64_t,
+ svstnt1_vnum_s64_x2 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s64_1:
+** incb x0
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_1, svint64x2_t, int64_t,
+ svstnt1_vnum_s64_x2 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/*
+** stnt1_vnum_s64_2:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_2, svint64x2_t, int64_t,
+ svstnt1_vnum_s64_x2 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/*
+** stnt1_vnum_s64_14:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_14, svint64x2_t, int64_t,
+ svstnt1_vnum_s64_x2 (pn8, x0, 14, z0),
+ svstnt1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s64_16:
+** incb x0, all, mul #16
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_16, svint64x2_t, int64_t,
+ svstnt1_vnum_s64_x2 (pn8, x0, 16, z0),
+ svstnt1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s64_m1:
+** decb x0
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_m1, svint64x2_t, int64_t,
+ svstnt1_vnum_s64_x2 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/*
+** stnt1_vnum_s64_m2:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_m2, svint64x2_t, int64_t,
+ svstnt1_vnum_s64_x2 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/*
+** stnt1_vnum_s64_m16:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_m16, svint64x2_t, int64_t,
+ svstnt1_vnum_s64_x2 (pn8, x0, -16, z0),
+ svstnt1_vnum (pn8, x0, -16, z0))
+
+/*
+** stnt1_vnum_s64_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_m18, svint64x2_t, int64_t,
+ svstnt1_vnum_s64_x2 (pn8, x0, -18, z0),
+ svstnt1_vnum (pn8, x0, -18, z0))
+
+/*
+** stnt1_vnum_s64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_x1, svint64x2_t, int64_t,
+ svstnt1_vnum_s64_x2 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s64_x4.c
new file mode 100644
index 0000000..0e5f6b4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s64_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_s64_base:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_base, svint64x4_t, int64_t,
+ svstnt1_s64_x4 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_s64_index:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_index, svint64x4_t, int64_t,
+ svstnt1_s64_x4 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s64_1:
+** incb x0
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_1, svint64x4_t, int64_t,
+ svstnt1_s64_x4 (pn8, x0 + svcntd (), z0),
+ svstnt1 (pn8, x0 + svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s64_2:
+** incb x0, all, mul #2
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_2, svint64x4_t, int64_t,
+ svstnt1_s64_x4 (pn8, x0 + svcntd () * 2, z0),
+ svstnt1 (pn8, x0 + svcntd () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s64_3:
+** incb x0, all, mul #3
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_3, svint64x4_t, int64_t,
+ svstnt1_s64_x4 (pn8, x0 + svcntd () * 3, z0),
+ svstnt1 (pn8, x0 + svcntd () * 3, z0))
+
+/*
+** stnt1_s64_4:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_4, svint64x4_t, int64_t,
+ svstnt1_s64_x4 (pn8, x0 + svcntd () * 4, z0),
+ svstnt1 (pn8, x0 + svcntd () * 4, z0))
+
+/*
+** stnt1_s64_28:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_28, svint64x4_t, int64_t,
+ svstnt1_s64_x4 (pn8, x0 + svcntd () * 28, z0),
+ svstnt1 (pn8, x0 + svcntd () * 28, z0))
+
+/*
+** stnt1_s64_32:
+** [^{]*
+** stnt1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_32, svint64x4_t, int64_t,
+ svstnt1_s64_x4 (pn8, x0 + svcntd () * 32, z0),
+ svstnt1 (pn8, x0 + svcntd () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s64_m1:
+** decb x0
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_m1, svint64x4_t, int64_t,
+ svstnt1_s64_x4 (pn8, x0 - svcntd (), z0),
+ svstnt1 (pn8, x0 - svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s64_m2:
+** decb x0, all, mul #2
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_m2, svint64x4_t, int64_t,
+ svstnt1_s64_x4 (pn8, x0 - svcntd () * 2, z0),
+ svstnt1 (pn8, x0 - svcntd () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s64_m3:
+** decb x0, all, mul #3
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_m3, svint64x4_t, int64_t,
+ svstnt1_s64_x4 (pn8, x0 - svcntd () * 3, z0),
+ svstnt1 (pn8, x0 - svcntd () * 3, z0))
+
+/*
+** stnt1_s64_m4:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_m4, svint64x4_t, int64_t,
+ svstnt1_s64_x4 (pn8, x0 - svcntd () * 4, z0),
+ svstnt1 (pn8, x0 - svcntd () * 4, z0))
+
+/*
+** stnt1_s64_m32:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_m32, svint64x4_t, int64_t,
+ svstnt1_s64_x4 (pn8, x0 - svcntd () * 32, z0),
+ svstnt1 (pn8, x0 - svcntd () * 32, z0))
+
+/*
+** stnt1_s64_m36:
+** [^{]*
+** stnt1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_m36, svint64x4_t, int64_t,
+ svstnt1_s64_x4 (pn8, x0 - svcntd () * 36, z0),
+ svstnt1 (pn8, x0 - svcntd () * 36, z0))
+
+/*
+** stnt1_s64_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1d {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_z17, svint64x4_t, int64_t,
+ svstnt1_s64_x4 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_s64_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1d {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_z22, svint64x4_t, int64_t,
+ svstnt1_s64_x4 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_s64_z28:
+** stnt1d {z28\.d - z31\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_z28, svint64x4_t, int64_t,
+ svstnt1_s64_x4 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_s64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1d {z0\.d - z3\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_pn0, svint64x4_t, int64_t,
+ svstnt1_s64_x4 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_s64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1d {z0\.d - z3\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_pn7, svint64x4_t, int64_t,
+ svstnt1_s64_x4 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_s64_pn15:
+** stnt1d {z0\.d - z3\.d}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s64_pn15, svint64x4_t, int64_t,
+ svstnt1_s64_x4 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_s64_0:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_0, svint64x4_t, int64_t,
+ svstnt1_vnum_s64_x4 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s64_1:
+** incb x0
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_1, svint64x4_t, int64_t,
+ svstnt1_vnum_s64_x4 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s64_2:
+** incb x0, all, mul #2
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_2, svint64x4_t, int64_t,
+ svstnt1_vnum_s64_x4 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s64_3:
+** incb x0, all, mul #3
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_3, svint64x4_t, int64_t,
+ svstnt1_vnum_s64_x4 (pn8, x0, 3, z0),
+ svstnt1_vnum (pn8, x0, 3, z0))
+
+/*
+** stnt1_vnum_s64_4:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_4, svint64x4_t, int64_t,
+ svstnt1_vnum_s64_x4 (pn8, x0, 4, z0),
+ svstnt1_vnum (pn8, x0, 4, z0))
+
+/*
+** stnt1_vnum_s64_28:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_28, svint64x4_t, int64_t,
+ svstnt1_vnum_s64_x4 (pn8, x0, 28, z0),
+ svstnt1_vnum (pn8, x0, 28, z0))
+
+/*
+** stnt1_vnum_s64_32:
+** [^{]*
+** stnt1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_32, svint64x4_t, int64_t,
+ svstnt1_vnum_s64_x4 (pn8, x0, 32, z0),
+ svstnt1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s64_m1:
+** decb x0
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_m1, svint64x4_t, int64_t,
+ svstnt1_vnum_s64_x4 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s64_m2:
+** decb x0, all, mul #2
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_m2, svint64x4_t, int64_t,
+ svstnt1_vnum_s64_x4 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s64_m3:
+** decb x0, all, mul #3
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_m3, svint64x4_t, int64_t,
+ svstnt1_vnum_s64_x4 (pn8, x0, -3, z0),
+ svstnt1_vnum (pn8, x0, -3, z0))
+
+/*
+** stnt1_vnum_s64_m4:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_m4, svint64x4_t, int64_t,
+ svstnt1_vnum_s64_x4 (pn8, x0, -4, z0),
+ svstnt1_vnum (pn8, x0, -4, z0))
+
+/*
+** stnt1_vnum_s64_m32:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_m32, svint64x4_t, int64_t,
+ svstnt1_vnum_s64_x4 (pn8, x0, -32, z0),
+ svstnt1_vnum (pn8, x0, -32, z0))
+
+/*
+** stnt1_vnum_s64_m36:
+** [^{]*
+** stnt1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_m36, svint64x4_t, int64_t,
+ svstnt1_vnum_s64_x4 (pn8, x0, -36, z0),
+ svstnt1_vnum (pn8, x0, -36, z0))
+
+/*
+** stnt1_vnum_s64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1d {z0\.d - z3\.d}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s64_x1, svint64x4_t, int64_t,
+ svstnt1_vnum_s64_x4 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s8_x2.c
new file mode 100644
index 0000000..5b243cd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s8_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_s8_base:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_base, svint8x2_t, int8_t,
+ svstnt1_s8_x2 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_s8_index:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, x1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_index, svint8x2_t, int8_t,
+ svstnt1_s8_x2 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s8_1:
+** incb x0
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_1, svint8x2_t, int8_t,
+ svstnt1_s8_x2 (pn8, x0 + svcntb (), z0),
+ svstnt1 (pn8, x0 + svcntb (), z0))
+
+/*
+** stnt1_s8_2:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_2, svint8x2_t, int8_t,
+ svstnt1_s8_x2 (pn8, x0 + svcntb () * 2, z0),
+ svstnt1 (pn8, x0 + svcntb () * 2, z0))
+
+/*
+** stnt1_s8_14:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_14, svint8x2_t, int8_t,
+ svstnt1_s8_x2 (pn8, x0 + svcntb () * 14, z0),
+ svstnt1 (pn8, x0 + svcntb () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s8_16:
+** incb x0, all, mul #16
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_16, svint8x2_t, int8_t,
+ svstnt1_s8_x2 (pn8, x0 + svcntb () * 16, z0),
+ svstnt1 (pn8, x0 + svcntb () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s8_m1:
+** decb x0
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_m1, svint8x2_t, int8_t,
+ svstnt1_s8_x2 (pn8, x0 - svcntb (), z0),
+ svstnt1 (pn8, x0 - svcntb (), z0))
+
+/*
+** stnt1_s8_m2:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_m2, svint8x2_t, int8_t,
+ svstnt1_s8_x2 (pn8, x0 - svcntb () * 2, z0),
+ svstnt1 (pn8, x0 - svcntb () * 2, z0))
+
+/*
+** stnt1_s8_m16:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_m16, svint8x2_t, int8_t,
+ svstnt1_s8_x2 (pn8, x0 - svcntb () * 16, z0),
+ svstnt1 (pn8, x0 - svcntb () * 16, z0))
+
+/*
+** stnt1_s8_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_m18, svint8x2_t, int8_t,
+ svstnt1_s8_x2 (pn8, x0 - svcntb () * 18, z0),
+ svstnt1 (pn8, x0 - svcntb () * 18, z0))
+
+/*
+** stnt1_s8_z17:
+** mov [^\n]+
+** mov [^\n]+
+** stnt1b {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_z17, svint8x2_t, int8_t,
+ svstnt1_s8_x2 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_s8_z22:
+** stnt1b {z22\.b(?: - |, )z23\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_z22, svint8x2_t, int8_t,
+ svstnt1_s8_x2 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_s8_z28:
+** stnt1b {z28\.b(?: - |, )z29\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_z28, svint8x2_t, int8_t,
+ svstnt1_s8_x2 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_s8_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_pn0, svint8x2_t, int8_t,
+ svstnt1_s8_x2 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_s8_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_pn7, svint8x2_t, int8_t,
+ svstnt1_s8_x2 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_s8_pn15:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_pn15, svint8x2_t, int8_t,
+ svstnt1_s8_x2 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_s8_0:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_0, svint8x2_t, int8_t,
+ svstnt1_vnum_s8_x2 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s8_1:
+** incb x0
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_1, svint8x2_t, int8_t,
+ svstnt1_vnum_s8_x2 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/*
+** stnt1_vnum_s8_2:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_2, svint8x2_t, int8_t,
+ svstnt1_vnum_s8_x2 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/*
+** stnt1_vnum_s8_14:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_14, svint8x2_t, int8_t,
+ svstnt1_vnum_s8_x2 (pn8, x0, 14, z0),
+ svstnt1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s8_16:
+** incb x0, all, mul #16
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_16, svint8x2_t, int8_t,
+ svstnt1_vnum_s8_x2 (pn8, x0, 16, z0),
+ svstnt1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s8_m1:
+** decb x0
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_m1, svint8x2_t, int8_t,
+ svstnt1_vnum_s8_x2 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/*
+** stnt1_vnum_s8_m2:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_m2, svint8x2_t, int8_t,
+ svstnt1_vnum_s8_x2 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/*
+** stnt1_vnum_s8_m16:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_m16, svint8x2_t, int8_t,
+ svstnt1_vnum_s8_x2 (pn8, x0, -16, z0),
+ svstnt1_vnum (pn8, x0, -16, z0))
+
+/*
+** stnt1_vnum_s8_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_m18, svint8x2_t, int8_t,
+ svstnt1_vnum_s8_x2 (pn8, x0, -18, z0),
+ svstnt1_vnum (pn8, x0, -18, z0))
+
+/*
+** stnt1_vnum_s8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_x1, svint8x2_t, int8_t,
+ svstnt1_vnum_s8_x2 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s8_x4.c
new file mode 100644
index 0000000..f966154
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_s8_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_s8_base:
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_base, svint8x4_t, int8_t,
+ svstnt1_s8_x4 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_s8_index:
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, x1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_index, svint8x4_t, int8_t,
+ svstnt1_s8_x4 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s8_1:
+** incb x0
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_1, svint8x4_t, int8_t,
+ svstnt1_s8_x4 (pn8, x0 + svcntb (), z0),
+ svstnt1 (pn8, x0 + svcntb (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s8_2:
+** incb x0, all, mul #2
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_2, svint8x4_t, int8_t,
+ svstnt1_s8_x4 (pn8, x0 + svcntb () * 2, z0),
+ svstnt1 (pn8, x0 + svcntb () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s8_3:
+** incb x0, all, mul #3
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_3, svint8x4_t, int8_t,
+ svstnt1_s8_x4 (pn8, x0 + svcntb () * 3, z0),
+ svstnt1 (pn8, x0 + svcntb () * 3, z0))
+
+/*
+** stnt1_s8_4:
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_4, svint8x4_t, int8_t,
+ svstnt1_s8_x4 (pn8, x0 + svcntb () * 4, z0),
+ svstnt1 (pn8, x0 + svcntb () * 4, z0))
+
+/*
+** stnt1_s8_28:
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_28, svint8x4_t, int8_t,
+ svstnt1_s8_x4 (pn8, x0 + svcntb () * 28, z0),
+ svstnt1 (pn8, x0 + svcntb () * 28, z0))
+
+/*
+** stnt1_s8_32:
+** [^{]*
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_32, svint8x4_t, int8_t,
+ svstnt1_s8_x4 (pn8, x0 + svcntb () * 32, z0),
+ svstnt1 (pn8, x0 + svcntb () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s8_m1:
+** decb x0
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_m1, svint8x4_t, int8_t,
+ svstnt1_s8_x4 (pn8, x0 - svcntb (), z0),
+ svstnt1 (pn8, x0 - svcntb (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s8_m2:
+** decb x0, all, mul #2
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_m2, svint8x4_t, int8_t,
+ svstnt1_s8_x4 (pn8, x0 - svcntb () * 2, z0),
+ svstnt1 (pn8, x0 - svcntb () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_s8_m3:
+** decb x0, all, mul #3
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_m3, svint8x4_t, int8_t,
+ svstnt1_s8_x4 (pn8, x0 - svcntb () * 3, z0),
+ svstnt1 (pn8, x0 - svcntb () * 3, z0))
+
+/*
+** stnt1_s8_m4:
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_m4, svint8x4_t, int8_t,
+ svstnt1_s8_x4 (pn8, x0 - svcntb () * 4, z0),
+ svstnt1 (pn8, x0 - svcntb () * 4, z0))
+
+/*
+** stnt1_s8_m32:
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_m32, svint8x4_t, int8_t,
+ svstnt1_s8_x4 (pn8, x0 - svcntb () * 32, z0),
+ svstnt1 (pn8, x0 - svcntb () * 32, z0))
+
+/*
+** stnt1_s8_m36:
+** [^{]*
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_m36, svint8x4_t, int8_t,
+ svstnt1_s8_x4 (pn8, x0 - svcntb () * 36, z0),
+ svstnt1 (pn8, x0 - svcntb () * 36, z0))
+
+/*
+** stnt1_s8_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1b {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_z17, svint8x4_t, int8_t,
+ svstnt1_s8_x4 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_s8_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1b {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_z22, svint8x4_t, int8_t,
+ svstnt1_s8_x4 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_s8_z28:
+** stnt1b {z28\.b - z31\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_z28, svint8x4_t, int8_t,
+ svstnt1_s8_x4 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_s8_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1b {z0\.b - z3\.b}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_pn0, svint8x4_t, int8_t,
+ svstnt1_s8_x4 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_s8_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1b {z0\.b - z3\.b}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_pn7, svint8x4_t, int8_t,
+ svstnt1_s8_x4 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_s8_pn15:
+** stnt1b {z0\.b - z3\.b}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_s8_pn15, svint8x4_t, int8_t,
+ svstnt1_s8_x4 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_s8_0:
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_0, svint8x4_t, int8_t,
+ svstnt1_vnum_s8_x4 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s8_1:
+** incb x0
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_1, svint8x4_t, int8_t,
+ svstnt1_vnum_s8_x4 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s8_2:
+** incb x0, all, mul #2
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_2, svint8x4_t, int8_t,
+ svstnt1_vnum_s8_x4 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s8_3:
+** incb x0, all, mul #3
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_3, svint8x4_t, int8_t,
+ svstnt1_vnum_s8_x4 (pn8, x0, 3, z0),
+ svstnt1_vnum (pn8, x0, 3, z0))
+
+/*
+** stnt1_vnum_s8_4:
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_4, svint8x4_t, int8_t,
+ svstnt1_vnum_s8_x4 (pn8, x0, 4, z0),
+ svstnt1_vnum (pn8, x0, 4, z0))
+
+/*
+** stnt1_vnum_s8_28:
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_28, svint8x4_t, int8_t,
+ svstnt1_vnum_s8_x4 (pn8, x0, 28, z0),
+ svstnt1_vnum (pn8, x0, 28, z0))
+
+/*
+** stnt1_vnum_s8_32:
+** [^{]*
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_32, svint8x4_t, int8_t,
+ svstnt1_vnum_s8_x4 (pn8, x0, 32, z0),
+ svstnt1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s8_m1:
+** decb x0
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_m1, svint8x4_t, int8_t,
+ svstnt1_vnum_s8_x4 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s8_m2:
+** decb x0, all, mul #2
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_m2, svint8x4_t, int8_t,
+ svstnt1_vnum_s8_x4 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_s8_m3:
+** decb x0, all, mul #3
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_m3, svint8x4_t, int8_t,
+ svstnt1_vnum_s8_x4 (pn8, x0, -3, z0),
+ svstnt1_vnum (pn8, x0, -3, z0))
+
+/*
+** stnt1_vnum_s8_m4:
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_m4, svint8x4_t, int8_t,
+ svstnt1_vnum_s8_x4 (pn8, x0, -4, z0),
+ svstnt1_vnum (pn8, x0, -4, z0))
+
+/*
+** stnt1_vnum_s8_m32:
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_m32, svint8x4_t, int8_t,
+ svstnt1_vnum_s8_x4 (pn8, x0, -32, z0),
+ svstnt1_vnum (pn8, x0, -32, z0))
+
+/*
+** stnt1_vnum_s8_m36:
+** [^{]*
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_m36, svint8x4_t, int8_t,
+ svstnt1_vnum_s8_x4 (pn8, x0, -36, z0),
+ svstnt1_vnum (pn8, x0, -36, z0))
+
+/*
+** stnt1_vnum_s8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1b {z0\.b - z3\.b}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_s8_x1, svint8x4_t, int8_t,
+ svstnt1_vnum_s8_x4 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u16_x2.c
new file mode 100644
index 0000000..ec3387ed
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u16_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_u16_base:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_base, svuint16x2_t, uint16_t,
+ svstnt1_u16_x2 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_u16_index:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_index, svuint16x2_t, uint16_t,
+ svstnt1_u16_x2 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u16_1:
+** incb x0
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_1, svuint16x2_t, uint16_t,
+ svstnt1_u16_x2 (pn8, x0 + svcnth (), z0),
+ svstnt1 (pn8, x0 + svcnth (), z0))
+
+/*
+** stnt1_u16_2:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_2, svuint16x2_t, uint16_t,
+ svstnt1_u16_x2 (pn8, x0 + svcnth () * 2, z0),
+ svstnt1 (pn8, x0 + svcnth () * 2, z0))
+
+/*
+** stnt1_u16_14:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_14, svuint16x2_t, uint16_t,
+ svstnt1_u16_x2 (pn8, x0 + svcnth () * 14, z0),
+ svstnt1 (pn8, x0 + svcnth () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u16_16:
+** incb x0, all, mul #16
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_16, svuint16x2_t, uint16_t,
+ svstnt1_u16_x2 (pn8, x0 + svcnth () * 16, z0),
+ svstnt1 (pn8, x0 + svcnth () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u16_m1:
+** decb x0
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_m1, svuint16x2_t, uint16_t,
+ svstnt1_u16_x2 (pn8, x0 - svcnth (), z0),
+ svstnt1 (pn8, x0 - svcnth (), z0))
+
+/*
+** stnt1_u16_m2:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_m2, svuint16x2_t, uint16_t,
+ svstnt1_u16_x2 (pn8, x0 - svcnth () * 2, z0),
+ svstnt1 (pn8, x0 - svcnth () * 2, z0))
+
+/*
+** stnt1_u16_m16:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_m16, svuint16x2_t, uint16_t,
+ svstnt1_u16_x2 (pn8, x0 - svcnth () * 16, z0),
+ svstnt1 (pn8, x0 - svcnth () * 16, z0))
+
+/*
+** stnt1_u16_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_m18, svuint16x2_t, uint16_t,
+ svstnt1_u16_x2 (pn8, x0 - svcnth () * 18, z0),
+ svstnt1 (pn8, x0 - svcnth () * 18, z0))
+
+/*
+** stnt1_u16_z17:
+** mov [^\n]+
+** mov [^\n]+
+** stnt1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_z17, svuint16x2_t, uint16_t,
+ svstnt1_u16_x2 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_u16_z22:
+** stnt1h {z22\.h(?: - |, )z23\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_z22, svuint16x2_t, uint16_t,
+ svstnt1_u16_x2 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_u16_z28:
+** stnt1h {z28\.h(?: - |, )z29\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_z28, svuint16x2_t, uint16_t,
+ svstnt1_u16_x2 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_u16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_pn0, svuint16x2_t, uint16_t,
+ svstnt1_u16_x2 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_u16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_pn7, svuint16x2_t, uint16_t,
+ svstnt1_u16_x2 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_u16_pn15:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_pn15, svuint16x2_t, uint16_t,
+ svstnt1_u16_x2 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_u16_0:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_0, svuint16x2_t, uint16_t,
+ svstnt1_vnum_u16_x2 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u16_1:
+** incb x0
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_1, svuint16x2_t, uint16_t,
+ svstnt1_vnum_u16_x2 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/*
+** stnt1_vnum_u16_2:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_2, svuint16x2_t, uint16_t,
+ svstnt1_vnum_u16_x2 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/*
+** stnt1_vnum_u16_14:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_14, svuint16x2_t, uint16_t,
+ svstnt1_vnum_u16_x2 (pn8, x0, 14, z0),
+ svstnt1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u16_16:
+** incb x0, all, mul #16
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_16, svuint16x2_t, uint16_t,
+ svstnt1_vnum_u16_x2 (pn8, x0, 16, z0),
+ svstnt1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u16_m1:
+** decb x0
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_m1, svuint16x2_t, uint16_t,
+ svstnt1_vnum_u16_x2 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/*
+** stnt1_vnum_u16_m2:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_m2, svuint16x2_t, uint16_t,
+ svstnt1_vnum_u16_x2 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/*
+** stnt1_vnum_u16_m16:
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_m16, svuint16x2_t, uint16_t,
+ svstnt1_vnum_u16_x2 (pn8, x0, -16, z0),
+ svstnt1_vnum (pn8, x0, -16, z0))
+
+/*
+** stnt1_vnum_u16_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_m18, svuint16x2_t, uint16_t,
+ svstnt1_vnum_u16_x2 (pn8, x0, -18, z0),
+ svstnt1_vnum (pn8, x0, -18, z0))
+
+/*
+** stnt1_vnum_u16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1h {z0\.h(?: - |, )z1\.h}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_x1, svuint16x2_t, uint16_t,
+ svstnt1_vnum_u16_x2 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u16_x4.c
new file mode 100644
index 0000000..0ce35c0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u16_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_u16_base:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_base, svuint16x4_t, uint16_t,
+ svstnt1_u16_x4 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_u16_index:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, x1, lsl #?1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_index, svuint16x4_t, uint16_t,
+ svstnt1_u16_x4 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u16_1:
+** incb x0
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_1, svuint16x4_t, uint16_t,
+ svstnt1_u16_x4 (pn8, x0 + svcnth (), z0),
+ svstnt1 (pn8, x0 + svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u16_2:
+** incb x0, all, mul #2
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_2, svuint16x4_t, uint16_t,
+ svstnt1_u16_x4 (pn8, x0 + svcnth () * 2, z0),
+ svstnt1 (pn8, x0 + svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u16_3:
+** incb x0, all, mul #3
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_3, svuint16x4_t, uint16_t,
+ svstnt1_u16_x4 (pn8, x0 + svcnth () * 3, z0),
+ svstnt1 (pn8, x0 + svcnth () * 3, z0))
+
+/*
+** stnt1_u16_4:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_4, svuint16x4_t, uint16_t,
+ svstnt1_u16_x4 (pn8, x0 + svcnth () * 4, z0),
+ svstnt1 (pn8, x0 + svcnth () * 4, z0))
+
+/*
+** stnt1_u16_28:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_28, svuint16x4_t, uint16_t,
+ svstnt1_u16_x4 (pn8, x0 + svcnth () * 28, z0),
+ svstnt1 (pn8, x0 + svcnth () * 28, z0))
+
+/*
+** stnt1_u16_32:
+** [^{]*
+** stnt1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_32, svuint16x4_t, uint16_t,
+ svstnt1_u16_x4 (pn8, x0 + svcnth () * 32, z0),
+ svstnt1 (pn8, x0 + svcnth () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u16_m1:
+** decb x0
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_m1, svuint16x4_t, uint16_t,
+ svstnt1_u16_x4 (pn8, x0 - svcnth (), z0),
+ svstnt1 (pn8, x0 - svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u16_m2:
+** decb x0, all, mul #2
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_m2, svuint16x4_t, uint16_t,
+ svstnt1_u16_x4 (pn8, x0 - svcnth () * 2, z0),
+ svstnt1 (pn8, x0 - svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u16_m3:
+** decb x0, all, mul #3
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_m3, svuint16x4_t, uint16_t,
+ svstnt1_u16_x4 (pn8, x0 - svcnth () * 3, z0),
+ svstnt1 (pn8, x0 - svcnth () * 3, z0))
+
+/*
+** stnt1_u16_m4:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_m4, svuint16x4_t, uint16_t,
+ svstnt1_u16_x4 (pn8, x0 - svcnth () * 4, z0),
+ svstnt1 (pn8, x0 - svcnth () * 4, z0))
+
+/*
+** stnt1_u16_m32:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_m32, svuint16x4_t, uint16_t,
+ svstnt1_u16_x4 (pn8, x0 - svcnth () * 32, z0),
+ svstnt1 (pn8, x0 - svcnth () * 32, z0))
+
+/*
+** stnt1_u16_m36:
+** [^{]*
+** stnt1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_m36, svuint16x4_t, uint16_t,
+ svstnt1_u16_x4 (pn8, x0 - svcnth () * 36, z0),
+ svstnt1 (pn8, x0 - svcnth () * 36, z0))
+
+/*
+** stnt1_u16_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_z17, svuint16x4_t, uint16_t,
+ svstnt1_u16_x4 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_u16_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1h {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_z22, svuint16x4_t, uint16_t,
+ svstnt1_u16_x4 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_u16_z28:
+** stnt1h {z28\.h - z31\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_z28, svuint16x4_t, uint16_t,
+ svstnt1_u16_x4 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_u16_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1h {z0\.h - z3\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_pn0, svuint16x4_t, uint16_t,
+ svstnt1_u16_x4 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_u16_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1h {z0\.h - z3\.h}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_pn7, svuint16x4_t, uint16_t,
+ svstnt1_u16_x4 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_u16_pn15:
+** stnt1h {z0\.h - z3\.h}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u16_pn15, svuint16x4_t, uint16_t,
+ svstnt1_u16_x4 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_u16_0:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_0, svuint16x4_t, uint16_t,
+ svstnt1_vnum_u16_x4 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u16_1:
+** incb x0
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_1, svuint16x4_t, uint16_t,
+ svstnt1_vnum_u16_x4 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u16_2:
+** incb x0, all, mul #2
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_2, svuint16x4_t, uint16_t,
+ svstnt1_vnum_u16_x4 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u16_3:
+** incb x0, all, mul #3
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_3, svuint16x4_t, uint16_t,
+ svstnt1_vnum_u16_x4 (pn8, x0, 3, z0),
+ svstnt1_vnum (pn8, x0, 3, z0))
+
+/*
+** stnt1_vnum_u16_4:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_4, svuint16x4_t, uint16_t,
+ svstnt1_vnum_u16_x4 (pn8, x0, 4, z0),
+ svstnt1_vnum (pn8, x0, 4, z0))
+
+/*
+** stnt1_vnum_u16_28:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_28, svuint16x4_t, uint16_t,
+ svstnt1_vnum_u16_x4 (pn8, x0, 28, z0),
+ svstnt1_vnum (pn8, x0, 28, z0))
+
+/*
+** stnt1_vnum_u16_32:
+** [^{]*
+** stnt1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_32, svuint16x4_t, uint16_t,
+ svstnt1_vnum_u16_x4 (pn8, x0, 32, z0),
+ svstnt1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u16_m1:
+** decb x0
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_m1, svuint16x4_t, uint16_t,
+ svstnt1_vnum_u16_x4 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u16_m2:
+** decb x0, all, mul #2
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_m2, svuint16x4_t, uint16_t,
+ svstnt1_vnum_u16_x4 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u16_m3:
+** decb x0, all, mul #3
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_m3, svuint16x4_t, uint16_t,
+ svstnt1_vnum_u16_x4 (pn8, x0, -3, z0),
+ svstnt1_vnum (pn8, x0, -3, z0))
+
+/*
+** stnt1_vnum_u16_m4:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_m4, svuint16x4_t, uint16_t,
+ svstnt1_vnum_u16_x4 (pn8, x0, -4, z0),
+ svstnt1_vnum (pn8, x0, -4, z0))
+
+/*
+** stnt1_vnum_u16_m32:
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_m32, svuint16x4_t, uint16_t,
+ svstnt1_vnum_u16_x4 (pn8, x0, -32, z0),
+ svstnt1_vnum (pn8, x0, -32, z0))
+
+/*
+** stnt1_vnum_u16_m36:
+** [^{]*
+** stnt1h {z0\.h - z3\.h}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_m36, svuint16x4_t, uint16_t,
+ svstnt1_vnum_u16_x4 (pn8, x0, -36, z0),
+ svstnt1_vnum (pn8, x0, -36, z0))
+
+/*
+** stnt1_vnum_u16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1h {z0\.h - z3\.h}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1h {z0\.h - z3\.h}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u16_x1, svuint16x4_t, uint16_t,
+ svstnt1_vnum_u16_x4 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u32_x2.c
new file mode 100644
index 0000000..e185e6d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u32_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_u32_base:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_base, svuint32x2_t, uint32_t,
+ svstnt1_u32_x2 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_u32_index:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_index, svuint32x2_t, uint32_t,
+ svstnt1_u32_x2 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u32_1:
+** incb x0
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_1, svuint32x2_t, uint32_t,
+ svstnt1_u32_x2 (pn8, x0 + svcntw (), z0),
+ svstnt1 (pn8, x0 + svcntw (), z0))
+
+/*
+** stnt1_u32_2:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_2, svuint32x2_t, uint32_t,
+ svstnt1_u32_x2 (pn8, x0 + svcntw () * 2, z0),
+ svstnt1 (pn8, x0 + svcntw () * 2, z0))
+
+/*
+** stnt1_u32_14:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_14, svuint32x2_t, uint32_t,
+ svstnt1_u32_x2 (pn8, x0 + svcntw () * 14, z0),
+ svstnt1 (pn8, x0 + svcntw () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u32_16:
+** incb x0, all, mul #16
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_16, svuint32x2_t, uint32_t,
+ svstnt1_u32_x2 (pn8, x0 + svcntw () * 16, z0),
+ svstnt1 (pn8, x0 + svcntw () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u32_m1:
+** decb x0
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_m1, svuint32x2_t, uint32_t,
+ svstnt1_u32_x2 (pn8, x0 - svcntw (), z0),
+ svstnt1 (pn8, x0 - svcntw (), z0))
+
+/*
+** stnt1_u32_m2:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_m2, svuint32x2_t, uint32_t,
+ svstnt1_u32_x2 (pn8, x0 - svcntw () * 2, z0),
+ svstnt1 (pn8, x0 - svcntw () * 2, z0))
+
+/*
+** stnt1_u32_m16:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_m16, svuint32x2_t, uint32_t,
+ svstnt1_u32_x2 (pn8, x0 - svcntw () * 16, z0),
+ svstnt1 (pn8, x0 - svcntw () * 16, z0))
+
+/*
+** stnt1_u32_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_m18, svuint32x2_t, uint32_t,
+ svstnt1_u32_x2 (pn8, x0 - svcntw () * 18, z0),
+ svstnt1 (pn8, x0 - svcntw () * 18, z0))
+
+/*
+** stnt1_u32_z17:
+** mov [^\n]+
+** mov [^\n]+
+** stnt1w {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_z17, svuint32x2_t, uint32_t,
+ svstnt1_u32_x2 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_u32_z22:
+** stnt1w {z22\.s(?: - |, )z23\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_z22, svuint32x2_t, uint32_t,
+ svstnt1_u32_x2 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_u32_z28:
+** stnt1w {z28\.s(?: - |, )z29\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_z28, svuint32x2_t, uint32_t,
+ svstnt1_u32_x2 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_u32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_pn0, svuint32x2_t, uint32_t,
+ svstnt1_u32_x2 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_u32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_pn7, svuint32x2_t, uint32_t,
+ svstnt1_u32_x2 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_u32_pn15:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_pn15, svuint32x2_t, uint32_t,
+ svstnt1_u32_x2 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_u32_0:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_0, svuint32x2_t, uint32_t,
+ svstnt1_vnum_u32_x2 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u32_1:
+** incb x0
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_1, svuint32x2_t, uint32_t,
+ svstnt1_vnum_u32_x2 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/*
+** stnt1_vnum_u32_2:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_2, svuint32x2_t, uint32_t,
+ svstnt1_vnum_u32_x2 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/*
+** stnt1_vnum_u32_14:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_14, svuint32x2_t, uint32_t,
+ svstnt1_vnum_u32_x2 (pn8, x0, 14, z0),
+ svstnt1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u32_16:
+** incb x0, all, mul #16
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_16, svuint32x2_t, uint32_t,
+ svstnt1_vnum_u32_x2 (pn8, x0, 16, z0),
+ svstnt1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u32_m1:
+** decb x0
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_m1, svuint32x2_t, uint32_t,
+ svstnt1_vnum_u32_x2 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/*
+** stnt1_vnum_u32_m2:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_m2, svuint32x2_t, uint32_t,
+ svstnt1_vnum_u32_x2 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/*
+** stnt1_vnum_u32_m16:
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_m16, svuint32x2_t, uint32_t,
+ svstnt1_vnum_u32_x2 (pn8, x0, -16, z0),
+ svstnt1_vnum (pn8, x0, -16, z0))
+
+/*
+** stnt1_vnum_u32_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_m18, svuint32x2_t, uint32_t,
+ svstnt1_vnum_u32_x2 (pn8, x0, -18, z0),
+ svstnt1_vnum (pn8, x0, -18, z0))
+
+/*
+** stnt1_vnum_u32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1w {z0\.s(?: - |, )z1\.s}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_x1, svuint32x2_t, uint32_t,
+ svstnt1_vnum_u32_x2 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u32_x4.c
new file mode 100644
index 0000000..3c77f3a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u32_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_u32_base:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_base, svuint32x4_t, uint32_t,
+ svstnt1_u32_x4 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_u32_index:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, x1, lsl #?2\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_index, svuint32x4_t, uint32_t,
+ svstnt1_u32_x4 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u32_1:
+** incb x0
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_1, svuint32x4_t, uint32_t,
+ svstnt1_u32_x4 (pn8, x0 + svcntw (), z0),
+ svstnt1 (pn8, x0 + svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u32_2:
+** incb x0, all, mul #2
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_2, svuint32x4_t, uint32_t,
+ svstnt1_u32_x4 (pn8, x0 + svcntw () * 2, z0),
+ svstnt1 (pn8, x0 + svcntw () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u32_3:
+** incb x0, all, mul #3
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_3, svuint32x4_t, uint32_t,
+ svstnt1_u32_x4 (pn8, x0 + svcntw () * 3, z0),
+ svstnt1 (pn8, x0 + svcntw () * 3, z0))
+
+/*
+** stnt1_u32_4:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_4, svuint32x4_t, uint32_t,
+ svstnt1_u32_x4 (pn8, x0 + svcntw () * 4, z0),
+ svstnt1 (pn8, x0 + svcntw () * 4, z0))
+
+/*
+** stnt1_u32_28:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_28, svuint32x4_t, uint32_t,
+ svstnt1_u32_x4 (pn8, x0 + svcntw () * 28, z0),
+ svstnt1 (pn8, x0 + svcntw () * 28, z0))
+
+/*
+** stnt1_u32_32:
+** [^{]*
+** stnt1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_32, svuint32x4_t, uint32_t,
+ svstnt1_u32_x4 (pn8, x0 + svcntw () * 32, z0),
+ svstnt1 (pn8, x0 + svcntw () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u32_m1:
+** decb x0
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_m1, svuint32x4_t, uint32_t,
+ svstnt1_u32_x4 (pn8, x0 - svcntw (), z0),
+ svstnt1 (pn8, x0 - svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u32_m2:
+** decb x0, all, mul #2
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_m2, svuint32x4_t, uint32_t,
+ svstnt1_u32_x4 (pn8, x0 - svcntw () * 2, z0),
+ svstnt1 (pn8, x0 - svcntw () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u32_m3:
+** decb x0, all, mul #3
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_m3, svuint32x4_t, uint32_t,
+ svstnt1_u32_x4 (pn8, x0 - svcntw () * 3, z0),
+ svstnt1 (pn8, x0 - svcntw () * 3, z0))
+
+/*
+** stnt1_u32_m4:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_m4, svuint32x4_t, uint32_t,
+ svstnt1_u32_x4 (pn8, x0 - svcntw () * 4, z0),
+ svstnt1 (pn8, x0 - svcntw () * 4, z0))
+
+/*
+** stnt1_u32_m32:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_m32, svuint32x4_t, uint32_t,
+ svstnt1_u32_x4 (pn8, x0 - svcntw () * 32, z0),
+ svstnt1 (pn8, x0 - svcntw () * 32, z0))
+
+/*
+** stnt1_u32_m36:
+** [^{]*
+** stnt1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_m36, svuint32x4_t, uint32_t,
+ svstnt1_u32_x4 (pn8, x0 - svcntw () * 36, z0),
+ svstnt1 (pn8, x0 - svcntw () * 36, z0))
+
+/*
+** stnt1_u32_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1w {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_z17, svuint32x4_t, uint32_t,
+ svstnt1_u32_x4 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_u32_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1w {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_z22, svuint32x4_t, uint32_t,
+ svstnt1_u32_x4 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_u32_z28:
+** stnt1w {z28\.s - z31\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_z28, svuint32x4_t, uint32_t,
+ svstnt1_u32_x4 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_u32_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1w {z0\.s - z3\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_pn0, svuint32x4_t, uint32_t,
+ svstnt1_u32_x4 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_u32_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1w {z0\.s - z3\.s}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_pn7, svuint32x4_t, uint32_t,
+ svstnt1_u32_x4 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_u32_pn15:
+** stnt1w {z0\.s - z3\.s}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u32_pn15, svuint32x4_t, uint32_t,
+ svstnt1_u32_x4 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_u32_0:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_0, svuint32x4_t, uint32_t,
+ svstnt1_vnum_u32_x4 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u32_1:
+** incb x0
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_1, svuint32x4_t, uint32_t,
+ svstnt1_vnum_u32_x4 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u32_2:
+** incb x0, all, mul #2
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_2, svuint32x4_t, uint32_t,
+ svstnt1_vnum_u32_x4 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u32_3:
+** incb x0, all, mul #3
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_3, svuint32x4_t, uint32_t,
+ svstnt1_vnum_u32_x4 (pn8, x0, 3, z0),
+ svstnt1_vnum (pn8, x0, 3, z0))
+
+/*
+** stnt1_vnum_u32_4:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_4, svuint32x4_t, uint32_t,
+ svstnt1_vnum_u32_x4 (pn8, x0, 4, z0),
+ svstnt1_vnum (pn8, x0, 4, z0))
+
+/*
+** stnt1_vnum_u32_28:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_28, svuint32x4_t, uint32_t,
+ svstnt1_vnum_u32_x4 (pn8, x0, 28, z0),
+ svstnt1_vnum (pn8, x0, 28, z0))
+
+/*
+** stnt1_vnum_u32_32:
+** [^{]*
+** stnt1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_32, svuint32x4_t, uint32_t,
+ svstnt1_vnum_u32_x4 (pn8, x0, 32, z0),
+ svstnt1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u32_m1:
+** decb x0
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_m1, svuint32x4_t, uint32_t,
+ svstnt1_vnum_u32_x4 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u32_m2:
+** decb x0, all, mul #2
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_m2, svuint32x4_t, uint32_t,
+ svstnt1_vnum_u32_x4 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u32_m3:
+** decb x0, all, mul #3
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_m3, svuint32x4_t, uint32_t,
+ svstnt1_vnum_u32_x4 (pn8, x0, -3, z0),
+ svstnt1_vnum (pn8, x0, -3, z0))
+
+/*
+** stnt1_vnum_u32_m4:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_m4, svuint32x4_t, uint32_t,
+ svstnt1_vnum_u32_x4 (pn8, x0, -4, z0),
+ svstnt1_vnum (pn8, x0, -4, z0))
+
+/*
+** stnt1_vnum_u32_m32:
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_m32, svuint32x4_t, uint32_t,
+ svstnt1_vnum_u32_x4 (pn8, x0, -32, z0),
+ svstnt1_vnum (pn8, x0, -32, z0))
+
+/*
+** stnt1_vnum_u32_m36:
+** [^{]*
+** stnt1w {z0\.s - z3\.s}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_m36, svuint32x4_t, uint32_t,
+ svstnt1_vnum_u32_x4 (pn8, x0, -36, z0),
+ svstnt1_vnum (pn8, x0, -36, z0))
+
+/*
+** stnt1_vnum_u32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1w {z0\.s - z3\.s}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1w {z0\.s - z3\.s}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u32_x1, svuint32x4_t, uint32_t,
+ svstnt1_vnum_u32_x4 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u64_x2.c
new file mode 100644
index 0000000..f23c484
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u64_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_u64_base:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_base, svuint64x2_t, uint64_t,
+ svstnt1_u64_x2 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_u64_index:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_index, svuint64x2_t, uint64_t,
+ svstnt1_u64_x2 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u64_1:
+** incb x0
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_1, svuint64x2_t, uint64_t,
+ svstnt1_u64_x2 (pn8, x0 + svcntd (), z0),
+ svstnt1 (pn8, x0 + svcntd (), z0))
+
+/*
+** stnt1_u64_2:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_2, svuint64x2_t, uint64_t,
+ svstnt1_u64_x2 (pn8, x0 + svcntd () * 2, z0),
+ svstnt1 (pn8, x0 + svcntd () * 2, z0))
+
+/*
+** stnt1_u64_14:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_14, svuint64x2_t, uint64_t,
+ svstnt1_u64_x2 (pn8, x0 + svcntd () * 14, z0),
+ svstnt1 (pn8, x0 + svcntd () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u64_16:
+** incb x0, all, mul #16
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_16, svuint64x2_t, uint64_t,
+ svstnt1_u64_x2 (pn8, x0 + svcntd () * 16, z0),
+ svstnt1 (pn8, x0 + svcntd () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u64_m1:
+** decb x0
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_m1, svuint64x2_t, uint64_t,
+ svstnt1_u64_x2 (pn8, x0 - svcntd (), z0),
+ svstnt1 (pn8, x0 - svcntd (), z0))
+
+/*
+** stnt1_u64_m2:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_m2, svuint64x2_t, uint64_t,
+ svstnt1_u64_x2 (pn8, x0 - svcntd () * 2, z0),
+ svstnt1 (pn8, x0 - svcntd () * 2, z0))
+
+/*
+** stnt1_u64_m16:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_m16, svuint64x2_t, uint64_t,
+ svstnt1_u64_x2 (pn8, x0 - svcntd () * 16, z0),
+ svstnt1 (pn8, x0 - svcntd () * 16, z0))
+
+/*
+** stnt1_u64_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_m18, svuint64x2_t, uint64_t,
+ svstnt1_u64_x2 (pn8, x0 - svcntd () * 18, z0),
+ svstnt1 (pn8, x0 - svcntd () * 18, z0))
+
+/*
+** stnt1_u64_z17:
+** mov [^\n]+
+** mov [^\n]+
+** stnt1d {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_z17, svuint64x2_t, uint64_t,
+ svstnt1_u64_x2 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_u64_z22:
+** stnt1d {z22\.d(?: - |, )z23\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_z22, svuint64x2_t, uint64_t,
+ svstnt1_u64_x2 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_u64_z28:
+** stnt1d {z28\.d(?: - |, )z29\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_z28, svuint64x2_t, uint64_t,
+ svstnt1_u64_x2 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_u64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_pn0, svuint64x2_t, uint64_t,
+ svstnt1_u64_x2 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_u64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_pn7, svuint64x2_t, uint64_t,
+ svstnt1_u64_x2 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_u64_pn15:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_pn15, svuint64x2_t, uint64_t,
+ svstnt1_u64_x2 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_u64_0:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_0, svuint64x2_t, uint64_t,
+ svstnt1_vnum_u64_x2 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u64_1:
+** incb x0
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_1, svuint64x2_t, uint64_t,
+ svstnt1_vnum_u64_x2 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/*
+** stnt1_vnum_u64_2:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_2, svuint64x2_t, uint64_t,
+ svstnt1_vnum_u64_x2 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/*
+** stnt1_vnum_u64_14:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_14, svuint64x2_t, uint64_t,
+ svstnt1_vnum_u64_x2 (pn8, x0, 14, z0),
+ svstnt1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u64_16:
+** incb x0, all, mul #16
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_16, svuint64x2_t, uint64_t,
+ svstnt1_vnum_u64_x2 (pn8, x0, 16, z0),
+ svstnt1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u64_m1:
+** decb x0
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_m1, svuint64x2_t, uint64_t,
+ svstnt1_vnum_u64_x2 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/*
+** stnt1_vnum_u64_m2:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_m2, svuint64x2_t, uint64_t,
+ svstnt1_vnum_u64_x2 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/*
+** stnt1_vnum_u64_m16:
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_m16, svuint64x2_t, uint64_t,
+ svstnt1_vnum_u64_x2 (pn8, x0, -16, z0),
+ svstnt1_vnum (pn8, x0, -16, z0))
+
+/*
+** stnt1_vnum_u64_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_m18, svuint64x2_t, uint64_t,
+ svstnt1_vnum_u64_x2 (pn8, x0, -18, z0),
+ svstnt1_vnum (pn8, x0, -18, z0))
+
+/*
+** stnt1_vnum_u64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1d {z0\.d(?: - |, )z1\.d}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_x1, svuint64x2_t, uint64_t,
+ svstnt1_vnum_u64_x2 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u64_x4.c
new file mode 100644
index 0000000..aaef67e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u64_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_u64_base:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_base, svuint64x4_t, uint64_t,
+ svstnt1_u64_x4 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_u64_index:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, x1, lsl #?3\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_index, svuint64x4_t, uint64_t,
+ svstnt1_u64_x4 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u64_1:
+** incb x0
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_1, svuint64x4_t, uint64_t,
+ svstnt1_u64_x4 (pn8, x0 + svcntd (), z0),
+ svstnt1 (pn8, x0 + svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u64_2:
+** incb x0, all, mul #2
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_2, svuint64x4_t, uint64_t,
+ svstnt1_u64_x4 (pn8, x0 + svcntd () * 2, z0),
+ svstnt1 (pn8, x0 + svcntd () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u64_3:
+** incb x0, all, mul #3
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_3, svuint64x4_t, uint64_t,
+ svstnt1_u64_x4 (pn8, x0 + svcntd () * 3, z0),
+ svstnt1 (pn8, x0 + svcntd () * 3, z0))
+
+/*
+** stnt1_u64_4:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_4, svuint64x4_t, uint64_t,
+ svstnt1_u64_x4 (pn8, x0 + svcntd () * 4, z0),
+ svstnt1 (pn8, x0 + svcntd () * 4, z0))
+
+/*
+** stnt1_u64_28:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_28, svuint64x4_t, uint64_t,
+ svstnt1_u64_x4 (pn8, x0 + svcntd () * 28, z0),
+ svstnt1 (pn8, x0 + svcntd () * 28, z0))
+
+/*
+** stnt1_u64_32:
+** [^{]*
+** stnt1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_32, svuint64x4_t, uint64_t,
+ svstnt1_u64_x4 (pn8, x0 + svcntd () * 32, z0),
+ svstnt1 (pn8, x0 + svcntd () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u64_m1:
+** decb x0
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_m1, svuint64x4_t, uint64_t,
+ svstnt1_u64_x4 (pn8, x0 - svcntd (), z0),
+ svstnt1 (pn8, x0 - svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u64_m2:
+** decb x0, all, mul #2
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_m2, svuint64x4_t, uint64_t,
+ svstnt1_u64_x4 (pn8, x0 - svcntd () * 2, z0),
+ svstnt1 (pn8, x0 - svcntd () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u64_m3:
+** decb x0, all, mul #3
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_m3, svuint64x4_t, uint64_t,
+ svstnt1_u64_x4 (pn8, x0 - svcntd () * 3, z0),
+ svstnt1 (pn8, x0 - svcntd () * 3, z0))
+
+/*
+** stnt1_u64_m4:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_m4, svuint64x4_t, uint64_t,
+ svstnt1_u64_x4 (pn8, x0 - svcntd () * 4, z0),
+ svstnt1 (pn8, x0 - svcntd () * 4, z0))
+
+/*
+** stnt1_u64_m32:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_m32, svuint64x4_t, uint64_t,
+ svstnt1_u64_x4 (pn8, x0 - svcntd () * 32, z0),
+ svstnt1 (pn8, x0 - svcntd () * 32, z0))
+
+/*
+** stnt1_u64_m36:
+** [^{]*
+** stnt1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_m36, svuint64x4_t, uint64_t,
+ svstnt1_u64_x4 (pn8, x0 - svcntd () * 36, z0),
+ svstnt1 (pn8, x0 - svcntd () * 36, z0))
+
+/*
+** stnt1_u64_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1d {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_z17, svuint64x4_t, uint64_t,
+ svstnt1_u64_x4 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_u64_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1d {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_z22, svuint64x4_t, uint64_t,
+ svstnt1_u64_x4 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_u64_z28:
+** stnt1d {z28\.d - z31\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_z28, svuint64x4_t, uint64_t,
+ svstnt1_u64_x4 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_u64_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1d {z0\.d - z3\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_pn0, svuint64x4_t, uint64_t,
+ svstnt1_u64_x4 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_u64_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1d {z0\.d - z3\.d}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_pn7, svuint64x4_t, uint64_t,
+ svstnt1_u64_x4 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_u64_pn15:
+** stnt1d {z0\.d - z3\.d}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u64_pn15, svuint64x4_t, uint64_t,
+ svstnt1_u64_x4 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_u64_0:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_0, svuint64x4_t, uint64_t,
+ svstnt1_vnum_u64_x4 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u64_1:
+** incb x0
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_1, svuint64x4_t, uint64_t,
+ svstnt1_vnum_u64_x4 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u64_2:
+** incb x0, all, mul #2
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_2, svuint64x4_t, uint64_t,
+ svstnt1_vnum_u64_x4 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u64_3:
+** incb x0, all, mul #3
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_3, svuint64x4_t, uint64_t,
+ svstnt1_vnum_u64_x4 (pn8, x0, 3, z0),
+ svstnt1_vnum (pn8, x0, 3, z0))
+
+/*
+** stnt1_vnum_u64_4:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_4, svuint64x4_t, uint64_t,
+ svstnt1_vnum_u64_x4 (pn8, x0, 4, z0),
+ svstnt1_vnum (pn8, x0, 4, z0))
+
+/*
+** stnt1_vnum_u64_28:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_28, svuint64x4_t, uint64_t,
+ svstnt1_vnum_u64_x4 (pn8, x0, 28, z0),
+ svstnt1_vnum (pn8, x0, 28, z0))
+
+/*
+** stnt1_vnum_u64_32:
+** [^{]*
+** stnt1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_32, svuint64x4_t, uint64_t,
+ svstnt1_vnum_u64_x4 (pn8, x0, 32, z0),
+ svstnt1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u64_m1:
+** decb x0
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_m1, svuint64x4_t, uint64_t,
+ svstnt1_vnum_u64_x4 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u64_m2:
+** decb x0, all, mul #2
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_m2, svuint64x4_t, uint64_t,
+ svstnt1_vnum_u64_x4 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u64_m3:
+** decb x0, all, mul #3
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_m3, svuint64x4_t, uint64_t,
+ svstnt1_vnum_u64_x4 (pn8, x0, -3, z0),
+ svstnt1_vnum (pn8, x0, -3, z0))
+
+/*
+** stnt1_vnum_u64_m4:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_m4, svuint64x4_t, uint64_t,
+ svstnt1_vnum_u64_x4 (pn8, x0, -4, z0),
+ svstnt1_vnum (pn8, x0, -4, z0))
+
+/*
+** stnt1_vnum_u64_m32:
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_m32, svuint64x4_t, uint64_t,
+ svstnt1_vnum_u64_x4 (pn8, x0, -32, z0),
+ svstnt1_vnum (pn8, x0, -32, z0))
+
+/*
+** stnt1_vnum_u64_m36:
+** [^{]*
+** stnt1d {z0\.d - z3\.d}, pn8, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_m36, svuint64x4_t, uint64_t,
+ svstnt1_vnum_u64_x4 (pn8, x0, -36, z0),
+ svstnt1_vnum (pn8, x0, -36, z0))
+
+/*
+** stnt1_vnum_u64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1d {z0\.d - z3\.d}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1d {z0\.d - z3\.d}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u64_x1, svuint64x4_t, uint64_t,
+ svstnt1_vnum_u64_x4 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u8_x2.c
new file mode 100644
index 0000000..5431aaa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u8_x2.c
@@ -0,0 +1,262 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_u8_base:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_base, svuint8x2_t, uint8_t,
+ svstnt1_u8_x2 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_u8_index:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, x1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_index, svuint8x2_t, uint8_t,
+ svstnt1_u8_x2 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u8_1:
+** incb x0
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_1, svuint8x2_t, uint8_t,
+ svstnt1_u8_x2 (pn8, x0 + svcntb (), z0),
+ svstnt1 (pn8, x0 + svcntb (), z0))
+
+/*
+** stnt1_u8_2:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_2, svuint8x2_t, uint8_t,
+ svstnt1_u8_x2 (pn8, x0 + svcntb () * 2, z0),
+ svstnt1 (pn8, x0 + svcntb () * 2, z0))
+
+/*
+** stnt1_u8_14:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_14, svuint8x2_t, uint8_t,
+ svstnt1_u8_x2 (pn8, x0 + svcntb () * 14, z0),
+ svstnt1 (pn8, x0 + svcntb () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u8_16:
+** incb x0, all, mul #16
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_16, svuint8x2_t, uint8_t,
+ svstnt1_u8_x2 (pn8, x0 + svcntb () * 16, z0),
+ svstnt1 (pn8, x0 + svcntb () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u8_m1:
+** decb x0
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_m1, svuint8x2_t, uint8_t,
+ svstnt1_u8_x2 (pn8, x0 - svcntb (), z0),
+ svstnt1 (pn8, x0 - svcntb (), z0))
+
+/*
+** stnt1_u8_m2:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_m2, svuint8x2_t, uint8_t,
+ svstnt1_u8_x2 (pn8, x0 - svcntb () * 2, z0),
+ svstnt1 (pn8, x0 - svcntb () * 2, z0))
+
+/*
+** stnt1_u8_m16:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_m16, svuint8x2_t, uint8_t,
+ svstnt1_u8_x2 (pn8, x0 - svcntb () * 16, z0),
+ svstnt1 (pn8, x0 - svcntb () * 16, z0))
+
+/*
+** stnt1_u8_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_m18, svuint8x2_t, uint8_t,
+ svstnt1_u8_x2 (pn8, x0 - svcntb () * 18, z0),
+ svstnt1 (pn8, x0 - svcntb () * 18, z0))
+
+/*
+** stnt1_u8_z17:
+** mov [^\n]+
+** mov [^\n]+
+** stnt1b {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_z17, svuint8x2_t, uint8_t,
+ svstnt1_u8_x2 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_u8_z22:
+** stnt1b {z22\.b(?: - |, )z23\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_z22, svuint8x2_t, uint8_t,
+ svstnt1_u8_x2 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_u8_z28:
+** stnt1b {z28\.b(?: - |, )z29\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_z28, svuint8x2_t, uint8_t,
+ svstnt1_u8_x2 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_u8_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_pn0, svuint8x2_t, uint8_t,
+ svstnt1_u8_x2 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_u8_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_pn7, svuint8x2_t, uint8_t,
+ svstnt1_u8_x2 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_u8_pn15:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_pn15, svuint8x2_t, uint8_t,
+ svstnt1_u8_x2 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_u8_0:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_0, svuint8x2_t, uint8_t,
+ svstnt1_vnum_u8_x2 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u8_1:
+** incb x0
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_1, svuint8x2_t, uint8_t,
+ svstnt1_vnum_u8_x2 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/*
+** stnt1_vnum_u8_2:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_2, svuint8x2_t, uint8_t,
+ svstnt1_vnum_u8_x2 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/*
+** stnt1_vnum_u8_14:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_14, svuint8x2_t, uint8_t,
+ svstnt1_vnum_u8_x2 (pn8, x0, 14, z0),
+ svstnt1_vnum (pn8, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u8_16:
+** incb x0, all, mul #16
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_16, svuint8x2_t, uint8_t,
+ svstnt1_vnum_u8_x2 (pn8, x0, 16, z0),
+ svstnt1_vnum (pn8, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u8_m1:
+** decb x0
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_m1, svuint8x2_t, uint8_t,
+ svstnt1_vnum_u8_x2 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/*
+** stnt1_vnum_u8_m2:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_m2, svuint8x2_t, uint8_t,
+ svstnt1_vnum_u8_x2 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/*
+** stnt1_vnum_u8_m16:
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_m16, svuint8x2_t, uint8_t,
+ svstnt1_vnum_u8_x2 (pn8, x0, -16, z0),
+ svstnt1_vnum (pn8, x0, -16, z0))
+
+/*
+** stnt1_vnum_u8_m18:
+** addvl (x[0-9]+), x0, #-18
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[\1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_m18, svuint8x2_t, uint8_t,
+ svstnt1_vnum_u8_x2 (pn8, x0, -18, z0),
+ svstnt1_vnum (pn8, x0, -18, z0))
+
+/*
+** stnt1_vnum_u8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1b {z0\.b(?: - |, )z1\.b}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_x1, svuint8x2_t, uint8_t,
+ svstnt1_vnum_u8_x2 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u8_x4.c
new file mode 100644
index 0000000..fcf3f63
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/stnt1_u8_x4.c
@@ -0,0 +1,354 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** stnt1_u8_base:
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_base, svuint8x4_t, uint8_t,
+ svstnt1_u8_x4 (pn8, x0, z0),
+ svstnt1 (pn8, x0, z0))
+
+/*
+** stnt1_u8_index:
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, x1\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_index, svuint8x4_t, uint8_t,
+ svstnt1_u8_x4 (pn8, x0 + x1, z0),
+ svstnt1 (pn8, x0 + x1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u8_1:
+** incb x0
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_1, svuint8x4_t, uint8_t,
+ svstnt1_u8_x4 (pn8, x0 + svcntb (), z0),
+ svstnt1 (pn8, x0 + svcntb (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u8_2:
+** incb x0, all, mul #2
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_2, svuint8x4_t, uint8_t,
+ svstnt1_u8_x4 (pn8, x0 + svcntb () * 2, z0),
+ svstnt1 (pn8, x0 + svcntb () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u8_3:
+** incb x0, all, mul #3
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_3, svuint8x4_t, uint8_t,
+ svstnt1_u8_x4 (pn8, x0 + svcntb () * 3, z0),
+ svstnt1 (pn8, x0 + svcntb () * 3, z0))
+
+/*
+** stnt1_u8_4:
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_4, svuint8x4_t, uint8_t,
+ svstnt1_u8_x4 (pn8, x0 + svcntb () * 4, z0),
+ svstnt1 (pn8, x0 + svcntb () * 4, z0))
+
+/*
+** stnt1_u8_28:
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_28, svuint8x4_t, uint8_t,
+ svstnt1_u8_x4 (pn8, x0 + svcntb () * 28, z0),
+ svstnt1 (pn8, x0 + svcntb () * 28, z0))
+
+/*
+** stnt1_u8_32:
+** [^{]*
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_32, svuint8x4_t, uint8_t,
+ svstnt1_u8_x4 (pn8, x0 + svcntb () * 32, z0),
+ svstnt1 (pn8, x0 + svcntb () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u8_m1:
+** decb x0
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_m1, svuint8x4_t, uint8_t,
+ svstnt1_u8_x4 (pn8, x0 - svcntb (), z0),
+ svstnt1 (pn8, x0 - svcntb (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u8_m2:
+** decb x0, all, mul #2
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_m2, svuint8x4_t, uint8_t,
+ svstnt1_u8_x4 (pn8, x0 - svcntb () * 2, z0),
+ svstnt1 (pn8, x0 - svcntb () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_u8_m3:
+** decb x0, all, mul #3
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_m3, svuint8x4_t, uint8_t,
+ svstnt1_u8_x4 (pn8, x0 - svcntb () * 3, z0),
+ svstnt1 (pn8, x0 - svcntb () * 3, z0))
+
+/*
+** stnt1_u8_m4:
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_m4, svuint8x4_t, uint8_t,
+ svstnt1_u8_x4 (pn8, x0 - svcntb () * 4, z0),
+ svstnt1 (pn8, x0 - svcntb () * 4, z0))
+
+/*
+** stnt1_u8_m32:
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_m32, svuint8x4_t, uint8_t,
+ svstnt1_u8_x4 (pn8, x0 - svcntb () * 32, z0),
+ svstnt1 (pn8, x0 - svcntb () * 32, z0))
+
+/*
+** stnt1_u8_m36:
+** [^{]*
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_m36, svuint8x4_t, uint8_t,
+ svstnt1_u8_x4 (pn8, x0 - svcntb () * 36, z0),
+ svstnt1 (pn8, x0 - svcntb () * 36, z0))
+
+/*
+** stnt1_u8_z17:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1b {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_z17, svuint8x4_t, uint8_t,
+ svstnt1_u8_x4 (pn8, x0, z17),
+ svstnt1 (pn8, x0, z17))
+
+/*
+** stnt1_u8_z22:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** stnt1b {z[^\n]+}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_z22, svuint8x4_t, uint8_t,
+ svstnt1_u8_x4 (pn8, x0, z22),
+ svstnt1 (pn8, x0, z22))
+
+/*
+** stnt1_u8_z28:
+** stnt1b {z28\.b - z31\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_z28, svuint8x4_t, uint8_t,
+ svstnt1_u8_x4 (pn8, x0, z28),
+ svstnt1 (pn8, x0, z28))
+
+/*
+** stnt1_u8_pn0:
+** mov p([89]|1[0-5])\.b, p0\.b
+** stnt1b {z0\.b - z3\.b}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_pn0, svuint8x4_t, uint8_t,
+ svstnt1_u8_x4 (pn0, x0, z0),
+ svstnt1 (pn0, x0, z0))
+
+/*
+** stnt1_u8_pn7:
+** mov p([89]|1[0-5])\.b, p7\.b
+** stnt1b {z0\.b - z3\.b}, pn\1, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_pn7, svuint8x4_t, uint8_t,
+ svstnt1_u8_x4 (pn7, x0, z0),
+ svstnt1 (pn7, x0, z0))
+
+/*
+** stnt1_u8_pn15:
+** stnt1b {z0\.b - z3\.b}, pn15, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_u8_pn15, svuint8x4_t, uint8_t,
+ svstnt1_u8_x4 (pn15, x0, z0),
+ svstnt1 (pn15, x0, z0))
+
+/*
+** stnt1_vnum_u8_0:
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_0, svuint8x4_t, uint8_t,
+ svstnt1_vnum_u8_x4 (pn8, x0, 0, z0),
+ svstnt1_vnum (pn8, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u8_1:
+** incb x0
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_1, svuint8x4_t, uint8_t,
+ svstnt1_vnum_u8_x4 (pn8, x0, 1, z0),
+ svstnt1_vnum (pn8, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u8_2:
+** incb x0, all, mul #2
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_2, svuint8x4_t, uint8_t,
+ svstnt1_vnum_u8_x4 (pn8, x0, 2, z0),
+ svstnt1_vnum (pn8, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u8_3:
+** incb x0, all, mul #3
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_3, svuint8x4_t, uint8_t,
+ svstnt1_vnum_u8_x4 (pn8, x0, 3, z0),
+ svstnt1_vnum (pn8, x0, 3, z0))
+
+/*
+** stnt1_vnum_u8_4:
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_4, svuint8x4_t, uint8_t,
+ svstnt1_vnum_u8_x4 (pn8, x0, 4, z0),
+ svstnt1_vnum (pn8, x0, 4, z0))
+
+/*
+** stnt1_vnum_u8_28:
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_28, svuint8x4_t, uint8_t,
+ svstnt1_vnum_u8_x4 (pn8, x0, 28, z0),
+ svstnt1_vnum (pn8, x0, 28, z0))
+
+/*
+** stnt1_vnum_u8_32:
+** [^{]*
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_32, svuint8x4_t, uint8_t,
+ svstnt1_vnum_u8_x4 (pn8, x0, 32, z0),
+ svstnt1_vnum (pn8, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u8_m1:
+** decb x0
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_m1, svuint8x4_t, uint8_t,
+ svstnt1_vnum_u8_x4 (pn8, x0, -1, z0),
+ svstnt1_vnum (pn8, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u8_m2:
+** decb x0, all, mul #2
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_m2, svuint8x4_t, uint8_t,
+ svstnt1_vnum_u8_x4 (pn8, x0, -2, z0),
+ svstnt1_vnum (pn8, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** stnt1_vnum_u8_m3:
+** decb x0, all, mul #3
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_m3, svuint8x4_t, uint8_t,
+ svstnt1_vnum_u8_x4 (pn8, x0, -3, z0),
+ svstnt1_vnum (pn8, x0, -3, z0))
+
+/*
+** stnt1_vnum_u8_m4:
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_m4, svuint8x4_t, uint8_t,
+ svstnt1_vnum_u8_x4 (pn8, x0, -4, z0),
+ svstnt1_vnum (pn8, x0, -4, z0))
+
+/*
+** stnt1_vnum_u8_m32:
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_m32, svuint8x4_t, uint8_t,
+ svstnt1_vnum_u8_x4 (pn8, x0, -32, z0),
+ svstnt1_vnum (pn8, x0, -32, z0))
+
+/*
+** stnt1_vnum_u8_m36:
+** [^{]*
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, x[0-9]+\]
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_m36, svuint8x4_t, uint8_t,
+ svstnt1_vnum_u8_x4 (pn8, x0, -36, z0),
+ svstnt1_vnum (pn8, x0, -36, z0))
+
+/*
+** stnt1_vnum_u8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** stnt1b {z0\.b - z3\.b}, pn8, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** stnt1b {z0\.b - z3\.b}, pn8, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE_COUNT (stnt1_vnum_u8_x1, svuint8x4_t, uint8_t,
+ svstnt1_vnum_u8_x4 (pn8, x0, x1, z0),
+ svstnt1_vnum (pn8, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/str_zt.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/str_zt.c
new file mode 100644
index 0000000..c8ecacb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/str_zt.c
@@ -0,0 +1,36 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#define STREAMING_COMPATIBLE
+#define SHARED_ZT0
+#include "test_sme2_acle.h"
+
+/*
+** str_zt0_x0:
+** str zt0, \[x0\]
+** ret
+*/
+PROTO (str_zt0_x0, void, (char *x0)) { svstr_zt (0, x0); }
+
+/*
+** str_zt0_x0p1:
+** add (x[0-9]+), x0, #?1
+** str zt0, \[\1\]
+** ret
+*/
+PROTO (str_zt0_x0p1, void, (char *x0)) { svstr_zt (0, x0 + 1); }
+
+/*
+** str_zt0_x0p64:
+** add (x[0-9]+), x0, #?64
+** str zt0, \[\1\]
+** ret
+*/
+PROTO (str_zt0_x0p64, void, (char *x0)) { svstr_zt (0, x0 + 64); }
+
+/*
+** str_zt0_x0_vl1:
+** incb x0
+** str zt0, \[x0\]
+** ret
+*/
+PROTO (str_zt0_x0_vl1, void, (char *x0)) { svstr_zt (0, x0 + svcntb()); }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za32_s32_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za32_s32_vg1x2.c
new file mode 100644
index 0000000..75b4205
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za32_s32_vg1x2.c
@@ -0,0 +1,180 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sub_write_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_0_z0_z0, svint32x2_t,
+ svsub_write_za32_s32_vg1x2 (0, z0, z0),
+ svsub_write_za32_vg1x2 (0, z0, z0))
+
+/*
+** sub_write_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w0_z0_z0, svint32x2_t,
+ svsub_write_za32_s32_vg1x2 (w0, z0, z0),
+ svsub_write_za32_vg1x2 (w0, z0, z0))
+
+/*
+** sub_write_w8_z0_z4:
+** sub za\.s\[w8, 0, vgx2\], {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z0_z4, svint32x2_t,
+ svsub_write_za32_s32_vg1x2 (w8, z0, z4),
+ svsub_write_za32_vg1x2 (w8, z0, z4))
+
+/*
+** sub_write_w8_z4_z18:
+** sub za\.s\[w8, 0, vgx2\], {z4\.s - z5\.s}, {z18\.s - z19\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z4_z18, svint32x2_t,
+ svsub_write_za32_s32_vg1x2 (w8, z4, z18),
+ svsub_write_za32_vg1x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** sub_write_w8_z23_z0:
+** ...
+** sub za\.s\[w8, 0, vgx2\], [^\n]+, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z23_z0, svint32x2_t,
+ svsub_write_za32_s32_vg1x2 (w8, z23, z0),
+ svsub_write_za32_vg1x2 (w8, z23, z0))
+
+/*
+** sub_write_w8_z18_z23:
+** ...
+** sub za\.s\[w8, 0, vgx2\], {z18\.s - z19\.s}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z18_z23, svint32x2_t,
+ svsub_write_za32_s32_vg1x2 (w8, z18, z23),
+ svsub_write_za32_vg1x2 (w8, z18, z23))
+
+/*
+** sub_write_w8_z4_z28:
+** sub za\.s\[w8, 0, vgx2\], {z4\.s - z5\.s}, {z28\.s - z29\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z4_z28, svint32x2_t,
+ svsub_write_za32_s32_vg1x2 (w8, z4, z28),
+ svsub_write_za32_vg1x2 (w8, z4, z28))
+
+/*
+** sub_write_w8p7_z4_z0:
+** sub za\.s\[w8, 7, vgx2\], {z4\.s - z5\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8p7_z4_z0, svint32x2_t,
+ svsub_write_za32_s32_vg1x2 (w8 + 7, z4, z0),
+ svsub_write_za32_vg1x2 (w8 + 7, z4, z0))
+
+/*
+** sub_write_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.s\[\1, 0, vgx2\], {z4\.s - z5\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8p8_z4_z4, svint32x2_t,
+ svsub_write_za32_s32_vg1x2 (w8 + 8, z4, z4),
+ svsub_write_za32_vg1x2 (w8 + 8, z4, z4))
+
+/*
+** sub_write_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** sub za\.s\[\1, 0, vgx2\], {z4\.s - z5\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8m1_z4_z0, svint32x2_t,
+ svsub_write_za32_s32_vg1x2 (w8 - 1, z4, z0),
+ svsub_write_za32_vg1x2 (w8 - 1, z4, z0))
+
+/*
+** sub_write_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_0_z1_z0, svint32x2_t, svint32_t,
+ svsub_write_single_za32_s32_vg1x2 (0, z1, z0),
+ svsub_write_za32_vg1x2 (0, z1, z0))
+
+/*
+** sub_write_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w0_z1_z0, svint32x2_t, svint32_t,
+ svsub_write_single_za32_s32_vg1x2 (w0, z1, z0),
+ svsub_write_za32_vg1x2 (w0, z1, z0))
+
+/*
+** sub_write_single_w8_z1_z0:
+** sub za\.s\[w8, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8_z1_z0, svint32x2_t, svint32_t,
+ svsub_write_single_za32_s32_vg1x2 (w8, z1, z0),
+ svsub_write_za32_vg1x2 (w8, z1, z0))
+
+/*
+** sub_write_single_w8p7_z1_z0:
+** sub za\.s\[w8, 7, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8p7_z1_z0, svint32x2_t, svint32_t,
+ svsub_write_single_za32_s32_vg1x2 (w8 + 7, z1, z0),
+ svsub_write_za32_vg1x2 (w8 + 7, z1, z0))
+
+/*
+** sub_write_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8p8_z1_z0, svint32x2_t, svint32_t,
+ svsub_write_single_za32_s32_vg1x2 (w8 + 8, z1, z0),
+ svsub_write_za32_vg1x2 (w8 + 8, z1, z0))
+
+/*
+** sub_write_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** sub za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w0m1_z1_z0, svint32x2_t, svint32_t,
+ svsub_write_single_za32_s32_vg1x2 (w0 - 1, z1, z0),
+ svsub_write_za32_vg1x2 (w0 - 1, z1, z0))
+
+/*
+** sub_write_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** sub za\.s\[w8, 0, vgx2\], {z0\.s - z1\.s}, z15\.s
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (sub_write_single_w8_z0_z15, svint32x2_t, svint32_t,
+ svsub_write_single_za32_s32_vg1x2 (w8, z0, z15),
+ svsub_write_za32_vg1x2 (w8, z0, z15))
+
+/*
+** sub_write_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** sub za\.s\[w8, 0, vgx2\], {z20\.s - z21\.s}, \1\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8_z20_z16, svint32x2_t, svint32_t,
+ svsub_write_single_za32_s32_vg1x2 (w8, z20, z16),
+ svsub_write_za32_vg1x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za32_s32_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za32_s32_vg1x4.c
new file mode 100644
index 0000000..9ef49ab
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za32_s32_vg1x4.c
@@ -0,0 +1,172 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sub_write_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_0_z0_z0, svint32x4_t,
+ svsub_write_za32_s32_vg1x4 (0, z0, z0),
+ svsub_write_za32_vg1x4 (0, z0, z0))
+
+/*
+** sub_write_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w0_z0_z0, svint32x4_t,
+ svsub_write_za32_s32_vg1x4 (w0, z0, z0),
+ svsub_write_za32_vg1x4 (w0, z0, z0))
+
+/*
+** sub_write_w8_z0_z4:
+** sub za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z0_z4, svint32x4_t,
+ svsub_write_za32_s32_vg1x4 (w8, z0, z4),
+ svsub_write_za32_vg1x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** sub_write_w8_z0_z18:
+** ...
+** sub za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z0_z18, svint32x4_t,
+ svsub_write_za32_s32_vg1x4 (w8, z0, z18),
+ svsub_write_za32_vg1x4 (w8, z0, z18))
+
+/*
+** sub_write_w8_z18_z28:
+** ...
+** sub za\.s\[w8, 0, vgx4\], [^\n]+, {z28\.s - z31\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z18_z28, svint32x4_t,
+ svsub_write_za32_s32_vg1x4 (w8, z18, z28),
+ svsub_write_za32_vg1x4 (w8, z18, z28))
+
+/*
+** sub_write_w8_z28_z23:
+** ...
+** sub za\.s\[w8, 0, vgx4\], {z28\.s - z31\.s}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z28_z23, svint32x4_t,
+ svsub_write_za32_s32_vg1x4 (w8, z28, z23),
+ svsub_write_za32_vg1x4 (w8, z28, z23))
+
+/*
+** sub_write_w8p7_z4_z0:
+** sub za\.s\[w8, 7, vgx4\], {z4\.s - z7\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8p7_z4_z0, svint32x4_t,
+ svsub_write_za32_s32_vg1x4 (w8 + 7, z4, z0),
+ svsub_write_za32_vg1x4 (w8 + 7, z4, z0))
+
+/*
+** sub_write_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.s\[\1, 0, vgx4\], {z4\.s - z7\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8p8_z4_z4, svint32x4_t,
+ svsub_write_za32_s32_vg1x4 (w8 + 8, z4, z4),
+ svsub_write_za32_vg1x4 (w8 + 8, z4, z4))
+
+/*
+** sub_write_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** sub za\.s\[\1, 0, vgx4\], {z4\.s - z7\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8m1_z4_z0, svint32x4_t,
+ svsub_write_za32_s32_vg1x4 (w8 - 1, z4, z0),
+ svsub_write_za32_vg1x4 (w8 - 1, z4, z0))
+
+/*
+** sub_write_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_0_z1_z0, svint32x4_t, svint32_t,
+ svsub_write_single_za32_s32_vg1x4 (0, z1, z0),
+ svsub_write_za32_vg1x4 (0, z1, z0))
+
+/*
+** sub_write_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w0_z1_z0, svint32x4_t, svint32_t,
+ svsub_write_single_za32_s32_vg1x4 (w0, z1, z0),
+ svsub_write_za32_vg1x4 (w0, z1, z0))
+
+/*
+** sub_write_single_w8_z1_z0:
+** sub za\.s\[w8, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8_z1_z0, svint32x4_t, svint32_t,
+ svsub_write_single_za32_s32_vg1x4 (w8, z1, z0),
+ svsub_write_za32_vg1x4 (w8, z1, z0))
+
+/*
+** sub_write_single_w8p7_z1_z0:
+** sub za\.s\[w8, 7, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8p7_z1_z0, svint32x4_t, svint32_t,
+ svsub_write_single_za32_s32_vg1x4 (w8 + 7, z1, z0),
+ svsub_write_za32_vg1x4 (w8 + 7, z1, z0))
+
+/*
+** sub_write_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8p8_z1_z0, svint32x4_t, svint32_t,
+ svsub_write_single_za32_s32_vg1x4 (w8 + 8, z1, z0),
+ svsub_write_za32_vg1x4 (w8 + 8, z1, z0))
+
+/*
+** sub_write_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** sub za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w0m1_z1_z0, svint32x4_t, svint32_t,
+ svsub_write_single_za32_s32_vg1x4 (w0 - 1, z1, z0),
+ svsub_write_za32_vg1x4 (w0 - 1, z1, z0))
+
+/*
+** sub_write_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** sub za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}, z15\.s
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (sub_write_single_w8_z0_z15, svint32x4_t, svint32_t,
+ svsub_write_single_za32_s32_vg1x4 (w8, z0, z15),
+ svsub_write_za32_vg1x4 (w8, z0, z15))
+
+/*
+** sub_write_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** sub za\.s\[w8, 0, vgx4\], {z20\.s - z23\.s}, \1\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8_z20_z16, svint32x4_t, svint32_t,
+ svsub_write_single_za32_s32_vg1x4 (w8, z20, z16),
+ svsub_write_za32_vg1x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za32_u32_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za32_u32_vg1x2.c
new file mode 100644
index 0000000..c655e46
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za32_u32_vg1x2.c
@@ -0,0 +1,180 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sub_write_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_0_z0_z0, svuint32x2_t,
+ svsub_write_za32_u32_vg1x2 (0, z0, z0),
+ svsub_write_za32_vg1x2 (0, z0, z0))
+
+/*
+** sub_write_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w0_z0_z0, svuint32x2_t,
+ svsub_write_za32_u32_vg1x2 (w0, z0, z0),
+ svsub_write_za32_vg1x2 (w0, z0, z0))
+
+/*
+** sub_write_w8_z0_z4:
+** sub za\.s\[w8, 0, vgx2\], {z0\.s - z1\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z0_z4, svuint32x2_t,
+ svsub_write_za32_u32_vg1x2 (w8, z0, z4),
+ svsub_write_za32_vg1x2 (w8, z0, z4))
+
+/*
+** sub_write_w8_z4_z18:
+** sub za\.s\[w8, 0, vgx2\], {z4\.s - z5\.s}, {z18\.s - z19\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z4_z18, svuint32x2_t,
+ svsub_write_za32_u32_vg1x2 (w8, z4, z18),
+ svsub_write_za32_vg1x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** sub_write_w8_z23_z0:
+** ...
+** sub za\.s\[w8, 0, vgx2\], [^\n]+, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z23_z0, svuint32x2_t,
+ svsub_write_za32_u32_vg1x2 (w8, z23, z0),
+ svsub_write_za32_vg1x2 (w8, z23, z0))
+
+/*
+** sub_write_w8_z18_z23:
+** ...
+** sub za\.s\[w8, 0, vgx2\], {z18\.s - z19\.s}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z18_z23, svuint32x2_t,
+ svsub_write_za32_u32_vg1x2 (w8, z18, z23),
+ svsub_write_za32_vg1x2 (w8, z18, z23))
+
+/*
+** sub_write_w8_z4_z28:
+** sub za\.s\[w8, 0, vgx2\], {z4\.s - z5\.s}, {z28\.s - z29\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z4_z28, svuint32x2_t,
+ svsub_write_za32_u32_vg1x2 (w8, z4, z28),
+ svsub_write_za32_vg1x2 (w8, z4, z28))
+
+/*
+** sub_write_w8p7_z4_z0:
+** sub za\.s\[w8, 7, vgx2\], {z4\.s - z5\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8p7_z4_z0, svuint32x2_t,
+ svsub_write_za32_u32_vg1x2 (w8 + 7, z4, z0),
+ svsub_write_za32_vg1x2 (w8 + 7, z4, z0))
+
+/*
+** sub_write_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.s\[\1, 0, vgx2\], {z4\.s - z5\.s}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8p8_z4_z4, svuint32x2_t,
+ svsub_write_za32_u32_vg1x2 (w8 + 8, z4, z4),
+ svsub_write_za32_vg1x2 (w8 + 8, z4, z4))
+
+/*
+** sub_write_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** sub za\.s\[\1, 0, vgx2\], {z4\.s - z5\.s}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8m1_z4_z0, svuint32x2_t,
+ svsub_write_za32_u32_vg1x2 (w8 - 1, z4, z0),
+ svsub_write_za32_vg1x2 (w8 - 1, z4, z0))
+
+/*
+** sub_write_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_0_z1_z0, svuint32x2_t, svuint32_t,
+ svsub_write_single_za32_u32_vg1x2 (0, z1, z0),
+ svsub_write_za32_vg1x2 (0, z1, z0))
+
+/*
+** sub_write_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w0_z1_z0, svuint32x2_t, svuint32_t,
+ svsub_write_single_za32_u32_vg1x2 (w0, z1, z0),
+ svsub_write_za32_vg1x2 (w0, z1, z0))
+
+/*
+** sub_write_single_w8_z1_z0:
+** sub za\.s\[w8, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8_z1_z0, svuint32x2_t, svuint32_t,
+ svsub_write_single_za32_u32_vg1x2 (w8, z1, z0),
+ svsub_write_za32_vg1x2 (w8, z1, z0))
+
+/*
+** sub_write_single_w8p7_z1_z0:
+** sub za\.s\[w8, 7, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8p7_z1_z0, svuint32x2_t, svuint32_t,
+ svsub_write_single_za32_u32_vg1x2 (w8 + 7, z1, z0),
+ svsub_write_za32_vg1x2 (w8 + 7, z1, z0))
+
+/*
+** sub_write_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8p8_z1_z0, svuint32x2_t, svuint32_t,
+ svsub_write_single_za32_u32_vg1x2 (w8 + 8, z1, z0),
+ svsub_write_za32_vg1x2 (w8 + 8, z1, z0))
+
+/*
+** sub_write_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** sub za\.s\[\1, 0, vgx2\], {z1\.s - z2\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w0m1_z1_z0, svuint32x2_t, svuint32_t,
+ svsub_write_single_za32_u32_vg1x2 (w0 - 1, z1, z0),
+ svsub_write_za32_vg1x2 (w0 - 1, z1, z0))
+
+/*
+** sub_write_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** sub za\.s\[w8, 0, vgx2\], {z0\.s - z1\.s}, z15\.s
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (sub_write_single_w8_z0_z15, svuint32x2_t, svuint32_t,
+ svsub_write_single_za32_u32_vg1x2 (w8, z0, z15),
+ svsub_write_za32_vg1x2 (w8, z0, z15))
+
+/*
+** sub_write_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** sub za\.s\[w8, 0, vgx2\], {z20\.s - z21\.s}, \1\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8_z20_z16, svuint32x2_t, svuint32_t,
+ svsub_write_single_za32_u32_vg1x2 (w8, z20, z16),
+ svsub_write_za32_vg1x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za32_u32_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za32_u32_vg1x4.c
new file mode 100644
index 0000000..51f5a4d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za32_u32_vg1x4.c
@@ -0,0 +1,172 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sub_write_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_0_z0_z0, svuint32x4_t,
+ svsub_write_za32_u32_vg1x4 (0, z0, z0),
+ svsub_write_za32_vg1x4 (0, z0, z0))
+
+/*
+** sub_write_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w0_z0_z0, svuint32x4_t,
+ svsub_write_za32_u32_vg1x4 (w0, z0, z0),
+ svsub_write_za32_vg1x4 (w0, z0, z0))
+
+/*
+** sub_write_w8_z0_z4:
+** sub za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z0_z4, svuint32x4_t,
+ svsub_write_za32_u32_vg1x4 (w8, z0, z4),
+ svsub_write_za32_vg1x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** sub_write_w8_z0_z18:
+** ...
+** sub za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z0_z18, svuint32x4_t,
+ svsub_write_za32_u32_vg1x4 (w8, z0, z18),
+ svsub_write_za32_vg1x4 (w8, z0, z18))
+
+/*
+** sub_write_w8_z18_z28:
+** ...
+** sub za\.s\[w8, 0, vgx4\], [^\n]+, {z28\.s - z31\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z18_z28, svuint32x4_t,
+ svsub_write_za32_u32_vg1x4 (w8, z18, z28),
+ svsub_write_za32_vg1x4 (w8, z18, z28))
+
+/*
+** sub_write_w8_z28_z23:
+** ...
+** sub za\.s\[w8, 0, vgx4\], {z28\.s - z31\.s}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z28_z23, svuint32x4_t,
+ svsub_write_za32_u32_vg1x4 (w8, z28, z23),
+ svsub_write_za32_vg1x4 (w8, z28, z23))
+
+/*
+** sub_write_w8p7_z4_z0:
+** sub za\.s\[w8, 7, vgx4\], {z4\.s - z7\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8p7_z4_z0, svuint32x4_t,
+ svsub_write_za32_u32_vg1x4 (w8 + 7, z4, z0),
+ svsub_write_za32_vg1x4 (w8 + 7, z4, z0))
+
+/*
+** sub_write_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.s\[\1, 0, vgx4\], {z4\.s - z7\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8p8_z4_z4, svuint32x4_t,
+ svsub_write_za32_u32_vg1x4 (w8 + 8, z4, z4),
+ svsub_write_za32_vg1x4 (w8 + 8, z4, z4))
+
+/*
+** sub_write_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** sub za\.s\[\1, 0, vgx4\], {z4\.s - z7\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8m1_z4_z0, svuint32x4_t,
+ svsub_write_za32_u32_vg1x4 (w8 - 1, z4, z0),
+ svsub_write_za32_vg1x4 (w8 - 1, z4, z0))
+
+/*
+** sub_write_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_0_z1_z0, svuint32x4_t, svuint32_t,
+ svsub_write_single_za32_u32_vg1x4 (0, z1, z0),
+ svsub_write_za32_vg1x4 (0, z1, z0))
+
+/*
+** sub_write_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w0_z1_z0, svuint32x4_t, svuint32_t,
+ svsub_write_single_za32_u32_vg1x4 (w0, z1, z0),
+ svsub_write_za32_vg1x4 (w0, z1, z0))
+
+/*
+** sub_write_single_w8_z1_z0:
+** sub za\.s\[w8, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8_z1_z0, svuint32x4_t, svuint32_t,
+ svsub_write_single_za32_u32_vg1x4 (w8, z1, z0),
+ svsub_write_za32_vg1x4 (w8, z1, z0))
+
+/*
+** sub_write_single_w8p7_z1_z0:
+** sub za\.s\[w8, 7, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8p7_z1_z0, svuint32x4_t, svuint32_t,
+ svsub_write_single_za32_u32_vg1x4 (w8 + 7, z1, z0),
+ svsub_write_za32_vg1x4 (w8 + 7, z1, z0))
+
+/*
+** sub_write_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8p8_z1_z0, svuint32x4_t, svuint32_t,
+ svsub_write_single_za32_u32_vg1x4 (w8 + 8, z1, z0),
+ svsub_write_za32_vg1x4 (w8 + 8, z1, z0))
+
+/*
+** sub_write_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** sub za\.s\[\1, 0, vgx4\], {z1\.s - z4\.s}, z0\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w0m1_z1_z0, svuint32x4_t, svuint32_t,
+ svsub_write_single_za32_u32_vg1x4 (w0 - 1, z1, z0),
+ svsub_write_za32_vg1x4 (w0 - 1, z1, z0))
+
+/*
+** sub_write_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** sub za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}, z15\.s
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (sub_write_single_w8_z0_z15, svuint32x4_t, svuint32_t,
+ svsub_write_single_za32_u32_vg1x4 (w8, z0, z15),
+ svsub_write_za32_vg1x4 (w8, z0, z15))
+
+/*
+** sub_write_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** sub za\.s\[w8, 0, vgx4\], {z20\.s - z23\.s}, \1\.s
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8_z20_z16, svuint32x4_t, svuint32_t,
+ svsub_write_single_za32_u32_vg1x4 (w8, z20, z16),
+ svsub_write_za32_vg1x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za64_s64_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za64_s64_vg1x2.c
new file mode 100644
index 0000000..db3ec8a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za64_s64_vg1x2.c
@@ -0,0 +1,182 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** sub_write_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_0_z0_z0, svint64x2_t,
+ svsub_write_za64_s64_vg1x2 (0, z0, z0),
+ svsub_write_za64_vg1x2 (0, z0, z0))
+
+/*
+** sub_write_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w0_z0_z0, svint64x2_t,
+ svsub_write_za64_s64_vg1x2 (w0, z0, z0),
+ svsub_write_za64_vg1x2 (w0, z0, z0))
+
+/*
+** sub_write_w8_z0_z4:
+** sub za\.d\[w8, 0, vgx2\], {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z0_z4, svint64x2_t,
+ svsub_write_za64_s64_vg1x2 (w8, z0, z4),
+ svsub_write_za64_vg1x2 (w8, z0, z4))
+
+/*
+** sub_write_w8_z4_z18:
+** sub za\.d\[w8, 0, vgx2\], {z4\.d - z5\.d}, {z18\.d - z19\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z4_z18, svint64x2_t,
+ svsub_write_za64_s64_vg1x2 (w8, z4, z18),
+ svsub_write_za64_vg1x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** sub_write_w8_z23_z0:
+** ...
+** sub za\.d\[w8, 0, vgx2\], [^\n]+, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z23_z0, svint64x2_t,
+ svsub_write_za64_s64_vg1x2 (w8, z23, z0),
+ svsub_write_za64_vg1x2 (w8, z23, z0))
+
+/*
+** sub_write_w8_z18_z23:
+** ...
+** sub za\.d\[w8, 0, vgx2\], {z18\.d - z19\.d}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z18_z23, svint64x2_t,
+ svsub_write_za64_s64_vg1x2 (w8, z18, z23),
+ svsub_write_za64_vg1x2 (w8, z18, z23))
+
+/*
+** sub_write_w8_z4_z28:
+** sub za\.d\[w8, 0, vgx2\], {z4\.d - z5\.d}, {z28\.d - z29\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z4_z28, svint64x2_t,
+ svsub_write_za64_s64_vg1x2 (w8, z4, z28),
+ svsub_write_za64_vg1x2 (w8, z4, z28))
+
+/*
+** sub_write_w8p7_z4_z0:
+** sub za\.d\[w8, 7, vgx2\], {z4\.d - z5\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8p7_z4_z0, svint64x2_t,
+ svsub_write_za64_s64_vg1x2 (w8 + 7, z4, z0),
+ svsub_write_za64_vg1x2 (w8 + 7, z4, z0))
+
+/*
+** sub_write_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.d\[\1, 0, vgx2\], {z4\.d - z5\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8p8_z4_z4, svint64x2_t,
+ svsub_write_za64_s64_vg1x2 (w8 + 8, z4, z4),
+ svsub_write_za64_vg1x2 (w8 + 8, z4, z4))
+
+/*
+** sub_write_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** sub za\.d\[\1, 0, vgx2\], {z4\.d - z5\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8m1_z4_z0, svint64x2_t,
+ svsub_write_za64_s64_vg1x2 (w8 - 1, z4, z0),
+ svsub_write_za64_vg1x2 (w8 - 1, z4, z0))
+
+/*
+** sub_write_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_0_z1_z0, svint64x2_t, svint64_t,
+ svsub_write_single_za64_s64_vg1x2 (0, z1, z0),
+ svsub_write_za64_vg1x2 (0, z1, z0))
+
+/*
+** sub_write_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w0_z1_z0, svint64x2_t, svint64_t,
+ svsub_write_single_za64_s64_vg1x2 (w0, z1, z0),
+ svsub_write_za64_vg1x2 (w0, z1, z0))
+
+/*
+** sub_write_single_w8_z1_z0:
+** sub za\.d\[w8, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8_z1_z0, svint64x2_t, svint64_t,
+ svsub_write_single_za64_s64_vg1x2 (w8, z1, z0),
+ svsub_write_za64_vg1x2 (w8, z1, z0))
+
+/*
+** sub_write_single_w8p7_z1_z0:
+** sub za\.d\[w8, 7, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8p7_z1_z0, svint64x2_t, svint64_t,
+ svsub_write_single_za64_s64_vg1x2 (w8 + 7, z1, z0),
+ svsub_write_za64_vg1x2 (w8 + 7, z1, z0))
+
+/*
+** sub_write_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8p8_z1_z0, svint64x2_t, svint64_t,
+ svsub_write_single_za64_s64_vg1x2 (w8 + 8, z1, z0),
+ svsub_write_za64_vg1x2 (w8 + 8, z1, z0))
+
+/*
+** sub_write_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** sub za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w0m1_z1_z0, svint64x2_t, svint64_t,
+ svsub_write_single_za64_s64_vg1x2 (w0 - 1, z1, z0),
+ svsub_write_za64_vg1x2 (w0 - 1, z1, z0))
+
+/*
+** sub_write_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** sub za\.d\[w8, 0, vgx2\], {z0\.d - z1\.d}, z15\.d
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (sub_write_single_w8_z0_z15, svint64x2_t, svint64_t,
+ svsub_write_single_za64_s64_vg1x2 (w8, z0, z15),
+ svsub_write_za64_vg1x2 (w8, z0, z15))
+
+/*
+** sub_write_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** sub za\.d\[w8, 0, vgx2\], {z20\.d - z21\.d}, \1\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8_z20_z16, svint64x2_t, svint64_t,
+ svsub_write_single_za64_s64_vg1x2 (w8, z20, z16),
+ svsub_write_za64_vg1x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za64_s64_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za64_s64_vg1x4.c
new file mode 100644
index 0000000..13b2890
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za64_s64_vg1x4.c
@@ -0,0 +1,174 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** sub_write_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_0_z0_z0, svint64x4_t,
+ svsub_write_za64_s64_vg1x4 (0, z0, z0),
+ svsub_write_za64_vg1x4 (0, z0, z0))
+
+/*
+** sub_write_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w0_z0_z0, svint64x4_t,
+ svsub_write_za64_s64_vg1x4 (w0, z0, z0),
+ svsub_write_za64_vg1x4 (w0, z0, z0))
+
+/*
+** sub_write_w8_z0_z4:
+** sub za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z0_z4, svint64x4_t,
+ svsub_write_za64_s64_vg1x4 (w8, z0, z4),
+ svsub_write_za64_vg1x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** sub_write_w8_z0_z18:
+** ...
+** sub za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z0_z18, svint64x4_t,
+ svsub_write_za64_s64_vg1x4 (w8, z0, z18),
+ svsub_write_za64_vg1x4 (w8, z0, z18))
+
+/*
+** sub_write_w8_z18_z28:
+** ...
+** sub za\.d\[w8, 0, vgx4\], [^\n]+, {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z18_z28, svint64x4_t,
+ svsub_write_za64_s64_vg1x4 (w8, z18, z28),
+ svsub_write_za64_vg1x4 (w8, z18, z28))
+
+/*
+** sub_write_w8_z28_z23:
+** ...
+** sub za\.d\[w8, 0, vgx4\], {z28\.d - z31\.d}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z28_z23, svint64x4_t,
+ svsub_write_za64_s64_vg1x4 (w8, z28, z23),
+ svsub_write_za64_vg1x4 (w8, z28, z23))
+
+/*
+** sub_write_w8p7_z4_z0:
+** sub za\.d\[w8, 7, vgx4\], {z4\.d - z7\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8p7_z4_z0, svint64x4_t,
+ svsub_write_za64_s64_vg1x4 (w8 + 7, z4, z0),
+ svsub_write_za64_vg1x4 (w8 + 7, z4, z0))
+
+/*
+** sub_write_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.d\[\1, 0, vgx4\], {z4\.d - z7\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8p8_z4_z4, svint64x4_t,
+ svsub_write_za64_s64_vg1x4 (w8 + 8, z4, z4),
+ svsub_write_za64_vg1x4 (w8 + 8, z4, z4))
+
+/*
+** sub_write_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** sub za\.d\[\1, 0, vgx4\], {z4\.d - z7\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8m1_z4_z0, svint64x4_t,
+ svsub_write_za64_s64_vg1x4 (w8 - 1, z4, z0),
+ svsub_write_za64_vg1x4 (w8 - 1, z4, z0))
+
+/*
+** sub_write_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_0_z1_z0, svint64x4_t, svint64_t,
+ svsub_write_single_za64_s64_vg1x4 (0, z1, z0),
+ svsub_write_za64_vg1x4 (0, z1, z0))
+
+/*
+** sub_write_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w0_z1_z0, svint64x4_t, svint64_t,
+ svsub_write_single_za64_s64_vg1x4 (w0, z1, z0),
+ svsub_write_za64_vg1x4 (w0, z1, z0))
+
+/*
+** sub_write_single_w8_z1_z0:
+** sub za\.d\[w8, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8_z1_z0, svint64x4_t, svint64_t,
+ svsub_write_single_za64_s64_vg1x4 (w8, z1, z0),
+ svsub_write_za64_vg1x4 (w8, z1, z0))
+
+/*
+** sub_write_single_w8p7_z1_z0:
+** sub za\.d\[w8, 7, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8p7_z1_z0, svint64x4_t, svint64_t,
+ svsub_write_single_za64_s64_vg1x4 (w8 + 7, z1, z0),
+ svsub_write_za64_vg1x4 (w8 + 7, z1, z0))
+
+/*
+** sub_write_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8p8_z1_z0, svint64x4_t, svint64_t,
+ svsub_write_single_za64_s64_vg1x4 (w8 + 8, z1, z0),
+ svsub_write_za64_vg1x4 (w8 + 8, z1, z0))
+
+/*
+** sub_write_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** sub za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w0m1_z1_z0, svint64x4_t, svint64_t,
+ svsub_write_single_za64_s64_vg1x4 (w0 - 1, z1, z0),
+ svsub_write_za64_vg1x4 (w0 - 1, z1, z0))
+
+/*
+** sub_write_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** sub za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}, z15\.d
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (sub_write_single_w8_z0_z15, svint64x4_t, svint64_t,
+ svsub_write_single_za64_s64_vg1x4 (w8, z0, z15),
+ svsub_write_za64_vg1x4 (w8, z0, z15))
+
+/*
+** sub_write_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** sub za\.d\[w8, 0, vgx4\], {z20\.d - z23\.d}, \1\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8_z20_z16, svint64x4_t, svint64_t,
+ svsub_write_single_za64_s64_vg1x4 (w8, z20, z16),
+ svsub_write_za64_vg1x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za64_u64_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za64_u64_vg1x2.c
new file mode 100644
index 0000000..32672b5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za64_u64_vg1x2.c
@@ -0,0 +1,182 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** sub_write_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_0_z0_z0, svuint64x2_t,
+ svsub_write_za64_u64_vg1x2 (0, z0, z0),
+ svsub_write_za64_vg1x2 (0, z0, z0))
+
+/*
+** sub_write_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w0_z0_z0, svuint64x2_t,
+ svsub_write_za64_u64_vg1x2 (w0, z0, z0),
+ svsub_write_za64_vg1x2 (w0, z0, z0))
+
+/*
+** sub_write_w8_z0_z4:
+** sub za\.d\[w8, 0, vgx2\], {z0\.d - z1\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z0_z4, svuint64x2_t,
+ svsub_write_za64_u64_vg1x2 (w8, z0, z4),
+ svsub_write_za64_vg1x2 (w8, z0, z4))
+
+/*
+** sub_write_w8_z4_z18:
+** sub za\.d\[w8, 0, vgx2\], {z4\.d - z5\.d}, {z18\.d - z19\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z4_z18, svuint64x2_t,
+ svsub_write_za64_u64_vg1x2 (w8, z4, z18),
+ svsub_write_za64_vg1x2 (w8, z4, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** sub_write_w8_z23_z0:
+** ...
+** sub za\.d\[w8, 0, vgx2\], [^\n]+, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z23_z0, svuint64x2_t,
+ svsub_write_za64_u64_vg1x2 (w8, z23, z0),
+ svsub_write_za64_vg1x2 (w8, z23, z0))
+
+/*
+** sub_write_w8_z18_z23:
+** ...
+** sub za\.d\[w8, 0, vgx2\], {z18\.d - z19\.d}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z18_z23, svuint64x2_t,
+ svsub_write_za64_u64_vg1x2 (w8, z18, z23),
+ svsub_write_za64_vg1x2 (w8, z18, z23))
+
+/*
+** sub_write_w8_z4_z28:
+** sub za\.d\[w8, 0, vgx2\], {z4\.d - z5\.d}, {z28\.d - z29\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z4_z28, svuint64x2_t,
+ svsub_write_za64_u64_vg1x2 (w8, z4, z28),
+ svsub_write_za64_vg1x2 (w8, z4, z28))
+
+/*
+** sub_write_w8p7_z4_z0:
+** sub za\.d\[w8, 7, vgx2\], {z4\.d - z5\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8p7_z4_z0, svuint64x2_t,
+ svsub_write_za64_u64_vg1x2 (w8 + 7, z4, z0),
+ svsub_write_za64_vg1x2 (w8 + 7, z4, z0))
+
+/*
+** sub_write_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.d\[\1, 0, vgx2\], {z4\.d - z5\.d}, {z4\.d - z5\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8p8_z4_z4, svuint64x2_t,
+ svsub_write_za64_u64_vg1x2 (w8 + 8, z4, z4),
+ svsub_write_za64_vg1x2 (w8 + 8, z4, z4))
+
+/*
+** sub_write_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** sub za\.d\[\1, 0, vgx2\], {z4\.d - z5\.d}, {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8m1_z4_z0, svuint64x2_t,
+ svsub_write_za64_u64_vg1x2 (w8 - 1, z4, z0),
+ svsub_write_za64_vg1x2 (w8 - 1, z4, z0))
+
+/*
+** sub_write_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_0_z1_z0, svuint64x2_t, svuint64_t,
+ svsub_write_single_za64_u64_vg1x2 (0, z1, z0),
+ svsub_write_za64_vg1x2 (0, z1, z0))
+
+/*
+** sub_write_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w0_z1_z0, svuint64x2_t, svuint64_t,
+ svsub_write_single_za64_u64_vg1x2 (w0, z1, z0),
+ svsub_write_za64_vg1x2 (w0, z1, z0))
+
+/*
+** sub_write_single_w8_z1_z0:
+** sub za\.d\[w8, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8_z1_z0, svuint64x2_t, svuint64_t,
+ svsub_write_single_za64_u64_vg1x2 (w8, z1, z0),
+ svsub_write_za64_vg1x2 (w8, z1, z0))
+
+/*
+** sub_write_single_w8p7_z1_z0:
+** sub za\.d\[w8, 7, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8p7_z1_z0, svuint64x2_t, svuint64_t,
+ svsub_write_single_za64_u64_vg1x2 (w8 + 7, z1, z0),
+ svsub_write_za64_vg1x2 (w8 + 7, z1, z0))
+
+/*
+** sub_write_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8p8_z1_z0, svuint64x2_t, svuint64_t,
+ svsub_write_single_za64_u64_vg1x2 (w8 + 8, z1, z0),
+ svsub_write_za64_vg1x2 (w8 + 8, z1, z0))
+
+/*
+** sub_write_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** sub za\.d\[\1, 0, vgx2\], {z1\.d - z2\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w0m1_z1_z0, svuint64x2_t, svuint64_t,
+ svsub_write_single_za64_u64_vg1x2 (w0 - 1, z1, z0),
+ svsub_write_za64_vg1x2 (w0 - 1, z1, z0))
+
+/*
+** sub_write_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** sub za\.d\[w8, 0, vgx2\], {z0\.d - z1\.d}, z15\.d
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (sub_write_single_w8_z0_z15, svuint64x2_t, svuint64_t,
+ svsub_write_single_za64_u64_vg1x2 (w8, z0, z15),
+ svsub_write_za64_vg1x2 (w8, z0, z15))
+
+/*
+** sub_write_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** sub za\.d\[w8, 0, vgx2\], {z20\.d - z21\.d}, \1\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8_z20_z16, svuint64x2_t, svuint64_t,
+ svsub_write_single_za64_u64_vg1x2 (w8, z20, z16),
+ svsub_write_za64_vg1x2 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za64_u64_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za64_u64_vg1x4.c
new file mode 100644
index 0000000..84b5410
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_write_za64_u64_vg1x4.c
@@ -0,0 +1,174 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** sub_write_0_z0_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_0_z0_z0, svuint64x4_t,
+ svsub_write_za64_u64_vg1x4 (0, z0, z0),
+ svsub_write_za64_vg1x4 (0, z0, z0))
+
+/*
+** sub_write_w0_z0_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w0_z0_z0, svuint64x4_t,
+ svsub_write_za64_u64_vg1x4 (w0, z0, z0),
+ svsub_write_za64_vg1x4 (w0, z0, z0))
+
+/*
+** sub_write_w8_z0_z4:
+** sub za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z0_z4, svuint64x4_t,
+ svsub_write_za64_u64_vg1x4 (w8, z0, z4),
+ svsub_write_za64_vg1x4 (w8, z0, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** sub_write_w8_z0_z18:
+** ...
+** sub za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z0_z18, svuint64x4_t,
+ svsub_write_za64_u64_vg1x4 (w8, z0, z18),
+ svsub_write_za64_vg1x4 (w8, z0, z18))
+
+/*
+** sub_write_w8_z18_z28:
+** ...
+** sub za\.d\[w8, 0, vgx4\], [^\n]+, {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z18_z28, svuint64x4_t,
+ svsub_write_za64_u64_vg1x4 (w8, z18, z28),
+ svsub_write_za64_vg1x4 (w8, z18, z28))
+
+/*
+** sub_write_w8_z28_z23:
+** ...
+** sub za\.d\[w8, 0, vgx4\], {z28\.d - z31\.d}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_write_w8_z28_z23, svuint64x4_t,
+ svsub_write_za64_u64_vg1x4 (w8, z28, z23),
+ svsub_write_za64_vg1x4 (w8, z28, z23))
+
+/*
+** sub_write_w8p7_z4_z0:
+** sub za\.d\[w8, 7, vgx4\], {z4\.d - z7\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8p7_z4_z0, svuint64x4_t,
+ svsub_write_za64_u64_vg1x4 (w8 + 7, z4, z0),
+ svsub_write_za64_vg1x4 (w8 + 7, z4, z0))
+
+/*
+** sub_write_w8p8_z4_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.d\[\1, 0, vgx4\], {z4\.d - z7\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8p8_z4_z4, svuint64x4_t,
+ svsub_write_za64_u64_vg1x4 (w8 + 8, z4, z4),
+ svsub_write_za64_vg1x4 (w8 + 8, z4, z4))
+
+/*
+** sub_write_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** sub za\.d\[\1, 0, vgx4\], {z4\.d - z7\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_write_w8m1_z4_z0, svuint64x4_t,
+ svsub_write_za64_u64_vg1x4 (w8 - 1, z4, z0),
+ svsub_write_za64_vg1x4 (w8 - 1, z4, z0))
+
+/*
+** sub_write_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_0_z1_z0, svuint64x4_t, svuint64_t,
+ svsub_write_single_za64_u64_vg1x4 (0, z1, z0),
+ svsub_write_za64_vg1x4 (0, z1, z0))
+
+/*
+** sub_write_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w0_z1_z0, svuint64x4_t, svuint64_t,
+ svsub_write_single_za64_u64_vg1x4 (w0, z1, z0),
+ svsub_write_za64_vg1x4 (w0, z1, z0))
+
+/*
+** sub_write_single_w8_z1_z0:
+** sub za\.d\[w8, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8_z1_z0, svuint64x4_t, svuint64_t,
+ svsub_write_single_za64_u64_vg1x4 (w8, z1, z0),
+ svsub_write_za64_vg1x4 (w8, z1, z0))
+
+/*
+** sub_write_single_w8p7_z1_z0:
+** sub za\.d\[w8, 7, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8p7_z1_z0, svuint64x4_t, svuint64_t,
+ svsub_write_single_za64_u64_vg1x4 (w8 + 7, z1, z0),
+ svsub_write_za64_vg1x4 (w8 + 7, z1, z0))
+
+/*
+** sub_write_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8p8_z1_z0, svuint64x4_t, svuint64_t,
+ svsub_write_single_za64_u64_vg1x4 (w8 + 8, z1, z0),
+ svsub_write_za64_vg1x4 (w8 + 8, z1, z0))
+
+/*
+** sub_write_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** sub za\.d\[\1, 0, vgx4\], {z1\.d - z4\.d}, z0\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w0m1_z1_z0, svuint64x4_t, svuint64_t,
+ svsub_write_single_za64_u64_vg1x4 (w0 - 1, z1, z0),
+ svsub_write_za64_vg1x4 (w0 - 1, z1, z0))
+
+/*
+** sub_write_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** sub za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}, z15\.d
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (sub_write_single_w8_z0_z15, svuint64x4_t, svuint64_t,
+ svsub_write_single_za64_u64_vg1x4 (w8, z0, z15),
+ svsub_write_za64_vg1x4 (w8, z0, z15))
+
+/*
+** sub_write_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** sub za\.d\[w8, 0, vgx4\], {z20\.d - z23\.d}, \1\.d
+** ret
+*/
+TEST_ZA_SINGLE (sub_write_single_w8_z20_z16, svuint64x4_t, svuint64_t,
+ svsub_write_single_za64_u64_vg1x4 (w8, z20, z16),
+ svsub_write_za64_vg1x4 (w8, z20, z16))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_f32_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_f32_vg1x2.c
new file mode 100644
index 0000000..0d88996
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_f32_vg1x2.c
@@ -0,0 +1,122 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sub_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fsub za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_0_z0, svfloat32x2_t,
+ svsub_za32_f32_vg1x2 (0, z0),
+ svsub_za32_vg1x2 (0, z0))
+
+/*
+** sub_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** fsub za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w0_z0, svfloat32x2_t,
+ svsub_za32_f32_vg1x2 (w0, z0),
+ svsub_za32_vg1x2 (w0, z0))
+
+/*
+** sub_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** fsub za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w7_z0, svfloat32x2_t,
+ svsub_za32_f32_vg1x2 (w7, z0),
+ svsub_za32_vg1x2 (w7, z0))
+
+/*
+** sub_w8_z0:
+** fsub za\.s\[w8, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z0, svfloat32x2_t,
+ svsub_za32_f32_vg1x2 (w8, z0),
+ svsub_za32_vg1x2 (w8, z0))
+
+/*
+** sub_w11_z0:
+** fsub za\.s\[w11, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w11_z0, svfloat32x2_t,
+ svsub_za32_f32_vg1x2 (w11, z0),
+ svsub_za32_vg1x2 (w11, z0))
+
+
+/*
+** sub_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** fsub za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w12_z0, svfloat32x2_t,
+ svsub_za32_f32_vg1x2 (w12, z0),
+ svsub_za32_vg1x2 (w12, z0))
+
+/*
+** sub_w8p7_z0:
+** fsub za\.s\[w8, 7, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8p7_z0, svfloat32x2_t,
+ svsub_za32_f32_vg1x2 (w8 + 7, z0),
+ svsub_za32_vg1x2 (w8 + 7, z0))
+
+/*
+** sub_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** fsub za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8p8_z0, svfloat32x2_t,
+ svsub_za32_f32_vg1x2 (w8 + 8, z0),
+ svsub_za32_vg1x2 (w8 + 8, z0))
+
+/*
+** sub_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fsub za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8m1_z0, svfloat32x2_t,
+ svsub_za32_f32_vg1x2 (w8 - 1, z0),
+ svsub_za32_vg1x2 (w8 - 1, z0))
+
+/*
+** sub_w8_z18:
+** fsub za\.s\[w8, 0, vgx2\], {z18\.s - z19\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z18, svfloat32x2_t,
+ svsub_za32_f32_vg1x2 (w8, z18),
+ svsub_za32_vg1x2 (w8, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** sub_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** fsub za\.s\[w8, 0, vgx2\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_w8_z23, svfloat32x2_t,
+ svsub_za32_f32_vg1x2 (w8, z23),
+ svsub_za32_vg1x2 (w8, z23))
+
+/*
+** sub_w8_z28:
+** fsub za\.s\[w8, 0, vgx2\], {z28\.s - z29\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z28, svfloat32x2_t,
+ svsub_za32_f32_vg1x2 (w8, z28),
+ svsub_za32_vg1x2 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_f32_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_f32_vg1x4.c
new file mode 100644
index 0000000..f2ad7ff
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_f32_vg1x4.c
@@ -0,0 +1,137 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sub_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fsub za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_0_z0, svfloat32x4_t,
+ svsub_za32_f32_vg1x4 (0, z0),
+ svsub_za32_vg1x4 (0, z0))
+
+/*
+** sub_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** fsub za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w0_z0, svfloat32x4_t,
+ svsub_za32_f32_vg1x4 (w0, z0),
+ svsub_za32_vg1x4 (w0, z0))
+
+/*
+** sub_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** fsub za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w7_z0, svfloat32x4_t,
+ svsub_za32_f32_vg1x4 (w7, z0),
+ svsub_za32_vg1x4 (w7, z0))
+
+/*
+** sub_w8_z0:
+** fsub za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z0, svfloat32x4_t,
+ svsub_za32_f32_vg1x4 (w8, z0),
+ svsub_za32_vg1x4 (w8, z0))
+
+/*
+** sub_w11_z0:
+** fsub za\.s\[w11, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w11_z0, svfloat32x4_t,
+ svsub_za32_f32_vg1x4 (w11, z0),
+ svsub_za32_vg1x4 (w11, z0))
+
+
+/*
+** sub_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** fsub za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w12_z0, svfloat32x4_t,
+ svsub_za32_f32_vg1x4 (w12, z0),
+ svsub_za32_vg1x4 (w12, z0))
+
+/*
+** sub_w8p7_z0:
+** fsub za\.s\[w8, 7, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8p7_z0, svfloat32x4_t,
+ svsub_za32_f32_vg1x4 (w8 + 7, z0),
+ svsub_za32_vg1x4 (w8 + 7, z0))
+
+/*
+** sub_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** fsub za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8p8_z0, svfloat32x4_t,
+ svsub_za32_f32_vg1x4 (w8 + 8, z0),
+ svsub_za32_vg1x4 (w8 + 8, z0))
+
+/*
+** sub_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fsub za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8m1_z0, svfloat32x4_t,
+ svsub_za32_f32_vg1x4 (w8 - 1, z0),
+ svsub_za32_vg1x4 (w8 - 1, z0))
+
+/*
+** sub_w8_z4:
+** fsub za\.s\[w8, 0, vgx4\], {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z4, svfloat32x4_t,
+ svsub_za32_f32_vg1x4 (w8, z4),
+ svsub_za32_vg1x4 (w8, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** sub_w8_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fsub za\.s\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_w8_z18, svfloat32x4_t,
+ svsub_za32_f32_vg1x4 (w8, z18),
+ svsub_za32_vg1x4 (w8, z18))
+
+/*
+** sub_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fsub za\.s\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_w8_z23, svfloat32x4_t,
+ svsub_za32_f32_vg1x4 (w8, z23),
+ svsub_za32_vg1x4 (w8, z23))
+
+/*
+** sub_w8_z28:
+** fsub za\.s\[w8, 0, vgx4\], {z28\.s - z31\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z28, svfloat32x4_t,
+ svsub_za32_f32_vg1x4 (w8, z28),
+ svsub_za32_vg1x4 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_s32_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_s32_vg1x2.c
new file mode 100644
index 0000000..f941c43
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_s32_vg1x2.c
@@ -0,0 +1,122 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sub_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_0_z0, svint32x2_t,
+ svsub_za32_s32_vg1x2 (0, z0),
+ svsub_za32_vg1x2 (0, z0))
+
+/*
+** sub_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w0_z0, svint32x2_t,
+ svsub_za32_s32_vg1x2 (w0, z0),
+ svsub_za32_vg1x2 (w0, z0))
+
+/*
+** sub_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** sub za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w7_z0, svint32x2_t,
+ svsub_za32_s32_vg1x2 (w7, z0),
+ svsub_za32_vg1x2 (w7, z0))
+
+/*
+** sub_w8_z0:
+** sub za\.s\[w8, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z0, svint32x2_t,
+ svsub_za32_s32_vg1x2 (w8, z0),
+ svsub_za32_vg1x2 (w8, z0))
+
+/*
+** sub_w11_z0:
+** sub za\.s\[w11, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w11_z0, svint32x2_t,
+ svsub_za32_s32_vg1x2 (w11, z0),
+ svsub_za32_vg1x2 (w11, z0))
+
+
+/*
+** sub_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** sub za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w12_z0, svint32x2_t,
+ svsub_za32_s32_vg1x2 (w12, z0),
+ svsub_za32_vg1x2 (w12, z0))
+
+/*
+** sub_w8p7_z0:
+** sub za\.s\[w8, 7, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8p7_z0, svint32x2_t,
+ svsub_za32_s32_vg1x2 (w8 + 7, z0),
+ svsub_za32_vg1x2 (w8 + 7, z0))
+
+/*
+** sub_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8p8_z0, svint32x2_t,
+ svsub_za32_s32_vg1x2 (w8 + 8, z0),
+ svsub_za32_vg1x2 (w8 + 8, z0))
+
+/*
+** sub_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** sub za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8m1_z0, svint32x2_t,
+ svsub_za32_s32_vg1x2 (w8 - 1, z0),
+ svsub_za32_vg1x2 (w8 - 1, z0))
+
+/*
+** sub_w8_z18:
+** sub za\.s\[w8, 0, vgx2\], {z18\.s - z19\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z18, svint32x2_t,
+ svsub_za32_s32_vg1x2 (w8, z18),
+ svsub_za32_vg1x2 (w8, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** sub_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** sub za\.s\[w8, 0, vgx2\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_w8_z23, svint32x2_t,
+ svsub_za32_s32_vg1x2 (w8, z23),
+ svsub_za32_vg1x2 (w8, z23))
+
+/*
+** sub_w8_z28:
+** sub za\.s\[w8, 0, vgx2\], {z28\.s - z29\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z28, svint32x2_t,
+ svsub_za32_s32_vg1x2 (w8, z28),
+ svsub_za32_vg1x2 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_s32_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_s32_vg1x4.c
new file mode 100644
index 0000000..798159c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_s32_vg1x4.c
@@ -0,0 +1,137 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sub_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_0_z0, svint32x4_t,
+ svsub_za32_s32_vg1x4 (0, z0),
+ svsub_za32_vg1x4 (0, z0))
+
+/*
+** sub_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w0_z0, svint32x4_t,
+ svsub_za32_s32_vg1x4 (w0, z0),
+ svsub_za32_vg1x4 (w0, z0))
+
+/*
+** sub_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** sub za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w7_z0, svint32x4_t,
+ svsub_za32_s32_vg1x4 (w7, z0),
+ svsub_za32_vg1x4 (w7, z0))
+
+/*
+** sub_w8_z0:
+** sub za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z0, svint32x4_t,
+ svsub_za32_s32_vg1x4 (w8, z0),
+ svsub_za32_vg1x4 (w8, z0))
+
+/*
+** sub_w11_z0:
+** sub za\.s\[w11, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w11_z0, svint32x4_t,
+ svsub_za32_s32_vg1x4 (w11, z0),
+ svsub_za32_vg1x4 (w11, z0))
+
+
+/*
+** sub_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** sub za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w12_z0, svint32x4_t,
+ svsub_za32_s32_vg1x4 (w12, z0),
+ svsub_za32_vg1x4 (w12, z0))
+
+/*
+** sub_w8p7_z0:
+** sub za\.s\[w8, 7, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8p7_z0, svint32x4_t,
+ svsub_za32_s32_vg1x4 (w8 + 7, z0),
+ svsub_za32_vg1x4 (w8 + 7, z0))
+
+/*
+** sub_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8p8_z0, svint32x4_t,
+ svsub_za32_s32_vg1x4 (w8 + 8, z0),
+ svsub_za32_vg1x4 (w8 + 8, z0))
+
+/*
+** sub_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** sub za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8m1_z0, svint32x4_t,
+ svsub_za32_s32_vg1x4 (w8 - 1, z0),
+ svsub_za32_vg1x4 (w8 - 1, z0))
+
+/*
+** sub_w8_z4:
+** sub za\.s\[w8, 0, vgx4\], {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z4, svint32x4_t,
+ svsub_za32_s32_vg1x4 (w8, z4),
+ svsub_za32_vg1x4 (w8, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** sub_w8_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sub za\.s\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_w8_z18, svint32x4_t,
+ svsub_za32_s32_vg1x4 (w8, z18),
+ svsub_za32_vg1x4 (w8, z18))
+
+/*
+** sub_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sub za\.s\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_w8_z23, svint32x4_t,
+ svsub_za32_s32_vg1x4 (w8, z23),
+ svsub_za32_vg1x4 (w8, z23))
+
+/*
+** sub_w8_z28:
+** sub za\.s\[w8, 0, vgx4\], {z28\.s - z31\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z28, svint32x4_t,
+ svsub_za32_s32_vg1x4 (w8, z28),
+ svsub_za32_vg1x4 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_u32_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_u32_vg1x2.c
new file mode 100644
index 0000000..2defb79
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_u32_vg1x2.c
@@ -0,0 +1,122 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sub_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_0_z0, svuint32x2_t,
+ svsub_za32_u32_vg1x2 (0, z0),
+ svsub_za32_vg1x2 (0, z0))
+
+/*
+** sub_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w0_z0, svuint32x2_t,
+ svsub_za32_u32_vg1x2 (w0, z0),
+ svsub_za32_vg1x2 (w0, z0))
+
+/*
+** sub_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** sub za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w7_z0, svuint32x2_t,
+ svsub_za32_u32_vg1x2 (w7, z0),
+ svsub_za32_vg1x2 (w7, z0))
+
+/*
+** sub_w8_z0:
+** sub za\.s\[w8, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z0, svuint32x2_t,
+ svsub_za32_u32_vg1x2 (w8, z0),
+ svsub_za32_vg1x2 (w8, z0))
+
+/*
+** sub_w11_z0:
+** sub za\.s\[w11, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w11_z0, svuint32x2_t,
+ svsub_za32_u32_vg1x2 (w11, z0),
+ svsub_za32_vg1x2 (w11, z0))
+
+
+/*
+** sub_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** sub za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w12_z0, svuint32x2_t,
+ svsub_za32_u32_vg1x2 (w12, z0),
+ svsub_za32_vg1x2 (w12, z0))
+
+/*
+** sub_w8p7_z0:
+** sub za\.s\[w8, 7, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8p7_z0, svuint32x2_t,
+ svsub_za32_u32_vg1x2 (w8 + 7, z0),
+ svsub_za32_vg1x2 (w8 + 7, z0))
+
+/*
+** sub_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8p8_z0, svuint32x2_t,
+ svsub_za32_u32_vg1x2 (w8 + 8, z0),
+ svsub_za32_vg1x2 (w8 + 8, z0))
+
+/*
+** sub_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** sub za\.s\[\1, 0, vgx2\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8m1_z0, svuint32x2_t,
+ svsub_za32_u32_vg1x2 (w8 - 1, z0),
+ svsub_za32_vg1x2 (w8 - 1, z0))
+
+/*
+** sub_w8_z18:
+** sub za\.s\[w8, 0, vgx2\], {z18\.s - z19\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z18, svuint32x2_t,
+ svsub_za32_u32_vg1x2 (w8, z18),
+ svsub_za32_vg1x2 (w8, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** sub_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** sub za\.s\[w8, 0, vgx2\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_w8_z23, svuint32x2_t,
+ svsub_za32_u32_vg1x2 (w8, z23),
+ svsub_za32_vg1x2 (w8, z23))
+
+/*
+** sub_w8_z28:
+** sub za\.s\[w8, 0, vgx2\], {z28\.s - z29\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z28, svuint32x2_t,
+ svsub_za32_u32_vg1x2 (w8, z28),
+ svsub_za32_vg1x2 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_u32_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_u32_vg1x4.c
new file mode 100644
index 0000000..615f67e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za32_u32_vg1x4.c
@@ -0,0 +1,137 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sub_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_0_z0, svuint32x4_t,
+ svsub_za32_u32_vg1x4 (0, z0),
+ svsub_za32_vg1x4 (0, z0))
+
+/*
+** sub_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w0_z0, svuint32x4_t,
+ svsub_za32_u32_vg1x4 (w0, z0),
+ svsub_za32_vg1x4 (w0, z0))
+
+/*
+** sub_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** sub za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w7_z0, svuint32x4_t,
+ svsub_za32_u32_vg1x4 (w7, z0),
+ svsub_za32_vg1x4 (w7, z0))
+
+/*
+** sub_w8_z0:
+** sub za\.s\[w8, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z0, svuint32x4_t,
+ svsub_za32_u32_vg1x4 (w8, z0),
+ svsub_za32_vg1x4 (w8, z0))
+
+/*
+** sub_w11_z0:
+** sub za\.s\[w11, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w11_z0, svuint32x4_t,
+ svsub_za32_u32_vg1x4 (w11, z0),
+ svsub_za32_vg1x4 (w11, z0))
+
+
+/*
+** sub_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** sub za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w12_z0, svuint32x4_t,
+ svsub_za32_u32_vg1x4 (w12, z0),
+ svsub_za32_vg1x4 (w12, z0))
+
+/*
+** sub_w8p7_z0:
+** sub za\.s\[w8, 7, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8p7_z0, svuint32x4_t,
+ svsub_za32_u32_vg1x4 (w8 + 7, z0),
+ svsub_za32_vg1x4 (w8 + 7, z0))
+
+/*
+** sub_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8p8_z0, svuint32x4_t,
+ svsub_za32_u32_vg1x4 (w8 + 8, z0),
+ svsub_za32_vg1x4 (w8 + 8, z0))
+
+/*
+** sub_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** sub za\.s\[\1, 0, vgx4\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8m1_z0, svuint32x4_t,
+ svsub_za32_u32_vg1x4 (w8 - 1, z0),
+ svsub_za32_vg1x4 (w8 - 1, z0))
+
+/*
+** sub_w8_z4:
+** sub za\.s\[w8, 0, vgx4\], {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z4, svuint32x4_t,
+ svsub_za32_u32_vg1x4 (w8, z4),
+ svsub_za32_vg1x4 (w8, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** sub_w8_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sub za\.s\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_w8_z18, svuint32x4_t,
+ svsub_za32_u32_vg1x4 (w8, z18),
+ svsub_za32_vg1x4 (w8, z18))
+
+/*
+** sub_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sub za\.s\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_w8_z23, svuint32x4_t,
+ svsub_za32_u32_vg1x4 (w8, z23),
+ svsub_za32_vg1x4 (w8, z23))
+
+/*
+** sub_w8_z28:
+** sub za\.s\[w8, 0, vgx4\], {z28\.s - z31\.s}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z28, svuint32x4_t,
+ svsub_za32_u32_vg1x4 (w8, z28),
+ svsub_za32_vg1x4 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_f64_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_f64_vg1x2.c
new file mode 100644
index 0000000..6f13649
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_f64_vg1x2.c
@@ -0,0 +1,126 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#pragma GCC target "+sme-f64f64"
+
+#include "test_sme2_acle.h"
+
+/*
+** sub_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fsub za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_0_z0, svfloat64x2_t,
+ svsub_za64_f64_vg1x2 (0, z0),
+ svsub_za64_vg1x2 (0, z0))
+
+/*
+** sub_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** fsub za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w0_z0, svfloat64x2_t,
+ svsub_za64_f64_vg1x2 (w0, z0),
+ svsub_za64_vg1x2 (w0, z0))
+
+/*
+** sub_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** fsub za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w7_z0, svfloat64x2_t,
+ svsub_za64_f64_vg1x2 (w7, z0),
+ svsub_za64_vg1x2 (w7, z0))
+
+/*
+** sub_w8_z0:
+** fsub za\.d\[w8, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z0, svfloat64x2_t,
+ svsub_za64_f64_vg1x2 (w8, z0),
+ svsub_za64_vg1x2 (w8, z0))
+
+/*
+** sub_w11_z0:
+** fsub za\.d\[w11, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w11_z0, svfloat64x2_t,
+ svsub_za64_f64_vg1x2 (w11, z0),
+ svsub_za64_vg1x2 (w11, z0))
+
+
+/*
+** sub_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** fsub za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w12_z0, svfloat64x2_t,
+ svsub_za64_f64_vg1x2 (w12, z0),
+ svsub_za64_vg1x2 (w12, z0))
+
+/*
+** sub_w8p7_z0:
+** fsub za\.d\[w8, 7, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8p7_z0, svfloat64x2_t,
+ svsub_za64_f64_vg1x2 (w8 + 7, z0),
+ svsub_za64_vg1x2 (w8 + 7, z0))
+
+/*
+** sub_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** fsub za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8p8_z0, svfloat64x2_t,
+ svsub_za64_f64_vg1x2 (w8 + 8, z0),
+ svsub_za64_vg1x2 (w8 + 8, z0))
+
+/*
+** sub_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fsub za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8m1_z0, svfloat64x2_t,
+ svsub_za64_f64_vg1x2 (w8 - 1, z0),
+ svsub_za64_vg1x2 (w8 - 1, z0))
+
+/*
+** sub_w8_z18:
+** fsub za\.d\[w8, 0, vgx2\], {z18\.d - z19\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z18, svfloat64x2_t,
+ svsub_za64_f64_vg1x2 (w8, z18),
+ svsub_za64_vg1x2 (w8, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** sub_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** fsub za\.d\[w8, 0, vgx2\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_w8_z23, svfloat64x2_t,
+ svsub_za64_f64_vg1x2 (w8, z23),
+ svsub_za64_vg1x2 (w8, z23))
+
+/*
+** sub_w8_z28:
+** fsub za\.d\[w8, 0, vgx2\], {z28\.d - z29\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z28, svfloat64x2_t,
+ svsub_za64_f64_vg1x2 (w8, z28),
+ svsub_za64_vg1x2 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_f64_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_f64_vg1x4.c
new file mode 100644
index 0000000..032122d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_f64_vg1x4.c
@@ -0,0 +1,141 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#pragma GCC target "+sme-f64f64"
+
+#include "test_sme2_acle.h"
+
+/*
+** sub_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** fsub za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_0_z0, svfloat64x4_t,
+ svsub_za64_f64_vg1x4 (0, z0),
+ svsub_za64_vg1x4 (0, z0))
+
+/*
+** sub_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** fsub za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w0_z0, svfloat64x4_t,
+ svsub_za64_f64_vg1x4 (w0, z0),
+ svsub_za64_vg1x4 (w0, z0))
+
+/*
+** sub_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** fsub za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w7_z0, svfloat64x4_t,
+ svsub_za64_f64_vg1x4 (w7, z0),
+ svsub_za64_vg1x4 (w7, z0))
+
+/*
+** sub_w8_z0:
+** fsub za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z0, svfloat64x4_t,
+ svsub_za64_f64_vg1x4 (w8, z0),
+ svsub_za64_vg1x4 (w8, z0))
+
+/*
+** sub_w11_z0:
+** fsub za\.d\[w11, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w11_z0, svfloat64x4_t,
+ svsub_za64_f64_vg1x4 (w11, z0),
+ svsub_za64_vg1x4 (w11, z0))
+
+
+/*
+** sub_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** fsub za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w12_z0, svfloat64x4_t,
+ svsub_za64_f64_vg1x4 (w12, z0),
+ svsub_za64_vg1x4 (w12, z0))
+
+/*
+** sub_w8p7_z0:
+** fsub za\.d\[w8, 7, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8p7_z0, svfloat64x4_t,
+ svsub_za64_f64_vg1x4 (w8 + 7, z0),
+ svsub_za64_vg1x4 (w8 + 7, z0))
+
+/*
+** sub_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** fsub za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8p8_z0, svfloat64x4_t,
+ svsub_za64_f64_vg1x4 (w8 + 8, z0),
+ svsub_za64_vg1x4 (w8 + 8, z0))
+
+/*
+** sub_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** fsub za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8m1_z0, svfloat64x4_t,
+ svsub_za64_f64_vg1x4 (w8 - 1, z0),
+ svsub_za64_vg1x4 (w8 - 1, z0))
+
+/*
+** sub_w8_z4:
+** fsub za\.d\[w8, 0, vgx4\], {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z4, svfloat64x4_t,
+ svsub_za64_f64_vg1x4 (w8, z4),
+ svsub_za64_vg1x4 (w8, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** sub_w8_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fsub za\.d\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_w8_z18, svfloat64x4_t,
+ svsub_za64_f64_vg1x4 (w8, z18),
+ svsub_za64_vg1x4 (w8, z18))
+
+/*
+** sub_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** fsub za\.d\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_w8_z23, svfloat64x4_t,
+ svsub_za64_f64_vg1x4 (w8, z23),
+ svsub_za64_vg1x4 (w8, z23))
+
+/*
+** sub_w8_z28:
+** fsub za\.d\[w8, 0, vgx4\], {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z28, svfloat64x4_t,
+ svsub_za64_f64_vg1x4 (w8, z28),
+ svsub_za64_vg1x4 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_s64_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_s64_vg1x2.c
new file mode 100644
index 0000000..7244274
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_s64_vg1x2.c
@@ -0,0 +1,124 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** sub_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_0_z0, svint64x2_t,
+ svsub_za64_s64_vg1x2 (0, z0),
+ svsub_za64_vg1x2 (0, z0))
+
+/*
+** sub_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w0_z0, svint64x2_t,
+ svsub_za64_s64_vg1x2 (w0, z0),
+ svsub_za64_vg1x2 (w0, z0))
+
+/*
+** sub_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** sub za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w7_z0, svint64x2_t,
+ svsub_za64_s64_vg1x2 (w7, z0),
+ svsub_za64_vg1x2 (w7, z0))
+
+/*
+** sub_w8_z0:
+** sub za\.d\[w8, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z0, svint64x2_t,
+ svsub_za64_s64_vg1x2 (w8, z0),
+ svsub_za64_vg1x2 (w8, z0))
+
+/*
+** sub_w11_z0:
+** sub za\.d\[w11, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w11_z0, svint64x2_t,
+ svsub_za64_s64_vg1x2 (w11, z0),
+ svsub_za64_vg1x2 (w11, z0))
+
+
+/*
+** sub_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** sub za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w12_z0, svint64x2_t,
+ svsub_za64_s64_vg1x2 (w12, z0),
+ svsub_za64_vg1x2 (w12, z0))
+
+/*
+** sub_w8p7_z0:
+** sub za\.d\[w8, 7, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8p7_z0, svint64x2_t,
+ svsub_za64_s64_vg1x2 (w8 + 7, z0),
+ svsub_za64_vg1x2 (w8 + 7, z0))
+
+/*
+** sub_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8p8_z0, svint64x2_t,
+ svsub_za64_s64_vg1x2 (w8 + 8, z0),
+ svsub_za64_vg1x2 (w8 + 8, z0))
+
+/*
+** sub_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** sub za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8m1_z0, svint64x2_t,
+ svsub_za64_s64_vg1x2 (w8 - 1, z0),
+ svsub_za64_vg1x2 (w8 - 1, z0))
+
+/*
+** sub_w8_z18:
+** sub za\.d\[w8, 0, vgx2\], {z18\.d - z19\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z18, svint64x2_t,
+ svsub_za64_s64_vg1x2 (w8, z18),
+ svsub_za64_vg1x2 (w8, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** sub_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** sub za\.d\[w8, 0, vgx2\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_w8_z23, svint64x2_t,
+ svsub_za64_s64_vg1x2 (w8, z23),
+ svsub_za64_vg1x2 (w8, z23))
+
+/*
+** sub_w8_z28:
+** sub za\.d\[w8, 0, vgx2\], {z28\.d - z29\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z28, svint64x2_t,
+ svsub_za64_s64_vg1x2 (w8, z28),
+ svsub_za64_vg1x2 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_s64_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_s64_vg1x4.c
new file mode 100644
index 0000000..57971ee
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_s64_vg1x4.c
@@ -0,0 +1,139 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** sub_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_0_z0, svint64x4_t,
+ svsub_za64_s64_vg1x4 (0, z0),
+ svsub_za64_vg1x4 (0, z0))
+
+/*
+** sub_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w0_z0, svint64x4_t,
+ svsub_za64_s64_vg1x4 (w0, z0),
+ svsub_za64_vg1x4 (w0, z0))
+
+/*
+** sub_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** sub za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w7_z0, svint64x4_t,
+ svsub_za64_s64_vg1x4 (w7, z0),
+ svsub_za64_vg1x4 (w7, z0))
+
+/*
+** sub_w8_z0:
+** sub za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z0, svint64x4_t,
+ svsub_za64_s64_vg1x4 (w8, z0),
+ svsub_za64_vg1x4 (w8, z0))
+
+/*
+** sub_w11_z0:
+** sub za\.d\[w11, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w11_z0, svint64x4_t,
+ svsub_za64_s64_vg1x4 (w11, z0),
+ svsub_za64_vg1x4 (w11, z0))
+
+
+/*
+** sub_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** sub za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w12_z0, svint64x4_t,
+ svsub_za64_s64_vg1x4 (w12, z0),
+ svsub_za64_vg1x4 (w12, z0))
+
+/*
+** sub_w8p7_z0:
+** sub za\.d\[w8, 7, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8p7_z0, svint64x4_t,
+ svsub_za64_s64_vg1x4 (w8 + 7, z0),
+ svsub_za64_vg1x4 (w8 + 7, z0))
+
+/*
+** sub_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8p8_z0, svint64x4_t,
+ svsub_za64_s64_vg1x4 (w8 + 8, z0),
+ svsub_za64_vg1x4 (w8 + 8, z0))
+
+/*
+** sub_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** sub za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8m1_z0, svint64x4_t,
+ svsub_za64_s64_vg1x4 (w8 - 1, z0),
+ svsub_za64_vg1x4 (w8 - 1, z0))
+
+/*
+** sub_w8_z4:
+** sub za\.d\[w8, 0, vgx4\], {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z4, svint64x4_t,
+ svsub_za64_s64_vg1x4 (w8, z4),
+ svsub_za64_vg1x4 (w8, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** sub_w8_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sub za\.d\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_w8_z18, svint64x4_t,
+ svsub_za64_s64_vg1x4 (w8, z18),
+ svsub_za64_vg1x4 (w8, z18))
+
+/*
+** sub_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sub za\.d\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_w8_z23, svint64x4_t,
+ svsub_za64_s64_vg1x4 (w8, z23),
+ svsub_za64_vg1x4 (w8, z23))
+
+/*
+** sub_w8_z28:
+** sub za\.d\[w8, 0, vgx4\], {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z28, svint64x4_t,
+ svsub_za64_s64_vg1x4 (w8, z28),
+ svsub_za64_vg1x4 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_u64_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_u64_vg1x2.c
new file mode 100644
index 0000000..968e252
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_u64_vg1x2.c
@@ -0,0 +1,124 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** sub_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_0_z0, svuint64x2_t,
+ svsub_za64_u64_vg1x2 (0, z0),
+ svsub_za64_vg1x2 (0, z0))
+
+/*
+** sub_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w0_z0, svuint64x2_t,
+ svsub_za64_u64_vg1x2 (w0, z0),
+ svsub_za64_vg1x2 (w0, z0))
+
+/*
+** sub_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** sub za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w7_z0, svuint64x2_t,
+ svsub_za64_u64_vg1x2 (w7, z0),
+ svsub_za64_vg1x2 (w7, z0))
+
+/*
+** sub_w8_z0:
+** sub za\.d\[w8, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z0, svuint64x2_t,
+ svsub_za64_u64_vg1x2 (w8, z0),
+ svsub_za64_vg1x2 (w8, z0))
+
+/*
+** sub_w11_z0:
+** sub za\.d\[w11, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w11_z0, svuint64x2_t,
+ svsub_za64_u64_vg1x2 (w11, z0),
+ svsub_za64_vg1x2 (w11, z0))
+
+
+/*
+** sub_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** sub za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w12_z0, svuint64x2_t,
+ svsub_za64_u64_vg1x2 (w12, z0),
+ svsub_za64_vg1x2 (w12, z0))
+
+/*
+** sub_w8p7_z0:
+** sub za\.d\[w8, 7, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8p7_z0, svuint64x2_t,
+ svsub_za64_u64_vg1x2 (w8 + 7, z0),
+ svsub_za64_vg1x2 (w8 + 7, z0))
+
+/*
+** sub_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8p8_z0, svuint64x2_t,
+ svsub_za64_u64_vg1x2 (w8 + 8, z0),
+ svsub_za64_vg1x2 (w8 + 8, z0))
+
+/*
+** sub_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** sub za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8m1_z0, svuint64x2_t,
+ svsub_za64_u64_vg1x2 (w8 - 1, z0),
+ svsub_za64_vg1x2 (w8 - 1, z0))
+
+/*
+** sub_w8_z18:
+** sub za\.d\[w8, 0, vgx2\], {z18\.d - z19\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z18, svuint64x2_t,
+ svsub_za64_u64_vg1x2 (w8, z18),
+ svsub_za64_vg1x2 (w8, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** sub_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** sub za\.d\[w8, 0, vgx2\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_w8_z23, svuint64x2_t,
+ svsub_za64_u64_vg1x2 (w8, z23),
+ svsub_za64_vg1x2 (w8, z23))
+
+/*
+** sub_w8_z28:
+** sub za\.d\[w8, 0, vgx2\], {z28\.d - z29\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z28, svuint64x2_t,
+ svsub_za64_u64_vg1x2 (w8, z28),
+ svsub_za64_vg1x2 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_u64_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_u64_vg1x4.c
new file mode 100644
index 0000000..e9a6059
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sub_za64_u64_vg1x4.c
@@ -0,0 +1,139 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** sub_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** sub za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_0_z0, svuint64x4_t,
+ svsub_za64_u64_vg1x4 (0, z0),
+ svsub_za64_vg1x4 (0, z0))
+
+/*
+** sub_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** sub za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w0_z0, svuint64x4_t,
+ svsub_za64_u64_vg1x4 (w0, z0),
+ svsub_za64_vg1x4 (w0, z0))
+
+/*
+** sub_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** sub za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w7_z0, svuint64x4_t,
+ svsub_za64_u64_vg1x4 (w7, z0),
+ svsub_za64_vg1x4 (w7, z0))
+
+/*
+** sub_w8_z0:
+** sub za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z0, svuint64x4_t,
+ svsub_za64_u64_vg1x4 (w8, z0),
+ svsub_za64_vg1x4 (w8, z0))
+
+/*
+** sub_w11_z0:
+** sub za\.d\[w11, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w11_z0, svuint64x4_t,
+ svsub_za64_u64_vg1x4 (w11, z0),
+ svsub_za64_vg1x4 (w11, z0))
+
+
+/*
+** sub_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** sub za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w12_z0, svuint64x4_t,
+ svsub_za64_u64_vg1x4 (w12, z0),
+ svsub_za64_vg1x4 (w12, z0))
+
+/*
+** sub_w8p7_z0:
+** sub za\.d\[w8, 7, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8p7_z0, svuint64x4_t,
+ svsub_za64_u64_vg1x4 (w8 + 7, z0),
+ svsub_za64_vg1x4 (w8 + 7, z0))
+
+/*
+** sub_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sub za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8p8_z0, svuint64x4_t,
+ svsub_za64_u64_vg1x4 (w8 + 8, z0),
+ svsub_za64_vg1x4 (w8 + 8, z0))
+
+/*
+** sub_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** sub za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8m1_z0, svuint64x4_t,
+ svsub_za64_u64_vg1x4 (w8 - 1, z0),
+ svsub_za64_vg1x4 (w8 - 1, z0))
+
+/*
+** sub_w8_z4:
+** sub za\.d\[w8, 0, vgx4\], {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z4, svuint64x4_t,
+ svsub_za64_u64_vg1x4 (w8, z4),
+ svsub_za64_vg1x4 (w8, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** sub_w8_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sub za\.d\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_w8_z18, svuint64x4_t,
+ svsub_za64_u64_vg1x4 (w8, z18),
+ svsub_za64_vg1x4 (w8, z18))
+
+/*
+** sub_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sub za\.d\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (sub_w8_z23, svuint64x4_t,
+ svsub_za64_u64_vg1x4 (w8, z23),
+ svsub_za64_vg1x4 (w8, z23))
+
+/*
+** sub_w8_z28:
+** sub za\.d\[w8, 0, vgx4\], {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (sub_w8_z28, svuint64x4_t,
+ svsub_za64_u64_vg1x4 (w8, z28),
+ svsub_za64_vg1x4 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sudot_lane_za32_s8_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sudot_lane_za32_s8_vg1x2.c
new file mode 100644
index 0000000..0bf00bf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sudot_lane_za32_s8_vg1x2.c
@@ -0,0 +1,102 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sudot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** sudot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (sudot_lane_0_z0_z4_0, svint8x2_t, svuint8_t,
+ svsudot_lane_za32_s8_vg1x2 (0, z0, z4, 0),
+ svsudot_lane_za32_vg1x2 (0, z0, z4, 0))
+
+/*
+** sudot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** sudot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, z7\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (sudot_lane_w0_z0_z7_1, svint8x2_t, svuint8_t,
+ svsudot_lane_za32_s8_vg1x2 (w0, z0, z7, 1),
+ svsudot_lane_za32_vg1x2 (w0, z0, z7, 1))
+
+/*
+** sudot_lane_w8_z28_z4_2:
+** sudot za\.s\[w8, 0, vgx2\], {z28\.b - z29\.b}, z4\.b\[2\]
+** ret
+*/
+TEST_ZA_LANE (sudot_lane_w8_z28_z4_2, svint8x2_t, svuint8_t,
+ svsudot_lane_za32_s8_vg1x2 (w8, z28, z4, 2),
+ svsudot_lane_za32_vg1x2 (w8, z28, z4, 2))
+
+/*
+** sudot_lane_w8p7_z0_z4_3:
+** sudot za\.s\[w8, 7, vgx2\], {z0\.b - z1\.b}, z4\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (sudot_lane_w8p7_z0_z4_3, svint8x2_t, svuint8_t,
+ svsudot_lane_za32_s8_vg1x2 (w8 + 7, z0, z4, 3),
+ svsudot_lane_za32_vg1x2 (w8 + 7, z0, z4, 3))
+
+/*
+** sudot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** sudot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (sudot_lane_w8p8_z0_z4_0, svint8x2_t, svuint8_t,
+ svsudot_lane_za32_s8_vg1x2 (w8 + 8, z0, z4, 0),
+ svsudot_lane_za32_vg1x2 (w8 + 8, z0, z4, 0))
+
+/*
+** sudot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** sudot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (sudot_lane_w0m1_z0_z4_1, svint8x2_t, svuint8_t,
+ svsudot_lane_za32_s8_vg1x2 (w0 - 1, z0, z4, 1),
+ svsudot_lane_za32_vg1x2 (w0 - 1, z0, z4, 1))
+
+/*
+** sudot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** sudot za\.s\[w8, 0, vgx2\], {z4\.b - z5\.b}, z15\.b\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (sudot_lane_w8_z4_z15_2, svint8x2_t, svuint8_t,
+ svsudot_lane_za32_s8_vg1x2 (w8, z4, z15, 2),
+ svsudot_lane_za32_vg1x2 (w8, z4, z15, 2))
+
+/*
+** sudot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** sudot za\.s\[w8, 0, vgx2\], {z28\.b - z29\.b}, \1\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (sudot_lane_w8_z28_z16_3, svint8x2_t, svuint8_t,
+ svsudot_lane_za32_s8_vg1x2 (w8, z28, z16, 3),
+ svsudot_lane_za32_vg1x2 (w8, z28, z16, 3))
+
+/*
+** sudot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** sudot za\.s\[w8, 0, vgx2\], [^\n]+, z7\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (sudot_lane_w8_z17_z7_0, svint8x2_t, svuint8_t,
+ svsudot_lane_za32_s8_vg1x2 (w8, z17, z7, 0),
+ svsudot_lane_za32_vg1x2 (w8, z17, z7, 0))
+
+/*
+** sudot_lane_w8_z22_z4_1:
+** sudot za\.s\[w8, 0, vgx2\], {z22\.b - z23\.b}, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (sudot_lane_w8_z22_z4_1, svint8x2_t, svuint8_t,
+ svsudot_lane_za32_s8_vg1x2 (w8, z22, z4, 1),
+ svsudot_lane_za32_vg1x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sudot_lane_za32_s8_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sudot_lane_za32_s8_vg1x4.c
new file mode 100644
index 0000000..f633040
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sudot_lane_za32_s8_vg1x4.c
@@ -0,0 +1,108 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** sudot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** sudot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (sudot_lane_0_z0_z4_0, svint8x4_t, svuint8_t,
+ svsudot_lane_za32_s8_vg1x4 (0, z0, z4, 0),
+ svsudot_lane_za32_vg1x4 (0, z0, z4, 0))
+
+/*
+** sudot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** sudot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z7\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (sudot_lane_w0_z0_z7_1, svint8x4_t, svuint8_t,
+ svsudot_lane_za32_s8_vg1x4 (w0, z0, z7, 1),
+ svsudot_lane_za32_vg1x4 (w0, z0, z7, 1))
+
+/*
+** sudot_lane_w8_z28_z4_2:
+** sudot za\.s\[w8, 0, vgx4\], {z28\.b - z31\.b}, z4\.b\[2\]
+** ret
+*/
+TEST_ZA_LANE (sudot_lane_w8_z28_z4_2, svint8x4_t, svuint8_t,
+ svsudot_lane_za32_s8_vg1x4 (w8, z28, z4, 2),
+ svsudot_lane_za32_vg1x4 (w8, z28, z4, 2))
+
+/*
+** sudot_lane_w8p7_z0_z4_3:
+** sudot za\.s\[w8, 7, vgx4\], {z0\.b - z3\.b}, z4\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (sudot_lane_w8p7_z0_z4_3, svint8x4_t, svuint8_t,
+ svsudot_lane_za32_s8_vg1x4 (w8 + 7, z0, z4, 3),
+ svsudot_lane_za32_vg1x4 (w8 + 7, z0, z4, 3))
+
+/*
+** sudot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** sudot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (sudot_lane_w8p8_z0_z4_0, svint8x4_t, svuint8_t,
+ svsudot_lane_za32_s8_vg1x4 (w8 + 8, z0, z4, 0),
+ svsudot_lane_za32_vg1x4 (w8 + 8, z0, z4, 0))
+
+/*
+** sudot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** sudot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (sudot_lane_w0m1_z0_z4_1, svint8x4_t, svuint8_t,
+ svsudot_lane_za32_s8_vg1x4 (w0 - 1, z0, z4, 1),
+ svsudot_lane_za32_vg1x4 (w0 - 1, z0, z4, 1))
+
+/*
+** sudot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** sudot za\.s\[w8, 0, vgx4\], {z4\.b - z7\.b}, z15\.b\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (sudot_lane_w8_z4_z15_2, svint8x4_t, svuint8_t,
+ svsudot_lane_za32_s8_vg1x4 (w8, z4, z15, 2),
+ svsudot_lane_za32_vg1x4 (w8, z4, z15, 2))
+
+/*
+** sudot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** sudot za\.s\[w8, 0, vgx4\], {z28\.b - z31\.b}, \1\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (sudot_lane_w8_z28_z16_3, svint8x4_t, svuint8_t,
+ svsudot_lane_za32_s8_vg1x4 (w8, z28, z16, 3),
+ svsudot_lane_za32_vg1x4 (w8, z28, z16, 3))
+
+/*
+** sudot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sudot za\.s\[w8, 0, vgx4\], [^\n]+, z7\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (sudot_lane_w8_z17_z7_0, svint8x4_t, svuint8_t,
+ svsudot_lane_za32_s8_vg1x4 (w8, z17, z7, 0),
+ svsudot_lane_za32_vg1x4 (w8, z17, z7, 0))
+
+/*
+** sudot_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** sudot za\.s\[w8, 0, vgx4\], [^\n]+, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (sudot_lane_w8_z22_z4_1, svint8x4_t, svuint8_t,
+ svsudot_lane_za32_s8_vg1x4 (w8, z22, z4, 1),
+ svsudot_lane_za32_vg1x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sudot_za32_s8_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sudot_za32_s8_vg1x2.c
new file mode 100644
index 0000000..0077bac
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sudot_za32_s8_vg1x2.c
@@ -0,0 +1,243 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_0_z0_z4:
+** mov (w8|w9|w10|w11), #?0
+** usdot za\.s\[\1, 0, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_0_z0_z4, svint8x2_t,
+ svsudot_za32_s8_vg1x2 (0, z0, svreinterpret_u8 (z4)),
+ svsudot_za32_vg1x2 (0, z0, svreinterpret_u8 (z4)))
+
+/*
+** dot_w0_z0_z4:
+** mov (w8|w9|w10|w11), w0
+** usdot za\.s\[\1, 0, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w0_z0_z4, svint8x2_t,
+ svsudot_za32_s8_vg1x2 (w0, z0, svreinterpret_u8 (z4)),
+ svsudot_za32_vg1x2 (w0, z0, svreinterpret_u8 (z4)))
+
+/*
+** dot_w8_z0_z18:
+** usdot za\.s\[w8, 0, vgx2\], {z18\.b - z19\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z18, svint8x2_t,
+ svsudot_za32_s8_vg1x2 (w8, z0, svreinterpret_u8 (z18)),
+ svsudot_za32_vg1x2 (w8, z0, svreinterpret_u8 (z18)))
+
+/*
+** dot_w8_z4_z18:
+** usdot za\.s\[w8, 0, vgx2\], {z18\.b - z19\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z4_z18, svint8x2_t,
+ svsudot_za32_s8_vg1x2 (w8, z4, svreinterpret_u8 (z18)),
+ svsudot_za32_vg1x2 (w8, z4, svreinterpret_u8 (z18)))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** dot_w8_z0_z23:
+** ...
+** usdot za\.s\[w8, 0, vgx2\], [^\n]+, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z23, svint8x2_t,
+ svsudot_za32_s8_vg1x2 (w8, z0, svreinterpret_u8 (z23)),
+ svsudot_za32_vg1x2 (w8, z0, svreinterpret_u8 (z23)))
+
+/*
+** dot_w8_z23_z0:
+** ...
+** usdot za\.s\[w8, 0, vgx2\], {z0\.b - z1\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z23_z0, svint8x2_t,
+ svsudot_za32_s8_vg1x2 (w8, z23, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x2 (w8, z23, svreinterpret_u8 (z0)))
+
+/*
+** dot_w8_z18_z28:
+** usdot za\.s\[w8, 0, vgx2\], {z28\.b - z29\.b}, {z18\.b - z19\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z18_z28, svint8x2_t,
+ svsudot_za32_s8_vg1x2 (w8, z18, svreinterpret_u8 (z28)),
+ svsudot_za32_vg1x2 (w8, z18, svreinterpret_u8 (z28)))
+
+/*
+** dot_w8_z28_z4:
+** usdot za\.s\[w8, 0, vgx2\], {z4\.b - z5\.b}, {z28\.b - z29\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z28_z4, svint8x2_t,
+ svsudot_za32_s8_vg1x2 (w8, z28, svreinterpret_u8 (z4)),
+ svsudot_za32_vg1x2 (w8, z28, svreinterpret_u8 (z4)))
+
+/*
+** dot_w8p1_z4_z0:
+** usdot za\.s\[w8, 1, vgx2\], {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p1_z4_z0, svint8x2_t,
+ svsudot_za32_s8_vg1x2 (w8 + 1, z4, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x2 (w8 + 1, z4, svreinterpret_u8 (z0)))
+
+/*
+** dot_w8p2_z4_z0:
+** usdot za\.s\[w8, 2, vgx2\], {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p2_z4_z0, svint8x2_t,
+ svsudot_za32_s8_vg1x2 (w8 + 2, z4, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x2 (w8 + 2, z4, svreinterpret_u8 (z0)))
+
+/*
+** dot_w11p4_z4_z0:
+** usdot za\.s\[w11, 4, vgx2\], {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w11p4_z4_z0, svint8x2_t,
+ svsudot_za32_s8_vg1x2 (w11 + 4, z4, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x2 (w11 + 4, z4, svreinterpret_u8 (z0)))
+
+/*
+** dot_w8p7_z4_z0:
+** usdot za\.s\[w8, 7, vgx2\], {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p7_z4_z0, svint8x2_t,
+ svsudot_za32_s8_vg1x2 (w8 + 7, z4, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x2 (w8 + 7, z4, svreinterpret_u8 (z0)))
+
+/*
+** dot_w8p8_z0_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** usdot za\.s\[\1, 0, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p8_z0_z4, svint8x2_t,
+ svsudot_za32_s8_vg1x2 (w8 + 8, z0, svreinterpret_u8 (z4)),
+ svsudot_za32_vg1x2 (w8 + 8, z0, svreinterpret_u8 (z4)))
+
+/*
+** dot_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** usdot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8m1_z4_z0, svint8x2_t,
+ svsudot_za32_s8_vg1x2 (w8 - 1, z4, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x2 (w8 - 1, z4, svreinterpret_u8 (z0)))
+
+/*
+** dot_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** sudot za\.s\[\1, 0, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_0_z1_z0, svint8x2_t, svint8_t,
+ svsudot_single_za32_s8_vg1x2 (0, z1, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x2 (0, z1, svreinterpret_u8 (z0)))
+
+/*
+** dot_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** sudot za\.s\[\1, 0, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0_z1_z0, svint8x2_t, svint8_t,
+ svsudot_single_za32_s8_vg1x2 (w0, z1, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x2 (w0, z1, svreinterpret_u8 (z0)))
+
+/*
+** dot_single_w8_z1_z0:
+** sudot za\.s\[w8, 0, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z1_z0, svint8x2_t, svint8_t,
+ svsudot_single_za32_s8_vg1x2 (w8, z1, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x2 (w8, z1, svreinterpret_u8 (z0)))
+
+/*
+** dot_single_w8p1_z1_z0:
+** sudot za\.s\[w8, 1, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p1_z1_z0, svint8x2_t, svint8_t,
+ svsudot_single_za32_s8_vg1x2 (w8 + 1, z1, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x2 (w8 + 1, z1, svreinterpret_u8 (z0)))
+
+/*
+** dot_single_w8p2_z20_z0:
+** sudot za\.s\[w8, 2, vgx2\], {z20\.b - z21\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p2_z20_z0, svint8x2_t, svint8_t,
+ svsudot_single_za32_s8_vg1x2 (w8 + 2, z20, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x2 (w8 + 2, z20, svreinterpret_u8 (z0)))
+
+/*
+** dot_single_w11p4_z27_z0:
+** sudot za\.s\[w11, 4, vgx2\], {z27\.b - z28\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w11p4_z27_z0, svint8x2_t, svint8_t,
+ svsudot_single_za32_s8_vg1x2 (w11 + 4, z27, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x2 (w11 + 4, z27, svreinterpret_u8 (z0)))
+
+/*
+** dot_single_w8p7_z1_z0:
+** sudot za\.s\[w8, 7, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p7_z1_z0, svint8x2_t, svint8_t,
+ svsudot_single_za32_s8_vg1x2 (w8 + 7, z1, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x2 (w8 + 7, z1, svreinterpret_u8 (z0)))
+
+/*
+** dot_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sudot za\.s\[\1, 0, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p8_z1_z0, svint8x2_t, svint8_t,
+ svsudot_single_za32_s8_vg1x2 (w8 + 8, z1, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x2 (w8 + 8, z1, svreinterpret_u8 (z0)))
+
+/*
+** dot_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** sudot za\.s\[\1, 0, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0m1_z1_z0, svint8x2_t, svint8_t,
+ svsudot_single_za32_s8_vg1x2 (w0 - 1, z1, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x2 (w0 - 1, z1, svreinterpret_u8 (z0)))
+
+/*
+** dot_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** sudot za\.s\[w8, 0, vgx2\], {z0\.b - z1\.b}, z15\.b
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (dot_single_w8_z0_z15, svint8x2_t, svint8_t,
+ svsudot_single_za32_s8_vg1x2 (w8, z0, svreinterpret_u8 (z15)),
+ svsudot_za32_vg1x2 (w8, z0, svreinterpret_u8 (z15)))
+
+/*
+** dot_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** sudot za\.s\[w8, 0, vgx2\], {z20\.b - z21\.b}, \1\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z20_z16, svint8x2_t, svint8_t,
+ svsudot_single_za32_s8_vg1x2 (w8, z20, svreinterpret_u8 (z16)),
+ svsudot_za32_vg1x2 (w8, z20, svreinterpret_u8 (z16)))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sudot_za32_s8_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sudot_za32_s8_vg1x4.c
new file mode 100644
index 0000000..788ea01
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/sudot_za32_s8_vg1x4.c
@@ -0,0 +1,254 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_0_z0_z4:
+** mov (w8|w9|w10|w11), #?0
+** usdot za\.s\[\1, 0, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_0_z0_z4, svint8x4_t,
+ svsudot_za32_s8_vg1x4 (0, z0, svreinterpret_u8 (z4)),
+ svsudot_za32_vg1x4 (0, z0, svreinterpret_u8 (z4)))
+
+/*
+** dot_w0_z0_z4:
+** mov (w8|w9|w10|w11), w0
+** usdot za\.s\[\1, 0, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w0_z0_z4, svint8x4_t,
+ svsudot_za32_s8_vg1x4 (w0, z0, svreinterpret_u8 (z4)),
+ svsudot_za32_vg1x4 (w0, z0, svreinterpret_u8 (z4)))
+
+/*
+** dot_w8_z4_z0:
+** usdot za\.s\[w8, 0, vgx4\], {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z4_z0, svint8x4_t,
+ svsudot_za32_s8_vg1x4 (w8, z4, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x4 (w8, z4, svreinterpret_u8 (z0)))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** dot_w8_z0_z18:
+** ...
+** usdot za\.s\[w8, 0, vgx4\], [^\n]+, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z18, svint8x4_t,
+ svsudot_za32_s8_vg1x4 (w8, z0, svreinterpret_u8 (z18)),
+ svsudot_za32_vg1x4 (w8, z0, svreinterpret_u8 (z18)))
+
+/*
+** dot_w8_z18_z0:
+** ...
+** usdot za\.s\[w8, 0, vgx4\], {z0\.b - z3\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z18_z0, svint8x4_t,
+ svsudot_za32_s8_vg1x4 (w8, z18, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x4 (w8, z18, svreinterpret_u8 (z0)))
+
+/*
+** dot_w8_z0_z23:
+** ...
+** usdot za\.s\[w8, 0, vgx4\], [^\n]+, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z23, svint8x4_t,
+ svsudot_za32_s8_vg1x4 (w8, z0, svreinterpret_u8 (z23)),
+ svsudot_za32_vg1x4 (w8, z0, svreinterpret_u8 (z23)))
+
+/*
+** dot_w8_z23_z0:
+** ...
+** usdot za\.s\[w8, 0, vgx4\], {z0\.b - z3\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z23_z0, svint8x4_t,
+ svsudot_za32_s8_vg1x4 (w8, z23, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x4 (w8, z23, svreinterpret_u8 (z0)))
+
+/*
+** dot_w8_z4_z28:
+** usdot za\.s\[w8, 0, vgx4\], {z28\.b - z31\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z4_z28, svint8x4_t,
+ svsudot_za32_s8_vg1x4 (w8, z4, svreinterpret_u8 (z28)),
+ svsudot_za32_vg1x4 (w8, z4, svreinterpret_u8 (z28)))
+
+/*
+** dot_w8_z28_z0:
+** usdot za\.s\[w8, 0, vgx4\], {z0\.b - z3\.b}, {z28\.b - z31\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z28_z0, svint8x4_t,
+ svsudot_za32_s8_vg1x4 (w8, z28, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x4 (w8, z28, svreinterpret_u8 (z0)))
+
+/*
+** dot_w8p1_z4_z0:
+** usdot za\.s\[w8, 1, vgx4\], {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p1_z4_z0, svint8x4_t,
+ svsudot_za32_s8_vg1x4 (w8 + 1, z4, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x4 (w8 + 1, z4, svreinterpret_u8 (z0)))
+
+/*
+** dot_w8p2_z4_z0:
+** usdot za\.s\[w8, 2, vgx4\], {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p2_z4_z0, svint8x4_t,
+ svsudot_za32_s8_vg1x4 (w8 + 2, z4, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x4 (w8 + 2, z4, svreinterpret_u8 (z0)))
+
+/*
+** dot_w11p4_z4_z0:
+** usdot za\.s\[w11, 4, vgx4\], {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w11p4_z4_z0, svint8x4_t,
+ svsudot_za32_s8_vg1x4 (w11 + 4, z4, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x4 (w11 + 4, z4, svreinterpret_u8 (z0)))
+
+/*
+** dot_w8p7_z4_z0:
+** usdot za\.s\[w8, 7, vgx4\], {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p7_z4_z0, svint8x4_t,
+ svsudot_za32_s8_vg1x4 (w8 + 7, z4, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x4 (w8 + 7, z4, svreinterpret_u8 (z0)))
+
+/*
+** dot_w8p8_z0_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** usdot za\.s\[\1, 0, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p8_z0_z4, svint8x4_t,
+ svsudot_za32_s8_vg1x4 (w8 + 8, z0, svreinterpret_u8 (z4)),
+ svsudot_za32_vg1x4 (w8 + 8, z0, svreinterpret_u8 (z4)))
+
+/*
+** dot_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** usdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8m1_z4_z0, svint8x4_t,
+ svsudot_za32_s8_vg1x4 (w8 - 1, z4, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x4 (w8 - 1, z4, svreinterpret_u8 (z0)))
+
+/*
+** dot_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** sudot za\.s\[\1, 0, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_0_z1_z0, svint8x4_t, svint8_t,
+ svsudot_single_za32_s8_vg1x4 (0, z1, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x4 (0, z1, svreinterpret_u8 (z0)))
+
+/*
+** dot_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** sudot za\.s\[\1, 0, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0_z1_z0, svint8x4_t, svint8_t,
+ svsudot_single_za32_s8_vg1x4 (w0, z1, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x4 (w0, z1, svreinterpret_u8 (z0)))
+
+/*
+** dot_single_w8_z1_z0:
+** sudot za\.s\[w8, 0, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z1_z0, svint8x4_t, svint8_t,
+ svsudot_single_za32_s8_vg1x4 (w8, z1, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x4 (w8, z1, svreinterpret_u8 (z0)))
+
+/*
+** dot_single_w8p1_z1_z0:
+** sudot za\.s\[w8, 1, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p1_z1_z0, svint8x4_t, svint8_t,
+ svsudot_single_za32_s8_vg1x4 (w8 + 1, z1, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x4 (w8 + 1, z1, svreinterpret_u8 (z0)))
+
+/*
+** dot_single_w8p4_z20_z0:
+** sudot za\.s\[w8, 4, vgx4\], {z20\.b - z23\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p4_z20_z0, svint8x4_t, svint8_t,
+ svsudot_single_za32_s8_vg1x4 (w8 + 4, z20, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x4 (w8 + 4, z20, svreinterpret_u8 (z0)))
+
+/*
+** dot_single_w8p6_z27_z0:
+** sudot za\.s\[w8, 6, vgx4\], {z27\.b - z30\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p6_z27_z0, svint8x4_t, svint8_t,
+ svsudot_single_za32_s8_vg1x4 (w8 + 6, z27, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x4 (w8 + 6, z27, svreinterpret_u8 (z0)))
+
+/*
+** dot_single_w8p7_z1_z0:
+** sudot za\.s\[w8, 7, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p7_z1_z0, svint8x4_t, svint8_t,
+ svsudot_single_za32_s8_vg1x4 (w8 + 7, z1, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x4 (w8 + 7, z1, svreinterpret_u8 (z0)))
+
+/*
+** dot_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** sudot za\.s\[\1, 0, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p8_z1_z0, svint8x4_t, svint8_t,
+ svsudot_single_za32_s8_vg1x4 (w8 + 8, z1, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x4 (w8 + 8, z1, svreinterpret_u8 (z0)))
+
+/*
+** dot_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** sudot za\.s\[\1, 0, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0m1_z1_z0, svint8x4_t, svint8_t,
+ svsudot_single_za32_s8_vg1x4 (w0 - 1, z1, svreinterpret_u8 (z0)),
+ svsudot_za32_vg1x4 (w0 - 1, z1, svreinterpret_u8 (z0)))
+
+/*
+** dot_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** sudot za\.s\[w8, 0, vgx4\], {z0\.b - z3\.b}, z15\.b
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (dot_single_w8_z0_z15, svint8x4_t, svint8_t,
+ svsudot_single_za32_s8_vg1x4 (w8, z0, svreinterpret_u8 (z15)),
+ svsudot_za32_vg1x4 (w8, z0, svreinterpret_u8 (z15)))
+
+/*
+** dot_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** sudot za\.s\[w8, 0, vgx4\], {z20\.b - z23\.b}, \1\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z20_z16, svint8x4_t, svint8_t,
+ svsudot_single_za32_s8_vg1x4 (w8, z20, svreinterpret_u8 (z16)),
+ svsudot_za32_vg1x4 (w8, z20, svreinterpret_u8 (z16)))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/suvdot_lane_za32_s8_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/suvdot_lane_za32_s8_vg1x4.c
new file mode 100644
index 0000000..5ab1c3f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/suvdot_lane_za32_s8_vg1x4.c
@@ -0,0 +1,108 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** suvdot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** suvdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (suvdot_lane_0_z0_z4_0, svint8x4_t, svuint8_t,
+ svsuvdot_lane_za32_s8_vg1x4 (0, z0, z4, 0),
+ svsuvdot_lane_za32_vg1x4 (0, z0, z4, 0))
+
+/*
+** suvdot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** suvdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z7\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (suvdot_lane_w0_z0_z7_1, svint8x4_t, svuint8_t,
+ svsuvdot_lane_za32_s8_vg1x4 (w0, z0, z7, 1),
+ svsuvdot_lane_za32_vg1x4 (w0, z0, z7, 1))
+
+/*
+** suvdot_lane_w8_z28_z4_2:
+** suvdot za\.s\[w8, 0, vgx4\], {z28\.b - z31\.b}, z4\.b\[2\]
+** ret
+*/
+TEST_ZA_LANE (suvdot_lane_w8_z28_z4_2, svint8x4_t, svuint8_t,
+ svsuvdot_lane_za32_s8_vg1x4 (w8, z28, z4, 2),
+ svsuvdot_lane_za32_vg1x4 (w8, z28, z4, 2))
+
+/*
+** suvdot_lane_w8p7_z0_z4_3:
+** suvdot za\.s\[w8, 7, vgx4\], {z0\.b - z3\.b}, z4\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (suvdot_lane_w8p7_z0_z4_3, svint8x4_t, svuint8_t,
+ svsuvdot_lane_za32_s8_vg1x4 (w8 + 7, z0, z4, 3),
+ svsuvdot_lane_za32_vg1x4 (w8 + 7, z0, z4, 3))
+
+/*
+** suvdot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** suvdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (suvdot_lane_w8p8_z0_z4_0, svint8x4_t, svuint8_t,
+ svsuvdot_lane_za32_s8_vg1x4 (w8 + 8, z0, z4, 0),
+ svsuvdot_lane_za32_vg1x4 (w8 + 8, z0, z4, 0))
+
+/*
+** suvdot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** suvdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (suvdot_lane_w0m1_z0_z4_1, svint8x4_t, svuint8_t,
+ svsuvdot_lane_za32_s8_vg1x4 (w0 - 1, z0, z4, 1),
+ svsuvdot_lane_za32_vg1x4 (w0 - 1, z0, z4, 1))
+
+/*
+** suvdot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** suvdot za\.s\[w8, 0, vgx4\], {z4\.b - z7\.b}, z15\.b\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (suvdot_lane_w8_z4_z15_2, svint8x4_t, svuint8_t,
+ svsuvdot_lane_za32_s8_vg1x4 (w8, z4, z15, 2),
+ svsuvdot_lane_za32_vg1x4 (w8, z4, z15, 2))
+
+/*
+** suvdot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** suvdot za\.s\[w8, 0, vgx4\], {z28\.b - z31\.b}, \1\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (suvdot_lane_w8_z28_z16_3, svint8x4_t, svuint8_t,
+ svsuvdot_lane_za32_s8_vg1x4 (w8, z28, z16, 3),
+ svsuvdot_lane_za32_vg1x4 (w8, z28, z16, 3))
+
+/*
+** suvdot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** suvdot za\.s\[w8, 0, vgx4\], [^\n]+, z7\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (suvdot_lane_w8_z17_z7_0, svint8x4_t, svuint8_t,
+ svsuvdot_lane_za32_s8_vg1x4 (w8, z17, z7, 0),
+ svsuvdot_lane_za32_vg1x4 (w8, z17, z7, 0))
+
+/*
+** suvdot_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** suvdot za\.s\[w8, 0, vgx4\], [^\n]+, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (suvdot_lane_w8_z22_z4_1, svint8x4_t, svuint8_t,
+ svsuvdot_lane_za32_s8_vg1x4 (w8, z22, z4, 1),
+ svsuvdot_lane_za32_vg1x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/test_sme2_acle.h b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/test_sme2_acle.h
new file mode 100644
index 0000000..8b982ca
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/test_sme2_acle.h
@@ -0,0 +1,124 @@
+#ifndef TEST_SME2_ACLE_H
+#define TEST_SME2_ACLE_H 1
+
+#include "../../sme/acle-asm/test_sme_acle.h"
+
+#define TEST_ZA_X1(NAME, ZTYPE, CODE1, CODE2) \
+ PROTO (NAME, void, (int w0)) \
+ { \
+ register int w7 __asm ("w7"); \
+ register int w8 __asm ("w8"); \
+ register int w9 __asm ("w9"); \
+ register int w10 __asm ("w10"); \
+ register int w11 __asm ("w11"); \
+ register int w12 __asm ("w12"); \
+ register ZTYPE z0 __asm ("z0"); \
+ register ZTYPE z3 __asm ("z3"); \
+ register ZTYPE z7 __asm ("z7"); \
+ register ZTYPE z16 __asm ("z16"); \
+ register ZTYPE z23 __asm ("z23"); \
+ register ZTYPE z31 __asm ("z31"); \
+ __asm volatile ("" : "=r" (w7), "=r" (w8), "=r" (w9), \
+ "=r" (w10), "=r" (w11), "=r" (w12), \
+ "=w" (z0), "=w" (z3), "=w" (z7), \
+ "=w" (z16), "=w" (z23), "=w" (z31)); \
+ INVOKE (CODE1, CODE2); \
+ }
+
+#define TEST_ZA_XN(NAME, TTYPE, CODE1, CODE2) \
+ PROTO (NAME, void, (int w0)) \
+ { \
+ register int w7 __asm ("w7"); \
+ register int w8 __asm ("w8"); \
+ register int w11 __asm ("w11"); \
+ register int w12 __asm ("w12"); \
+ register int w15 __asm ("w15"); \
+ register int w16 __asm ("w16"); \
+ register TTYPE z0 __asm ("z0"); \
+ register TTYPE z4 __asm ("z4"); \
+ register TTYPE z18 __asm ("z18"); \
+ register TTYPE z23 __asm ("z23"); \
+ register TTYPE z28 __asm ("z28"); \
+ __asm volatile ("" : "=r" (w7), "=r" (w8), "=r" (w11), \
+ "=r" (w12), "=r" (w15), "=r" (w16), \
+ "=w" (z0), "=w" (z4), "=w" (z18), \
+ "=w" (z23), "=w" (z28)); \
+ INVOKE (CODE1, CODE2); \
+ }
+
+#define TEST_READ_ZA_XN(NAME, TTYPE, CODE1, CODE2) \
+ PROTO (NAME, void, (int w0)) \
+ { \
+ register int w7 __asm ("w7"); \
+ register int w8 __asm ("w8"); \
+ register int w11 __asm ("w11"); \
+ register int w12 __asm ("w12"); \
+ register int w15 __asm ("w15"); \
+ register int w16 __asm ("w16"); \
+ register TTYPE z0 __asm ("z0"); \
+ register TTYPE z4 __asm ("z4"); \
+ register TTYPE z18 __asm ("z18"); \
+ register TTYPE z23 __asm ("z23"); \
+ register TTYPE z28 __asm ("z28"); \
+ __asm volatile ("" : "=r" (w7), "=r" (w8), "=r" (w11), \
+ "=r" (w12), "=r" (w15), "=r" (w16)); \
+ INVOKE (CODE1, CODE2); \
+ __asm volatile ("" :: "w" (z0), "w" (z4), "w" (z18), \
+ "w" (z23), "w" (z28)); \
+ }
+
+#define TEST_ZA_SINGLE(NAME, TTYPE, ZTYPE, CODE1, CODE2) \
+ PROTO (NAME, void, (int w0)) \
+ { \
+ register int w8 __asm ("w8"); \
+ register int w11 __asm ("w11"); \
+ register ZTYPE z0 __asm ("z0"); \
+ register TTYPE z1 __asm ("z1"); \
+ register ZTYPE z16 __asm ("z16"); \
+ register TTYPE z20 __asm ("z20"); \
+ register TTYPE z27 __asm ("z27"); \
+ __asm volatile ("" : "=r" (w8), "=r" (w11), "=w" (z0), \
+ "=w" (z1), "=w" (z16), "=w" (z20), \
+ "=w" (z27)); \
+ INVOKE (CODE1, CODE2); \
+ }
+
+#define TEST_ZA_SINGLE_Z15(NAME, TTYPE, ZTYPE, CODE1, CODE2) \
+ PROTO (NAME, void, (int w0)) \
+ { \
+ register int w8 __asm ("w8"); \
+ register TTYPE z0 __asm ("z0"); \
+ register ZTYPE z15 __asm ("z15"); \
+ __asm volatile ("" : "=r" (w8), "=w" (z0), "=w" (z15)); \
+ INVOKE (CODE1, CODE2); \
+ }
+
+#define TEST_ZA_LANE(NAME, TTYPE, ZTYPE, CODE1, CODE2) \
+ PROTO (NAME, void, (int w0)) \
+ { \
+ register int w8 __asm ("w8"); \
+ register int w11 __asm ("w11"); \
+ register TTYPE z0 __asm ("z0"); \
+ register ZTYPE z4 __asm ("z4"); \
+ register ZTYPE z7 __asm ("z7"); \
+ register ZTYPE z16 __asm ("z16"); \
+ register TTYPE z17 __asm ("z17"); \
+ register TTYPE z22 __asm ("z22"); \
+ register TTYPE z28 __asm ("z28"); \
+ __asm volatile ("" : "=r" (w8), "=r" (w11), "=w" (z0), \
+ "=w" (z4), "=w" (z7), "=w" (z16), \
+ "=w" (z17), "=w" (z22), "=w" (z28)); \
+ INVOKE (CODE1, CODE2); \
+ }
+
+#define TEST_ZA_LANE_Z15(NAME, TTYPE, ZTYPE, CODE1, CODE2) \
+ PROTO (NAME, void, (int w0)) \
+ { \
+ register int w8 __asm ("w8"); \
+ register TTYPE z4 __asm ("z4"); \
+ register ZTYPE z15 __asm ("z15"); \
+ __asm volatile ("" : "=r" (w8), "=w" (z4), "=w" (z15)); \
+ INVOKE (CODE1, CODE2); \
+ }
+
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s16_x2.c
new file mode 100644
index 0000000..c835cda
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s16_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** unpk_z0_z4:
+** sunpk {z0\.s - z1\.s}, z4\.h
+** ret
+*/
+TEST_DUAL_XN (unpk_z0_z4, svint32x2_t, svint16_t, z0,
+ svunpk_s32_s16_x2 (z4),
+ svunpk_s32 (z4))
+
+/*
+** unpk_z4_z0:
+** sunpk {z4\.s - z5\.s}, z0\.h
+** ret
+*/
+TEST_DUAL_XN (unpk_z4_z0, svint16_t, svint32x2_t, z4,
+ svunpk_s32_s16_x2 (z0),
+ svunpk_s32 (z0))
+
+/*
+** unpk_z18_z23:
+** sunpk {z18\.s - z19\.s}, z23\.h
+** ret
+*/
+TEST_DUAL_XN (unpk_z18_z23, svint32x2_t, svint16_t, z18,
+ svunpk_s32_s16_x2 (z23),
+ svunpk_s32 (z23))
+
+/*
+** unpk_z23_z28:
+** sunpk [^\n]+, z28\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z23_z28, svint16_t, svint32x2_t, z23,
+ svunpk_s32_s16_x2 (z28),
+ svunpk_s32 (z28))
+
+/*
+** unpk_z28_z4:
+** sunpk {z28\.s - z29\.s}, z4\.h
+** ret
+*/
+TEST_DUAL_XN (unpk_z28_z4, svint32x2_t, svint16_t, z28,
+ svunpk_s32_s16_x2 (z4),
+ svunpk_s32 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s16_x4.c
new file mode 100644
index 0000000..eb195c4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s16_x4.c
@@ -0,0 +1,76 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** unpk_z0_z4:
+** sunpk {z0\.s - z3\.s}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_DUAL_XN (unpk_z0_z4, svint32x4_t, svint16x2_t, z0,
+ svunpk_s32_s16_x4 (z4),
+ svunpk_s32 (z4))
+
+/*
+** unpk_z4_z0:
+** sunpk {z4\.s - z7\.s}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_DUAL_XN (unpk_z4_z0, svint16x2_t, svint32x4_t, z4,
+ svunpk_s32_s16_x4 (z0),
+ svunpk_s32 (z0))
+
+/*
+** unpk_z4_z18:
+** sunpk {z4\.s - z7\.s}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_DUAL_XN (unpk_z4_z18, svint16x2_t, svint32x4_t, z4,
+ svunpk_s32_s16_x4 (z18),
+ svunpk_s32 (z18))
+
+/*
+** unpk_z28_z23:
+** mov [^\n]+
+** mov [^\n]+
+** sunpk {z28\.s - z31\.s}, [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z28_z23, svint32x4_t, svint16x2_t, z28,
+ svunpk_s32_s16_x4 (z23),
+ svunpk_s32 (z23))
+
+/*
+** unpk_z23_z28:
+** sunpk [^\n]+, {z28\.h - z29\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z23_z28, svint16x2_t, svint32x4_t, z23,
+ svunpk_s32_s16_x4 (z28),
+ svunpk_s32 (z28))
+
+/*
+** unpk_z23_z18:
+** sunpk {z[^\n]+}, {z18\.h - z19\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z23_z18, svint16x2_t, svint32x4_t, z23,
+ svunpk_s32_s16_x4 (z18),
+ svunpk_s32 (z18))
+
+/*
+** unpk_z28_z4:
+** sunpk {z28\.s - z31\.s}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_DUAL_XN (unpk_z28_z4, svint32x4_t, svint16x2_t, z28,
+ svunpk_s32_s16_x4 (z4),
+ svunpk_s32 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s32_x2.c
new file mode 100644
index 0000000..9a5043b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s32_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** unpk_z0_z4:
+** sunpk {z0\.d - z1\.d}, z4\.s
+** ret
+*/
+TEST_DUAL_XN (unpk_z0_z4, svint64x2_t, svint32_t, z0,
+ svunpk_s64_s32_x2 (z4),
+ svunpk_s64 (z4))
+
+/*
+** unpk_z4_z0:
+** sunpk {z4\.d - z5\.d}, z0\.s
+** ret
+*/
+TEST_DUAL_XN (unpk_z4_z0, svint32_t, svint64x2_t, z4,
+ svunpk_s64_s32_x2 (z0),
+ svunpk_s64 (z0))
+
+/*
+** unpk_z18_z23:
+** sunpk {z18\.d - z19\.d}, z23\.s
+** ret
+*/
+TEST_DUAL_XN (unpk_z18_z23, svint64x2_t, svint32_t, z18,
+ svunpk_s64_s32_x2 (z23),
+ svunpk_s64 (z23))
+
+/*
+** unpk_z23_z28:
+** sunpk [^\n]+, z28\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z23_z28, svint32_t, svint64x2_t, z23,
+ svunpk_s64_s32_x2 (z28),
+ svunpk_s64 (z28))
+
+/*
+** unpk_z28_z4:
+** sunpk {z28\.d - z29\.d}, z4\.s
+** ret
+*/
+TEST_DUAL_XN (unpk_z28_z4, svint64x2_t, svint32_t, z28,
+ svunpk_s64_s32_x2 (z4),
+ svunpk_s64 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s32_x4.c
new file mode 100644
index 0000000..6e15de1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s32_x4.c
@@ -0,0 +1,76 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** unpk_z0_z4:
+** sunpk {z0\.d - z3\.d}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_DUAL_XN (unpk_z0_z4, svint64x4_t, svint32x2_t, z0,
+ svunpk_s64_s32_x4 (z4),
+ svunpk_s64 (z4))
+
+/*
+** unpk_z4_z0:
+** sunpk {z4\.d - z7\.d}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_DUAL_XN (unpk_z4_z0, svint32x2_t, svint64x4_t, z4,
+ svunpk_s64_s32_x4 (z0),
+ svunpk_s64 (z0))
+
+/*
+** unpk_z4_z18:
+** sunpk {z4\.d - z7\.d}, {z18\.s - z19\.s}
+** ret
+*/
+TEST_DUAL_XN (unpk_z4_z18, svint32x2_t, svint64x4_t, z4,
+ svunpk_s64_s32_x4 (z18),
+ svunpk_s64 (z18))
+
+/*
+** unpk_z28_z23:
+** mov [^\n]+
+** mov [^\n]+
+** sunpk {z28\.d - z31\.d}, [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z28_z23, svint64x4_t, svint32x2_t, z28,
+ svunpk_s64_s32_x4 (z23),
+ svunpk_s64 (z23))
+
+/*
+** unpk_z23_z28:
+** sunpk [^\n]+, {z28\.s - z29\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z23_z28, svint32x2_t, svint64x4_t, z23,
+ svunpk_s64_s32_x4 (z28),
+ svunpk_s64 (z28))
+
+/*
+** unpk_z23_z18:
+** sunpk {z[^\n]+}, {z18\.s - z19\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z23_z18, svint32x2_t, svint64x4_t, z23,
+ svunpk_s64_s32_x4 (z18),
+ svunpk_s64 (z18))
+
+/*
+** unpk_z28_z4:
+** sunpk {z28\.d - z31\.d}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_DUAL_XN (unpk_z28_z4, svint64x4_t, svint32x2_t, z28,
+ svunpk_s64_s32_x4 (z4),
+ svunpk_s64 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s8_x2.c
new file mode 100644
index 0000000..dc87f48
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s8_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** unpk_z0_z4:
+** sunpk {z0\.h - z1\.h}, z4\.b
+** ret
+*/
+TEST_DUAL_XN (unpk_z0_z4, svint16x2_t, svint8_t, z0,
+ svunpk_s16_s8_x2 (z4),
+ svunpk_s16 (z4))
+
+/*
+** unpk_z4_z0:
+** sunpk {z4\.h - z5\.h}, z0\.b
+** ret
+*/
+TEST_DUAL_XN (unpk_z4_z0, svint8_t, svint16x2_t, z4,
+ svunpk_s16_s8_x2 (z0),
+ svunpk_s16 (z0))
+
+/*
+** unpk_z18_z23:
+** sunpk {z18\.h - z19\.h}, z23\.b
+** ret
+*/
+TEST_DUAL_XN (unpk_z18_z23, svint16x2_t, svint8_t, z18,
+ svunpk_s16_s8_x2 (z23),
+ svunpk_s16 (z23))
+
+/*
+** unpk_z23_z28:
+** sunpk [^\n]+, z28\.b
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z23_z28, svint8_t, svint16x2_t, z23,
+ svunpk_s16_s8_x2 (z28),
+ svunpk_s16 (z28))
+
+/*
+** unpk_z28_z4:
+** sunpk {z28\.h - z29\.h}, z4\.b
+** ret
+*/
+TEST_DUAL_XN (unpk_z28_z4, svint16x2_t, svint8_t, z28,
+ svunpk_s16_s8_x2 (z4),
+ svunpk_s16 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s8_x4.c
new file mode 100644
index 0000000..3bace22
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_s8_x4.c
@@ -0,0 +1,76 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** unpk_z0_z4:
+** sunpk {z0\.h - z3\.h}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_DUAL_XN (unpk_z0_z4, svint16x4_t, svint8x2_t, z0,
+ svunpk_s16_s8_x4 (z4),
+ svunpk_s16 (z4))
+
+/*
+** unpk_z4_z0:
+** sunpk {z4\.h - z7\.h}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_DUAL_XN (unpk_z4_z0, svint8x2_t, svint16x4_t, z4,
+ svunpk_s16_s8_x4 (z0),
+ svunpk_s16 (z0))
+
+/*
+** unpk_z4_z18:
+** sunpk {z4\.h - z7\.h}, {z18\.b - z19\.b}
+** ret
+*/
+TEST_DUAL_XN (unpk_z4_z18, svint8x2_t, svint16x4_t, z4,
+ svunpk_s16_s8_x4 (z18),
+ svunpk_s16 (z18))
+
+/*
+** unpk_z28_z23:
+** mov [^\n]+
+** mov [^\n]+
+** sunpk {z28\.h - z31\.h}, [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z28_z23, svint16x4_t, svint8x2_t, z28,
+ svunpk_s16_s8_x4 (z23),
+ svunpk_s16 (z23))
+
+/*
+** unpk_z23_z28:
+** sunpk [^\n]+, {z28\.b - z29\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z23_z28, svint8x2_t, svint16x4_t, z23,
+ svunpk_s16_s8_x4 (z28),
+ svunpk_s16 (z28))
+
+/*
+** unpk_z23_z18:
+** sunpk {z[^\n]+}, {z18\.b - z19\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z23_z18, svint8x2_t, svint16x4_t, z23,
+ svunpk_s16_s8_x4 (z18),
+ svunpk_s16 (z18))
+
+/*
+** unpk_z28_z4:
+** sunpk {z28\.h - z31\.h}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_DUAL_XN (unpk_z28_z4, svint16x4_t, svint8x2_t, z28,
+ svunpk_s16_s8_x4 (z4),
+ svunpk_s16 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u16_x2.c
new file mode 100644
index 0000000..ff37b17
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u16_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** unpk_z0_z4:
+** uunpk {z0\.s - z1\.s}, z4\.h
+** ret
+*/
+TEST_DUAL_XN (unpk_z0_z4, svuint32x2_t, svuint16_t, z0,
+ svunpk_u32_u16_x2 (z4),
+ svunpk_u32 (z4))
+
+/*
+** unpk_z4_z0:
+** uunpk {z4\.s - z5\.s}, z0\.h
+** ret
+*/
+TEST_DUAL_XN (unpk_z4_z0, svuint16_t, svuint32x2_t, z4,
+ svunpk_u32_u16_x2 (z0),
+ svunpk_u32 (z0))
+
+/*
+** unpk_z18_z23:
+** uunpk {z18\.s - z19\.s}, z23\.h
+** ret
+*/
+TEST_DUAL_XN (unpk_z18_z23, svuint32x2_t, svuint16_t, z18,
+ svunpk_u32_u16_x2 (z23),
+ svunpk_u32 (z23))
+
+/*
+** unpk_z23_z28:
+** uunpk [^\n]+, z28\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z23_z28, svuint16_t, svuint32x2_t, z23,
+ svunpk_u32_u16_x2 (z28),
+ svunpk_u32 (z28))
+
+/*
+** unpk_z28_z4:
+** uunpk {z28\.s - z29\.s}, z4\.h
+** ret
+*/
+TEST_DUAL_XN (unpk_z28_z4, svuint32x2_t, svuint16_t, z28,
+ svunpk_u32_u16_x2 (z4),
+ svunpk_u32 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u16_x4.c
new file mode 100644
index 0000000..ced14af
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u16_x4.c
@@ -0,0 +1,76 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** unpk_z0_z4:
+** uunpk {z0\.s - z3\.s}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_DUAL_XN (unpk_z0_z4, svuint32x4_t, svuint16x2_t, z0,
+ svunpk_u32_u16_x4 (z4),
+ svunpk_u32 (z4))
+
+/*
+** unpk_z4_z0:
+** uunpk {z4\.s - z7\.s}, {z0\.h - z1\.h}
+** ret
+*/
+TEST_DUAL_XN (unpk_z4_z0, svuint16x2_t, svuint32x4_t, z4,
+ svunpk_u32_u16_x4 (z0),
+ svunpk_u32 (z0))
+
+/*
+** unpk_z4_z18:
+** uunpk {z4\.s - z7\.s}, {z18\.h - z19\.h}
+** ret
+*/
+TEST_DUAL_XN (unpk_z4_z18, svuint16x2_t, svuint32x4_t, z4,
+ svunpk_u32_u16_x4 (z18),
+ svunpk_u32 (z18))
+
+/*
+** unpk_z28_z23:
+** mov [^\n]+
+** mov [^\n]+
+** uunpk {z28\.s - z31\.s}, [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z28_z23, svuint32x4_t, svuint16x2_t, z28,
+ svunpk_u32_u16_x4 (z23),
+ svunpk_u32 (z23))
+
+/*
+** unpk_z23_z28:
+** uunpk [^\n]+, {z28\.h - z29\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z23_z28, svuint16x2_t, svuint32x4_t, z23,
+ svunpk_u32_u16_x4 (z28),
+ svunpk_u32 (z28))
+
+/*
+** unpk_z23_z18:
+** uunpk {z[^\n]+}, {z18\.h - z19\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z23_z18, svuint16x2_t, svuint32x4_t, z23,
+ svunpk_u32_u16_x4 (z18),
+ svunpk_u32 (z18))
+
+/*
+** unpk_z28_z4:
+** uunpk {z28\.s - z31\.s}, {z4\.h - z5\.h}
+** ret
+*/
+TEST_DUAL_XN (unpk_z28_z4, svuint32x4_t, svuint16x2_t, z28,
+ svunpk_u32_u16_x4 (z4),
+ svunpk_u32 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u32_x2.c
new file mode 100644
index 0000000..48ad1d9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u32_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** unpk_z0_z4:
+** uunpk {z0\.d - z1\.d}, z4\.s
+** ret
+*/
+TEST_DUAL_XN (unpk_z0_z4, svuint64x2_t, svuint32_t, z0,
+ svunpk_u64_u32_x2 (z4),
+ svunpk_u64 (z4))
+
+/*
+** unpk_z4_z0:
+** uunpk {z4\.d - z5\.d}, z0\.s
+** ret
+*/
+TEST_DUAL_XN (unpk_z4_z0, svuint32_t, svuint64x2_t, z4,
+ svunpk_u64_u32_x2 (z0),
+ svunpk_u64 (z0))
+
+/*
+** unpk_z18_z23:
+** uunpk {z18\.d - z19\.d}, z23\.s
+** ret
+*/
+TEST_DUAL_XN (unpk_z18_z23, svuint64x2_t, svuint32_t, z18,
+ svunpk_u64_u32_x2 (z23),
+ svunpk_u64 (z23))
+
+/*
+** unpk_z23_z28:
+** uunpk [^\n]+, z28\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z23_z28, svuint32_t, svuint64x2_t, z23,
+ svunpk_u64_u32_x2 (z28),
+ svunpk_u64 (z28))
+
+/*
+** unpk_z28_z4:
+** uunpk {z28\.d - z29\.d}, z4\.s
+** ret
+*/
+TEST_DUAL_XN (unpk_z28_z4, svuint64x2_t, svuint32_t, z28,
+ svunpk_u64_u32_x2 (z4),
+ svunpk_u64 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u32_x4.c
new file mode 100644
index 0000000..1f68172
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u32_x4.c
@@ -0,0 +1,76 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** unpk_z0_z4:
+** uunpk {z0\.d - z3\.d}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_DUAL_XN (unpk_z0_z4, svuint64x4_t, svuint32x2_t, z0,
+ svunpk_u64_u32_x4 (z4),
+ svunpk_u64 (z4))
+
+/*
+** unpk_z4_z0:
+** uunpk {z4\.d - z7\.d}, {z0\.s - z1\.s}
+** ret
+*/
+TEST_DUAL_XN (unpk_z4_z0, svuint32x2_t, svuint64x4_t, z4,
+ svunpk_u64_u32_x4 (z0),
+ svunpk_u64 (z0))
+
+/*
+** unpk_z4_z18:
+** uunpk {z4\.d - z7\.d}, {z18\.s - z19\.s}
+** ret
+*/
+TEST_DUAL_XN (unpk_z4_z18, svuint32x2_t, svuint64x4_t, z4,
+ svunpk_u64_u32_x4 (z18),
+ svunpk_u64 (z18))
+
+/*
+** unpk_z28_z23:
+** mov [^\n]+
+** mov [^\n]+
+** uunpk {z28\.d - z31\.d}, [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z28_z23, svuint64x4_t, svuint32x2_t, z28,
+ svunpk_u64_u32_x4 (z23),
+ svunpk_u64 (z23))
+
+/*
+** unpk_z23_z28:
+** uunpk [^\n]+, {z28\.s - z29\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z23_z28, svuint32x2_t, svuint64x4_t, z23,
+ svunpk_u64_u32_x4 (z28),
+ svunpk_u64 (z28))
+
+/*
+** unpk_z23_z18:
+** uunpk {z[^\n]+}, {z18\.s - z19\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z23_z18, svuint32x2_t, svuint64x4_t, z23,
+ svunpk_u64_u32_x4 (z18),
+ svunpk_u64 (z18))
+
+/*
+** unpk_z28_z4:
+** uunpk {z28\.d - z31\.d}, {z4\.s - z5\.s}
+** ret
+*/
+TEST_DUAL_XN (unpk_z28_z4, svuint64x4_t, svuint32x2_t, z28,
+ svunpk_u64_u32_x4 (z4),
+ svunpk_u64 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u8_x2.c
new file mode 100644
index 0000000..2e6dc47
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u8_x2.c
@@ -0,0 +1,50 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** unpk_z0_z4:
+** uunpk {z0\.h - z1\.h}, z4\.b
+** ret
+*/
+TEST_DUAL_XN (unpk_z0_z4, svuint16x2_t, svuint8_t, z0,
+ svunpk_u16_u8_x2 (z4),
+ svunpk_u16 (z4))
+
+/*
+** unpk_z4_z0:
+** uunpk {z4\.h - z5\.h}, z0\.b
+** ret
+*/
+TEST_DUAL_XN (unpk_z4_z0, svuint8_t, svuint16x2_t, z4,
+ svunpk_u16_u8_x2 (z0),
+ svunpk_u16 (z0))
+
+/*
+** unpk_z18_z23:
+** uunpk {z18\.h - z19\.h}, z23\.b
+** ret
+*/
+TEST_DUAL_XN (unpk_z18_z23, svuint16x2_t, svuint8_t, z18,
+ svunpk_u16_u8_x2 (z23),
+ svunpk_u16 (z23))
+
+/*
+** unpk_z23_z28:
+** uunpk [^\n]+, z28\.b
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z23_z28, svuint8_t, svuint16x2_t, z23,
+ svunpk_u16_u8_x2 (z28),
+ svunpk_u16 (z28))
+
+/*
+** unpk_z28_z4:
+** uunpk {z28\.h - z29\.h}, z4\.b
+** ret
+*/
+TEST_DUAL_XN (unpk_z28_z4, svuint16x2_t, svuint8_t, z28,
+ svunpk_u16_u8_x2 (z4),
+ svunpk_u16 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u8_x4.c
new file mode 100644
index 0000000..6aa9c3d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/unpk_u8_x4.c
@@ -0,0 +1,76 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** unpk_z0_z4:
+** uunpk {z0\.h - z3\.h}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_DUAL_XN (unpk_z0_z4, svuint16x4_t, svuint8x2_t, z0,
+ svunpk_u16_u8_x4 (z4),
+ svunpk_u16 (z4))
+
+/*
+** unpk_z4_z0:
+** uunpk {z4\.h - z7\.h}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_DUAL_XN (unpk_z4_z0, svuint8x2_t, svuint16x4_t, z4,
+ svunpk_u16_u8_x4 (z0),
+ svunpk_u16 (z0))
+
+/*
+** unpk_z4_z18:
+** uunpk {z4\.h - z7\.h}, {z18\.b - z19\.b}
+** ret
+*/
+TEST_DUAL_XN (unpk_z4_z18, svuint8x2_t, svuint16x4_t, z4,
+ svunpk_u16_u8_x4 (z18),
+ svunpk_u16 (z18))
+
+/*
+** unpk_z28_z23:
+** mov [^\n]+
+** mov [^\n]+
+** uunpk {z28\.h - z31\.h}, [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z28_z23, svuint16x4_t, svuint8x2_t, z28,
+ svunpk_u16_u8_x4 (z23),
+ svunpk_u16 (z23))
+
+/*
+** unpk_z23_z28:
+** uunpk [^\n]+, {z28\.b - z29\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z23_z28, svuint8x2_t, svuint16x4_t, z23,
+ svunpk_u16_u8_x4 (z28),
+ svunpk_u16 (z28))
+
+/*
+** unpk_z23_z18:
+** uunpk {z[^\n]+}, {z18\.b - z19\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_DUAL_XN (unpk_z23_z18, svuint8x2_t, svuint16x4_t, z23,
+ svunpk_u16_u8_x4 (z18),
+ svunpk_u16 (z18))
+
+/*
+** unpk_z28_z4:
+** uunpk {z28\.h - z31\.h}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_DUAL_XN (unpk_z28_z4, svuint16x4_t, svuint8x2_t, z28,
+ svunpk_u16_u8_x4 (z4),
+ svunpk_u16 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/usdot_lane_za32_u8_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/usdot_lane_za32_u8_vg1x2.c
new file mode 100644
index 0000000..38bedd2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/usdot_lane_za32_u8_vg1x2.c
@@ -0,0 +1,102 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** usdot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** usdot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (usdot_lane_0_z0_z4_0, svuint8x2_t, svint8_t,
+ svusdot_lane_za32_u8_vg1x2 (0, z0, z4, 0),
+ svusdot_lane_za32_vg1x2 (0, z0, z4, 0))
+
+/*
+** usdot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** usdot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, z7\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (usdot_lane_w0_z0_z7_1, svuint8x2_t, svint8_t,
+ svusdot_lane_za32_u8_vg1x2 (w0, z0, z7, 1),
+ svusdot_lane_za32_vg1x2 (w0, z0, z7, 1))
+
+/*
+** usdot_lane_w8_z28_z4_2:
+** usdot za\.s\[w8, 0, vgx2\], {z28\.b - z29\.b}, z4\.b\[2\]
+** ret
+*/
+TEST_ZA_LANE (usdot_lane_w8_z28_z4_2, svuint8x2_t, svint8_t,
+ svusdot_lane_za32_u8_vg1x2 (w8, z28, z4, 2),
+ svusdot_lane_za32_vg1x2 (w8, z28, z4, 2))
+
+/*
+** usdot_lane_w8p7_z0_z4_3:
+** usdot za\.s\[w8, 7, vgx2\], {z0\.b - z1\.b}, z4\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (usdot_lane_w8p7_z0_z4_3, svuint8x2_t, svint8_t,
+ svusdot_lane_za32_u8_vg1x2 (w8 + 7, z0, z4, 3),
+ svusdot_lane_za32_vg1x2 (w8 + 7, z0, z4, 3))
+
+/*
+** usdot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** usdot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (usdot_lane_w8p8_z0_z4_0, svuint8x2_t, svint8_t,
+ svusdot_lane_za32_u8_vg1x2 (w8 + 8, z0, z4, 0),
+ svusdot_lane_za32_vg1x2 (w8 + 8, z0, z4, 0))
+
+/*
+** usdot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** usdot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (usdot_lane_w0m1_z0_z4_1, svuint8x2_t, svint8_t,
+ svusdot_lane_za32_u8_vg1x2 (w0 - 1, z0, z4, 1),
+ svusdot_lane_za32_vg1x2 (w0 - 1, z0, z4, 1))
+
+/*
+** usdot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** usdot za\.s\[w8, 0, vgx2\], {z4\.b - z5\.b}, z15\.b\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (usdot_lane_w8_z4_z15_2, svuint8x2_t, svint8_t,
+ svusdot_lane_za32_u8_vg1x2 (w8, z4, z15, 2),
+ svusdot_lane_za32_vg1x2 (w8, z4, z15, 2))
+
+/*
+** usdot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** usdot za\.s\[w8, 0, vgx2\], {z28\.b - z29\.b}, \1\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (usdot_lane_w8_z28_z16_3, svuint8x2_t, svint8_t,
+ svusdot_lane_za32_u8_vg1x2 (w8, z28, z16, 3),
+ svusdot_lane_za32_vg1x2 (w8, z28, z16, 3))
+
+/*
+** usdot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** usdot za\.s\[w8, 0, vgx2\], [^\n]+, z7\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (usdot_lane_w8_z17_z7_0, svuint8x2_t, svint8_t,
+ svusdot_lane_za32_u8_vg1x2 (w8, z17, z7, 0),
+ svusdot_lane_za32_vg1x2 (w8, z17, z7, 0))
+
+/*
+** usdot_lane_w8_z22_z4_1:
+** usdot za\.s\[w8, 0, vgx2\], {z22\.b - z23\.b}, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (usdot_lane_w8_z22_z4_1, svuint8x2_t, svint8_t,
+ svusdot_lane_za32_u8_vg1x2 (w8, z22, z4, 1),
+ svusdot_lane_za32_vg1x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/usdot_lane_za32_u8_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/usdot_lane_za32_u8_vg1x4.c
new file mode 100644
index 0000000..b087bcc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/usdot_lane_za32_u8_vg1x4.c
@@ -0,0 +1,108 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** usdot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** usdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (usdot_lane_0_z0_z4_0, svuint8x4_t, svint8_t,
+ svusdot_lane_za32_u8_vg1x4 (0, z0, z4, 0),
+ svusdot_lane_za32_vg1x4 (0, z0, z4, 0))
+
+/*
+** usdot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** usdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z7\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (usdot_lane_w0_z0_z7_1, svuint8x4_t, svint8_t,
+ svusdot_lane_za32_u8_vg1x4 (w0, z0, z7, 1),
+ svusdot_lane_za32_vg1x4 (w0, z0, z7, 1))
+
+/*
+** usdot_lane_w8_z28_z4_2:
+** usdot za\.s\[w8, 0, vgx4\], {z28\.b - z31\.b}, z4\.b\[2\]
+** ret
+*/
+TEST_ZA_LANE (usdot_lane_w8_z28_z4_2, svuint8x4_t, svint8_t,
+ svusdot_lane_za32_u8_vg1x4 (w8, z28, z4, 2),
+ svusdot_lane_za32_vg1x4 (w8, z28, z4, 2))
+
+/*
+** usdot_lane_w8p7_z0_z4_3:
+** usdot za\.s\[w8, 7, vgx4\], {z0\.b - z3\.b}, z4\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (usdot_lane_w8p7_z0_z4_3, svuint8x4_t, svint8_t,
+ svusdot_lane_za32_u8_vg1x4 (w8 + 7, z0, z4, 3),
+ svusdot_lane_za32_vg1x4 (w8 + 7, z0, z4, 3))
+
+/*
+** usdot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** usdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (usdot_lane_w8p8_z0_z4_0, svuint8x4_t, svint8_t,
+ svusdot_lane_za32_u8_vg1x4 (w8 + 8, z0, z4, 0),
+ svusdot_lane_za32_vg1x4 (w8 + 8, z0, z4, 0))
+
+/*
+** usdot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** usdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (usdot_lane_w0m1_z0_z4_1, svuint8x4_t, svint8_t,
+ svusdot_lane_za32_u8_vg1x4 (w0 - 1, z0, z4, 1),
+ svusdot_lane_za32_vg1x4 (w0 - 1, z0, z4, 1))
+
+/*
+** usdot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** usdot za\.s\[w8, 0, vgx4\], {z4\.b - z7\.b}, z15\.b\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (usdot_lane_w8_z4_z15_2, svuint8x4_t, svint8_t,
+ svusdot_lane_za32_u8_vg1x4 (w8, z4, z15, 2),
+ svusdot_lane_za32_vg1x4 (w8, z4, z15, 2))
+
+/*
+** usdot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** usdot za\.s\[w8, 0, vgx4\], {z28\.b - z31\.b}, \1\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (usdot_lane_w8_z28_z16_3, svuint8x4_t, svint8_t,
+ svusdot_lane_za32_u8_vg1x4 (w8, z28, z16, 3),
+ svusdot_lane_za32_vg1x4 (w8, z28, z16, 3))
+
+/*
+** usdot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** usdot za\.s\[w8, 0, vgx4\], [^\n]+, z7\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (usdot_lane_w8_z17_z7_0, svuint8x4_t, svint8_t,
+ svusdot_lane_za32_u8_vg1x4 (w8, z17, z7, 0),
+ svusdot_lane_za32_vg1x4 (w8, z17, z7, 0))
+
+/*
+** usdot_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** usdot za\.s\[w8, 0, vgx4\], [^\n]+, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (usdot_lane_w8_z22_z4_1, svuint8x4_t, svint8_t,
+ svusdot_lane_za32_u8_vg1x4 (w8, z22, z4, 1),
+ svusdot_lane_za32_vg1x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/usdot_za32_u8_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/usdot_za32_u8_vg1x2.c
new file mode 100644
index 0000000..14189ed
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/usdot_za32_u8_vg1x2.c
@@ -0,0 +1,243 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_0_z0_z4:
+** mov (w8|w9|w10|w11), #?0
+** usdot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (dot_0_z0_z4, svuint8x2_t,
+ svusdot_za32_u8_vg1x2 (0, z0, svreinterpret_s8 (z4)),
+ svusdot_za32_vg1x2 (0, z0, svreinterpret_s8 (z4)))
+
+/*
+** dot_w0_z0_z4:
+** mov (w8|w9|w10|w11), w0
+** usdot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w0_z0_z4, svuint8x2_t,
+ svusdot_za32_u8_vg1x2 (w0, z0, svreinterpret_s8 (z4)),
+ svusdot_za32_vg1x2 (w0, z0, svreinterpret_s8 (z4)))
+
+/*
+** dot_w8_z0_z18:
+** usdot za\.s\[w8, 0, vgx2\], {z0\.b - z1\.b}, {z18\.b - z19\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z18, svuint8x2_t,
+ svusdot_za32_u8_vg1x2 (w8, z0, svreinterpret_s8 (z18)),
+ svusdot_za32_vg1x2 (w8, z0, svreinterpret_s8 (z18)))
+
+/*
+** dot_w8_z4_z18:
+** usdot za\.s\[w8, 0, vgx2\], {z4\.b - z5\.b}, {z18\.b - z19\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z4_z18, svuint8x2_t,
+ svusdot_za32_u8_vg1x2 (w8, z4, svreinterpret_s8 (z18)),
+ svusdot_za32_vg1x2 (w8, z4, svreinterpret_s8 (z18)))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** dot_w8_z0_z23:
+** ...
+** usdot za\.s\[w8, 0, vgx2\], {z0\.b - z1\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z23, svuint8x2_t,
+ svusdot_za32_u8_vg1x2 (w8, z0, svreinterpret_s8 (z23)),
+ svusdot_za32_vg1x2 (w8, z0, svreinterpret_s8 (z23)))
+
+/*
+** dot_w8_z23_z0:
+** ...
+** usdot za\.s\[w8, 0, vgx2\], [^\n]+, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z23_z0, svuint8x2_t,
+ svusdot_za32_u8_vg1x2 (w8, z23, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x2 (w8, z23, svreinterpret_s8 (z0)))
+
+/*
+** dot_w8_z18_z28:
+** usdot za\.s\[w8, 0, vgx2\], {z18\.b - z19\.b}, {z28\.b - z29\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z18_z28, svuint8x2_t,
+ svusdot_za32_u8_vg1x2 (w8, z18, svreinterpret_s8 (z28)),
+ svusdot_za32_vg1x2 (w8, z18, svreinterpret_s8 (z28)))
+
+/*
+** dot_w8_z28_z4:
+** usdot za\.s\[w8, 0, vgx2\], {z28\.b - z29\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z28_z4, svuint8x2_t,
+ svusdot_za32_u8_vg1x2 (w8, z28, svreinterpret_s8 (z4)),
+ svusdot_za32_vg1x2 (w8, z28, svreinterpret_s8 (z4)))
+
+/*
+** dot_w8p1_z4_z0:
+** usdot za\.s\[w8, 1, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p1_z4_z0, svuint8x2_t,
+ svusdot_za32_u8_vg1x2 (w8 + 1, z4, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x2 (w8 + 1, z4, svreinterpret_s8 (z0)))
+
+/*
+** dot_w8p2_z4_z0:
+** usdot za\.s\[w8, 2, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p2_z4_z0, svuint8x2_t,
+ svusdot_za32_u8_vg1x2 (w8 + 2, z4, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x2 (w8 + 2, z4, svreinterpret_s8 (z0)))
+
+/*
+** dot_w11p4_z4_z0:
+** usdot za\.s\[w11, 4, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w11p4_z4_z0, svuint8x2_t,
+ svusdot_za32_u8_vg1x2 (w11 + 4, z4, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x2 (w11 + 4, z4, svreinterpret_s8 (z0)))
+
+/*
+** dot_w8p7_z4_z0:
+** usdot za\.s\[w8, 7, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p7_z4_z0, svuint8x2_t,
+ svusdot_za32_u8_vg1x2 (w8 + 7, z4, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x2 (w8 + 7, z4, svreinterpret_s8 (z0)))
+
+/*
+** dot_w8p8_z0_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** usdot za\.s\[\1, 0, vgx2\], {z0\.b - z1\.b}, {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p8_z0_z4, svuint8x2_t,
+ svusdot_za32_u8_vg1x2 (w8 + 8, z0, svreinterpret_s8 (z4)),
+ svusdot_za32_vg1x2 (w8 + 8, z0, svreinterpret_s8 (z4)))
+
+/*
+** dot_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** usdot za\.s\[\1, 0, vgx2\], {z4\.b - z5\.b}, {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8m1_z4_z0, svuint8x2_t,
+ svusdot_za32_u8_vg1x2 (w8 - 1, z4, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x2 (w8 - 1, z4, svreinterpret_s8 (z0)))
+
+/*
+** dot_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** usdot za\.s\[\1, 0, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_0_z1_z0, svuint8x2_t, svuint8_t,
+ svusdot_single_za32_u8_vg1x2 (0, z1, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x2 (0, z1, svreinterpret_s8 (z0)))
+
+/*
+** dot_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** usdot za\.s\[\1, 0, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0_z1_z0, svuint8x2_t, svuint8_t,
+ svusdot_single_za32_u8_vg1x2 (w0, z1, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x2 (w0, z1, svreinterpret_s8 (z0)))
+
+/*
+** dot_single_w8_z1_z0:
+** usdot za\.s\[w8, 0, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z1_z0, svuint8x2_t, svuint8_t,
+ svusdot_single_za32_u8_vg1x2 (w8, z1, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x2 (w8, z1, svreinterpret_s8 (z0)))
+
+/*
+** dot_single_w8p1_z1_z0:
+** usdot za\.s\[w8, 1, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p1_z1_z0, svuint8x2_t, svuint8_t,
+ svusdot_single_za32_u8_vg1x2 (w8 + 1, z1, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x2 (w8 + 1, z1, svreinterpret_s8 (z0)))
+
+/*
+** dot_single_w8p2_z20_z0:
+** usdot za\.s\[w8, 2, vgx2\], {z20\.b - z21\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p2_z20_z0, svuint8x2_t, svuint8_t,
+ svusdot_single_za32_u8_vg1x2 (w8 + 2, z20, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x2 (w8 + 2, z20, svreinterpret_s8 (z0)))
+
+/*
+** dot_single_w11p4_z27_z0:
+** usdot za\.s\[w11, 4, vgx2\], {z27\.b - z28\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w11p4_z27_z0, svuint8x2_t, svuint8_t,
+ svusdot_single_za32_u8_vg1x2 (w11 + 4, z27, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x2 (w11 + 4, z27, svreinterpret_s8 (z0)))
+
+/*
+** dot_single_w8p7_z1_z0:
+** usdot za\.s\[w8, 7, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p7_z1_z0, svuint8x2_t, svuint8_t,
+ svusdot_single_za32_u8_vg1x2 (w8 + 7, z1, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x2 (w8 + 7, z1, svreinterpret_s8 (z0)))
+
+/*
+** dot_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** usdot za\.s\[\1, 0, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p8_z1_z0, svuint8x2_t, svuint8_t,
+ svusdot_single_za32_u8_vg1x2 (w8 + 8, z1, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x2 (w8 + 8, z1, svreinterpret_s8 (z0)))
+
+/*
+** dot_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** usdot za\.s\[\1, 0, vgx2\], {z1\.b - z2\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0m1_z1_z0, svuint8x2_t, svuint8_t,
+ svusdot_single_za32_u8_vg1x2 (w0 - 1, z1, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x2 (w0 - 1, z1, svreinterpret_s8 (z0)))
+
+/*
+** dot_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** usdot za\.s\[w8, 0, vgx2\], {z0\.b - z1\.b}, z15\.b
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (dot_single_w8_z0_z15, svuint8x2_t, svuint8_t,
+ svusdot_single_za32_u8_vg1x2 (w8, z0, svreinterpret_s8 (z15)),
+ svusdot_za32_vg1x2 (w8, z0, svreinterpret_s8 (z15)))
+
+/*
+** dot_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** usdot za\.s\[w8, 0, vgx2\], {z20\.b - z21\.b}, \1\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z20_z16, svuint8x2_t, svuint8_t,
+ svusdot_single_za32_u8_vg1x2 (w8, z20, svreinterpret_s8 (z16)),
+ svusdot_za32_vg1x2 (w8, z20, svreinterpret_s8 (z16)))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/usdot_za32_u8_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/usdot_za32_u8_vg1x4.c
new file mode 100644
index 0000000..6e9ae03
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/usdot_za32_u8_vg1x4.c
@@ -0,0 +1,254 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** dot_0_z0_z4:
+** mov (w8|w9|w10|w11), #?0
+** usdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (dot_0_z0_z4, svuint8x4_t,
+ svusdot_za32_u8_vg1x4 (0, z0, svreinterpret_s8 (z4)),
+ svusdot_za32_vg1x4 (0, z0, svreinterpret_s8 (z4)))
+
+/*
+** dot_w0_z0_z4:
+** mov (w8|w9|w10|w11), w0
+** usdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w0_z0_z4, svuint8x4_t,
+ svusdot_za32_u8_vg1x4 (w0, z0, svreinterpret_s8 (z4)),
+ svusdot_za32_vg1x4 (w0, z0, svreinterpret_s8 (z4)))
+
+/*
+** dot_w8_z4_z0:
+** usdot za\.s\[w8, 0, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z4_z0, svuint8x4_t,
+ svusdot_za32_u8_vg1x4 (w8, z4, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x4 (w8, z4, svreinterpret_s8 (z0)))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** dot_w8_z0_z18:
+** ...
+** usdot za\.s\[w8, 0, vgx4\], {z0\.b - z3\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z18, svuint8x4_t,
+ svusdot_za32_u8_vg1x4 (w8, z0, svreinterpret_s8 (z18)),
+ svusdot_za32_vg1x4 (w8, z0, svreinterpret_s8 (z18)))
+
+/*
+** dot_w8_z18_z0:
+** ...
+** usdot za\.s\[w8, 0, vgx4\], [^\n]+, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z18_z0, svuint8x4_t,
+ svusdot_za32_u8_vg1x4 (w8, z18, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x4 (w8, z18, svreinterpret_s8 (z0)))
+
+/*
+** dot_w8_z0_z23:
+** ...
+** usdot za\.s\[w8, 0, vgx4\], {z0\.b - z3\.b}, [^\n]+
+** ret
+*/
+TEST_ZA_XN (dot_w8_z0_z23, svuint8x4_t,
+ svusdot_za32_u8_vg1x4 (w8, z0, svreinterpret_s8 (z23)),
+ svusdot_za32_vg1x4 (w8, z0, svreinterpret_s8 (z23)))
+
+/*
+** dot_w8_z23_z0:
+** ...
+** usdot za\.s\[w8, 0, vgx4\], [^\n]+, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z23_z0, svuint8x4_t,
+ svusdot_za32_u8_vg1x4 (w8, z23, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x4 (w8, z23, svreinterpret_s8 (z0)))
+
+/*
+** dot_w8_z4_z28:
+** usdot za\.s\[w8, 0, vgx4\], {z4\.b - z7\.b}, {z28\.b - z31\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z4_z28, svuint8x4_t,
+ svusdot_za32_u8_vg1x4 (w8, z4, svreinterpret_s8 (z28)),
+ svusdot_za32_vg1x4 (w8, z4, svreinterpret_s8 (z28)))
+
+/*
+** dot_w8_z28_z0:
+** usdot za\.s\[w8, 0, vgx4\], {z28\.b - z31\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8_z28_z0, svuint8x4_t,
+ svusdot_za32_u8_vg1x4 (w8, z28, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x4 (w8, z28, svreinterpret_s8 (z0)))
+
+/*
+** dot_w8p1_z4_z0:
+** usdot za\.s\[w8, 1, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p1_z4_z0, svuint8x4_t,
+ svusdot_za32_u8_vg1x4 (w8 + 1, z4, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x4 (w8 + 1, z4, svreinterpret_s8 (z0)))
+
+/*
+** dot_w8p2_z4_z0:
+** usdot za\.s\[w8, 2, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p2_z4_z0, svuint8x4_t,
+ svusdot_za32_u8_vg1x4 (w8 + 2, z4, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x4 (w8 + 2, z4, svreinterpret_s8 (z0)))
+
+/*
+** dot_w11p4_z4_z0:
+** usdot za\.s\[w11, 4, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w11p4_z4_z0, svuint8x4_t,
+ svusdot_za32_u8_vg1x4 (w11 + 4, z4, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x4 (w11 + 4, z4, svreinterpret_s8 (z0)))
+
+/*
+** dot_w8p7_z4_z0:
+** usdot za\.s\[w8, 7, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p7_z4_z0, svuint8x4_t,
+ svusdot_za32_u8_vg1x4 (w8 + 7, z4, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x4 (w8 + 7, z4, svreinterpret_s8 (z0)))
+
+/*
+** dot_w8p8_z0_z4:
+** add (w8|w9|w10|w11), w8, #?8
+** usdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8p8_z0_z4, svuint8x4_t,
+ svusdot_za32_u8_vg1x4 (w8 + 8, z0, svreinterpret_s8 (z4)),
+ svusdot_za32_vg1x4 (w8 + 8, z0, svreinterpret_s8 (z4)))
+
+/*
+** dot_w8m1_z4_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** usdot za\.s\[\1, 0, vgx4\], {z4\.b - z7\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (dot_w8m1_z4_z0, svuint8x4_t,
+ svusdot_za32_u8_vg1x4 (w8 - 1, z4, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x4 (w8 - 1, z4, svreinterpret_s8 (z0)))
+
+/*
+** dot_single_0_z1_z0:
+** mov (w8|w9|w10|w11), #?0
+** usdot za\.s\[\1, 0, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_0_z1_z0, svuint8x4_t, svuint8_t,
+ svusdot_single_za32_u8_vg1x4 (0, z1, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x4 (0, z1, svreinterpret_s8 (z0)))
+
+/*
+** dot_single_w0_z1_z0:
+** mov (w8|w9|w10|w11), w0
+** usdot za\.s\[\1, 0, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0_z1_z0, svuint8x4_t, svuint8_t,
+ svusdot_single_za32_u8_vg1x4 (w0, z1, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x4 (w0, z1, svreinterpret_s8 (z0)))
+
+/*
+** dot_single_w8_z1_z0:
+** usdot za\.s\[w8, 0, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z1_z0, svuint8x4_t, svuint8_t,
+ svusdot_single_za32_u8_vg1x4 (w8, z1, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x4 (w8, z1, svreinterpret_s8 (z0)))
+
+/*
+** dot_single_w8p1_z1_z0:
+** usdot za\.s\[w8, 1, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p1_z1_z0, svuint8x4_t, svuint8_t,
+ svusdot_single_za32_u8_vg1x4 (w8 + 1, z1, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x4 (w8 + 1, z1, svreinterpret_s8 (z0)))
+
+/*
+** dot_single_w8p4_z20_z0:
+** usdot za\.s\[w8, 4, vgx4\], {z20\.b - z23\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p4_z20_z0, svuint8x4_t, svuint8_t,
+ svusdot_single_za32_u8_vg1x4 (w8 + 4, z20, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x4 (w8 + 4, z20, svreinterpret_s8 (z0)))
+
+/*
+** dot_single_w8p6_z27_z0:
+** usdot za\.s\[w8, 6, vgx4\], {z27\.b - z30\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p6_z27_z0, svuint8x4_t, svuint8_t,
+ svusdot_single_za32_u8_vg1x4 (w8 + 6, z27, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x4 (w8 + 6, z27, svreinterpret_s8 (z0)))
+
+/*
+** dot_single_w8p7_z1_z0:
+** usdot za\.s\[w8, 7, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p7_z1_z0, svuint8x4_t, svuint8_t,
+ svusdot_single_za32_u8_vg1x4 (w8 + 7, z1, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x4 (w8 + 7, z1, svreinterpret_s8 (z0)))
+
+/*
+** dot_single_w8p8_z1_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** usdot za\.s\[\1, 0, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8p8_z1_z0, svuint8x4_t, svuint8_t,
+ svusdot_single_za32_u8_vg1x4 (w8 + 8, z1, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x4 (w8 + 8, z1, svreinterpret_s8 (z0)))
+
+/*
+** dot_single_w0m1_z1_z0:
+** sub (w8|w9|w10|w11), w0, #?1
+** usdot za\.s\[\1, 0, vgx4\], {z1\.b - z4\.b}, z0\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w0m1_z1_z0, svuint8x4_t, svuint8_t,
+ svusdot_single_za32_u8_vg1x4 (w0 - 1, z1, svreinterpret_s8 (z0)),
+ svusdot_za32_vg1x4 (w0 - 1, z1, svreinterpret_s8 (z0)))
+
+/*
+** dot_single_w8_z0_z15:
+** str d15, \[sp, #?-16\]!
+** usdot za\.s\[w8, 0, vgx4\], {z0\.b - z3\.b}, z15\.b
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_SINGLE_Z15 (dot_single_w8_z0_z15, svuint8x4_t, svuint8_t,
+ svusdot_single_za32_u8_vg1x4 (w8, z0, svreinterpret_s8 (z15)),
+ svusdot_za32_vg1x4 (w8, z0, svreinterpret_s8 (z15)))
+
+/*
+** dot_single_w8_z20_z16:
+** mov (z[0-7]).d, z16.d
+** usdot za\.s\[w8, 0, vgx4\], {z20\.b - z23\.b}, \1\.b
+** ret
+*/
+TEST_ZA_SINGLE (dot_single_w8_z20_z16, svuint8x4_t, svuint8_t,
+ svusdot_single_za32_u8_vg1x4 (w8, z20, svreinterpret_s8 (z16)),
+ svusdot_za32_vg1x4 (w8, z20, svreinterpret_s8 (z16)))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/usvdot_lane_za32_u8_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/usvdot_lane_za32_u8_vg1x4.c
new file mode 100644
index 0000000..62938dc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/usvdot_lane_za32_u8_vg1x4.c
@@ -0,0 +1,108 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** usvdot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** usvdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (usvdot_lane_0_z0_z4_0, svuint8x4_t, svint8_t,
+ svusvdot_lane_za32_u8_vg1x4 (0, z0, z4, 0),
+ svusvdot_lane_za32_vg1x4 (0, z0, z4, 0))
+
+/*
+** usvdot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** usvdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z7\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (usvdot_lane_w0_z0_z7_1, svuint8x4_t, svint8_t,
+ svusvdot_lane_za32_u8_vg1x4 (w0, z0, z7, 1),
+ svusvdot_lane_za32_vg1x4 (w0, z0, z7, 1))
+
+/*
+** usvdot_lane_w8_z28_z4_2:
+** usvdot za\.s\[w8, 0, vgx4\], {z28\.b - z31\.b}, z4\.b\[2\]
+** ret
+*/
+TEST_ZA_LANE (usvdot_lane_w8_z28_z4_2, svuint8x4_t, svint8_t,
+ svusvdot_lane_za32_u8_vg1x4 (w8, z28, z4, 2),
+ svusvdot_lane_za32_vg1x4 (w8, z28, z4, 2))
+
+/*
+** usvdot_lane_w8p7_z0_z4_3:
+** usvdot za\.s\[w8, 7, vgx4\], {z0\.b - z3\.b}, z4\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (usvdot_lane_w8p7_z0_z4_3, svuint8x4_t, svint8_t,
+ svusvdot_lane_za32_u8_vg1x4 (w8 + 7, z0, z4, 3),
+ svusvdot_lane_za32_vg1x4 (w8 + 7, z0, z4, 3))
+
+/*
+** usvdot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** usvdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (usvdot_lane_w8p8_z0_z4_0, svuint8x4_t, svint8_t,
+ svusvdot_lane_za32_u8_vg1x4 (w8 + 8, z0, z4, 0),
+ svusvdot_lane_za32_vg1x4 (w8 + 8, z0, z4, 0))
+
+/*
+** usvdot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** usvdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (usvdot_lane_w0m1_z0_z4_1, svuint8x4_t, svint8_t,
+ svusvdot_lane_za32_u8_vg1x4 (w0 - 1, z0, z4, 1),
+ svusvdot_lane_za32_vg1x4 (w0 - 1, z0, z4, 1))
+
+/*
+** usvdot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** usvdot za\.s\[w8, 0, vgx4\], {z4\.b - z7\.b}, z15\.b\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (usvdot_lane_w8_z4_z15_2, svuint8x4_t, svint8_t,
+ svusvdot_lane_za32_u8_vg1x4 (w8, z4, z15, 2),
+ svusvdot_lane_za32_vg1x4 (w8, z4, z15, 2))
+
+/*
+** usvdot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** usvdot za\.s\[w8, 0, vgx4\], {z28\.b - z31\.b}, \1\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (usvdot_lane_w8_z28_z16_3, svuint8x4_t, svint8_t,
+ svusvdot_lane_za32_u8_vg1x4 (w8, z28, z16, 3),
+ svusvdot_lane_za32_vg1x4 (w8, z28, z16, 3))
+
+/*
+** usvdot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** usvdot za\.s\[w8, 0, vgx4\], [^\n]+, z7\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (usvdot_lane_w8_z17_z7_0, svuint8x4_t, svint8_t,
+ svusvdot_lane_za32_u8_vg1x4 (w8, z17, z7, 0),
+ svusvdot_lane_za32_vg1x4 (w8, z17, z7, 0))
+
+/*
+** usvdot_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** usvdot za\.s\[w8, 0, vgx4\], [^\n]+, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (usvdot_lane_w8_z22_z4_1, svuint8x4_t, svint8_t,
+ svusvdot_lane_za32_u8_vg1x4 (w8, z22, z4, 1),
+ svusvdot_lane_za32_vg1x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_bf16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_bf16_x2.c
new file mode 100644
index 0000000..17b952e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_bf16_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.h - z1\.h}, z0\.h, z1\.h
+** ret
+*/
+TEST_XN (uzp_z0_z0, svbfloat16x2_t, z0,
+ svuzp_bf16_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.h - z1\.h}, z4\.h, z5\.h
+** ret
+*/
+TEST_XN (uzp_z0_z4, svbfloat16x2_t, z0,
+ svuzp_bf16_x2 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** uzp {z4\.h - z5\.h}, z18\.h, z19\.h
+** ret
+*/
+TEST_XN (uzp_z4_z18, svbfloat16x2_t, z4,
+ svuzp_bf16_x2 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** uzp {z18\.h - z19\.h}, z23\.h, z24\.h
+** ret
+*/
+TEST_XN (uzp_z18_z23, svbfloat16x2_t, z18,
+ svuzp_bf16_x2 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, z28\.h, z29\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svbfloat16x2_t, z23,
+ svuzp_bf16_x2 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.h - z29\.h}, z0\.h, z1\.h
+** ret
+*/
+TEST_XN (uzp_z28_z0, svbfloat16x2_t, z28,
+ svuzp_bf16_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z28_z0_z23:
+** uzp {z28\.h - z29\.h}, z0\.h, z23\.h
+** ret
+*/
+TEST_XN (uzp_z28_z0_z23, svbfloat16x2_t, z28,
+ svuzp_bf16_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzp (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzp_z28_z5_z19:
+** uzp {z28\.h - z29\.h}, z5\.h, z19\.h
+** ret
+*/
+TEST_XN (uzp_z28_z5_z19, svbfloat16x2_t, z28,
+ svuzp_bf16_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzp (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_bf16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_bf16_x4.c
new file mode 100644
index 0000000..bbdb1df
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_bf16_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (uzp_z0_z0, svbfloat16x4_t, z0,
+ svuzp_bf16_x4 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (uzp_z0_z4, svbfloat16x4_t, z0,
+ svuzp_bf16_x4 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.h - z7\.h}, [^\n]+
+** ret
+*/
+TEST_XN (uzp_z4_z18, svbfloat16x4_t, z4,
+ svuzp_bf16_x4 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z18_z23, svbfloat16x4_t, z18,
+ svuzp_bf16_x4 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svbfloat16x4_t, z23,
+ svuzp_bf16_x4 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (uzp_z28_z0, svbfloat16x4_t, z28,
+ svuzp_bf16_x4 (z0),
+ svuzp (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f16_x2.c
new file mode 100644
index 0000000..a4361ed
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f16_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.h - z1\.h}, z0\.h, z1\.h
+** ret
+*/
+TEST_XN (uzp_z0_z0, svfloat16x2_t, z0,
+ svuzp_f16_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.h - z1\.h}, z4\.h, z5\.h
+** ret
+*/
+TEST_XN (uzp_z0_z4, svfloat16x2_t, z0,
+ svuzp_f16_x2 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** uzp {z4\.h - z5\.h}, z18\.h, z19\.h
+** ret
+*/
+TEST_XN (uzp_z4_z18, svfloat16x2_t, z4,
+ svuzp_f16_x2 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** uzp {z18\.h - z19\.h}, z23\.h, z24\.h
+** ret
+*/
+TEST_XN (uzp_z18_z23, svfloat16x2_t, z18,
+ svuzp_f16_x2 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, z28\.h, z29\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svfloat16x2_t, z23,
+ svuzp_f16_x2 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.h - z29\.h}, z0\.h, z1\.h
+** ret
+*/
+TEST_XN (uzp_z28_z0, svfloat16x2_t, z28,
+ svuzp_f16_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z28_z0_z23:
+** uzp {z28\.h - z29\.h}, z0\.h, z23\.h
+** ret
+*/
+TEST_XN (uzp_z28_z0_z23, svfloat16x2_t, z28,
+ svuzp_f16_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzp (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzp_z28_z5_z19:
+** uzp {z28\.h - z29\.h}, z5\.h, z19\.h
+** ret
+*/
+TEST_XN (uzp_z28_z5_z19, svfloat16x2_t, z28,
+ svuzp_f16_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzp (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f16_x4.c
new file mode 100644
index 0000000..a7abeda
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f16_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (uzp_z0_z0, svfloat16x4_t, z0,
+ svuzp_f16_x4 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (uzp_z0_z4, svfloat16x4_t, z0,
+ svuzp_f16_x4 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.h - z7\.h}, [^\n]+
+** ret
+*/
+TEST_XN (uzp_z4_z18, svfloat16x4_t, z4,
+ svuzp_f16_x4 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z18_z23, svfloat16x4_t, z18,
+ svuzp_f16_x4 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svfloat16x4_t, z23,
+ svuzp_f16_x4 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (uzp_z28_z0, svfloat16x4_t, z28,
+ svuzp_f16_x4 (z0),
+ svuzp (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f32_x2.c
new file mode 100644
index 0000000..dbc9165
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f32_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.s - z1\.s}, z0\.s, z1\.s
+** ret
+*/
+TEST_XN (uzp_z0_z0, svfloat32x2_t, z0,
+ svuzp_f32_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.s - z1\.s}, z4\.s, z5\.s
+** ret
+*/
+TEST_XN (uzp_z0_z4, svfloat32x2_t, z0,
+ svuzp_f32_x2 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** uzp {z4\.s - z5\.s}, z18\.s, z19\.s
+** ret
+*/
+TEST_XN (uzp_z4_z18, svfloat32x2_t, z4,
+ svuzp_f32_x2 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** uzp {z18\.s - z19\.s}, z23\.s, z24\.s
+** ret
+*/
+TEST_XN (uzp_z18_z23, svfloat32x2_t, z18,
+ svuzp_f32_x2 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, z28\.s, z29\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svfloat32x2_t, z23,
+ svuzp_f32_x2 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.s - z29\.s}, z0\.s, z1\.s
+** ret
+*/
+TEST_XN (uzp_z28_z0, svfloat32x2_t, z28,
+ svuzp_f32_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z28_z0_z23:
+** uzp {z28\.s - z29\.s}, z0\.s, z23\.s
+** ret
+*/
+TEST_XN (uzp_z28_z0_z23, svfloat32x2_t, z28,
+ svuzp_f32_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzp (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzp_z28_z5_z19:
+** uzp {z28\.s - z29\.s}, z5\.s, z19\.s
+** ret
+*/
+TEST_XN (uzp_z28_z5_z19, svfloat32x2_t, z28,
+ svuzp_f32_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzp (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f32_x4.c
new file mode 100644
index 0000000..3c42d3c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f32_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.s - z3\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (uzp_z0_z0, svfloat32x4_t, z0,
+ svuzp_f32_x4 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (uzp_z0_z4, svfloat32x4_t, z0,
+ svuzp_f32_x4 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.s - z7\.s}, [^\n]+
+** ret
+*/
+TEST_XN (uzp_z4_z18, svfloat32x4_t, z4,
+ svuzp_f32_x4 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z18_z23, svfloat32x4_t, z18,
+ svuzp_f32_x4 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svfloat32x4_t, z23,
+ svuzp_f32_x4 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.s - z31\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (uzp_z28_z0, svfloat32x4_t, z28,
+ svuzp_f32_x4 (z0),
+ svuzp (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f64_x2.c
new file mode 100644
index 0000000..c893d31
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f64_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.d - z1\.d}, z0\.d, z1\.d
+** ret
+*/
+TEST_XN (uzp_z0_z0, svfloat64x2_t, z0,
+ svuzp_f64_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.d - z1\.d}, z4\.d, z5\.d
+** ret
+*/
+TEST_XN (uzp_z0_z4, svfloat64x2_t, z0,
+ svuzp_f64_x2 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** uzp {z4\.d - z5\.d}, z18\.d, z19\.d
+** ret
+*/
+TEST_XN (uzp_z4_z18, svfloat64x2_t, z4,
+ svuzp_f64_x2 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** uzp {z18\.d - z19\.d}, z23\.d, z24\.d
+** ret
+*/
+TEST_XN (uzp_z18_z23, svfloat64x2_t, z18,
+ svuzp_f64_x2 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, z28\.d, z29\.d
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svfloat64x2_t, z23,
+ svuzp_f64_x2 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.d - z29\.d}, z0\.d, z1\.d
+** ret
+*/
+TEST_XN (uzp_z28_z0, svfloat64x2_t, z28,
+ svuzp_f64_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z28_z0_z23:
+** uzp {z28\.d - z29\.d}, z0\.d, z23\.d
+** ret
+*/
+TEST_XN (uzp_z28_z0_z23, svfloat64x2_t, z28,
+ svuzp_f64_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzp (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzp_z28_z5_z19:
+** uzp {z28\.d - z29\.d}, z5\.d, z19\.d
+** ret
+*/
+TEST_XN (uzp_z28_z5_z19, svfloat64x2_t, z28,
+ svuzp_f64_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzp (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f64_x4.c
new file mode 100644
index 0000000..076a9a8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_f64_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.d - z3\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (uzp_z0_z0, svfloat64x4_t, z0,
+ svuzp_f64_x4 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (uzp_z0_z4, svfloat64x4_t, z0,
+ svuzp_f64_x4 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.d - z7\.d}, [^\n]+
+** ret
+*/
+TEST_XN (uzp_z4_z18, svfloat64x4_t, z4,
+ svuzp_f64_x4 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z18_z23, svfloat64x4_t, z18,
+ svuzp_f64_x4 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svfloat64x4_t, z23,
+ svuzp_f64_x4 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.d - z31\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (uzp_z28_z0, svfloat64x4_t, z28,
+ svuzp_f64_x4 (z0),
+ svuzp (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s16_x2.c
new file mode 100644
index 0000000..54607d4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s16_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.h - z1\.h}, z0\.h, z1\.h
+** ret
+*/
+TEST_XN (uzp_z0_z0, svint16x2_t, z0,
+ svuzp_s16_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.h - z1\.h}, z4\.h, z5\.h
+** ret
+*/
+TEST_XN (uzp_z0_z4, svint16x2_t, z0,
+ svuzp_s16_x2 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** uzp {z4\.h - z5\.h}, z18\.h, z19\.h
+** ret
+*/
+TEST_XN (uzp_z4_z18, svint16x2_t, z4,
+ svuzp_s16_x2 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** uzp {z18\.h - z19\.h}, z23\.h, z24\.h
+** ret
+*/
+TEST_XN (uzp_z18_z23, svint16x2_t, z18,
+ svuzp_s16_x2 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, z28\.h, z29\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svint16x2_t, z23,
+ svuzp_s16_x2 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.h - z29\.h}, z0\.h, z1\.h
+** ret
+*/
+TEST_XN (uzp_z28_z0, svint16x2_t, z28,
+ svuzp_s16_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z28_z0_z23:
+** uzp {z28\.h - z29\.h}, z0\.h, z23\.h
+** ret
+*/
+TEST_XN (uzp_z28_z0_z23, svint16x2_t, z28,
+ svuzp_s16_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzp (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzp_z28_z5_z19:
+** uzp {z28\.h - z29\.h}, z5\.h, z19\.h
+** ret
+*/
+TEST_XN (uzp_z28_z5_z19, svint16x2_t, z28,
+ svuzp_s16_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzp (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s16_x4.c
new file mode 100644
index 0000000..8c4af1e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s16_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (uzp_z0_z0, svint16x4_t, z0,
+ svuzp_s16_x4 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (uzp_z0_z4, svint16x4_t, z0,
+ svuzp_s16_x4 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.h - z7\.h}, [^\n]+
+** ret
+*/
+TEST_XN (uzp_z4_z18, svint16x4_t, z4,
+ svuzp_s16_x4 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z18_z23, svint16x4_t, z18,
+ svuzp_s16_x4 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svint16x4_t, z23,
+ svuzp_s16_x4 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (uzp_z28_z0, svint16x4_t, z28,
+ svuzp_s16_x4 (z0),
+ svuzp (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s32_x2.c
new file mode 100644
index 0000000..9b1a81f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s32_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.s - z1\.s}, z0\.s, z1\.s
+** ret
+*/
+TEST_XN (uzp_z0_z0, svint32x2_t, z0,
+ svuzp_s32_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.s - z1\.s}, z4\.s, z5\.s
+** ret
+*/
+TEST_XN (uzp_z0_z4, svint32x2_t, z0,
+ svuzp_s32_x2 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** uzp {z4\.s - z5\.s}, z18\.s, z19\.s
+** ret
+*/
+TEST_XN (uzp_z4_z18, svint32x2_t, z4,
+ svuzp_s32_x2 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** uzp {z18\.s - z19\.s}, z23\.s, z24\.s
+** ret
+*/
+TEST_XN (uzp_z18_z23, svint32x2_t, z18,
+ svuzp_s32_x2 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, z28\.s, z29\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svint32x2_t, z23,
+ svuzp_s32_x2 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.s - z29\.s}, z0\.s, z1\.s
+** ret
+*/
+TEST_XN (uzp_z28_z0, svint32x2_t, z28,
+ svuzp_s32_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z28_z0_z23:
+** uzp {z28\.s - z29\.s}, z0\.s, z23\.s
+** ret
+*/
+TEST_XN (uzp_z28_z0_z23, svint32x2_t, z28,
+ svuzp_s32_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzp (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzp_z28_z5_z19:
+** uzp {z28\.s - z29\.s}, z5\.s, z19\.s
+** ret
+*/
+TEST_XN (uzp_z28_z5_z19, svint32x2_t, z28,
+ svuzp_s32_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzp (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s32_x4.c
new file mode 100644
index 0000000..0f1ec0e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s32_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.s - z3\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (uzp_z0_z0, svint32x4_t, z0,
+ svuzp_s32_x4 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (uzp_z0_z4, svint32x4_t, z0,
+ svuzp_s32_x4 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.s - z7\.s}, [^\n]+
+** ret
+*/
+TEST_XN (uzp_z4_z18, svint32x4_t, z4,
+ svuzp_s32_x4 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z18_z23, svint32x4_t, z18,
+ svuzp_s32_x4 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svint32x4_t, z23,
+ svuzp_s32_x4 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.s - z31\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (uzp_z28_z0, svint32x4_t, z28,
+ svuzp_s32_x4 (z0),
+ svuzp (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s64_x2.c
new file mode 100644
index 0000000..91527ce
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s64_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.d - z1\.d}, z0\.d, z1\.d
+** ret
+*/
+TEST_XN (uzp_z0_z0, svint64x2_t, z0,
+ svuzp_s64_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.d - z1\.d}, z4\.d, z5\.d
+** ret
+*/
+TEST_XN (uzp_z0_z4, svint64x2_t, z0,
+ svuzp_s64_x2 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** uzp {z4\.d - z5\.d}, z18\.d, z19\.d
+** ret
+*/
+TEST_XN (uzp_z4_z18, svint64x2_t, z4,
+ svuzp_s64_x2 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** uzp {z18\.d - z19\.d}, z23\.d, z24\.d
+** ret
+*/
+TEST_XN (uzp_z18_z23, svint64x2_t, z18,
+ svuzp_s64_x2 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, z28\.d, z29\.d
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svint64x2_t, z23,
+ svuzp_s64_x2 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.d - z29\.d}, z0\.d, z1\.d
+** ret
+*/
+TEST_XN (uzp_z28_z0, svint64x2_t, z28,
+ svuzp_s64_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z28_z0_z23:
+** uzp {z28\.d - z29\.d}, z0\.d, z23\.d
+** ret
+*/
+TEST_XN (uzp_z28_z0_z23, svint64x2_t, z28,
+ svuzp_s64_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzp (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzp_z28_z5_z19:
+** uzp {z28\.d - z29\.d}, z5\.d, z19\.d
+** ret
+*/
+TEST_XN (uzp_z28_z5_z19, svint64x2_t, z28,
+ svuzp_s64_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzp (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s64_x4.c
new file mode 100644
index 0000000..b28ed72
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s64_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.d - z3\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (uzp_z0_z0, svint64x4_t, z0,
+ svuzp_s64_x4 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (uzp_z0_z4, svint64x4_t, z0,
+ svuzp_s64_x4 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.d - z7\.d}, [^\n]+
+** ret
+*/
+TEST_XN (uzp_z4_z18, svint64x4_t, z4,
+ svuzp_s64_x4 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z18_z23, svint64x4_t, z18,
+ svuzp_s64_x4 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svint64x4_t, z23,
+ svuzp_s64_x4 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.d - z31\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (uzp_z28_z0, svint64x4_t, z28,
+ svuzp_s64_x4 (z0),
+ svuzp (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s8_x2.c
new file mode 100644
index 0000000..406d227
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s8_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.b - z1\.b}, z0\.b, z1\.b
+** ret
+*/
+TEST_XN (uzp_z0_z0, svint8x2_t, z0,
+ svuzp_s8_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.b - z1\.b}, z4\.b, z5\.b
+** ret
+*/
+TEST_XN (uzp_z0_z4, svint8x2_t, z0,
+ svuzp_s8_x2 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** uzp {z4\.b - z5\.b}, z18\.b, z19\.b
+** ret
+*/
+TEST_XN (uzp_z4_z18, svint8x2_t, z4,
+ svuzp_s8_x2 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** uzp {z18\.b - z19\.b}, z23\.b, z24\.b
+** ret
+*/
+TEST_XN (uzp_z18_z23, svint8x2_t, z18,
+ svuzp_s8_x2 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, z28\.b, z29\.b
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svint8x2_t, z23,
+ svuzp_s8_x2 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.b - z29\.b}, z0\.b, z1\.b
+** ret
+*/
+TEST_XN (uzp_z28_z0, svint8x2_t, z28,
+ svuzp_s8_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z28_z0_z23:
+** uzp {z28\.b - z29\.b}, z0\.b, z23\.b
+** ret
+*/
+TEST_XN (uzp_z28_z0_z23, svint8x2_t, z28,
+ svuzp_s8_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzp (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzp_z28_z5_z19:
+** uzp {z28\.b - z29\.b}, z5\.b, z19\.b
+** ret
+*/
+TEST_XN (uzp_z28_z5_z19, svint8x2_t, z28,
+ svuzp_s8_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzp (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s8_x4.c
new file mode 100644
index 0000000..d29bbd5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_s8_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.b - z3\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_XN (uzp_z0_z0, svint8x4_t, z0,
+ svuzp_s8_x4 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_XN (uzp_z0_z4, svint8x4_t, z0,
+ svuzp_s8_x4 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.b - z7\.b}, [^\n]+
+** ret
+*/
+TEST_XN (uzp_z4_z18, svint8x4_t, z4,
+ svuzp_s8_x4 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z18_z23, svint8x4_t, z18,
+ svuzp_s8_x4 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, {z28\.b - z31\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svint8x4_t, z23,
+ svuzp_s8_x4 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.b - z31\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_XN (uzp_z28_z0, svint8x4_t, z28,
+ svuzp_s8_x4 (z0),
+ svuzp (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u16_x2.c
new file mode 100644
index 0000000..9990a59
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u16_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.h - z1\.h}, z0\.h, z1\.h
+** ret
+*/
+TEST_XN (uzp_z0_z0, svuint16x2_t, z0,
+ svuzp_u16_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.h - z1\.h}, z4\.h, z5\.h
+** ret
+*/
+TEST_XN (uzp_z0_z4, svuint16x2_t, z0,
+ svuzp_u16_x2 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** uzp {z4\.h - z5\.h}, z18\.h, z19\.h
+** ret
+*/
+TEST_XN (uzp_z4_z18, svuint16x2_t, z4,
+ svuzp_u16_x2 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** uzp {z18\.h - z19\.h}, z23\.h, z24\.h
+** ret
+*/
+TEST_XN (uzp_z18_z23, svuint16x2_t, z18,
+ svuzp_u16_x2 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, z28\.h, z29\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svuint16x2_t, z23,
+ svuzp_u16_x2 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.h - z29\.h}, z0\.h, z1\.h
+** ret
+*/
+TEST_XN (uzp_z28_z0, svuint16x2_t, z28,
+ svuzp_u16_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z28_z0_z23:
+** uzp {z28\.h - z29\.h}, z0\.h, z23\.h
+** ret
+*/
+TEST_XN (uzp_z28_z0_z23, svuint16x2_t, z28,
+ svuzp_u16_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzp (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzp_z28_z5_z19:
+** uzp {z28\.h - z29\.h}, z5\.h, z19\.h
+** ret
+*/
+TEST_XN (uzp_z28_z5_z19, svuint16x2_t, z28,
+ svuzp_u16_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzp (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u16_x4.c
new file mode 100644
index 0000000..5e72ba8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u16_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (uzp_z0_z0, svuint16x4_t, z0,
+ svuzp_u16_x4 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (uzp_z0_z4, svuint16x4_t, z0,
+ svuzp_u16_x4 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.h - z7\.h}, [^\n]+
+** ret
+*/
+TEST_XN (uzp_z4_z18, svuint16x4_t, z4,
+ svuzp_u16_x4 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z18_z23, svuint16x4_t, z18,
+ svuzp_u16_x4 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svuint16x4_t, z23,
+ svuzp_u16_x4 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (uzp_z28_z0, svuint16x4_t, z28,
+ svuzp_u16_x4 (z0),
+ svuzp (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u32_x2.c
new file mode 100644
index 0000000..4bdcaa3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u32_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.s - z1\.s}, z0\.s, z1\.s
+** ret
+*/
+TEST_XN (uzp_z0_z0, svuint32x2_t, z0,
+ svuzp_u32_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.s - z1\.s}, z4\.s, z5\.s
+** ret
+*/
+TEST_XN (uzp_z0_z4, svuint32x2_t, z0,
+ svuzp_u32_x2 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** uzp {z4\.s - z5\.s}, z18\.s, z19\.s
+** ret
+*/
+TEST_XN (uzp_z4_z18, svuint32x2_t, z4,
+ svuzp_u32_x2 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** uzp {z18\.s - z19\.s}, z23\.s, z24\.s
+** ret
+*/
+TEST_XN (uzp_z18_z23, svuint32x2_t, z18,
+ svuzp_u32_x2 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, z28\.s, z29\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svuint32x2_t, z23,
+ svuzp_u32_x2 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.s - z29\.s}, z0\.s, z1\.s
+** ret
+*/
+TEST_XN (uzp_z28_z0, svuint32x2_t, z28,
+ svuzp_u32_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z28_z0_z23:
+** uzp {z28\.s - z29\.s}, z0\.s, z23\.s
+** ret
+*/
+TEST_XN (uzp_z28_z0_z23, svuint32x2_t, z28,
+ svuzp_u32_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzp (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzp_z28_z5_z19:
+** uzp {z28\.s - z29\.s}, z5\.s, z19\.s
+** ret
+*/
+TEST_XN (uzp_z28_z5_z19, svuint32x2_t, z28,
+ svuzp_u32_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzp (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u32_x4.c
new file mode 100644
index 0000000..ede11a9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u32_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.s - z3\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (uzp_z0_z0, svuint32x4_t, z0,
+ svuzp_u32_x4 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (uzp_z0_z4, svuint32x4_t, z0,
+ svuzp_u32_x4 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.s - z7\.s}, [^\n]+
+** ret
+*/
+TEST_XN (uzp_z4_z18, svuint32x4_t, z4,
+ svuzp_u32_x4 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z18_z23, svuint32x4_t, z18,
+ svuzp_u32_x4 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svuint32x4_t, z23,
+ svuzp_u32_x4 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.s - z31\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (uzp_z28_z0, svuint32x4_t, z28,
+ svuzp_u32_x4 (z0),
+ svuzp (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u64_x2.c
new file mode 100644
index 0000000..95b69d1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u64_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.d - z1\.d}, z0\.d, z1\.d
+** ret
+*/
+TEST_XN (uzp_z0_z0, svuint64x2_t, z0,
+ svuzp_u64_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.d - z1\.d}, z4\.d, z5\.d
+** ret
+*/
+TEST_XN (uzp_z0_z4, svuint64x2_t, z0,
+ svuzp_u64_x2 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** uzp {z4\.d - z5\.d}, z18\.d, z19\.d
+** ret
+*/
+TEST_XN (uzp_z4_z18, svuint64x2_t, z4,
+ svuzp_u64_x2 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** uzp {z18\.d - z19\.d}, z23\.d, z24\.d
+** ret
+*/
+TEST_XN (uzp_z18_z23, svuint64x2_t, z18,
+ svuzp_u64_x2 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, z28\.d, z29\.d
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svuint64x2_t, z23,
+ svuzp_u64_x2 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.d - z29\.d}, z0\.d, z1\.d
+** ret
+*/
+TEST_XN (uzp_z28_z0, svuint64x2_t, z28,
+ svuzp_u64_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z28_z0_z23:
+** uzp {z28\.d - z29\.d}, z0\.d, z23\.d
+** ret
+*/
+TEST_XN (uzp_z28_z0_z23, svuint64x2_t, z28,
+ svuzp_u64_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzp (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzp_z28_z5_z19:
+** uzp {z28\.d - z29\.d}, z5\.d, z19\.d
+** ret
+*/
+TEST_XN (uzp_z28_z5_z19, svuint64x2_t, z28,
+ svuzp_u64_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzp (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u64_x4.c
new file mode 100644
index 0000000..a5a7234
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u64_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.d - z3\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (uzp_z0_z0, svuint64x4_t, z0,
+ svuzp_u64_x4 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (uzp_z0_z4, svuint64x4_t, z0,
+ svuzp_u64_x4 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.d - z7\.d}, [^\n]+
+** ret
+*/
+TEST_XN (uzp_z4_z18, svuint64x4_t, z4,
+ svuzp_u64_x4 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z18_z23, svuint64x4_t, z18,
+ svuzp_u64_x4 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svuint64x4_t, z23,
+ svuzp_u64_x4 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.d - z31\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (uzp_z28_z0, svuint64x4_t, z28,
+ svuzp_u64_x4 (z0),
+ svuzp (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u8_x2.c
new file mode 100644
index 0000000..5200292
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u8_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.b - z1\.b}, z0\.b, z1\.b
+** ret
+*/
+TEST_XN (uzp_z0_z0, svuint8x2_t, z0,
+ svuzp_u8_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.b - z1\.b}, z4\.b, z5\.b
+** ret
+*/
+TEST_XN (uzp_z0_z4, svuint8x2_t, z0,
+ svuzp_u8_x2 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** uzp {z4\.b - z5\.b}, z18\.b, z19\.b
+** ret
+*/
+TEST_XN (uzp_z4_z18, svuint8x2_t, z4,
+ svuzp_u8_x2 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** uzp {z18\.b - z19\.b}, z23\.b, z24\.b
+** ret
+*/
+TEST_XN (uzp_z18_z23, svuint8x2_t, z18,
+ svuzp_u8_x2 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, z28\.b, z29\.b
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svuint8x2_t, z23,
+ svuzp_u8_x2 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.b - z29\.b}, z0\.b, z1\.b
+** ret
+*/
+TEST_XN (uzp_z28_z0, svuint8x2_t, z28,
+ svuzp_u8_x2 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z28_z0_z23:
+** uzp {z28\.b - z29\.b}, z0\.b, z23\.b
+** ret
+*/
+TEST_XN (uzp_z28_z0_z23, svuint8x2_t, z28,
+ svuzp_u8_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzp (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzp_z28_z5_z19:
+** uzp {z28\.b - z29\.b}, z5\.b, z19\.b
+** ret
+*/
+TEST_XN (uzp_z28_z5_z19, svuint8x2_t, z28,
+ svuzp_u8_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzp (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u8_x4.c
new file mode 100644
index 0000000..59240fa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzp_u8_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzp_z0_z0:
+** uzp {z0\.b - z3\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_XN (uzp_z0_z0, svuint8x4_t, z0,
+ svuzp_u8_x4 (z0),
+ svuzp (z0))
+
+/*
+** uzp_z0_z4:
+** uzp {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_XN (uzp_z0_z4, svuint8x4_t, z0,
+ svuzp_u8_x4 (z4),
+ svuzp (z4))
+
+/*
+** uzp_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.b - z7\.b}, [^\n]+
+** ret
+*/
+TEST_XN (uzp_z4_z18, svuint8x4_t, z4,
+ svuzp_u8_x4 (z18),
+ svuzp (z18))
+
+/*
+** uzp_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z18_z23, svuint8x4_t, z18,
+ svuzp_u8_x4 (z23),
+ svuzp (z23))
+
+/*
+** uzp_z23_z28:
+** uzp [^\n]+, {z28\.b - z31\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzp_z23_z28, svuint8x4_t, z23,
+ svuzp_u8_x4 (z28),
+ svuzp (z28))
+
+/*
+** uzp_z28_z0:
+** uzp {z28\.b - z31\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_XN (uzp_z28_z0, svuint8x4_t, z28,
+ svuzp_u8_x4 (z0),
+ svuzp (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_bf16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_bf16_x2.c
new file mode 100644
index 0000000..6d9ae62
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_bf16_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svbfloat16x2_t, z0,
+ svuzpq_bf16_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svbfloat16x2_t, z0,
+ svuzpq_bf16_x2 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** uzp {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svbfloat16x2_t, z4,
+ svuzpq_bf16_x2 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** uzp {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svbfloat16x2_t, z18,
+ svuzpq_bf16_x2 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svbfloat16x2_t, z23,
+ svuzpq_bf16_x2 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svbfloat16x2_t, z28,
+ svuzpq_bf16_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z28_z0_z23:
+** uzp {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0_z23, svbfloat16x2_t, z28,
+ svuzpq_bf16_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzpq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzpq_z28_z5_z19:
+** uzp {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z5_z19, svbfloat16x2_t, z28,
+ svuzpq_bf16_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzpq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_bf16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_bf16_x4.c
new file mode 100644
index 0000000..5a3c32f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_bf16_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svbfloat16x4_t, z0,
+ svuzpq_bf16_x4 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svbfloat16x4_t, z0,
+ svuzpq_bf16_x4 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svbfloat16x4_t, z4,
+ svuzpq_bf16_x4 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svbfloat16x4_t, z18,
+ svuzpq_bf16_x4 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svbfloat16x4_t, z23,
+ svuzpq_bf16_x4 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svbfloat16x4_t, z28,
+ svuzpq_bf16_x4 (z0),
+ svuzpq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f16_x2.c
new file mode 100644
index 0000000..e8add86
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f16_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svfloat16x2_t, z0,
+ svuzpq_f16_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svfloat16x2_t, z0,
+ svuzpq_f16_x2 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** uzp {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svfloat16x2_t, z4,
+ svuzpq_f16_x2 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** uzp {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svfloat16x2_t, z18,
+ svuzpq_f16_x2 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svfloat16x2_t, z23,
+ svuzpq_f16_x2 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svfloat16x2_t, z28,
+ svuzpq_f16_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z28_z0_z23:
+** uzp {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0_z23, svfloat16x2_t, z28,
+ svuzpq_f16_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzpq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzpq_z28_z5_z19:
+** uzp {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z5_z19, svfloat16x2_t, z28,
+ svuzpq_f16_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzpq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f16_x4.c
new file mode 100644
index 0000000..d01f1e0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f16_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svfloat16x4_t, z0,
+ svuzpq_f16_x4 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svfloat16x4_t, z0,
+ svuzpq_f16_x4 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svfloat16x4_t, z4,
+ svuzpq_f16_x4 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svfloat16x4_t, z18,
+ svuzpq_f16_x4 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svfloat16x4_t, z23,
+ svuzpq_f16_x4 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svfloat16x4_t, z28,
+ svuzpq_f16_x4 (z0),
+ svuzpq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f32_x2.c
new file mode 100644
index 0000000..d4487f4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f32_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svfloat32x2_t, z0,
+ svuzpq_f32_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svfloat32x2_t, z0,
+ svuzpq_f32_x2 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** uzp {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svfloat32x2_t, z4,
+ svuzpq_f32_x2 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** uzp {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svfloat32x2_t, z18,
+ svuzpq_f32_x2 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svfloat32x2_t, z23,
+ svuzpq_f32_x2 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svfloat32x2_t, z28,
+ svuzpq_f32_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z28_z0_z23:
+** uzp {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0_z23, svfloat32x2_t, z28,
+ svuzpq_f32_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzpq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzpq_z28_z5_z19:
+** uzp {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z5_z19, svfloat32x2_t, z28,
+ svuzpq_f32_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzpq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f32_x4.c
new file mode 100644
index 0000000..998294e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f32_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svfloat32x4_t, z0,
+ svuzpq_f32_x4 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svfloat32x4_t, z0,
+ svuzpq_f32_x4 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svfloat32x4_t, z4,
+ svuzpq_f32_x4 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svfloat32x4_t, z18,
+ svuzpq_f32_x4 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svfloat32x4_t, z23,
+ svuzpq_f32_x4 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svfloat32x4_t, z28,
+ svuzpq_f32_x4 (z0),
+ svuzpq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f64_x2.c
new file mode 100644
index 0000000..8f7bc9f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f64_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svfloat64x2_t, z0,
+ svuzpq_f64_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svfloat64x2_t, z0,
+ svuzpq_f64_x2 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** uzp {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svfloat64x2_t, z4,
+ svuzpq_f64_x2 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** uzp {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svfloat64x2_t, z18,
+ svuzpq_f64_x2 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svfloat64x2_t, z23,
+ svuzpq_f64_x2 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svfloat64x2_t, z28,
+ svuzpq_f64_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z28_z0_z23:
+** uzp {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0_z23, svfloat64x2_t, z28,
+ svuzpq_f64_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzpq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzpq_z28_z5_z19:
+** uzp {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z5_z19, svfloat64x2_t, z28,
+ svuzpq_f64_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzpq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f64_x4.c
new file mode 100644
index 0000000..dc7b7bf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_f64_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svfloat64x4_t, z0,
+ svuzpq_f64_x4 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svfloat64x4_t, z0,
+ svuzpq_f64_x4 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svfloat64x4_t, z4,
+ svuzpq_f64_x4 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svfloat64x4_t, z18,
+ svuzpq_f64_x4 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svfloat64x4_t, z23,
+ svuzpq_f64_x4 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svfloat64x4_t, z28,
+ svuzpq_f64_x4 (z0),
+ svuzpq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s16_x2.c
new file mode 100644
index 0000000..2cfe7af
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s16_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svint16x2_t, z0,
+ svuzpq_s16_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svint16x2_t, z0,
+ svuzpq_s16_x2 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** uzp {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svint16x2_t, z4,
+ svuzpq_s16_x2 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** uzp {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svint16x2_t, z18,
+ svuzpq_s16_x2 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svint16x2_t, z23,
+ svuzpq_s16_x2 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svint16x2_t, z28,
+ svuzpq_s16_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z28_z0_z23:
+** uzp {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0_z23, svint16x2_t, z28,
+ svuzpq_s16_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzpq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzpq_z28_z5_z19:
+** uzp {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z5_z19, svint16x2_t, z28,
+ svuzpq_s16_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzpq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s16_x4.c
new file mode 100644
index 0000000..0cedde8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s16_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svint16x4_t, z0,
+ svuzpq_s16_x4 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svint16x4_t, z0,
+ svuzpq_s16_x4 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svint16x4_t, z4,
+ svuzpq_s16_x4 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svint16x4_t, z18,
+ svuzpq_s16_x4 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svint16x4_t, z23,
+ svuzpq_s16_x4 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svint16x4_t, z28,
+ svuzpq_s16_x4 (z0),
+ svuzpq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s32_x2.c
new file mode 100644
index 0000000..bd583bb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s32_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svint32x2_t, z0,
+ svuzpq_s32_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svint32x2_t, z0,
+ svuzpq_s32_x2 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** uzp {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svint32x2_t, z4,
+ svuzpq_s32_x2 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** uzp {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svint32x2_t, z18,
+ svuzpq_s32_x2 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svint32x2_t, z23,
+ svuzpq_s32_x2 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svint32x2_t, z28,
+ svuzpq_s32_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z28_z0_z23:
+** uzp {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0_z23, svint32x2_t, z28,
+ svuzpq_s32_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzpq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzpq_z28_z5_z19:
+** uzp {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z5_z19, svint32x2_t, z28,
+ svuzpq_s32_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzpq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s32_x4.c
new file mode 100644
index 0000000..787ffed
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s32_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svint32x4_t, z0,
+ svuzpq_s32_x4 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svint32x4_t, z0,
+ svuzpq_s32_x4 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svint32x4_t, z4,
+ svuzpq_s32_x4 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svint32x4_t, z18,
+ svuzpq_s32_x4 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svint32x4_t, z23,
+ svuzpq_s32_x4 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svint32x4_t, z28,
+ svuzpq_s32_x4 (z0),
+ svuzpq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s64_x2.c
new file mode 100644
index 0000000..4b2aa57
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s64_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svint64x2_t, z0,
+ svuzpq_s64_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svint64x2_t, z0,
+ svuzpq_s64_x2 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** uzp {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svint64x2_t, z4,
+ svuzpq_s64_x2 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** uzp {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svint64x2_t, z18,
+ svuzpq_s64_x2 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svint64x2_t, z23,
+ svuzpq_s64_x2 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svint64x2_t, z28,
+ svuzpq_s64_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z28_z0_z23:
+** uzp {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0_z23, svint64x2_t, z28,
+ svuzpq_s64_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzpq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzpq_z28_z5_z19:
+** uzp {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z5_z19, svint64x2_t, z28,
+ svuzpq_s64_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzpq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s64_x4.c
new file mode 100644
index 0000000..838697a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s64_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svint64x4_t, z0,
+ svuzpq_s64_x4 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svint64x4_t, z0,
+ svuzpq_s64_x4 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svint64x4_t, z4,
+ svuzpq_s64_x4 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svint64x4_t, z18,
+ svuzpq_s64_x4 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svint64x4_t, z23,
+ svuzpq_s64_x4 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svint64x4_t, z28,
+ svuzpq_s64_x4 (z0),
+ svuzpq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s8_x2.c
new file mode 100644
index 0000000..fe50987
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s8_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svint8x2_t, z0,
+ svuzpq_s8_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svint8x2_t, z0,
+ svuzpq_s8_x2 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** uzp {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svint8x2_t, z4,
+ svuzpq_s8_x2 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** uzp {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svint8x2_t, z18,
+ svuzpq_s8_x2 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svint8x2_t, z23,
+ svuzpq_s8_x2 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svint8x2_t, z28,
+ svuzpq_s8_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z28_z0_z23:
+** uzp {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0_z23, svint8x2_t, z28,
+ svuzpq_s8_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzpq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzpq_z28_z5_z19:
+** uzp {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z5_z19, svint8x2_t, z28,
+ svuzpq_s8_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzpq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s8_x4.c
new file mode 100644
index 0000000..592cdc1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_s8_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svint8x4_t, z0,
+ svuzpq_s8_x4 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svint8x4_t, z0,
+ svuzpq_s8_x4 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svint8x4_t, z4,
+ svuzpq_s8_x4 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svint8x4_t, z18,
+ svuzpq_s8_x4 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svint8x4_t, z23,
+ svuzpq_s8_x4 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svint8x4_t, z28,
+ svuzpq_s8_x4 (z0),
+ svuzpq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u16_x2.c
new file mode 100644
index 0000000..69a7aa6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u16_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svuint16x2_t, z0,
+ svuzpq_u16_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svuint16x2_t, z0,
+ svuzpq_u16_x2 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** uzp {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svuint16x2_t, z4,
+ svuzpq_u16_x2 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** uzp {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svuint16x2_t, z18,
+ svuzpq_u16_x2 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svuint16x2_t, z23,
+ svuzpq_u16_x2 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svuint16x2_t, z28,
+ svuzpq_u16_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z28_z0_z23:
+** uzp {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0_z23, svuint16x2_t, z28,
+ svuzpq_u16_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzpq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzpq_z28_z5_z19:
+** uzp {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z5_z19, svuint16x2_t, z28,
+ svuzpq_u16_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzpq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u16_x4.c
new file mode 100644
index 0000000..e448d62
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u16_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svuint16x4_t, z0,
+ svuzpq_u16_x4 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svuint16x4_t, z0,
+ svuzpq_u16_x4 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svuint16x4_t, z4,
+ svuzpq_u16_x4 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svuint16x4_t, z18,
+ svuzpq_u16_x4 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svuint16x4_t, z23,
+ svuzpq_u16_x4 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svuint16x4_t, z28,
+ svuzpq_u16_x4 (z0),
+ svuzpq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u32_x2.c
new file mode 100644
index 0000000..94a89c8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u32_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svuint32x2_t, z0,
+ svuzpq_u32_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svuint32x2_t, z0,
+ svuzpq_u32_x2 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** uzp {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svuint32x2_t, z4,
+ svuzpq_u32_x2 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** uzp {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svuint32x2_t, z18,
+ svuzpq_u32_x2 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svuint32x2_t, z23,
+ svuzpq_u32_x2 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svuint32x2_t, z28,
+ svuzpq_u32_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z28_z0_z23:
+** uzp {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0_z23, svuint32x2_t, z28,
+ svuzpq_u32_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzpq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzpq_z28_z5_z19:
+** uzp {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z5_z19, svuint32x2_t, z28,
+ svuzpq_u32_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzpq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u32_x4.c
new file mode 100644
index 0000000..efb71c7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u32_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svuint32x4_t, z0,
+ svuzpq_u32_x4 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svuint32x4_t, z0,
+ svuzpq_u32_x4 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svuint32x4_t, z4,
+ svuzpq_u32_x4 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svuint32x4_t, z18,
+ svuzpq_u32_x4 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svuint32x4_t, z23,
+ svuzpq_u32_x4 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svuint32x4_t, z28,
+ svuzpq_u32_x4 (z0),
+ svuzpq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u64_x2.c
new file mode 100644
index 0000000..779906c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u64_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svuint64x2_t, z0,
+ svuzpq_u64_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svuint64x2_t, z0,
+ svuzpq_u64_x2 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** uzp {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svuint64x2_t, z4,
+ svuzpq_u64_x2 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** uzp {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svuint64x2_t, z18,
+ svuzpq_u64_x2 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svuint64x2_t, z23,
+ svuzpq_u64_x2 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svuint64x2_t, z28,
+ svuzpq_u64_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z28_z0_z23:
+** uzp {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0_z23, svuint64x2_t, z28,
+ svuzpq_u64_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzpq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzpq_z28_z5_z19:
+** uzp {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z5_z19, svuint64x2_t, z28,
+ svuzpq_u64_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzpq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u64_x4.c
new file mode 100644
index 0000000..039f42e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u64_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svuint64x4_t, z0,
+ svuzpq_u64_x4 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svuint64x4_t, z0,
+ svuzpq_u64_x4 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svuint64x4_t, z4,
+ svuzpq_u64_x4 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svuint64x4_t, z18,
+ svuzpq_u64_x4 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svuint64x4_t, z23,
+ svuzpq_u64_x4 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svuint64x4_t, z28,
+ svuzpq_u64_x4 (z0),
+ svuzpq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u8_x2.c
new file mode 100644
index 0000000..04fe9e6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u8_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svuint8x2_t, z0,
+ svuzpq_u8_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svuint8x2_t, z0,
+ svuzpq_u8_x2 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** uzp {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svuint8x2_t, z4,
+ svuzpq_u8_x2 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** uzp {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svuint8x2_t, z18,
+ svuzpq_u8_x2 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svuint8x2_t, z23,
+ svuzpq_u8_x2 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svuint8x2_t, z28,
+ svuzpq_u8_x2 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z28_z0_z23:
+** uzp {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z0_z23, svuint8x2_t, z28,
+ svuzpq_u8_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svuzpq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** uzpq_z28_z5_z19:
+** uzp {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (uzpq_z28_z5_z19, svuint8x2_t, z28,
+ svuzpq_u8_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svuzpq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u8_x4.c
new file mode 100644
index 0000000..133d95a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/uzpq_u8_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** uzpq_z0_z0:
+** uzp {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z0, svuint8x4_t, z0,
+ svuzpq_u8_x4 (z0),
+ svuzpq (z0))
+
+/*
+** uzpq_z0_z4:
+** uzp {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (uzpq_z0_z4, svuint8x4_t, z0,
+ svuzpq_u8_x4 (z4),
+ svuzpq (z4))
+
+/*
+** uzpq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z4_z18, svuint8x4_t, z4,
+ svuzpq_u8_x4 (z18),
+ svuzpq (z18))
+
+/*
+** uzpq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uzp {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z18_z23, svuint8x4_t, z18,
+ svuzpq_u8_x4 (z23),
+ svuzpq (z23))
+
+/*
+** uzpq_z23_z28:
+** uzp [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (uzpq_z23_z28, svuint8x4_t, z23,
+ svuzpq_u8_x4 (z28),
+ svuzpq (z28))
+
+/*
+** uzpq_z28_z0:
+** uzp {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (uzpq_z28_z0, svuint8x4_t, z28,
+ svuzpq_u8_x4 (z0),
+ svuzpq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_bf16_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_bf16_vg1x2.c
new file mode 100644
index 0000000..9edf823
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_bf16_vg1x2.c
@@ -0,0 +1,102 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** vdot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** bfvdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_0_z0_z4_0, svbfloat16x2_t, svbfloat16_t,
+ svvdot_lane_za32_bf16_vg1x2 (0, z0, z4, 0),
+ svvdot_lane_za32_vg1x2 (0, z0, z4, 0))
+
+/*
+** vdot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** bfvdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w0_z0_z7_1, svbfloat16x2_t, svbfloat16_t,
+ svvdot_lane_za32_bf16_vg1x2 (w0, z0, z7, 1),
+ svvdot_lane_za32_vg1x2 (w0, z0, z7, 1))
+
+/*
+** vdot_lane_w8_z28_z4_2:
+** bfvdot za\.s\[w8, 0, vgx2\], {z28\.h - z29\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z28_z4_2, svbfloat16x2_t, svbfloat16_t,
+ svvdot_lane_za32_bf16_vg1x2 (w8, z28, z4, 2),
+ svvdot_lane_za32_vg1x2 (w8, z28, z4, 2))
+
+/*
+** vdot_lane_w8p7_z0_z4_3:
+** bfvdot za\.s\[w8, 7, vgx2\], {z0\.h - z1\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8p7_z0_z4_3, svbfloat16x2_t, svbfloat16_t,
+ svvdot_lane_za32_bf16_vg1x2 (w8 + 7, z0, z4, 3),
+ svvdot_lane_za32_vg1x2 (w8 + 7, z0, z4, 3))
+
+/*
+** vdot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** bfvdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8p8_z0_z4_0, svbfloat16x2_t, svbfloat16_t,
+ svvdot_lane_za32_bf16_vg1x2 (w8 + 8, z0, z4, 0),
+ svvdot_lane_za32_vg1x2 (w8 + 8, z0, z4, 0))
+
+/*
+** vdot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** bfvdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w0m1_z0_z4_1, svbfloat16x2_t, svbfloat16_t,
+ svvdot_lane_za32_bf16_vg1x2 (w0 - 1, z0, z4, 1),
+ svvdot_lane_za32_vg1x2 (w0 - 1, z0, z4, 1))
+
+/*
+** vdot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** bfvdot za\.s\[w8, 0, vgx2\], {z4\.h - z5\.h}, z15\.h\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (vdot_lane_w8_z4_z15_2, svbfloat16x2_t, svbfloat16_t,
+ svvdot_lane_za32_bf16_vg1x2 (w8, z4, z15, 2),
+ svvdot_lane_za32_vg1x2 (w8, z4, z15, 2))
+
+/*
+** vdot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** bfvdot za\.s\[w8, 0, vgx2\], {z28\.h - z29\.h}, \1\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z28_z16_3, svbfloat16x2_t, svbfloat16_t,
+ svvdot_lane_za32_bf16_vg1x2 (w8, z28, z16, 3),
+ svvdot_lane_za32_vg1x2 (w8, z28, z16, 3))
+
+/*
+** vdot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** bfvdot za\.s\[w8, 0, vgx2\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z17_z7_0, svbfloat16x2_t, svbfloat16_t,
+ svvdot_lane_za32_bf16_vg1x2 (w8, z17, z7, 0),
+ svvdot_lane_za32_vg1x2 (w8, z17, z7, 0))
+
+/*
+** vdot_lane_w8_z22_z4_1:
+** bfvdot za\.s\[w8, 0, vgx2\], {z22\.h - z23\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z22_z4_1, svbfloat16x2_t, svbfloat16_t,
+ svvdot_lane_za32_bf16_vg1x2 (w8, z22, z4, 1),
+ svvdot_lane_za32_vg1x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_f16_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_f16_vg1x2.c
new file mode 100644
index 0000000..75acf9f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_f16_vg1x2.c
@@ -0,0 +1,102 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** vdot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** fvdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_0_z0_z4_0, svfloat16x2_t, svfloat16_t,
+ svvdot_lane_za32_f16_vg1x2 (0, z0, z4, 0),
+ svvdot_lane_za32_vg1x2 (0, z0, z4, 0))
+
+/*
+** vdot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** fvdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w0_z0_z7_1, svfloat16x2_t, svfloat16_t,
+ svvdot_lane_za32_f16_vg1x2 (w0, z0, z7, 1),
+ svvdot_lane_za32_vg1x2 (w0, z0, z7, 1))
+
+/*
+** vdot_lane_w8_z28_z4_2:
+** fvdot za\.s\[w8, 0, vgx2\], {z28\.h - z29\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z28_z4_2, svfloat16x2_t, svfloat16_t,
+ svvdot_lane_za32_f16_vg1x2 (w8, z28, z4, 2),
+ svvdot_lane_za32_vg1x2 (w8, z28, z4, 2))
+
+/*
+** vdot_lane_w8p7_z0_z4_3:
+** fvdot za\.s\[w8, 7, vgx2\], {z0\.h - z1\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8p7_z0_z4_3, svfloat16x2_t, svfloat16_t,
+ svvdot_lane_za32_f16_vg1x2 (w8 + 7, z0, z4, 3),
+ svvdot_lane_za32_vg1x2 (w8 + 7, z0, z4, 3))
+
+/*
+** vdot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** fvdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8p8_z0_z4_0, svfloat16x2_t, svfloat16_t,
+ svvdot_lane_za32_f16_vg1x2 (w8 + 8, z0, z4, 0),
+ svvdot_lane_za32_vg1x2 (w8 + 8, z0, z4, 0))
+
+/*
+** vdot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** fvdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w0m1_z0_z4_1, svfloat16x2_t, svfloat16_t,
+ svvdot_lane_za32_f16_vg1x2 (w0 - 1, z0, z4, 1),
+ svvdot_lane_za32_vg1x2 (w0 - 1, z0, z4, 1))
+
+/*
+** vdot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** fvdot za\.s\[w8, 0, vgx2\], {z4\.h - z5\.h}, z15\.h\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (vdot_lane_w8_z4_z15_2, svfloat16x2_t, svfloat16_t,
+ svvdot_lane_za32_f16_vg1x2 (w8, z4, z15, 2),
+ svvdot_lane_za32_vg1x2 (w8, z4, z15, 2))
+
+/*
+** vdot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** fvdot za\.s\[w8, 0, vgx2\], {z28\.h - z29\.h}, \1\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z28_z16_3, svfloat16x2_t, svfloat16_t,
+ svvdot_lane_za32_f16_vg1x2 (w8, z28, z16, 3),
+ svvdot_lane_za32_vg1x2 (w8, z28, z16, 3))
+
+/*
+** vdot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** fvdot za\.s\[w8, 0, vgx2\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z17_z7_0, svfloat16x2_t, svfloat16_t,
+ svvdot_lane_za32_f16_vg1x2 (w8, z17, z7, 0),
+ svvdot_lane_za32_vg1x2 (w8, z17, z7, 0))
+
+/*
+** vdot_lane_w8_z22_z4_1:
+** fvdot za\.s\[w8, 0, vgx2\], {z22\.h - z23\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z22_z4_1, svfloat16x2_t, svfloat16_t,
+ svvdot_lane_za32_f16_vg1x2 (w8, z22, z4, 1),
+ svvdot_lane_za32_vg1x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_s16_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_s16_vg1x2.c
new file mode 100644
index 0000000..cbb0c66
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_s16_vg1x2.c
@@ -0,0 +1,102 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** vdot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** svdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_0_z0_z4_0, svint16x2_t, svint16_t,
+ svvdot_lane_za32_s16_vg1x2 (0, z0, z4, 0),
+ svvdot_lane_za32_vg1x2 (0, z0, z4, 0))
+
+/*
+** vdot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** svdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w0_z0_z7_1, svint16x2_t, svint16_t,
+ svvdot_lane_za32_s16_vg1x2 (w0, z0, z7, 1),
+ svvdot_lane_za32_vg1x2 (w0, z0, z7, 1))
+
+/*
+** vdot_lane_w8_z28_z4_2:
+** svdot za\.s\[w8, 0, vgx2\], {z28\.h - z29\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z28_z4_2, svint16x2_t, svint16_t,
+ svvdot_lane_za32_s16_vg1x2 (w8, z28, z4, 2),
+ svvdot_lane_za32_vg1x2 (w8, z28, z4, 2))
+
+/*
+** vdot_lane_w8p7_z0_z4_3:
+** svdot za\.s\[w8, 7, vgx2\], {z0\.h - z1\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8p7_z0_z4_3, svint16x2_t, svint16_t,
+ svvdot_lane_za32_s16_vg1x2 (w8 + 7, z0, z4, 3),
+ svvdot_lane_za32_vg1x2 (w8 + 7, z0, z4, 3))
+
+/*
+** vdot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** svdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8p8_z0_z4_0, svint16x2_t, svint16_t,
+ svvdot_lane_za32_s16_vg1x2 (w8 + 8, z0, z4, 0),
+ svvdot_lane_za32_vg1x2 (w8 + 8, z0, z4, 0))
+
+/*
+** vdot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** svdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w0m1_z0_z4_1, svint16x2_t, svint16_t,
+ svvdot_lane_za32_s16_vg1x2 (w0 - 1, z0, z4, 1),
+ svvdot_lane_za32_vg1x2 (w0 - 1, z0, z4, 1))
+
+/*
+** vdot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** svdot za\.s\[w8, 0, vgx2\], {z4\.h - z5\.h}, z15\.h\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (vdot_lane_w8_z4_z15_2, svint16x2_t, svint16_t,
+ svvdot_lane_za32_s16_vg1x2 (w8, z4, z15, 2),
+ svvdot_lane_za32_vg1x2 (w8, z4, z15, 2))
+
+/*
+** vdot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** svdot za\.s\[w8, 0, vgx2\], {z28\.h - z29\.h}, \1\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z28_z16_3, svint16x2_t, svint16_t,
+ svvdot_lane_za32_s16_vg1x2 (w8, z28, z16, 3),
+ svvdot_lane_za32_vg1x2 (w8, z28, z16, 3))
+
+/*
+** vdot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** svdot za\.s\[w8, 0, vgx2\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z17_z7_0, svint16x2_t, svint16_t,
+ svvdot_lane_za32_s16_vg1x2 (w8, z17, z7, 0),
+ svvdot_lane_za32_vg1x2 (w8, z17, z7, 0))
+
+/*
+** vdot_lane_w8_z22_z4_1:
+** svdot za\.s\[w8, 0, vgx2\], {z22\.h - z23\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z22_z4_1, svint16x2_t, svint16_t,
+ svvdot_lane_za32_s16_vg1x2 (w8, z22, z4, 1),
+ svvdot_lane_za32_vg1x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_s8_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_s8_vg1x4.c
new file mode 100644
index 0000000..c43a3d0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_s8_vg1x4.c
@@ -0,0 +1,108 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** vdot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** svdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_0_z0_z4_0, svint8x4_t, svint8_t,
+ svvdot_lane_za32_s8_vg1x4 (0, z0, z4, 0),
+ svvdot_lane_za32_vg1x4 (0, z0, z4, 0))
+
+/*
+** vdot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** svdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z7\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w0_z0_z7_1, svint8x4_t, svint8_t,
+ svvdot_lane_za32_s8_vg1x4 (w0, z0, z7, 1),
+ svvdot_lane_za32_vg1x4 (w0, z0, z7, 1))
+
+/*
+** vdot_lane_w8_z28_z4_2:
+** svdot za\.s\[w8, 0, vgx4\], {z28\.b - z31\.b}, z4\.b\[2\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z28_z4_2, svint8x4_t, svint8_t,
+ svvdot_lane_za32_s8_vg1x4 (w8, z28, z4, 2),
+ svvdot_lane_za32_vg1x4 (w8, z28, z4, 2))
+
+/*
+** vdot_lane_w8p7_z0_z4_3:
+** svdot za\.s\[w8, 7, vgx4\], {z0\.b - z3\.b}, z4\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8p7_z0_z4_3, svint8x4_t, svint8_t,
+ svvdot_lane_za32_s8_vg1x4 (w8 + 7, z0, z4, 3),
+ svvdot_lane_za32_vg1x4 (w8 + 7, z0, z4, 3))
+
+/*
+** vdot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** svdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8p8_z0_z4_0, svint8x4_t, svint8_t,
+ svvdot_lane_za32_s8_vg1x4 (w8 + 8, z0, z4, 0),
+ svvdot_lane_za32_vg1x4 (w8 + 8, z0, z4, 0))
+
+/*
+** vdot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** svdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w0m1_z0_z4_1, svint8x4_t, svint8_t,
+ svvdot_lane_za32_s8_vg1x4 (w0 - 1, z0, z4, 1),
+ svvdot_lane_za32_vg1x4 (w0 - 1, z0, z4, 1))
+
+/*
+** vdot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** svdot za\.s\[w8, 0, vgx4\], {z4\.b - z7\.b}, z15\.b\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (vdot_lane_w8_z4_z15_2, svint8x4_t, svint8_t,
+ svvdot_lane_za32_s8_vg1x4 (w8, z4, z15, 2),
+ svvdot_lane_za32_vg1x4 (w8, z4, z15, 2))
+
+/*
+** vdot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** svdot za\.s\[w8, 0, vgx4\], {z28\.b - z31\.b}, \1\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z28_z16_3, svint8x4_t, svint8_t,
+ svvdot_lane_za32_s8_vg1x4 (w8, z28, z16, 3),
+ svvdot_lane_za32_vg1x4 (w8, z28, z16, 3))
+
+/*
+** vdot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** svdot za\.s\[w8, 0, vgx4\], [^\n]+, z7\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z17_z7_0, svint8x4_t, svint8_t,
+ svvdot_lane_za32_s8_vg1x4 (w8, z17, z7, 0),
+ svvdot_lane_za32_vg1x4 (w8, z17, z7, 0))
+
+/*
+** vdot_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** svdot za\.s\[w8, 0, vgx4\], [^\n]+, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z22_z4_1, svint8x4_t, svint8_t,
+ svvdot_lane_za32_s8_vg1x4 (w8, z22, z4, 1),
+ svvdot_lane_za32_vg1x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_u16_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_u16_vg1x2.c
new file mode 100644
index 0000000..1694a7d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_u16_vg1x2.c
@@ -0,0 +1,102 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** vdot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** uvdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_0_z0_z4_0, svuint16x2_t, svuint16_t,
+ svvdot_lane_za32_u16_vg1x2 (0, z0, z4, 0),
+ svvdot_lane_za32_vg1x2 (0, z0, z4, 0))
+
+/*
+** vdot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** uvdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w0_z0_z7_1, svuint16x2_t, svuint16_t,
+ svvdot_lane_za32_u16_vg1x2 (w0, z0, z7, 1),
+ svvdot_lane_za32_vg1x2 (w0, z0, z7, 1))
+
+/*
+** vdot_lane_w8_z28_z4_2:
+** uvdot za\.s\[w8, 0, vgx2\], {z28\.h - z29\.h}, z4\.h\[2\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z28_z4_2, svuint16x2_t, svuint16_t,
+ svvdot_lane_za32_u16_vg1x2 (w8, z28, z4, 2),
+ svvdot_lane_za32_vg1x2 (w8, z28, z4, 2))
+
+/*
+** vdot_lane_w8p7_z0_z4_3:
+** uvdot za\.s\[w8, 7, vgx2\], {z0\.h - z1\.h}, z4\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8p7_z0_z4_3, svuint16x2_t, svuint16_t,
+ svvdot_lane_za32_u16_vg1x2 (w8 + 7, z0, z4, 3),
+ svvdot_lane_za32_vg1x2 (w8 + 7, z0, z4, 3))
+
+/*
+** vdot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** uvdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8p8_z0_z4_0, svuint16x2_t, svuint16_t,
+ svvdot_lane_za32_u16_vg1x2 (w8 + 8, z0, z4, 0),
+ svvdot_lane_za32_vg1x2 (w8 + 8, z0, z4, 0))
+
+/*
+** vdot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** uvdot za\.s\[\1, 0, vgx2\], {z0\.h - z1\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w0m1_z0_z4_1, svuint16x2_t, svuint16_t,
+ svvdot_lane_za32_u16_vg1x2 (w0 - 1, z0, z4, 1),
+ svvdot_lane_za32_vg1x2 (w0 - 1, z0, z4, 1))
+
+/*
+** vdot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** uvdot za\.s\[w8, 0, vgx2\], {z4\.h - z5\.h}, z15\.h\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (vdot_lane_w8_z4_z15_2, svuint16x2_t, svuint16_t,
+ svvdot_lane_za32_u16_vg1x2 (w8, z4, z15, 2),
+ svvdot_lane_za32_vg1x2 (w8, z4, z15, 2))
+
+/*
+** vdot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** uvdot za\.s\[w8, 0, vgx2\], {z28\.h - z29\.h}, \1\.h\[3\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z28_z16_3, svuint16x2_t, svuint16_t,
+ svvdot_lane_za32_u16_vg1x2 (w8, z28, z16, 3),
+ svvdot_lane_za32_vg1x2 (w8, z28, z16, 3))
+
+/*
+** vdot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** uvdot za\.s\[w8, 0, vgx2\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z17_z7_0, svuint16x2_t, svuint16_t,
+ svvdot_lane_za32_u16_vg1x2 (w8, z17, z7, 0),
+ svvdot_lane_za32_vg1x2 (w8, z17, z7, 0))
+
+/*
+** vdot_lane_w8_z22_z4_1:
+** uvdot za\.s\[w8, 0, vgx2\], {z22\.h - z23\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z22_z4_1, svuint16x2_t, svuint16_t,
+ svvdot_lane_za32_u16_vg1x2 (w8, z22, z4, 1),
+ svvdot_lane_za32_vg1x2 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_u8_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_u8_vg1x4.c
new file mode 100644
index 0000000..d37ef5c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za32_u8_vg1x4.c
@@ -0,0 +1,108 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** vdot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** uvdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_0_z0_z4_0, svuint8x4_t, svuint8_t,
+ svvdot_lane_za32_u8_vg1x4 (0, z0, z4, 0),
+ svvdot_lane_za32_vg1x4 (0, z0, z4, 0))
+
+/*
+** vdot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** uvdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z7\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w0_z0_z7_1, svuint8x4_t, svuint8_t,
+ svvdot_lane_za32_u8_vg1x4 (w0, z0, z7, 1),
+ svvdot_lane_za32_vg1x4 (w0, z0, z7, 1))
+
+/*
+** vdot_lane_w8_z28_z4_2:
+** uvdot za\.s\[w8, 0, vgx4\], {z28\.b - z31\.b}, z4\.b\[2\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z28_z4_2, svuint8x4_t, svuint8_t,
+ svvdot_lane_za32_u8_vg1x4 (w8, z28, z4, 2),
+ svvdot_lane_za32_vg1x4 (w8, z28, z4, 2))
+
+/*
+** vdot_lane_w8p7_z0_z4_3:
+** uvdot za\.s\[w8, 7, vgx4\], {z0\.b - z3\.b}, z4\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8p7_z0_z4_3, svuint8x4_t, svuint8_t,
+ svvdot_lane_za32_u8_vg1x4 (w8 + 7, z0, z4, 3),
+ svvdot_lane_za32_vg1x4 (w8 + 7, z0, z4, 3))
+
+/*
+** vdot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** uvdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8p8_z0_z4_0, svuint8x4_t, svuint8_t,
+ svvdot_lane_za32_u8_vg1x4 (w8 + 8, z0, z4, 0),
+ svvdot_lane_za32_vg1x4 (w8 + 8, z0, z4, 0))
+
+/*
+** vdot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** uvdot za\.s\[\1, 0, vgx4\], {z0\.b - z3\.b}, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w0m1_z0_z4_1, svuint8x4_t, svuint8_t,
+ svvdot_lane_za32_u8_vg1x4 (w0 - 1, z0, z4, 1),
+ svvdot_lane_za32_vg1x4 (w0 - 1, z0, z4, 1))
+
+/*
+** vdot_lane_w8_z4_z15_2:
+** str d15, \[sp, #?-16\]!
+** uvdot za\.s\[w8, 0, vgx4\], {z4\.b - z7\.b}, z15\.b\[2\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (vdot_lane_w8_z4_z15_2, svuint8x4_t, svuint8_t,
+ svvdot_lane_za32_u8_vg1x4 (w8, z4, z15, 2),
+ svvdot_lane_za32_vg1x4 (w8, z4, z15, 2))
+
+/*
+** vdot_lane_w8_z28_z16_3:
+** mov (z[0-7]).d, z16.d
+** uvdot za\.s\[w8, 0, vgx4\], {z28\.b - z31\.b}, \1\.b\[3\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z28_z16_3, svuint8x4_t, svuint8_t,
+ svvdot_lane_za32_u8_vg1x4 (w8, z28, z16, 3),
+ svvdot_lane_za32_vg1x4 (w8, z28, z16, 3))
+
+/*
+** vdot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uvdot za\.s\[w8, 0, vgx4\], [^\n]+, z7\.b\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z17_z7_0, svuint8x4_t, svuint8_t,
+ svvdot_lane_za32_u8_vg1x4 (w8, z17, z7, 0),
+ svvdot_lane_za32_vg1x4 (w8, z17, z7, 0))
+
+/*
+** vdot_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uvdot za\.s\[w8, 0, vgx4\], [^\n]+, z4\.b\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z22_z4_1, svuint8x4_t, svuint8_t,
+ svvdot_lane_za32_u8_vg1x4 (w8, z22, z4, 1),
+ svvdot_lane_za32_vg1x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za64_s16_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za64_s16_vg1x4.c
new file mode 100644
index 0000000..972d56f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za64_s16_vg1x4.c
@@ -0,0 +1,110 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** vdot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** svdot za\.d\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_0_z0_z4_0, svint16x4_t, svint16_t,
+ svvdot_lane_za64_s16_vg1x4 (0, z0, z4, 0),
+ svvdot_lane_za64_vg1x4 (0, z0, z4, 0))
+
+/*
+** vdot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** svdot za\.d\[\1, 0, vgx4\], {z0\.h - z3\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w0_z0_z7_1, svint16x4_t, svint16_t,
+ svvdot_lane_za64_s16_vg1x4 (w0, z0, z7, 1),
+ svvdot_lane_za64_vg1x4 (w0, z0, z7, 1))
+
+/*
+** vdot_lane_w8_z28_z4_0:
+** svdot za\.d\[w8, 0, vgx4\], {z28\.h - z31\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z28_z4_0, svint16x4_t, svint16_t,
+ svvdot_lane_za64_s16_vg1x4 (w8, z28, z4, 0),
+ svvdot_lane_za64_vg1x4 (w8, z28, z4, 0))
+
+/*
+** vdot_lane_w8p7_z0_z4_1:
+** svdot za\.d\[w8, 7, vgx4\], {z0\.h - z3\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8p7_z0_z4_1, svint16x4_t, svint16_t,
+ svvdot_lane_za64_s16_vg1x4 (w8 + 7, z0, z4, 1),
+ svvdot_lane_za64_vg1x4 (w8 + 7, z0, z4, 1))
+
+/*
+** vdot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** svdot za\.d\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8p8_z0_z4_0, svint16x4_t, svint16_t,
+ svvdot_lane_za64_s16_vg1x4 (w8 + 8, z0, z4, 0),
+ svvdot_lane_za64_vg1x4 (w8 + 8, z0, z4, 0))
+
+/*
+** vdot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** svdot za\.d\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w0m1_z0_z4_1, svint16x4_t, svint16_t,
+ svvdot_lane_za64_s16_vg1x4 (w0 - 1, z0, z4, 1),
+ svvdot_lane_za64_vg1x4 (w0 - 1, z0, z4, 1))
+
+/*
+** vdot_lane_w8_z4_z15_0:
+** str d15, \[sp, #?-16\]!
+** svdot za\.d\[w8, 0, vgx4\], {z4\.h - z7\.h}, z15\.h\[0\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (vdot_lane_w8_z4_z15_0, svint16x4_t, svint16_t,
+ svvdot_lane_za64_s16_vg1x4 (w8, z4, z15, 0),
+ svvdot_lane_za64_vg1x4 (w8, z4, z15, 0))
+
+/*
+** vdot_lane_w8_z28_z16_1:
+** mov (z[0-7]).d, z16.d
+** svdot za\.d\[w8, 0, vgx4\], {z28\.h - z31\.h}, \1\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z28_z16_1, svint16x4_t, svint16_t,
+ svvdot_lane_za64_s16_vg1x4 (w8, z28, z16, 1),
+ svvdot_lane_za64_vg1x4 (w8, z28, z16, 1))
+
+/*
+** vdot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** svdot za\.d\[w8, 0, vgx4\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z17_z7_0, svint16x4_t, svint16_t,
+ svvdot_lane_za64_s16_vg1x4 (w8, z17, z7, 0),
+ svvdot_lane_za64_vg1x4 (w8, z17, z7, 0))
+
+/*
+** vdot_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** svdot za\.d\[w8, 0, vgx4\], [^\n]+, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z22_z4_1, svint16x4_t, svint16_t,
+ svvdot_lane_za64_s16_vg1x4 (w8, z22, z4, 1),
+ svvdot_lane_za64_vg1x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za64_u16_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za64_u16_vg1x4.c
new file mode 100644
index 0000000..6015938
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/vdot_lane_za64_u16_vg1x4.c
@@ -0,0 +1,110 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sme-i16i64"
+
+#include "test_sme2_acle.h"
+
+/*
+** vdot_lane_0_z0_z4_0:
+** mov (w8|w9|w10|w11), #?0
+** uvdot za\.d\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_0_z0_z4_0, svuint16x4_t, svuint16_t,
+ svvdot_lane_za64_u16_vg1x4 (0, z0, z4, 0),
+ svvdot_lane_za64_vg1x4 (0, z0, z4, 0))
+
+/*
+** vdot_lane_w0_z0_z7_1:
+** mov (w8|w9|w10|w11), w0
+** uvdot za\.d\[\1, 0, vgx4\], {z0\.h - z3\.h}, z7\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w0_z0_z7_1, svuint16x4_t, svuint16_t,
+ svvdot_lane_za64_u16_vg1x4 (w0, z0, z7, 1),
+ svvdot_lane_za64_vg1x4 (w0, z0, z7, 1))
+
+/*
+** vdot_lane_w8_z28_z4_0:
+** uvdot za\.d\[w8, 0, vgx4\], {z28\.h - z31\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z28_z4_0, svuint16x4_t, svuint16_t,
+ svvdot_lane_za64_u16_vg1x4 (w8, z28, z4, 0),
+ svvdot_lane_za64_vg1x4 (w8, z28, z4, 0))
+
+/*
+** vdot_lane_w8p7_z0_z4_1:
+** uvdot za\.d\[w8, 7, vgx4\], {z0\.h - z3\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8p7_z0_z4_1, svuint16x4_t, svuint16_t,
+ svvdot_lane_za64_u16_vg1x4 (w8 + 7, z0, z4, 1),
+ svvdot_lane_za64_vg1x4 (w8 + 7, z0, z4, 1))
+
+/*
+** vdot_lane_w8p8_z0_z4_0:
+** add (w8|w9|w10|w11), w8, #?8
+** uvdot za\.d\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8p8_z0_z4_0, svuint16x4_t, svuint16_t,
+ svvdot_lane_za64_u16_vg1x4 (w8 + 8, z0, z4, 0),
+ svvdot_lane_za64_vg1x4 (w8 + 8, z0, z4, 0))
+
+/*
+** vdot_lane_w0m1_z0_z4_1:
+** sub (w8|w9|w10|w11), w0, #?1
+** uvdot za\.d\[\1, 0, vgx4\], {z0\.h - z3\.h}, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w0m1_z0_z4_1, svuint16x4_t, svuint16_t,
+ svvdot_lane_za64_u16_vg1x4 (w0 - 1, z0, z4, 1),
+ svvdot_lane_za64_vg1x4 (w0 - 1, z0, z4, 1))
+
+/*
+** vdot_lane_w8_z4_z15_0:
+** str d15, \[sp, #?-16\]!
+** uvdot za\.d\[w8, 0, vgx4\], {z4\.h - z7\.h}, z15\.h\[0\]
+** ldr d15, \[sp\], #?16
+** ret
+*/
+TEST_ZA_LANE_Z15 (vdot_lane_w8_z4_z15_0, svuint16x4_t, svuint16_t,
+ svvdot_lane_za64_u16_vg1x4 (w8, z4, z15, 0),
+ svvdot_lane_za64_vg1x4 (w8, z4, z15, 0))
+
+/*
+** vdot_lane_w8_z28_z16_1:
+** mov (z[0-7]).d, z16.d
+** uvdot za\.d\[w8, 0, vgx4\], {z28\.h - z31\.h}, \1\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z28_z16_1, svuint16x4_t, svuint16_t,
+ svvdot_lane_za64_u16_vg1x4 (w8, z28, z16, 1),
+ svvdot_lane_za64_vg1x4 (w8, z28, z16, 1))
+
+/*
+** vdot_lane_w8_z17_z7_0:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uvdot za\.d\[w8, 0, vgx4\], [^\n]+, z7\.h\[0\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z17_z7_0, svuint16x4_t, svuint16_t,
+ svvdot_lane_za64_u16_vg1x4 (w8, z17, z7, 0),
+ svvdot_lane_za64_vg1x4 (w8, z17, z7, 0))
+
+/*
+** vdot_lane_w8_z22_z4_1:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** uvdot za\.d\[w8, 0, vgx4\], [^\n]+, z4\.h\[1\]
+** ret
+*/
+TEST_ZA_LANE (vdot_lane_w8_z22_z4_1, svuint16x4_t, svuint16_t,
+ svvdot_lane_za64_u16_vg1x4 (w8, z22, z4, 1),
+ svvdot_lane_za64_vg1x4 (w8, z22, z4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_b16.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_b16.c
new file mode 100644
index 0000000..1e18622
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_b16.c
@@ -0,0 +1,119 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilege_p1_rr_s64:
+** whilege {p[0-9]+\.h, p[0-9]+\.h}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p1_rr_s64, int64_t,
+ p1 = svwhilege_b16_s64_x2 (x0, x1),
+ p1 = svwhilege_b16_x2 (x0, x1))
+
+/*
+** whilege_p4_rr_s64:
+** whilege {p4\.h, p5\.h}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_rr_s64, int64_t,
+ p4 = svwhilege_b16_s64_x2 (x0, x1),
+ p4 = svwhilege_b16_x2 (x0, x1))
+
+/*
+** whilege_p9_rr_s64:
+** whilege {p[0-9]+\.h, p[0-9]+\.h}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p9_rr_s64, int64_t,
+ p9 = svwhilege_b16_s64_x2 (x0, x1),
+ p9 = svwhilege_b16_x2 (x0, x1))
+
+/*
+** whilege_p14_rr_s64:
+** whilege {p14\.h, p15\.h}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p14_rr_s64, int64_t,
+ p14 = svwhilege_b16_s64_x2 (x0, x1),
+ p14 = svwhilege_b16_x2 (x0, x1))
+
+/*
+** whilege_p4_0r_s64:
+** whilege {p4\.h, p5\.h}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_0r_s64, int64_t,
+ p4 = svwhilege_b16_x2 ((int64_t) 0, x1),
+ p4 = svwhilege_b16_s64_x2 (0, x1))
+
+/*
+** whilege_p4_5r_s64:
+** mov (x[0-9]+), #?5
+** whilege {p4\.h, p5\.h}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_5r_s64, int64_t,
+ p4 = svwhilege_b16_x2 ((int64_t) 5, x1),
+ p4 = svwhilege_b16_s64_x2 (5, x1))
+
+/*
+** whilege_p4_r0_s64:
+** whilege {p4\.h, p5\.h}, x0, xzr
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_r0_s64, int64_t,
+ p4 = svwhilege_b16_x2 (x0, (int64_t) 0),
+ p4 = svwhilege_b16_s64_x2 (x0, 0))
+
+/*
+** whilege_p14_r5_s64:
+** mov (x[0-9]+), #?5
+** whilege {p14\.h, p15\.h}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p14_r5_s64, int64_t,
+ p14 = svwhilege_b16_x2 (x0, (int64_t) 5),
+ p14 = svwhilege_b16_s64_x2 (x0, 5))
+
+/*
+** whilege_p4_rr_u64:
+** whilehs {p4\.h, p5\.h}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_rr_u64, uint64_t,
+ p4 = svwhilege_b16_u64_x2 (x0, x1),
+ p4 = svwhilege_b16_x2 (x0, x1))
+
+/*
+** whilege_p4_0r_u64:
+** whilehs {p4\.h, p5\.h}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_0r_u64, uint64_t,
+ p4 = svwhilege_b16_x2 ((uint64_t) 0, x1),
+ p4 = svwhilege_b16_u64_x2 (0, x1))
+
+/*
+** whilege_p4_5r_u64:
+** mov (x[0-9]+), #?5
+** whilehs {p4\.h, p5\.h}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_5r_u64, uint64_t,
+ p4 = svwhilege_b16_x2 ((uint64_t) 5, x1),
+ p4 = svwhilege_b16_u64_x2 (5, x1))
+
+/*
+** whilege_p4_r5_u64:
+** mov (x[0-9]+), #?5
+** whilehs {p4\.h, p5\.h}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_r5_u64, uint64_t,
+ p4 = svwhilege_b16_x2 (x0, (uint64_t) 5),
+ p4 = svwhilege_b16_u64_x2 (x0, 5))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_b32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_b32.c
new file mode 100644
index 0000000..fc6cb42
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_b32.c
@@ -0,0 +1,119 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilege_p1_rr_s64:
+** whilege {p[0-9]+\.s, p[0-9]+\.s}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p1_rr_s64, int64_t,
+ p1 = svwhilege_b32_s64_x2 (x0, x1),
+ p1 = svwhilege_b32_x2 (x0, x1))
+
+/*
+** whilege_p4_rr_s64:
+** whilege {p4\.s, p5\.s}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_rr_s64, int64_t,
+ p4 = svwhilege_b32_s64_x2 (x0, x1),
+ p4 = svwhilege_b32_x2 (x0, x1))
+
+/*
+** whilege_p9_rr_s64:
+** whilege {p[0-9]+\.s, p[0-9]+\.s}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p9_rr_s64, int64_t,
+ p9 = svwhilege_b32_s64_x2 (x0, x1),
+ p9 = svwhilege_b32_x2 (x0, x1))
+
+/*
+** whilege_p14_rr_s64:
+** whilege {p14\.s, p15\.s}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p14_rr_s64, int64_t,
+ p14 = svwhilege_b32_s64_x2 (x0, x1),
+ p14 = svwhilege_b32_x2 (x0, x1))
+
+/*
+** whilege_p4_0r_s64:
+** whilege {p4\.s, p5\.s}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_0r_s64, int64_t,
+ p4 = svwhilege_b32_x2 ((int64_t) 0, x1),
+ p4 = svwhilege_b32_s64_x2 (0, x1))
+
+/*
+** whilege_p4_5r_s64:
+** mov (x[0-9]+), #?5
+** whilege {p4\.s, p5\.s}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_5r_s64, int64_t,
+ p4 = svwhilege_b32_x2 ((int64_t) 5, x1),
+ p4 = svwhilege_b32_s64_x2 (5, x1))
+
+/*
+** whilege_p4_r0_s64:
+** whilege {p4\.s, p5\.s}, x0, xzr
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_r0_s64, int64_t,
+ p4 = svwhilege_b32_x2 (x0, (int64_t) 0),
+ p4 = svwhilege_b32_s64_x2 (x0, 0))
+
+/*
+** whilege_p14_r5_s64:
+** mov (x[0-9]+), #?5
+** whilege {p14\.s, p15\.s}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p14_r5_s64, int64_t,
+ p14 = svwhilege_b32_x2 (x0, (int64_t) 5),
+ p14 = svwhilege_b32_s64_x2 (x0, 5))
+
+/*
+** whilege_p4_rr_u64:
+** whilehs {p4\.s, p5\.s}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_rr_u64, uint64_t,
+ p4 = svwhilege_b32_u64_x2 (x0, x1),
+ p4 = svwhilege_b32_x2 (x0, x1))
+
+/*
+** whilege_p4_0r_u64:
+** whilehs {p4\.s, p5\.s}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_0r_u64, uint64_t,
+ p4 = svwhilege_b32_x2 ((uint64_t) 0, x1),
+ p4 = svwhilege_b32_u64_x2 (0, x1))
+
+/*
+** whilege_p4_5r_u64:
+** mov (x[0-9]+), #?5
+** whilehs {p4\.s, p5\.s}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_5r_u64, uint64_t,
+ p4 = svwhilege_b32_x2 ((uint64_t) 5, x1),
+ p4 = svwhilege_b32_u64_x2 (5, x1))
+
+/*
+** whilege_p4_r5_u64:
+** mov (x[0-9]+), #?5
+** whilehs {p4\.s, p5\.s}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_r5_u64, uint64_t,
+ p4 = svwhilege_b32_x2 (x0, (uint64_t) 5),
+ p4 = svwhilege_b32_u64_x2 (x0, 5))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_b64.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_b64.c
new file mode 100644
index 0000000..ecb8631
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_b64.c
@@ -0,0 +1,119 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilege_p1_rr_s64:
+** whilege {p[0-9]+\.d, p[0-9]+\.d}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p1_rr_s64, int64_t,
+ p1 = svwhilege_b64_s64_x2 (x0, x1),
+ p1 = svwhilege_b64_x2 (x0, x1))
+
+/*
+** whilege_p4_rr_s64:
+** whilege {p4\.d, p5\.d}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_rr_s64, int64_t,
+ p4 = svwhilege_b64_s64_x2 (x0, x1),
+ p4 = svwhilege_b64_x2 (x0, x1))
+
+/*
+** whilege_p9_rr_s64:
+** whilege {p[0-9]+\.d, p[0-9]+\.d}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p9_rr_s64, int64_t,
+ p9 = svwhilege_b64_s64_x2 (x0, x1),
+ p9 = svwhilege_b64_x2 (x0, x1))
+
+/*
+** whilege_p14_rr_s64:
+** whilege {p14\.d, p15\.d}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p14_rr_s64, int64_t,
+ p14 = svwhilege_b64_s64_x2 (x0, x1),
+ p14 = svwhilege_b64_x2 (x0, x1))
+
+/*
+** whilege_p4_0r_s64:
+** whilege {p4\.d, p5\.d}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_0r_s64, int64_t,
+ p4 = svwhilege_b64_x2 ((int64_t) 0, x1),
+ p4 = svwhilege_b64_s64_x2 (0, x1))
+
+/*
+** whilege_p4_5r_s64:
+** mov (x[0-9]+), #?5
+** whilege {p4\.d, p5\.d}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_5r_s64, int64_t,
+ p4 = svwhilege_b64_x2 ((int64_t) 5, x1),
+ p4 = svwhilege_b64_s64_x2 (5, x1))
+
+/*
+** whilege_p4_r0_s64:
+** whilege {p4\.d, p5\.d}, x0, xzr
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_r0_s64, int64_t,
+ p4 = svwhilege_b64_x2 (x0, (int64_t) 0),
+ p4 = svwhilege_b64_s64_x2 (x0, 0))
+
+/*
+** whilege_p14_r5_s64:
+** mov (x[0-9]+), #?5
+** whilege {p14\.d, p15\.d}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p14_r5_s64, int64_t,
+ p14 = svwhilege_b64_x2 (x0, (int64_t) 5),
+ p14 = svwhilege_b64_s64_x2 (x0, 5))
+
+/*
+** whilege_p4_rr_u64:
+** whilehs {p4\.d, p5\.d}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_rr_u64, uint64_t,
+ p4 = svwhilege_b64_u64_x2 (x0, x1),
+ p4 = svwhilege_b64_x2 (x0, x1))
+
+/*
+** whilege_p4_0r_u64:
+** whilehs {p4\.d, p5\.d}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_0r_u64, uint64_t,
+ p4 = svwhilege_b64_x2 ((uint64_t) 0, x1),
+ p4 = svwhilege_b64_u64_x2 (0, x1))
+
+/*
+** whilege_p4_5r_u64:
+** mov (x[0-9]+), #?5
+** whilehs {p4\.d, p5\.d}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_5r_u64, uint64_t,
+ p4 = svwhilege_b64_x2 ((uint64_t) 5, x1),
+ p4 = svwhilege_b64_u64_x2 (5, x1))
+
+/*
+** whilege_p4_r5_u64:
+** mov (x[0-9]+), #?5
+** whilehs {p4\.d, p5\.d}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_r5_u64, uint64_t,
+ p4 = svwhilege_b64_x2 (x0, (uint64_t) 5),
+ p4 = svwhilege_b64_u64_x2 (x0, 5))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_b8.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_b8.c
new file mode 100644
index 0000000..96e7997
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_b8.c
@@ -0,0 +1,119 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilege_p1_rr_s64:
+** whilege {p[0-9]+\.b, p[0-9]+\.b}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p1_rr_s64, int64_t,
+ p1 = svwhilege_b8_s64_x2 (x0, x1),
+ p1 = svwhilege_b8_x2 (x0, x1))
+
+/*
+** whilege_p4_rr_s64:
+** whilege {p4\.b, p5\.b}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_rr_s64, int64_t,
+ p4 = svwhilege_b8_s64_x2 (x0, x1),
+ p4 = svwhilege_b8_x2 (x0, x1))
+
+/*
+** whilege_p9_rr_s64:
+** whilege {p[0-9]+\.b, p[0-9]+\.b}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p9_rr_s64, int64_t,
+ p9 = svwhilege_b8_s64_x2 (x0, x1),
+ p9 = svwhilege_b8_x2 (x0, x1))
+
+/*
+** whilege_p14_rr_s64:
+** whilege {p14\.b, p15\.b}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p14_rr_s64, int64_t,
+ p14 = svwhilege_b8_s64_x2 (x0, x1),
+ p14 = svwhilege_b8_x2 (x0, x1))
+
+/*
+** whilege_p4_0r_s64:
+** whilege {p4\.b, p5\.b}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_0r_s64, int64_t,
+ p4 = svwhilege_b8_x2 ((int64_t) 0, x1),
+ p4 = svwhilege_b8_s64_x2 (0, x1))
+
+/*
+** whilege_p4_5r_s64:
+** mov (x[0-9]+), #?5
+** whilege {p4\.b, p5\.b}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_5r_s64, int64_t,
+ p4 = svwhilege_b8_x2 ((int64_t) 5, x1),
+ p4 = svwhilege_b8_s64_x2 (5, x1))
+
+/*
+** whilege_p4_r0_s64:
+** whilege {p4\.b, p5\.b}, x0, xzr
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_r0_s64, int64_t,
+ p4 = svwhilege_b8_x2 (x0, (int64_t) 0),
+ p4 = svwhilege_b8_s64_x2 (x0, 0))
+
+/*
+** whilege_p14_r5_s64:
+** mov (x[0-9]+), #?5
+** whilege {p14\.b, p15\.b}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p14_r5_s64, int64_t,
+ p14 = svwhilege_b8_x2 (x0, (int64_t) 5),
+ p14 = svwhilege_b8_s64_x2 (x0, 5))
+
+/*
+** whilege_p4_rr_u64:
+** whilehs {p4\.b, p5\.b}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_rr_u64, uint64_t,
+ p4 = svwhilege_b8_u64_x2 (x0, x1),
+ p4 = svwhilege_b8_x2 (x0, x1))
+
+/*
+** whilege_p4_0r_u64:
+** whilehs {p4\.b, p5\.b}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_0r_u64, uint64_t,
+ p4 = svwhilege_b8_x2 ((uint64_t) 0, x1),
+ p4 = svwhilege_b8_u64_x2 (0, x1))
+
+/*
+** whilege_p4_5r_u64:
+** mov (x[0-9]+), #?5
+** whilehs {p4\.b, p5\.b}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_5r_u64, uint64_t,
+ p4 = svwhilege_b8_x2 ((uint64_t) 5, x1),
+ p4 = svwhilege_b8_u64_x2 (5, x1))
+
+/*
+** whilege_p4_r5_u64:
+** mov (x[0-9]+), #?5
+** whilehs {p4\.b, p5\.b}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilege_p4_r5_u64, uint64_t,
+ p4 = svwhilege_b8_x2 (x0, (uint64_t) 5),
+ p4 = svwhilege_b8_u64_x2 (x0, 5))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_c16.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_c16.c
new file mode 100644
index 0000000..9bf9a3d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_c16.c
@@ -0,0 +1,117 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilege_pn0_rr_2_s64:
+** whilege pn[0-9]+\.h, x0, x1, vlx2
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn0_rr_2_s64, int64_t,
+ pn0 = svwhilege_c16_s64 (x0, x1, 2),
+ pn0 = svwhilege_c16 (x0, x1, 2))
+
+/*
+** whilege_pn7_rr_4_s64:
+** whilege pn[0-9]+\.h, x0, x1, vlx4
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn7_rr_4_s64, int64_t,
+ pn7 = svwhilege_c16_s64 (x0, x1, 4),
+ pn7 = svwhilege_c16 (x0, x1, 4))
+
+/*
+** whilege_pn8_rr_2_s64:
+** whilege pn8\.h, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_rr_2_s64, int64_t,
+ pn8 = svwhilege_c16_s64 (x0, x1, 2),
+ pn8 = svwhilege_c16 (x0, x1, 2))
+
+/*
+** whilege_pn15_rr_4_s64:
+** whilege pn15\.h, x0, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn15_rr_4_s64, int64_t,
+ pn15 = svwhilege_c16_s64 (x0, x1, 4),
+ pn15 = svwhilege_c16 (x0, x1, 4))
+
+/*
+** whilege_pn8_0r_2_s64:
+** whilege pn8\.h, xzr, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_0r_2_s64, int64_t,
+ pn8 = svwhilege_c16 ((int64_t) 0, x1, 2),
+ pn8 = svwhilege_c16_s64 (0, x1, 2))
+
+/*
+** whilege_pn8_5r_4_s64:
+** mov (x[0-9]+), #?5
+** whilege pn8\.h, \1, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_5r_4_s64, int64_t,
+ pn8 = svwhilege_c16 ((int64_t) 5, x1, 4),
+ pn8 = svwhilege_c16_s64 (5, x1, 4))
+
+/*
+** whilege_pn8_r0_2_s64:
+** whilege pn8\.h, x0, xzr, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_r0_2_s64, int64_t,
+ pn8 = svwhilege_c16 (x0, (int64_t) 0, 2),
+ pn8 = svwhilege_c16_s64 (x0, 0, 2))
+
+/*
+** whilege_pn15_r5_4_s64:
+** mov (x[0-9]+), #?5
+** whilege pn15\.h, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn15_r5_4_s64, int64_t,
+ pn15 = svwhilege_c16 (x0, (int64_t) 5, 4),
+ pn15 = svwhilege_c16_s64 (x0, 5, 4))
+
+/*
+** whilege_pn8_rr_2_u64:
+** whilehs pn8\.h, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_rr_2_u64, uint64_t,
+ pn8 = svwhilege_c16_u64 (x0, x1, 2),
+ pn8 = svwhilege_c16 (x0, x1, 2))
+
+/*
+** whilege_pn8_0r_4_u64:
+** whilehs pn8\.h, xzr, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_0r_4_u64, uint64_t,
+ pn8 = svwhilege_c16 ((uint64_t) 0, x1, 4),
+ pn8 = svwhilege_c16_u64 (0, x1, 4))
+
+/*
+** whilege_pn8_5r_2_u64:
+** mov (x[0-9]+), #?5
+** whilehs pn8\.h, \1, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_5r_2_u64, uint64_t,
+ pn8 = svwhilege_c16 ((uint64_t) 5, x1, 2),
+ pn8 = svwhilege_c16_u64 (5, x1, 2))
+
+/*
+** whilege_pn8_r5_4_u64:
+** mov (x[0-9]+), #?5
+** whilehs pn8\.h, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_r5_4_u64, uint64_t,
+ pn8 = svwhilege_c16 (x0, (uint64_t) 5, 4),
+ pn8 = svwhilege_c16_u64 (x0, 5, 4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_c32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_c32.c
new file mode 100644
index 0000000..8c098e0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_c32.c
@@ -0,0 +1,117 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilege_pn0_rr_2_s64:
+** whilege pn[0-9]+\.s, x0, x1, vlx2
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn0_rr_2_s64, int64_t,
+ pn0 = svwhilege_c32_s64 (x0, x1, 2),
+ pn0 = svwhilege_c32 (x0, x1, 2))
+
+/*
+** whilege_pn7_rr_4_s64:
+** whilege pn[0-9]+\.s, x0, x1, vlx4
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn7_rr_4_s64, int64_t,
+ pn7 = svwhilege_c32_s64 (x0, x1, 4),
+ pn7 = svwhilege_c32 (x0, x1, 4))
+
+/*
+** whilege_pn8_rr_2_s64:
+** whilege pn8\.s, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_rr_2_s64, int64_t,
+ pn8 = svwhilege_c32_s64 (x0, x1, 2),
+ pn8 = svwhilege_c32 (x0, x1, 2))
+
+/*
+** whilege_pn15_rr_4_s64:
+** whilege pn15\.s, x0, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn15_rr_4_s64, int64_t,
+ pn15 = svwhilege_c32_s64 (x0, x1, 4),
+ pn15 = svwhilege_c32 (x0, x1, 4))
+
+/*
+** whilege_pn8_0r_2_s64:
+** whilege pn8\.s, xzr, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_0r_2_s64, int64_t,
+ pn8 = svwhilege_c32 ((int64_t) 0, x1, 2),
+ pn8 = svwhilege_c32_s64 (0, x1, 2))
+
+/*
+** whilege_pn8_5r_4_s64:
+** mov (x[0-9]+), #?5
+** whilege pn8\.s, \1, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_5r_4_s64, int64_t,
+ pn8 = svwhilege_c32 ((int64_t) 5, x1, 4),
+ pn8 = svwhilege_c32_s64 (5, x1, 4))
+
+/*
+** whilege_pn8_r0_2_s64:
+** whilege pn8\.s, x0, xzr, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_r0_2_s64, int64_t,
+ pn8 = svwhilege_c32 (x0, (int64_t) 0, 2),
+ pn8 = svwhilege_c32_s64 (x0, 0, 2))
+
+/*
+** whilege_pn15_r5_4_s64:
+** mov (x[0-9]+), #?5
+** whilege pn15\.s, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn15_r5_4_s64, int64_t,
+ pn15 = svwhilege_c32 (x0, (int64_t) 5, 4),
+ pn15 = svwhilege_c32_s64 (x0, 5, 4))
+
+/*
+** whilege_pn8_rr_2_u64:
+** whilehs pn8\.s, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_rr_2_u64, uint64_t,
+ pn8 = svwhilege_c32_u64 (x0, x1, 2),
+ pn8 = svwhilege_c32 (x0, x1, 2))
+
+/*
+** whilege_pn8_0r_4_u64:
+** whilehs pn8\.s, xzr, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_0r_4_u64, uint64_t,
+ pn8 = svwhilege_c32 ((uint64_t) 0, x1, 4),
+ pn8 = svwhilege_c32_u64 (0, x1, 4))
+
+/*
+** whilege_pn8_5r_2_u64:
+** mov (x[0-9]+), #?5
+** whilehs pn8\.s, \1, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_5r_2_u64, uint64_t,
+ pn8 = svwhilege_c32 ((uint64_t) 5, x1, 2),
+ pn8 = svwhilege_c32_u64 (5, x1, 2))
+
+/*
+** whilege_pn8_r5_4_u64:
+** mov (x[0-9]+), #?5
+** whilehs pn8\.s, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_r5_4_u64, uint64_t,
+ pn8 = svwhilege_c32 (x0, (uint64_t) 5, 4),
+ pn8 = svwhilege_c32_u64 (x0, 5, 4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_c64.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_c64.c
new file mode 100644
index 0000000..a1a98d6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_c64.c
@@ -0,0 +1,117 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilege_pn0_rr_2_s64:
+** whilege pn[0-9]+\.d, x0, x1, vlx2
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn0_rr_2_s64, int64_t,
+ pn0 = svwhilege_c64_s64 (x0, x1, 2),
+ pn0 = svwhilege_c64 (x0, x1, 2))
+
+/*
+** whilege_pn7_rr_4_s64:
+** whilege pn[0-9]+\.d, x0, x1, vlx4
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn7_rr_4_s64, int64_t,
+ pn7 = svwhilege_c64_s64 (x0, x1, 4),
+ pn7 = svwhilege_c64 (x0, x1, 4))
+
+/*
+** whilege_pn8_rr_2_s64:
+** whilege pn8\.d, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_rr_2_s64, int64_t,
+ pn8 = svwhilege_c64_s64 (x0, x1, 2),
+ pn8 = svwhilege_c64 (x0, x1, 2))
+
+/*
+** whilege_pn15_rr_4_s64:
+** whilege pn15\.d, x0, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn15_rr_4_s64, int64_t,
+ pn15 = svwhilege_c64_s64 (x0, x1, 4),
+ pn15 = svwhilege_c64 (x0, x1, 4))
+
+/*
+** whilege_pn8_0r_2_s64:
+** whilege pn8\.d, xzr, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_0r_2_s64, int64_t,
+ pn8 = svwhilege_c64 ((int64_t) 0, x1, 2),
+ pn8 = svwhilege_c64_s64 (0, x1, 2))
+
+/*
+** whilege_pn8_5r_4_s64:
+** mov (x[0-9]+), #?5
+** whilege pn8\.d, \1, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_5r_4_s64, int64_t,
+ pn8 = svwhilege_c64 ((int64_t) 5, x1, 4),
+ pn8 = svwhilege_c64_s64 (5, x1, 4))
+
+/*
+** whilege_pn8_r0_2_s64:
+** whilege pn8\.d, x0, xzr, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_r0_2_s64, int64_t,
+ pn8 = svwhilege_c64 (x0, (int64_t) 0, 2),
+ pn8 = svwhilege_c64_s64 (x0, 0, 2))
+
+/*
+** whilege_pn15_r5_4_s64:
+** mov (x[0-9]+), #?5
+** whilege pn15\.d, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn15_r5_4_s64, int64_t,
+ pn15 = svwhilege_c64 (x0, (int64_t) 5, 4),
+ pn15 = svwhilege_c64_s64 (x0, 5, 4))
+
+/*
+** whilege_pn8_rr_2_u64:
+** whilehs pn8\.d, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_rr_2_u64, uint64_t,
+ pn8 = svwhilege_c64_u64 (x0, x1, 2),
+ pn8 = svwhilege_c64 (x0, x1, 2))
+
+/*
+** whilege_pn8_0r_4_u64:
+** whilehs pn8\.d, xzr, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_0r_4_u64, uint64_t,
+ pn8 = svwhilege_c64 ((uint64_t) 0, x1, 4),
+ pn8 = svwhilege_c64_u64 (0, x1, 4))
+
+/*
+** whilege_pn8_5r_2_u64:
+** mov (x[0-9]+), #?5
+** whilehs pn8\.d, \1, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_5r_2_u64, uint64_t,
+ pn8 = svwhilege_c64 ((uint64_t) 5, x1, 2),
+ pn8 = svwhilege_c64_u64 (5, x1, 2))
+
+/*
+** whilege_pn8_r5_4_u64:
+** mov (x[0-9]+), #?5
+** whilehs pn8\.d, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_r5_4_u64, uint64_t,
+ pn8 = svwhilege_c64 (x0, (uint64_t) 5, 4),
+ pn8 = svwhilege_c64_u64 (x0, 5, 4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_c8.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_c8.c
new file mode 100644
index 0000000..1d52f8b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilege_c8.c
@@ -0,0 +1,117 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilege_pn0_rr_2_s64:
+** whilege pn[0-9]+\.b, x0, x1, vlx2
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn0_rr_2_s64, int64_t,
+ pn0 = svwhilege_c8_s64 (x0, x1, 2),
+ pn0 = svwhilege_c8 (x0, x1, 2))
+
+/*
+** whilege_pn7_rr_4_s64:
+** whilege pn[0-9]+\.b, x0, x1, vlx4
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn7_rr_4_s64, int64_t,
+ pn7 = svwhilege_c8_s64 (x0, x1, 4),
+ pn7 = svwhilege_c8 (x0, x1, 4))
+
+/*
+** whilege_pn8_rr_2_s64:
+** whilege pn8\.b, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_rr_2_s64, int64_t,
+ pn8 = svwhilege_c8_s64 (x0, x1, 2),
+ pn8 = svwhilege_c8 (x0, x1, 2))
+
+/*
+** whilege_pn15_rr_4_s64:
+** whilege pn15\.b, x0, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn15_rr_4_s64, int64_t,
+ pn15 = svwhilege_c8_s64 (x0, x1, 4),
+ pn15 = svwhilege_c8 (x0, x1, 4))
+
+/*
+** whilege_pn8_0r_2_s64:
+** whilege pn8\.b, xzr, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_0r_2_s64, int64_t,
+ pn8 = svwhilege_c8 ((int64_t) 0, x1, 2),
+ pn8 = svwhilege_c8_s64 (0, x1, 2))
+
+/*
+** whilege_pn8_5r_4_s64:
+** mov (x[0-9]+), #?5
+** whilege pn8\.b, \1, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_5r_4_s64, int64_t,
+ pn8 = svwhilege_c8 ((int64_t) 5, x1, 4),
+ pn8 = svwhilege_c8_s64 (5, x1, 4))
+
+/*
+** whilege_pn8_r0_2_s64:
+** whilege pn8\.b, x0, xzr, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_r0_2_s64, int64_t,
+ pn8 = svwhilege_c8 (x0, (int64_t) 0, 2),
+ pn8 = svwhilege_c8_s64 (x0, 0, 2))
+
+/*
+** whilege_pn15_r5_4_s64:
+** mov (x[0-9]+), #?5
+** whilege pn15\.b, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn15_r5_4_s64, int64_t,
+ pn15 = svwhilege_c8 (x0, (int64_t) 5, 4),
+ pn15 = svwhilege_c8_s64 (x0, 5, 4))
+
+/*
+** whilege_pn8_rr_2_u64:
+** whilehs pn8\.b, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_rr_2_u64, uint64_t,
+ pn8 = svwhilege_c8_u64 (x0, x1, 2),
+ pn8 = svwhilege_c8 (x0, x1, 2))
+
+/*
+** whilege_pn8_0r_4_u64:
+** whilehs pn8\.b, xzr, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_0r_4_u64, uint64_t,
+ pn8 = svwhilege_c8 ((uint64_t) 0, x1, 4),
+ pn8 = svwhilege_c8_u64 (0, x1, 4))
+
+/*
+** whilege_pn8_5r_2_u64:
+** mov (x[0-9]+), #?5
+** whilehs pn8\.b, \1, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_5r_2_u64, uint64_t,
+ pn8 = svwhilege_c8 ((uint64_t) 5, x1, 2),
+ pn8 = svwhilege_c8_u64 (5, x1, 2))
+
+/*
+** whilege_pn8_r5_4_u64:
+** mov (x[0-9]+), #?5
+** whilehs pn8\.b, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilege_pn8_r5_4_u64, uint64_t,
+ pn8 = svwhilege_c8 (x0, (uint64_t) 5, 4),
+ pn8 = svwhilege_c8_u64 (x0, 5, 4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_b16.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_b16.c
new file mode 100644
index 0000000..eee8417
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_b16.c
@@ -0,0 +1,119 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilegt_p1_rr_s64:
+** whilegt {p[0-9]+\.h, p[0-9]+\.h}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p1_rr_s64, int64_t,
+ p1 = svwhilegt_b16_s64_x2 (x0, x1),
+ p1 = svwhilegt_b16_x2 (x0, x1))
+
+/*
+** whilegt_p4_rr_s64:
+** whilegt {p4\.h, p5\.h}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_rr_s64, int64_t,
+ p4 = svwhilegt_b16_s64_x2 (x0, x1),
+ p4 = svwhilegt_b16_x2 (x0, x1))
+
+/*
+** whilegt_p9_rr_s64:
+** whilegt {p[0-9]+\.h, p[0-9]+\.h}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p9_rr_s64, int64_t,
+ p9 = svwhilegt_b16_s64_x2 (x0, x1),
+ p9 = svwhilegt_b16_x2 (x0, x1))
+
+/*
+** whilegt_p14_rr_s64:
+** whilegt {p14\.h, p15\.h}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p14_rr_s64, int64_t,
+ p14 = svwhilegt_b16_s64_x2 (x0, x1),
+ p14 = svwhilegt_b16_x2 (x0, x1))
+
+/*
+** whilegt_p4_0r_s64:
+** whilegt {p4\.h, p5\.h}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_0r_s64, int64_t,
+ p4 = svwhilegt_b16_x2 ((int64_t) 0, x1),
+ p4 = svwhilegt_b16_s64_x2 (0, x1))
+
+/*
+** whilegt_p4_5r_s64:
+** mov (x[0-9]+), #?5
+** whilegt {p4\.h, p5\.h}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_5r_s64, int64_t,
+ p4 = svwhilegt_b16_x2 ((int64_t) 5, x1),
+ p4 = svwhilegt_b16_s64_x2 (5, x1))
+
+/*
+** whilegt_p4_r0_s64:
+** whilegt {p4\.h, p5\.h}, x0, xzr
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_r0_s64, int64_t,
+ p4 = svwhilegt_b16_x2 (x0, (int64_t) 0),
+ p4 = svwhilegt_b16_s64_x2 (x0, 0))
+
+/*
+** whilegt_p14_r5_s64:
+** mov (x[0-9]+), #?5
+** whilegt {p14\.h, p15\.h}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p14_r5_s64, int64_t,
+ p14 = svwhilegt_b16_x2 (x0, (int64_t) 5),
+ p14 = svwhilegt_b16_s64_x2 (x0, 5))
+
+/*
+** whilegt_p4_rr_u64:
+** whilehi {p4\.h, p5\.h}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_rr_u64, uint64_t,
+ p4 = svwhilegt_b16_u64_x2 (x0, x1),
+ p4 = svwhilegt_b16_x2 (x0, x1))
+
+/*
+** whilegt_p4_0r_u64:
+** whilehi {p4\.h, p5\.h}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_0r_u64, uint64_t,
+ p4 = svwhilegt_b16_x2 ((uint64_t) 0, x1),
+ p4 = svwhilegt_b16_u64_x2 (0, x1))
+
+/*
+** whilegt_p4_5r_u64:
+** mov (x[0-9]+), #?5
+** whilehi {p4\.h, p5\.h}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_5r_u64, uint64_t,
+ p4 = svwhilegt_b16_x2 ((uint64_t) 5, x1),
+ p4 = svwhilegt_b16_u64_x2 (5, x1))
+
+/*
+** whilegt_p4_r5_u64:
+** mov (x[0-9]+), #?5
+** whilehi {p4\.h, p5\.h}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_r5_u64, uint64_t,
+ p4 = svwhilegt_b16_x2 (x0, (uint64_t) 5),
+ p4 = svwhilegt_b16_u64_x2 (x0, 5))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_b32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_b32.c
new file mode 100644
index 0000000..d82ad0e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_b32.c
@@ -0,0 +1,119 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilegt_p1_rr_s64:
+** whilegt {p[0-9]+\.s, p[0-9]+\.s}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p1_rr_s64, int64_t,
+ p1 = svwhilegt_b32_s64_x2 (x0, x1),
+ p1 = svwhilegt_b32_x2 (x0, x1))
+
+/*
+** whilegt_p4_rr_s64:
+** whilegt {p4\.s, p5\.s}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_rr_s64, int64_t,
+ p4 = svwhilegt_b32_s64_x2 (x0, x1),
+ p4 = svwhilegt_b32_x2 (x0, x1))
+
+/*
+** whilegt_p9_rr_s64:
+** whilegt {p[0-9]+\.s, p[0-9]+\.s}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p9_rr_s64, int64_t,
+ p9 = svwhilegt_b32_s64_x2 (x0, x1),
+ p9 = svwhilegt_b32_x2 (x0, x1))
+
+/*
+** whilegt_p14_rr_s64:
+** whilegt {p14\.s, p15\.s}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p14_rr_s64, int64_t,
+ p14 = svwhilegt_b32_s64_x2 (x0, x1),
+ p14 = svwhilegt_b32_x2 (x0, x1))
+
+/*
+** whilegt_p4_0r_s64:
+** whilegt {p4\.s, p5\.s}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_0r_s64, int64_t,
+ p4 = svwhilegt_b32_x2 ((int64_t) 0, x1),
+ p4 = svwhilegt_b32_s64_x2 (0, x1))
+
+/*
+** whilegt_p4_5r_s64:
+** mov (x[0-9]+), #?5
+** whilegt {p4\.s, p5\.s}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_5r_s64, int64_t,
+ p4 = svwhilegt_b32_x2 ((int64_t) 5, x1),
+ p4 = svwhilegt_b32_s64_x2 (5, x1))
+
+/*
+** whilegt_p4_r0_s64:
+** whilegt {p4\.s, p5\.s}, x0, xzr
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_r0_s64, int64_t,
+ p4 = svwhilegt_b32_x2 (x0, (int64_t) 0),
+ p4 = svwhilegt_b32_s64_x2 (x0, 0))
+
+/*
+** whilegt_p14_r5_s64:
+** mov (x[0-9]+), #?5
+** whilegt {p14\.s, p15\.s}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p14_r5_s64, int64_t,
+ p14 = svwhilegt_b32_x2 (x0, (int64_t) 5),
+ p14 = svwhilegt_b32_s64_x2 (x0, 5))
+
+/*
+** whilegt_p4_rr_u64:
+** whilehi {p4\.s, p5\.s}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_rr_u64, uint64_t,
+ p4 = svwhilegt_b32_u64_x2 (x0, x1),
+ p4 = svwhilegt_b32_x2 (x0, x1))
+
+/*
+** whilegt_p4_0r_u64:
+** whilehi {p4\.s, p5\.s}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_0r_u64, uint64_t,
+ p4 = svwhilegt_b32_x2 ((uint64_t) 0, x1),
+ p4 = svwhilegt_b32_u64_x2 (0, x1))
+
+/*
+** whilegt_p4_5r_u64:
+** mov (x[0-9]+), #?5
+** whilehi {p4\.s, p5\.s}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_5r_u64, uint64_t,
+ p4 = svwhilegt_b32_x2 ((uint64_t) 5, x1),
+ p4 = svwhilegt_b32_u64_x2 (5, x1))
+
+/*
+** whilegt_p4_r5_u64:
+** mov (x[0-9]+), #?5
+** whilehi {p4\.s, p5\.s}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_r5_u64, uint64_t,
+ p4 = svwhilegt_b32_x2 (x0, (uint64_t) 5),
+ p4 = svwhilegt_b32_u64_x2 (x0, 5))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_b64.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_b64.c
new file mode 100644
index 0000000..b25fa79
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_b64.c
@@ -0,0 +1,119 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilegt_p1_rr_s64:
+** whilegt {p[0-9]+\.d, p[0-9]+\.d}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p1_rr_s64, int64_t,
+ p1 = svwhilegt_b64_s64_x2 (x0, x1),
+ p1 = svwhilegt_b64_x2 (x0, x1))
+
+/*
+** whilegt_p4_rr_s64:
+** whilegt {p4\.d, p5\.d}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_rr_s64, int64_t,
+ p4 = svwhilegt_b64_s64_x2 (x0, x1),
+ p4 = svwhilegt_b64_x2 (x0, x1))
+
+/*
+** whilegt_p9_rr_s64:
+** whilegt {p[0-9]+\.d, p[0-9]+\.d}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p9_rr_s64, int64_t,
+ p9 = svwhilegt_b64_s64_x2 (x0, x1),
+ p9 = svwhilegt_b64_x2 (x0, x1))
+
+/*
+** whilegt_p14_rr_s64:
+** whilegt {p14\.d, p15\.d}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p14_rr_s64, int64_t,
+ p14 = svwhilegt_b64_s64_x2 (x0, x1),
+ p14 = svwhilegt_b64_x2 (x0, x1))
+
+/*
+** whilegt_p4_0r_s64:
+** whilegt {p4\.d, p5\.d}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_0r_s64, int64_t,
+ p4 = svwhilegt_b64_x2 ((int64_t) 0, x1),
+ p4 = svwhilegt_b64_s64_x2 (0, x1))
+
+/*
+** whilegt_p4_5r_s64:
+** mov (x[0-9]+), #?5
+** whilegt {p4\.d, p5\.d}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_5r_s64, int64_t,
+ p4 = svwhilegt_b64_x2 ((int64_t) 5, x1),
+ p4 = svwhilegt_b64_s64_x2 (5, x1))
+
+/*
+** whilegt_p4_r0_s64:
+** whilegt {p4\.d, p5\.d}, x0, xzr
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_r0_s64, int64_t,
+ p4 = svwhilegt_b64_x2 (x0, (int64_t) 0),
+ p4 = svwhilegt_b64_s64_x2 (x0, 0))
+
+/*
+** whilegt_p14_r5_s64:
+** mov (x[0-9]+), #?5
+** whilegt {p14\.d, p15\.d}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p14_r5_s64, int64_t,
+ p14 = svwhilegt_b64_x2 (x0, (int64_t) 5),
+ p14 = svwhilegt_b64_s64_x2 (x0, 5))
+
+/*
+** whilegt_p4_rr_u64:
+** whilehi {p4\.d, p5\.d}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_rr_u64, uint64_t,
+ p4 = svwhilegt_b64_u64_x2 (x0, x1),
+ p4 = svwhilegt_b64_x2 (x0, x1))
+
+/*
+** whilegt_p4_0r_u64:
+** whilehi {p4\.d, p5\.d}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_0r_u64, uint64_t,
+ p4 = svwhilegt_b64_x2 ((uint64_t) 0, x1),
+ p4 = svwhilegt_b64_u64_x2 (0, x1))
+
+/*
+** whilegt_p4_5r_u64:
+** mov (x[0-9]+), #?5
+** whilehi {p4\.d, p5\.d}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_5r_u64, uint64_t,
+ p4 = svwhilegt_b64_x2 ((uint64_t) 5, x1),
+ p4 = svwhilegt_b64_u64_x2 (5, x1))
+
+/*
+** whilegt_p4_r5_u64:
+** mov (x[0-9]+), #?5
+** whilehi {p4\.d, p5\.d}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_r5_u64, uint64_t,
+ p4 = svwhilegt_b64_x2 (x0, (uint64_t) 5),
+ p4 = svwhilegt_b64_u64_x2 (x0, 5))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_b8.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_b8.c
new file mode 100644
index 0000000..cfc4246
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_b8.c
@@ -0,0 +1,119 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilegt_p1_rr_s64:
+** whilegt {p[0-9]+\.b, p[0-9]+\.b}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p1_rr_s64, int64_t,
+ p1 = svwhilegt_b8_s64_x2 (x0, x1),
+ p1 = svwhilegt_b8_x2 (x0, x1))
+
+/*
+** whilegt_p4_rr_s64:
+** whilegt {p4\.b, p5\.b}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_rr_s64, int64_t,
+ p4 = svwhilegt_b8_s64_x2 (x0, x1),
+ p4 = svwhilegt_b8_x2 (x0, x1))
+
+/*
+** whilegt_p9_rr_s64:
+** whilegt {p[0-9]+\.b, p[0-9]+\.b}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p9_rr_s64, int64_t,
+ p9 = svwhilegt_b8_s64_x2 (x0, x1),
+ p9 = svwhilegt_b8_x2 (x0, x1))
+
+/*
+** whilegt_p14_rr_s64:
+** whilegt {p14\.b, p15\.b}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p14_rr_s64, int64_t,
+ p14 = svwhilegt_b8_s64_x2 (x0, x1),
+ p14 = svwhilegt_b8_x2 (x0, x1))
+
+/*
+** whilegt_p4_0r_s64:
+** whilegt {p4\.b, p5\.b}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_0r_s64, int64_t,
+ p4 = svwhilegt_b8_x2 ((int64_t) 0, x1),
+ p4 = svwhilegt_b8_s64_x2 (0, x1))
+
+/*
+** whilegt_p4_5r_s64:
+** mov (x[0-9]+), #?5
+** whilegt {p4\.b, p5\.b}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_5r_s64, int64_t,
+ p4 = svwhilegt_b8_x2 ((int64_t) 5, x1),
+ p4 = svwhilegt_b8_s64_x2 (5, x1))
+
+/*
+** whilegt_p4_r0_s64:
+** whilegt {p4\.b, p5\.b}, x0, xzr
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_r0_s64, int64_t,
+ p4 = svwhilegt_b8_x2 (x0, (int64_t) 0),
+ p4 = svwhilegt_b8_s64_x2 (x0, 0))
+
+/*
+** whilegt_p14_r5_s64:
+** mov (x[0-9]+), #?5
+** whilegt {p14\.b, p15\.b}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p14_r5_s64, int64_t,
+ p14 = svwhilegt_b8_x2 (x0, (int64_t) 5),
+ p14 = svwhilegt_b8_s64_x2 (x0, 5))
+
+/*
+** whilegt_p4_rr_u64:
+** whilehi {p4\.b, p5\.b}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_rr_u64, uint64_t,
+ p4 = svwhilegt_b8_u64_x2 (x0, x1),
+ p4 = svwhilegt_b8_x2 (x0, x1))
+
+/*
+** whilegt_p4_0r_u64:
+** whilehi {p4\.b, p5\.b}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_0r_u64, uint64_t,
+ p4 = svwhilegt_b8_x2 ((uint64_t) 0, x1),
+ p4 = svwhilegt_b8_u64_x2 (0, x1))
+
+/*
+** whilegt_p4_5r_u64:
+** mov (x[0-9]+), #?5
+** whilehi {p4\.b, p5\.b}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_5r_u64, uint64_t,
+ p4 = svwhilegt_b8_x2 ((uint64_t) 5, x1),
+ p4 = svwhilegt_b8_u64_x2 (5, x1))
+
+/*
+** whilegt_p4_r5_u64:
+** mov (x[0-9]+), #?5
+** whilehi {p4\.b, p5\.b}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilegt_p4_r5_u64, uint64_t,
+ p4 = svwhilegt_b8_x2 (x0, (uint64_t) 5),
+ p4 = svwhilegt_b8_u64_x2 (x0, 5))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_c16.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_c16.c
new file mode 100644
index 0000000..838aa19
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_c16.c
@@ -0,0 +1,117 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilegt_pn0_rr_2_s64:
+** whilegt pn[0-9]+\.h, x0, x1, vlx2
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn0_rr_2_s64, int64_t,
+ pn0 = svwhilegt_c16_s64 (x0, x1, 2),
+ pn0 = svwhilegt_c16 (x0, x1, 2))
+
+/*
+** whilegt_pn7_rr_4_s64:
+** whilegt pn[0-9]+\.h, x0, x1, vlx4
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn7_rr_4_s64, int64_t,
+ pn7 = svwhilegt_c16_s64 (x0, x1, 4),
+ pn7 = svwhilegt_c16 (x0, x1, 4))
+
+/*
+** whilegt_pn8_rr_2_s64:
+** whilegt pn8\.h, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_rr_2_s64, int64_t,
+ pn8 = svwhilegt_c16_s64 (x0, x1, 2),
+ pn8 = svwhilegt_c16 (x0, x1, 2))
+
+/*
+** whilegt_pn15_rr_4_s64:
+** whilegt pn15\.h, x0, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn15_rr_4_s64, int64_t,
+ pn15 = svwhilegt_c16_s64 (x0, x1, 4),
+ pn15 = svwhilegt_c16 (x0, x1, 4))
+
+/*
+** whilegt_pn8_0r_2_s64:
+** whilegt pn8\.h, xzr, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_0r_2_s64, int64_t,
+ pn8 = svwhilegt_c16 ((int64_t) 0, x1, 2),
+ pn8 = svwhilegt_c16_s64 (0, x1, 2))
+
+/*
+** whilegt_pn8_5r_4_s64:
+** mov (x[0-9]+), #?5
+** whilegt pn8\.h, \1, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_5r_4_s64, int64_t,
+ pn8 = svwhilegt_c16 ((int64_t) 5, x1, 4),
+ pn8 = svwhilegt_c16_s64 (5, x1, 4))
+
+/*
+** whilegt_pn8_r0_2_s64:
+** whilegt pn8\.h, x0, xzr, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_r0_2_s64, int64_t,
+ pn8 = svwhilegt_c16 (x0, (int64_t) 0, 2),
+ pn8 = svwhilegt_c16_s64 (x0, 0, 2))
+
+/*
+** whilegt_pn15_r5_4_s64:
+** mov (x[0-9]+), #?5
+** whilegt pn15\.h, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn15_r5_4_s64, int64_t,
+ pn15 = svwhilegt_c16 (x0, (int64_t) 5, 4),
+ pn15 = svwhilegt_c16_s64 (x0, 5, 4))
+
+/*
+** whilegt_pn8_rr_2_u64:
+** whilehi pn8\.h, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_rr_2_u64, uint64_t,
+ pn8 = svwhilegt_c16_u64 (x0, x1, 2),
+ pn8 = svwhilegt_c16 (x0, x1, 2))
+
+/*
+** whilegt_pn8_0r_4_u64:
+** whilehi pn8\.h, xzr, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_0r_4_u64, uint64_t,
+ pn8 = svwhilegt_c16 ((uint64_t) 0, x1, 4),
+ pn8 = svwhilegt_c16_u64 (0, x1, 4))
+
+/*
+** whilegt_pn8_5r_2_u64:
+** mov (x[0-9]+), #?5
+** whilehi pn8\.h, \1, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_5r_2_u64, uint64_t,
+ pn8 = svwhilegt_c16 ((uint64_t) 5, x1, 2),
+ pn8 = svwhilegt_c16_u64 (5, x1, 2))
+
+/*
+** whilegt_pn8_r5_4_u64:
+** mov (x[0-9]+), #?5
+** whilehi pn8\.h, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_r5_4_u64, uint64_t,
+ pn8 = svwhilegt_c16 (x0, (uint64_t) 5, 4),
+ pn8 = svwhilegt_c16_u64 (x0, 5, 4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_c32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_c32.c
new file mode 100644
index 0000000..008e5ea
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_c32.c
@@ -0,0 +1,117 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilegt_pn0_rr_2_s64:
+** whilegt pn[0-9]+\.s, x0, x1, vlx2
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn0_rr_2_s64, int64_t,
+ pn0 = svwhilegt_c32_s64 (x0, x1, 2),
+ pn0 = svwhilegt_c32 (x0, x1, 2))
+
+/*
+** whilegt_pn7_rr_4_s64:
+** whilegt pn[0-9]+\.s, x0, x1, vlx4
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn7_rr_4_s64, int64_t,
+ pn7 = svwhilegt_c32_s64 (x0, x1, 4),
+ pn7 = svwhilegt_c32 (x0, x1, 4))
+
+/*
+** whilegt_pn8_rr_2_s64:
+** whilegt pn8\.s, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_rr_2_s64, int64_t,
+ pn8 = svwhilegt_c32_s64 (x0, x1, 2),
+ pn8 = svwhilegt_c32 (x0, x1, 2))
+
+/*
+** whilegt_pn15_rr_4_s64:
+** whilegt pn15\.s, x0, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn15_rr_4_s64, int64_t,
+ pn15 = svwhilegt_c32_s64 (x0, x1, 4),
+ pn15 = svwhilegt_c32 (x0, x1, 4))
+
+/*
+** whilegt_pn8_0r_2_s64:
+** whilegt pn8\.s, xzr, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_0r_2_s64, int64_t,
+ pn8 = svwhilegt_c32 ((int64_t) 0, x1, 2),
+ pn8 = svwhilegt_c32_s64 (0, x1, 2))
+
+/*
+** whilegt_pn8_5r_4_s64:
+** mov (x[0-9]+), #?5
+** whilegt pn8\.s, \1, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_5r_4_s64, int64_t,
+ pn8 = svwhilegt_c32 ((int64_t) 5, x1, 4),
+ pn8 = svwhilegt_c32_s64 (5, x1, 4))
+
+/*
+** whilegt_pn8_r0_2_s64:
+** whilegt pn8\.s, x0, xzr, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_r0_2_s64, int64_t,
+ pn8 = svwhilegt_c32 (x0, (int64_t) 0, 2),
+ pn8 = svwhilegt_c32_s64 (x0, 0, 2))
+
+/*
+** whilegt_pn15_r5_4_s64:
+** mov (x[0-9]+), #?5
+** whilegt pn15\.s, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn15_r5_4_s64, int64_t,
+ pn15 = svwhilegt_c32 (x0, (int64_t) 5, 4),
+ pn15 = svwhilegt_c32_s64 (x0, 5, 4))
+
+/*
+** whilegt_pn8_rr_2_u64:
+** whilehi pn8\.s, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_rr_2_u64, uint64_t,
+ pn8 = svwhilegt_c32_u64 (x0, x1, 2),
+ pn8 = svwhilegt_c32 (x0, x1, 2))
+
+/*
+** whilegt_pn8_0r_4_u64:
+** whilehi pn8\.s, xzr, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_0r_4_u64, uint64_t,
+ pn8 = svwhilegt_c32 ((uint64_t) 0, x1, 4),
+ pn8 = svwhilegt_c32_u64 (0, x1, 4))
+
+/*
+** whilegt_pn8_5r_2_u64:
+** mov (x[0-9]+), #?5
+** whilehi pn8\.s, \1, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_5r_2_u64, uint64_t,
+ pn8 = svwhilegt_c32 ((uint64_t) 5, x1, 2),
+ pn8 = svwhilegt_c32_u64 (5, x1, 2))
+
+/*
+** whilegt_pn8_r5_4_u64:
+** mov (x[0-9]+), #?5
+** whilehi pn8\.s, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_r5_4_u64, uint64_t,
+ pn8 = svwhilegt_c32 (x0, (uint64_t) 5, 4),
+ pn8 = svwhilegt_c32_u64 (x0, 5, 4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_c64.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_c64.c
new file mode 100644
index 0000000..a89d6ed
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_c64.c
@@ -0,0 +1,117 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilegt_pn0_rr_2_s64:
+** whilegt pn[0-9]+\.d, x0, x1, vlx2
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn0_rr_2_s64, int64_t,
+ pn0 = svwhilegt_c64_s64 (x0, x1, 2),
+ pn0 = svwhilegt_c64 (x0, x1, 2))
+
+/*
+** whilegt_pn7_rr_4_s64:
+** whilegt pn[0-9]+\.d, x0, x1, vlx4
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn7_rr_4_s64, int64_t,
+ pn7 = svwhilegt_c64_s64 (x0, x1, 4),
+ pn7 = svwhilegt_c64 (x0, x1, 4))
+
+/*
+** whilegt_pn8_rr_2_s64:
+** whilegt pn8\.d, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_rr_2_s64, int64_t,
+ pn8 = svwhilegt_c64_s64 (x0, x1, 2),
+ pn8 = svwhilegt_c64 (x0, x1, 2))
+
+/*
+** whilegt_pn15_rr_4_s64:
+** whilegt pn15\.d, x0, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn15_rr_4_s64, int64_t,
+ pn15 = svwhilegt_c64_s64 (x0, x1, 4),
+ pn15 = svwhilegt_c64 (x0, x1, 4))
+
+/*
+** whilegt_pn8_0r_2_s64:
+** whilegt pn8\.d, xzr, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_0r_2_s64, int64_t,
+ pn8 = svwhilegt_c64 ((int64_t) 0, x1, 2),
+ pn8 = svwhilegt_c64_s64 (0, x1, 2))
+
+/*
+** whilegt_pn8_5r_4_s64:
+** mov (x[0-9]+), #?5
+** whilegt pn8\.d, \1, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_5r_4_s64, int64_t,
+ pn8 = svwhilegt_c64 ((int64_t) 5, x1, 4),
+ pn8 = svwhilegt_c64_s64 (5, x1, 4))
+
+/*
+** whilegt_pn8_r0_2_s64:
+** whilegt pn8\.d, x0, xzr, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_r0_2_s64, int64_t,
+ pn8 = svwhilegt_c64 (x0, (int64_t) 0, 2),
+ pn8 = svwhilegt_c64_s64 (x0, 0, 2))
+
+/*
+** whilegt_pn15_r5_4_s64:
+** mov (x[0-9]+), #?5
+** whilegt pn15\.d, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn15_r5_4_s64, int64_t,
+ pn15 = svwhilegt_c64 (x0, (int64_t) 5, 4),
+ pn15 = svwhilegt_c64_s64 (x0, 5, 4))
+
+/*
+** whilegt_pn8_rr_2_u64:
+** whilehi pn8\.d, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_rr_2_u64, uint64_t,
+ pn8 = svwhilegt_c64_u64 (x0, x1, 2),
+ pn8 = svwhilegt_c64 (x0, x1, 2))
+
+/*
+** whilegt_pn8_0r_4_u64:
+** whilehi pn8\.d, xzr, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_0r_4_u64, uint64_t,
+ pn8 = svwhilegt_c64 ((uint64_t) 0, x1, 4),
+ pn8 = svwhilegt_c64_u64 (0, x1, 4))
+
+/*
+** whilegt_pn8_5r_2_u64:
+** mov (x[0-9]+), #?5
+** whilehi pn8\.d, \1, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_5r_2_u64, uint64_t,
+ pn8 = svwhilegt_c64 ((uint64_t) 5, x1, 2),
+ pn8 = svwhilegt_c64_u64 (5, x1, 2))
+
+/*
+** whilegt_pn8_r5_4_u64:
+** mov (x[0-9]+), #?5
+** whilehi pn8\.d, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_r5_4_u64, uint64_t,
+ pn8 = svwhilegt_c64 (x0, (uint64_t) 5, 4),
+ pn8 = svwhilegt_c64_u64 (x0, 5, 4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_c8.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_c8.c
new file mode 100644
index 0000000..f644757
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilegt_c8.c
@@ -0,0 +1,117 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilegt_pn0_rr_2_s64:
+** whilegt pn[0-9]+\.b, x0, x1, vlx2
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn0_rr_2_s64, int64_t,
+ pn0 = svwhilegt_c8_s64 (x0, x1, 2),
+ pn0 = svwhilegt_c8 (x0, x1, 2))
+
+/*
+** whilegt_pn7_rr_4_s64:
+** whilegt pn[0-9]+\.b, x0, x1, vlx4
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn7_rr_4_s64, int64_t,
+ pn7 = svwhilegt_c8_s64 (x0, x1, 4),
+ pn7 = svwhilegt_c8 (x0, x1, 4))
+
+/*
+** whilegt_pn8_rr_2_s64:
+** whilegt pn8\.b, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_rr_2_s64, int64_t,
+ pn8 = svwhilegt_c8_s64 (x0, x1, 2),
+ pn8 = svwhilegt_c8 (x0, x1, 2))
+
+/*
+** whilegt_pn15_rr_4_s64:
+** whilegt pn15\.b, x0, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn15_rr_4_s64, int64_t,
+ pn15 = svwhilegt_c8_s64 (x0, x1, 4),
+ pn15 = svwhilegt_c8 (x0, x1, 4))
+
+/*
+** whilegt_pn8_0r_2_s64:
+** whilegt pn8\.b, xzr, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_0r_2_s64, int64_t,
+ pn8 = svwhilegt_c8 ((int64_t) 0, x1, 2),
+ pn8 = svwhilegt_c8_s64 (0, x1, 2))
+
+/*
+** whilegt_pn8_5r_4_s64:
+** mov (x[0-9]+), #?5
+** whilegt pn8\.b, \1, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_5r_4_s64, int64_t,
+ pn8 = svwhilegt_c8 ((int64_t) 5, x1, 4),
+ pn8 = svwhilegt_c8_s64 (5, x1, 4))
+
+/*
+** whilegt_pn8_r0_2_s64:
+** whilegt pn8\.b, x0, xzr, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_r0_2_s64, int64_t,
+ pn8 = svwhilegt_c8 (x0, (int64_t) 0, 2),
+ pn8 = svwhilegt_c8_s64 (x0, 0, 2))
+
+/*
+** whilegt_pn15_r5_4_s64:
+** mov (x[0-9]+), #?5
+** whilegt pn15\.b, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn15_r5_4_s64, int64_t,
+ pn15 = svwhilegt_c8 (x0, (int64_t) 5, 4),
+ pn15 = svwhilegt_c8_s64 (x0, 5, 4))
+
+/*
+** whilegt_pn8_rr_2_u64:
+** whilehi pn8\.b, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_rr_2_u64, uint64_t,
+ pn8 = svwhilegt_c8_u64 (x0, x1, 2),
+ pn8 = svwhilegt_c8 (x0, x1, 2))
+
+/*
+** whilegt_pn8_0r_4_u64:
+** whilehi pn8\.b, xzr, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_0r_4_u64, uint64_t,
+ pn8 = svwhilegt_c8 ((uint64_t) 0, x1, 4),
+ pn8 = svwhilegt_c8_u64 (0, x1, 4))
+
+/*
+** whilegt_pn8_5r_2_u64:
+** mov (x[0-9]+), #?5
+** whilehi pn8\.b, \1, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_5r_2_u64, uint64_t,
+ pn8 = svwhilegt_c8 ((uint64_t) 5, x1, 2),
+ pn8 = svwhilegt_c8_u64 (5, x1, 2))
+
+/*
+** whilegt_pn8_r5_4_u64:
+** mov (x[0-9]+), #?5
+** whilehi pn8\.b, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilegt_pn8_r5_4_u64, uint64_t,
+ pn8 = svwhilegt_c8 (x0, (uint64_t) 5, 4),
+ pn8 = svwhilegt_c8_u64 (x0, 5, 4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_b16.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_b16.c
new file mode 100644
index 0000000..c74a21e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_b16.c
@@ -0,0 +1,119 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilele_p1_rr_s64:
+** whilele {p[0-9]+\.h, p[0-9]+\.h}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p1_rr_s64, int64_t,
+ p1 = svwhilele_b16_s64_x2 (x0, x1),
+ p1 = svwhilele_b16_x2 (x0, x1))
+
+/*
+** whilele_p4_rr_s64:
+** whilele {p4\.h, p5\.h}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_rr_s64, int64_t,
+ p4 = svwhilele_b16_s64_x2 (x0, x1),
+ p4 = svwhilele_b16_x2 (x0, x1))
+
+/*
+** whilele_p9_rr_s64:
+** whilele {p[0-9]+\.h, p[0-9]+\.h}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p9_rr_s64, int64_t,
+ p9 = svwhilele_b16_s64_x2 (x0, x1),
+ p9 = svwhilele_b16_x2 (x0, x1))
+
+/*
+** whilele_p14_rr_s64:
+** whilele {p14\.h, p15\.h}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p14_rr_s64, int64_t,
+ p14 = svwhilele_b16_s64_x2 (x0, x1),
+ p14 = svwhilele_b16_x2 (x0, x1))
+
+/*
+** whilele_p4_0r_s64:
+** whilele {p4\.h, p5\.h}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_0r_s64, int64_t,
+ p4 = svwhilele_b16_x2 ((int64_t) 0, x1),
+ p4 = svwhilele_b16_s64_x2 (0, x1))
+
+/*
+** whilele_p4_5r_s64:
+** mov (x[0-9]+), #?5
+** whilele {p4\.h, p5\.h}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_5r_s64, int64_t,
+ p4 = svwhilele_b16_x2 ((int64_t) 5, x1),
+ p4 = svwhilele_b16_s64_x2 (5, x1))
+
+/*
+** whilele_p4_r0_s64:
+** whilele {p4\.h, p5\.h}, x0, xzr
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_r0_s64, int64_t,
+ p4 = svwhilele_b16_x2 (x0, (int64_t) 0),
+ p4 = svwhilele_b16_s64_x2 (x0, 0))
+
+/*
+** whilele_p14_r5_s64:
+** mov (x[0-9]+), #?5
+** whilele {p14\.h, p15\.h}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p14_r5_s64, int64_t,
+ p14 = svwhilele_b16_x2 (x0, (int64_t) 5),
+ p14 = svwhilele_b16_s64_x2 (x0, 5))
+
+/*
+** whilele_p4_rr_u64:
+** whilels {p4\.h, p5\.h}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_rr_u64, uint64_t,
+ p4 = svwhilele_b16_u64_x2 (x0, x1),
+ p4 = svwhilele_b16_x2 (x0, x1))
+
+/*
+** whilele_p4_0r_u64:
+** whilels {p4\.h, p5\.h}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_0r_u64, uint64_t,
+ p4 = svwhilele_b16_x2 ((uint64_t) 0, x1),
+ p4 = svwhilele_b16_u64_x2 (0, x1))
+
+/*
+** whilele_p4_5r_u64:
+** mov (x[0-9]+), #?5
+** whilels {p4\.h, p5\.h}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_5r_u64, uint64_t,
+ p4 = svwhilele_b16_x2 ((uint64_t) 5, x1),
+ p4 = svwhilele_b16_u64_x2 (5, x1))
+
+/*
+** whilele_p4_r5_u64:
+** mov (x[0-9]+), #?5
+** whilels {p4\.h, p5\.h}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_r5_u64, uint64_t,
+ p4 = svwhilele_b16_x2 (x0, (uint64_t) 5),
+ p4 = svwhilele_b16_u64_x2 (x0, 5))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_b32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_b32.c
new file mode 100644
index 0000000..0f5ba61
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_b32.c
@@ -0,0 +1,119 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilele_p1_rr_s64:
+** whilele {p[0-9]+\.s, p[0-9]+\.s}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p1_rr_s64, int64_t,
+ p1 = svwhilele_b32_s64_x2 (x0, x1),
+ p1 = svwhilele_b32_x2 (x0, x1))
+
+/*
+** whilele_p4_rr_s64:
+** whilele {p4\.s, p5\.s}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_rr_s64, int64_t,
+ p4 = svwhilele_b32_s64_x2 (x0, x1),
+ p4 = svwhilele_b32_x2 (x0, x1))
+
+/*
+** whilele_p9_rr_s64:
+** whilele {p[0-9]+\.s, p[0-9]+\.s}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p9_rr_s64, int64_t,
+ p9 = svwhilele_b32_s64_x2 (x0, x1),
+ p9 = svwhilele_b32_x2 (x0, x1))
+
+/*
+** whilele_p14_rr_s64:
+** whilele {p14\.s, p15\.s}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p14_rr_s64, int64_t,
+ p14 = svwhilele_b32_s64_x2 (x0, x1),
+ p14 = svwhilele_b32_x2 (x0, x1))
+
+/*
+** whilele_p4_0r_s64:
+** whilele {p4\.s, p5\.s}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_0r_s64, int64_t,
+ p4 = svwhilele_b32_x2 ((int64_t) 0, x1),
+ p4 = svwhilele_b32_s64_x2 (0, x1))
+
+/*
+** whilele_p4_5r_s64:
+** mov (x[0-9]+), #?5
+** whilele {p4\.s, p5\.s}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_5r_s64, int64_t,
+ p4 = svwhilele_b32_x2 ((int64_t) 5, x1),
+ p4 = svwhilele_b32_s64_x2 (5, x1))
+
+/*
+** whilele_p4_r0_s64:
+** whilele {p4\.s, p5\.s}, x0, xzr
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_r0_s64, int64_t,
+ p4 = svwhilele_b32_x2 (x0, (int64_t) 0),
+ p4 = svwhilele_b32_s64_x2 (x0, 0))
+
+/*
+** whilele_p14_r5_s64:
+** mov (x[0-9]+), #?5
+** whilele {p14\.s, p15\.s}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p14_r5_s64, int64_t,
+ p14 = svwhilele_b32_x2 (x0, (int64_t) 5),
+ p14 = svwhilele_b32_s64_x2 (x0, 5))
+
+/*
+** whilele_p4_rr_u64:
+** whilels {p4\.s, p5\.s}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_rr_u64, uint64_t,
+ p4 = svwhilele_b32_u64_x2 (x0, x1),
+ p4 = svwhilele_b32_x2 (x0, x1))
+
+/*
+** whilele_p4_0r_u64:
+** whilels {p4\.s, p5\.s}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_0r_u64, uint64_t,
+ p4 = svwhilele_b32_x2 ((uint64_t) 0, x1),
+ p4 = svwhilele_b32_u64_x2 (0, x1))
+
+/*
+** whilele_p4_5r_u64:
+** mov (x[0-9]+), #?5
+** whilels {p4\.s, p5\.s}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_5r_u64, uint64_t,
+ p4 = svwhilele_b32_x2 ((uint64_t) 5, x1),
+ p4 = svwhilele_b32_u64_x2 (5, x1))
+
+/*
+** whilele_p4_r5_u64:
+** mov (x[0-9]+), #?5
+** whilels {p4\.s, p5\.s}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_r5_u64, uint64_t,
+ p4 = svwhilele_b32_x2 (x0, (uint64_t) 5),
+ p4 = svwhilele_b32_u64_x2 (x0, 5))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_b64.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_b64.c
new file mode 100644
index 0000000..e26a8f6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_b64.c
@@ -0,0 +1,119 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilele_p1_rr_s64:
+** whilele {p[0-9]+\.d, p[0-9]+\.d}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p1_rr_s64, int64_t,
+ p1 = svwhilele_b64_s64_x2 (x0, x1),
+ p1 = svwhilele_b64_x2 (x0, x1))
+
+/*
+** whilele_p4_rr_s64:
+** whilele {p4\.d, p5\.d}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_rr_s64, int64_t,
+ p4 = svwhilele_b64_s64_x2 (x0, x1),
+ p4 = svwhilele_b64_x2 (x0, x1))
+
+/*
+** whilele_p9_rr_s64:
+** whilele {p[0-9]+\.d, p[0-9]+\.d}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p9_rr_s64, int64_t,
+ p9 = svwhilele_b64_s64_x2 (x0, x1),
+ p9 = svwhilele_b64_x2 (x0, x1))
+
+/*
+** whilele_p14_rr_s64:
+** whilele {p14\.d, p15\.d}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p14_rr_s64, int64_t,
+ p14 = svwhilele_b64_s64_x2 (x0, x1),
+ p14 = svwhilele_b64_x2 (x0, x1))
+
+/*
+** whilele_p4_0r_s64:
+** whilele {p4\.d, p5\.d}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_0r_s64, int64_t,
+ p4 = svwhilele_b64_x2 ((int64_t) 0, x1),
+ p4 = svwhilele_b64_s64_x2 (0, x1))
+
+/*
+** whilele_p4_5r_s64:
+** mov (x[0-9]+), #?5
+** whilele {p4\.d, p5\.d}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_5r_s64, int64_t,
+ p4 = svwhilele_b64_x2 ((int64_t) 5, x1),
+ p4 = svwhilele_b64_s64_x2 (5, x1))
+
+/*
+** whilele_p4_r0_s64:
+** whilele {p4\.d, p5\.d}, x0, xzr
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_r0_s64, int64_t,
+ p4 = svwhilele_b64_x2 (x0, (int64_t) 0),
+ p4 = svwhilele_b64_s64_x2 (x0, 0))
+
+/*
+** whilele_p14_r5_s64:
+** mov (x[0-9]+), #?5
+** whilele {p14\.d, p15\.d}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p14_r5_s64, int64_t,
+ p14 = svwhilele_b64_x2 (x0, (int64_t) 5),
+ p14 = svwhilele_b64_s64_x2 (x0, 5))
+
+/*
+** whilele_p4_rr_u64:
+** whilels {p4\.d, p5\.d}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_rr_u64, uint64_t,
+ p4 = svwhilele_b64_u64_x2 (x0, x1),
+ p4 = svwhilele_b64_x2 (x0, x1))
+
+/*
+** whilele_p4_0r_u64:
+** whilels {p4\.d, p5\.d}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_0r_u64, uint64_t,
+ p4 = svwhilele_b64_x2 ((uint64_t) 0, x1),
+ p4 = svwhilele_b64_u64_x2 (0, x1))
+
+/*
+** whilele_p4_5r_u64:
+** mov (x[0-9]+), #?5
+** whilels {p4\.d, p5\.d}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_5r_u64, uint64_t,
+ p4 = svwhilele_b64_x2 ((uint64_t) 5, x1),
+ p4 = svwhilele_b64_u64_x2 (5, x1))
+
+/*
+** whilele_p4_r5_u64:
+** mov (x[0-9]+), #?5
+** whilels {p4\.d, p5\.d}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_r5_u64, uint64_t,
+ p4 = svwhilele_b64_x2 (x0, (uint64_t) 5),
+ p4 = svwhilele_b64_u64_x2 (x0, 5))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_b8.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_b8.c
new file mode 100644
index 0000000..b6cc1e0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_b8.c
@@ -0,0 +1,119 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilele_p1_rr_s64:
+** whilele {p[0-9]+\.b, p[0-9]+\.b}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p1_rr_s64, int64_t,
+ p1 = svwhilele_b8_s64_x2 (x0, x1),
+ p1 = svwhilele_b8_x2 (x0, x1))
+
+/*
+** whilele_p4_rr_s64:
+** whilele {p4\.b, p5\.b}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_rr_s64, int64_t,
+ p4 = svwhilele_b8_s64_x2 (x0, x1),
+ p4 = svwhilele_b8_x2 (x0, x1))
+
+/*
+** whilele_p9_rr_s64:
+** whilele {p[0-9]+\.b, p[0-9]+\.b}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p9_rr_s64, int64_t,
+ p9 = svwhilele_b8_s64_x2 (x0, x1),
+ p9 = svwhilele_b8_x2 (x0, x1))
+
+/*
+** whilele_p14_rr_s64:
+** whilele {p14\.b, p15\.b}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p14_rr_s64, int64_t,
+ p14 = svwhilele_b8_s64_x2 (x0, x1),
+ p14 = svwhilele_b8_x2 (x0, x1))
+
+/*
+** whilele_p4_0r_s64:
+** whilele {p4\.b, p5\.b}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_0r_s64, int64_t,
+ p4 = svwhilele_b8_x2 ((int64_t) 0, x1),
+ p4 = svwhilele_b8_s64_x2 (0, x1))
+
+/*
+** whilele_p4_5r_s64:
+** mov (x[0-9]+), #?5
+** whilele {p4\.b, p5\.b}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_5r_s64, int64_t,
+ p4 = svwhilele_b8_x2 ((int64_t) 5, x1),
+ p4 = svwhilele_b8_s64_x2 (5, x1))
+
+/*
+** whilele_p4_r0_s64:
+** whilele {p4\.b, p5\.b}, x0, xzr
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_r0_s64, int64_t,
+ p4 = svwhilele_b8_x2 (x0, (int64_t) 0),
+ p4 = svwhilele_b8_s64_x2 (x0, 0))
+
+/*
+** whilele_p14_r5_s64:
+** mov (x[0-9]+), #?5
+** whilele {p14\.b, p15\.b}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p14_r5_s64, int64_t,
+ p14 = svwhilele_b8_x2 (x0, (int64_t) 5),
+ p14 = svwhilele_b8_s64_x2 (x0, 5))
+
+/*
+** whilele_p4_rr_u64:
+** whilels {p4\.b, p5\.b}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_rr_u64, uint64_t,
+ p4 = svwhilele_b8_u64_x2 (x0, x1),
+ p4 = svwhilele_b8_x2 (x0, x1))
+
+/*
+** whilele_p4_0r_u64:
+** whilels {p4\.b, p5\.b}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_0r_u64, uint64_t,
+ p4 = svwhilele_b8_x2 ((uint64_t) 0, x1),
+ p4 = svwhilele_b8_u64_x2 (0, x1))
+
+/*
+** whilele_p4_5r_u64:
+** mov (x[0-9]+), #?5
+** whilels {p4\.b, p5\.b}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_5r_u64, uint64_t,
+ p4 = svwhilele_b8_x2 ((uint64_t) 5, x1),
+ p4 = svwhilele_b8_u64_x2 (5, x1))
+
+/*
+** whilele_p4_r5_u64:
+** mov (x[0-9]+), #?5
+** whilels {p4\.b, p5\.b}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilele_p4_r5_u64, uint64_t,
+ p4 = svwhilele_b8_x2 (x0, (uint64_t) 5),
+ p4 = svwhilele_b8_u64_x2 (x0, 5))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_c16.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_c16.c
new file mode 100644
index 0000000..768fd91
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_c16.c
@@ -0,0 +1,117 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilele_pn0_rr_2_s64:
+** whilele pn[0-9]+\.h, x0, x1, vlx2
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn0_rr_2_s64, int64_t,
+ pn0 = svwhilele_c16_s64 (x0, x1, 2),
+ pn0 = svwhilele_c16 (x0, x1, 2))
+
+/*
+** whilele_pn7_rr_4_s64:
+** whilele pn[0-9]+\.h, x0, x1, vlx4
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn7_rr_4_s64, int64_t,
+ pn7 = svwhilele_c16_s64 (x0, x1, 4),
+ pn7 = svwhilele_c16 (x0, x1, 4))
+
+/*
+** whilele_pn8_rr_2_s64:
+** whilele pn8\.h, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_rr_2_s64, int64_t,
+ pn8 = svwhilele_c16_s64 (x0, x1, 2),
+ pn8 = svwhilele_c16 (x0, x1, 2))
+
+/*
+** whilele_pn15_rr_4_s64:
+** whilele pn15\.h, x0, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn15_rr_4_s64, int64_t,
+ pn15 = svwhilele_c16_s64 (x0, x1, 4),
+ pn15 = svwhilele_c16 (x0, x1, 4))
+
+/*
+** whilele_pn8_0r_2_s64:
+** whilele pn8\.h, xzr, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_0r_2_s64, int64_t,
+ pn8 = svwhilele_c16 ((int64_t) 0, x1, 2),
+ pn8 = svwhilele_c16_s64 (0, x1, 2))
+
+/*
+** whilele_pn8_5r_4_s64:
+** mov (x[0-9]+), #?5
+** whilele pn8\.h, \1, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_5r_4_s64, int64_t,
+ pn8 = svwhilele_c16 ((int64_t) 5, x1, 4),
+ pn8 = svwhilele_c16_s64 (5, x1, 4))
+
+/*
+** whilele_pn8_r0_2_s64:
+** whilele pn8\.h, x0, xzr, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_r0_2_s64, int64_t,
+ pn8 = svwhilele_c16 (x0, (int64_t) 0, 2),
+ pn8 = svwhilele_c16_s64 (x0, 0, 2))
+
+/*
+** whilele_pn15_r5_4_s64:
+** mov (x[0-9]+), #?5
+** whilele pn15\.h, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn15_r5_4_s64, int64_t,
+ pn15 = svwhilele_c16 (x0, (int64_t) 5, 4),
+ pn15 = svwhilele_c16_s64 (x0, 5, 4))
+
+/*
+** whilele_pn8_rr_2_u64:
+** whilels pn8\.h, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_rr_2_u64, uint64_t,
+ pn8 = svwhilele_c16_u64 (x0, x1, 2),
+ pn8 = svwhilele_c16 (x0, x1, 2))
+
+/*
+** whilele_pn8_0r_4_u64:
+** whilels pn8\.h, xzr, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_0r_4_u64, uint64_t,
+ pn8 = svwhilele_c16 ((uint64_t) 0, x1, 4),
+ pn8 = svwhilele_c16_u64 (0, x1, 4))
+
+/*
+** whilele_pn8_5r_2_u64:
+** mov (x[0-9]+), #?5
+** whilels pn8\.h, \1, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_5r_2_u64, uint64_t,
+ pn8 = svwhilele_c16 ((uint64_t) 5, x1, 2),
+ pn8 = svwhilele_c16_u64 (5, x1, 2))
+
+/*
+** whilele_pn8_r5_4_u64:
+** mov (x[0-9]+), #?5
+** whilels pn8\.h, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_r5_4_u64, uint64_t,
+ pn8 = svwhilele_c16 (x0, (uint64_t) 5, 4),
+ pn8 = svwhilele_c16_u64 (x0, 5, 4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_c32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_c32.c
new file mode 100644
index 0000000..3669d85
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_c32.c
@@ -0,0 +1,117 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilele_pn0_rr_2_s64:
+** whilele pn[0-9]+\.s, x0, x1, vlx2
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn0_rr_2_s64, int64_t,
+ pn0 = svwhilele_c32_s64 (x0, x1, 2),
+ pn0 = svwhilele_c32 (x0, x1, 2))
+
+/*
+** whilele_pn7_rr_4_s64:
+** whilele pn[0-9]+\.s, x0, x1, vlx4
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn7_rr_4_s64, int64_t,
+ pn7 = svwhilele_c32_s64 (x0, x1, 4),
+ pn7 = svwhilele_c32 (x0, x1, 4))
+
+/*
+** whilele_pn8_rr_2_s64:
+** whilele pn8\.s, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_rr_2_s64, int64_t,
+ pn8 = svwhilele_c32_s64 (x0, x1, 2),
+ pn8 = svwhilele_c32 (x0, x1, 2))
+
+/*
+** whilele_pn15_rr_4_s64:
+** whilele pn15\.s, x0, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn15_rr_4_s64, int64_t,
+ pn15 = svwhilele_c32_s64 (x0, x1, 4),
+ pn15 = svwhilele_c32 (x0, x1, 4))
+
+/*
+** whilele_pn8_0r_2_s64:
+** whilele pn8\.s, xzr, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_0r_2_s64, int64_t,
+ pn8 = svwhilele_c32 ((int64_t) 0, x1, 2),
+ pn8 = svwhilele_c32_s64 (0, x1, 2))
+
+/*
+** whilele_pn8_5r_4_s64:
+** mov (x[0-9]+), #?5
+** whilele pn8\.s, \1, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_5r_4_s64, int64_t,
+ pn8 = svwhilele_c32 ((int64_t) 5, x1, 4),
+ pn8 = svwhilele_c32_s64 (5, x1, 4))
+
+/*
+** whilele_pn8_r0_2_s64:
+** whilele pn8\.s, x0, xzr, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_r0_2_s64, int64_t,
+ pn8 = svwhilele_c32 (x0, (int64_t) 0, 2),
+ pn8 = svwhilele_c32_s64 (x0, 0, 2))
+
+/*
+** whilele_pn15_r5_4_s64:
+** mov (x[0-9]+), #?5
+** whilele pn15\.s, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn15_r5_4_s64, int64_t,
+ pn15 = svwhilele_c32 (x0, (int64_t) 5, 4),
+ pn15 = svwhilele_c32_s64 (x0, 5, 4))
+
+/*
+** whilele_pn8_rr_2_u64:
+** whilels pn8\.s, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_rr_2_u64, uint64_t,
+ pn8 = svwhilele_c32_u64 (x0, x1, 2),
+ pn8 = svwhilele_c32 (x0, x1, 2))
+
+/*
+** whilele_pn8_0r_4_u64:
+** whilels pn8\.s, xzr, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_0r_4_u64, uint64_t,
+ pn8 = svwhilele_c32 ((uint64_t) 0, x1, 4),
+ pn8 = svwhilele_c32_u64 (0, x1, 4))
+
+/*
+** whilele_pn8_5r_2_u64:
+** mov (x[0-9]+), #?5
+** whilels pn8\.s, \1, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_5r_2_u64, uint64_t,
+ pn8 = svwhilele_c32 ((uint64_t) 5, x1, 2),
+ pn8 = svwhilele_c32_u64 (5, x1, 2))
+
+/*
+** whilele_pn8_r5_4_u64:
+** mov (x[0-9]+), #?5
+** whilels pn8\.s, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_r5_4_u64, uint64_t,
+ pn8 = svwhilele_c32 (x0, (uint64_t) 5, 4),
+ pn8 = svwhilele_c32_u64 (x0, 5, 4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_c64.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_c64.c
new file mode 100644
index 0000000..f614a55
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_c64.c
@@ -0,0 +1,117 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilele_pn0_rr_2_s64:
+** whilele pn[0-9]+\.d, x0, x1, vlx2
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn0_rr_2_s64, int64_t,
+ pn0 = svwhilele_c64_s64 (x0, x1, 2),
+ pn0 = svwhilele_c64 (x0, x1, 2))
+
+/*
+** whilele_pn7_rr_4_s64:
+** whilele pn[0-9]+\.d, x0, x1, vlx4
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn7_rr_4_s64, int64_t,
+ pn7 = svwhilele_c64_s64 (x0, x1, 4),
+ pn7 = svwhilele_c64 (x0, x1, 4))
+
+/*
+** whilele_pn8_rr_2_s64:
+** whilele pn8\.d, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_rr_2_s64, int64_t,
+ pn8 = svwhilele_c64_s64 (x0, x1, 2),
+ pn8 = svwhilele_c64 (x0, x1, 2))
+
+/*
+** whilele_pn15_rr_4_s64:
+** whilele pn15\.d, x0, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn15_rr_4_s64, int64_t,
+ pn15 = svwhilele_c64_s64 (x0, x1, 4),
+ pn15 = svwhilele_c64 (x0, x1, 4))
+
+/*
+** whilele_pn8_0r_2_s64:
+** whilele pn8\.d, xzr, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_0r_2_s64, int64_t,
+ pn8 = svwhilele_c64 ((int64_t) 0, x1, 2),
+ pn8 = svwhilele_c64_s64 (0, x1, 2))
+
+/*
+** whilele_pn8_5r_4_s64:
+** mov (x[0-9]+), #?5
+** whilele pn8\.d, \1, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_5r_4_s64, int64_t,
+ pn8 = svwhilele_c64 ((int64_t) 5, x1, 4),
+ pn8 = svwhilele_c64_s64 (5, x1, 4))
+
+/*
+** whilele_pn8_r0_2_s64:
+** whilele pn8\.d, x0, xzr, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_r0_2_s64, int64_t,
+ pn8 = svwhilele_c64 (x0, (int64_t) 0, 2),
+ pn8 = svwhilele_c64_s64 (x0, 0, 2))
+
+/*
+** whilele_pn15_r5_4_s64:
+** mov (x[0-9]+), #?5
+** whilele pn15\.d, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn15_r5_4_s64, int64_t,
+ pn15 = svwhilele_c64 (x0, (int64_t) 5, 4),
+ pn15 = svwhilele_c64_s64 (x0, 5, 4))
+
+/*
+** whilele_pn8_rr_2_u64:
+** whilels pn8\.d, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_rr_2_u64, uint64_t,
+ pn8 = svwhilele_c64_u64 (x0, x1, 2),
+ pn8 = svwhilele_c64 (x0, x1, 2))
+
+/*
+** whilele_pn8_0r_4_u64:
+** whilels pn8\.d, xzr, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_0r_4_u64, uint64_t,
+ pn8 = svwhilele_c64 ((uint64_t) 0, x1, 4),
+ pn8 = svwhilele_c64_u64 (0, x1, 4))
+
+/*
+** whilele_pn8_5r_2_u64:
+** mov (x[0-9]+), #?5
+** whilels pn8\.d, \1, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_5r_2_u64, uint64_t,
+ pn8 = svwhilele_c64 ((uint64_t) 5, x1, 2),
+ pn8 = svwhilele_c64_u64 (5, x1, 2))
+
+/*
+** whilele_pn8_r5_4_u64:
+** mov (x[0-9]+), #?5
+** whilels pn8\.d, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_r5_4_u64, uint64_t,
+ pn8 = svwhilele_c64 (x0, (uint64_t) 5, 4),
+ pn8 = svwhilele_c64_u64 (x0, 5, 4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_c8.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_c8.c
new file mode 100644
index 0000000..e334f5b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilele_c8.c
@@ -0,0 +1,117 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilele_pn0_rr_2_s64:
+** whilele pn[0-9]+\.b, x0, x1, vlx2
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn0_rr_2_s64, int64_t,
+ pn0 = svwhilele_c8_s64 (x0, x1, 2),
+ pn0 = svwhilele_c8 (x0, x1, 2))
+
+/*
+** whilele_pn7_rr_4_s64:
+** whilele pn[0-9]+\.b, x0, x1, vlx4
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn7_rr_4_s64, int64_t,
+ pn7 = svwhilele_c8_s64 (x0, x1, 4),
+ pn7 = svwhilele_c8 (x0, x1, 4))
+
+/*
+** whilele_pn8_rr_2_s64:
+** whilele pn8\.b, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_rr_2_s64, int64_t,
+ pn8 = svwhilele_c8_s64 (x0, x1, 2),
+ pn8 = svwhilele_c8 (x0, x1, 2))
+
+/*
+** whilele_pn15_rr_4_s64:
+** whilele pn15\.b, x0, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn15_rr_4_s64, int64_t,
+ pn15 = svwhilele_c8_s64 (x0, x1, 4),
+ pn15 = svwhilele_c8 (x0, x1, 4))
+
+/*
+** whilele_pn8_0r_2_s64:
+** whilele pn8\.b, xzr, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_0r_2_s64, int64_t,
+ pn8 = svwhilele_c8 ((int64_t) 0, x1, 2),
+ pn8 = svwhilele_c8_s64 (0, x1, 2))
+
+/*
+** whilele_pn8_5r_4_s64:
+** mov (x[0-9]+), #?5
+** whilele pn8\.b, \1, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_5r_4_s64, int64_t,
+ pn8 = svwhilele_c8 ((int64_t) 5, x1, 4),
+ pn8 = svwhilele_c8_s64 (5, x1, 4))
+
+/*
+** whilele_pn8_r0_2_s64:
+** whilele pn8\.b, x0, xzr, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_r0_2_s64, int64_t,
+ pn8 = svwhilele_c8 (x0, (int64_t) 0, 2),
+ pn8 = svwhilele_c8_s64 (x0, 0, 2))
+
+/*
+** whilele_pn15_r5_4_s64:
+** mov (x[0-9]+), #?5
+** whilele pn15\.b, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn15_r5_4_s64, int64_t,
+ pn15 = svwhilele_c8 (x0, (int64_t) 5, 4),
+ pn15 = svwhilele_c8_s64 (x0, 5, 4))
+
+/*
+** whilele_pn8_rr_2_u64:
+** whilels pn8\.b, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_rr_2_u64, uint64_t,
+ pn8 = svwhilele_c8_u64 (x0, x1, 2),
+ pn8 = svwhilele_c8 (x0, x1, 2))
+
+/*
+** whilele_pn8_0r_4_u64:
+** whilels pn8\.b, xzr, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_0r_4_u64, uint64_t,
+ pn8 = svwhilele_c8 ((uint64_t) 0, x1, 4),
+ pn8 = svwhilele_c8_u64 (0, x1, 4))
+
+/*
+** whilele_pn8_5r_2_u64:
+** mov (x[0-9]+), #?5
+** whilels pn8\.b, \1, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_5r_2_u64, uint64_t,
+ pn8 = svwhilele_c8 ((uint64_t) 5, x1, 2),
+ pn8 = svwhilele_c8_u64 (5, x1, 2))
+
+/*
+** whilele_pn8_r5_4_u64:
+** mov (x[0-9]+), #?5
+** whilels pn8\.b, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilele_pn8_r5_4_u64, uint64_t,
+ pn8 = svwhilele_c8 (x0, (uint64_t) 5, 4),
+ pn8 = svwhilele_c8_u64 (x0, 5, 4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_b16.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_b16.c
new file mode 100644
index 0000000..4ed6b4d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_b16.c
@@ -0,0 +1,119 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilelt_p1_rr_s64:
+** whilelt {p[0-9]+\.h, p[0-9]+\.h}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p1_rr_s64, int64_t,
+ p1 = svwhilelt_b16_s64_x2 (x0, x1),
+ p1 = svwhilelt_b16_x2 (x0, x1))
+
+/*
+** whilelt_p4_rr_s64:
+** whilelt {p4\.h, p5\.h}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_rr_s64, int64_t,
+ p4 = svwhilelt_b16_s64_x2 (x0, x1),
+ p4 = svwhilelt_b16_x2 (x0, x1))
+
+/*
+** whilelt_p9_rr_s64:
+** whilelt {p[0-9]+\.h, p[0-9]+\.h}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p9_rr_s64, int64_t,
+ p9 = svwhilelt_b16_s64_x2 (x0, x1),
+ p9 = svwhilelt_b16_x2 (x0, x1))
+
+/*
+** whilelt_p14_rr_s64:
+** whilelt {p14\.h, p15\.h}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p14_rr_s64, int64_t,
+ p14 = svwhilelt_b16_s64_x2 (x0, x1),
+ p14 = svwhilelt_b16_x2 (x0, x1))
+
+/*
+** whilelt_p4_0r_s64:
+** whilelt {p4\.h, p5\.h}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_0r_s64, int64_t,
+ p4 = svwhilelt_b16_x2 ((int64_t) 0, x1),
+ p4 = svwhilelt_b16_s64_x2 (0, x1))
+
+/*
+** whilelt_p4_5r_s64:
+** mov (x[0-9]+), #?5
+** whilelt {p4\.h, p5\.h}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_5r_s64, int64_t,
+ p4 = svwhilelt_b16_x2 ((int64_t) 5, x1),
+ p4 = svwhilelt_b16_s64_x2 (5, x1))
+
+/*
+** whilelt_p4_r0_s64:
+** whilelt {p4\.h, p5\.h}, x0, xzr
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_r0_s64, int64_t,
+ p4 = svwhilelt_b16_x2 (x0, (int64_t) 0),
+ p4 = svwhilelt_b16_s64_x2 (x0, 0))
+
+/*
+** whilelt_p14_r5_s64:
+** mov (x[0-9]+), #?5
+** whilelt {p14\.h, p15\.h}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p14_r5_s64, int64_t,
+ p14 = svwhilelt_b16_x2 (x0, (int64_t) 5),
+ p14 = svwhilelt_b16_s64_x2 (x0, 5))
+
+/*
+** whilelt_p4_rr_u64:
+** whilelo {p4\.h, p5\.h}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_rr_u64, uint64_t,
+ p4 = svwhilelt_b16_u64_x2 (x0, x1),
+ p4 = svwhilelt_b16_x2 (x0, x1))
+
+/*
+** whilelt_p4_0r_u64:
+** whilelo {p4\.h, p5\.h}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_0r_u64, uint64_t,
+ p4 = svwhilelt_b16_x2 ((uint64_t) 0, x1),
+ p4 = svwhilelt_b16_u64_x2 (0, x1))
+
+/*
+** whilelt_p4_5r_u64:
+** mov (x[0-9]+), #?5
+** whilelo {p4\.h, p5\.h}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_5r_u64, uint64_t,
+ p4 = svwhilelt_b16_x2 ((uint64_t) 5, x1),
+ p4 = svwhilelt_b16_u64_x2 (5, x1))
+
+/*
+** whilelt_p4_r5_u64:
+** mov (x[0-9]+), #?5
+** whilelo {p4\.h, p5\.h}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_r5_u64, uint64_t,
+ p4 = svwhilelt_b16_x2 (x0, (uint64_t) 5),
+ p4 = svwhilelt_b16_u64_x2 (x0, 5))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_b32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_b32.c
new file mode 100644
index 0000000..39bc144
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_b32.c
@@ -0,0 +1,119 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilelt_p1_rr_s64:
+** whilelt {p[0-9]+\.s, p[0-9]+\.s}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p1_rr_s64, int64_t,
+ p1 = svwhilelt_b32_s64_x2 (x0, x1),
+ p1 = svwhilelt_b32_x2 (x0, x1))
+
+/*
+** whilelt_p4_rr_s64:
+** whilelt {p4\.s, p5\.s}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_rr_s64, int64_t,
+ p4 = svwhilelt_b32_s64_x2 (x0, x1),
+ p4 = svwhilelt_b32_x2 (x0, x1))
+
+/*
+** whilelt_p9_rr_s64:
+** whilelt {p[0-9]+\.s, p[0-9]+\.s}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p9_rr_s64, int64_t,
+ p9 = svwhilelt_b32_s64_x2 (x0, x1),
+ p9 = svwhilelt_b32_x2 (x0, x1))
+
+/*
+** whilelt_p14_rr_s64:
+** whilelt {p14\.s, p15\.s}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p14_rr_s64, int64_t,
+ p14 = svwhilelt_b32_s64_x2 (x0, x1),
+ p14 = svwhilelt_b32_x2 (x0, x1))
+
+/*
+** whilelt_p4_0r_s64:
+** whilelt {p4\.s, p5\.s}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_0r_s64, int64_t,
+ p4 = svwhilelt_b32_x2 ((int64_t) 0, x1),
+ p4 = svwhilelt_b32_s64_x2 (0, x1))
+
+/*
+** whilelt_p4_5r_s64:
+** mov (x[0-9]+), #?5
+** whilelt {p4\.s, p5\.s}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_5r_s64, int64_t,
+ p4 = svwhilelt_b32_x2 ((int64_t) 5, x1),
+ p4 = svwhilelt_b32_s64_x2 (5, x1))
+
+/*
+** whilelt_p4_r0_s64:
+** whilelt {p4\.s, p5\.s}, x0, xzr
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_r0_s64, int64_t,
+ p4 = svwhilelt_b32_x2 (x0, (int64_t) 0),
+ p4 = svwhilelt_b32_s64_x2 (x0, 0))
+
+/*
+** whilelt_p14_r5_s64:
+** mov (x[0-9]+), #?5
+** whilelt {p14\.s, p15\.s}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p14_r5_s64, int64_t,
+ p14 = svwhilelt_b32_x2 (x0, (int64_t) 5),
+ p14 = svwhilelt_b32_s64_x2 (x0, 5))
+
+/*
+** whilelt_p4_rr_u64:
+** whilelo {p4\.s, p5\.s}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_rr_u64, uint64_t,
+ p4 = svwhilelt_b32_u64_x2 (x0, x1),
+ p4 = svwhilelt_b32_x2 (x0, x1))
+
+/*
+** whilelt_p4_0r_u64:
+** whilelo {p4\.s, p5\.s}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_0r_u64, uint64_t,
+ p4 = svwhilelt_b32_x2 ((uint64_t) 0, x1),
+ p4 = svwhilelt_b32_u64_x2 (0, x1))
+
+/*
+** whilelt_p4_5r_u64:
+** mov (x[0-9]+), #?5
+** whilelo {p4\.s, p5\.s}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_5r_u64, uint64_t,
+ p4 = svwhilelt_b32_x2 ((uint64_t) 5, x1),
+ p4 = svwhilelt_b32_u64_x2 (5, x1))
+
+/*
+** whilelt_p4_r5_u64:
+** mov (x[0-9]+), #?5
+** whilelo {p4\.s, p5\.s}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_r5_u64, uint64_t,
+ p4 = svwhilelt_b32_x2 (x0, (uint64_t) 5),
+ p4 = svwhilelt_b32_u64_x2 (x0, 5))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_b64.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_b64.c
new file mode 100644
index 0000000..9acd245
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_b64.c
@@ -0,0 +1,119 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilelt_p1_rr_s64:
+** whilelt {p[0-9]+\.d, p[0-9]+\.d}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p1_rr_s64, int64_t,
+ p1 = svwhilelt_b64_s64_x2 (x0, x1),
+ p1 = svwhilelt_b64_x2 (x0, x1))
+
+/*
+** whilelt_p4_rr_s64:
+** whilelt {p4\.d, p5\.d}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_rr_s64, int64_t,
+ p4 = svwhilelt_b64_s64_x2 (x0, x1),
+ p4 = svwhilelt_b64_x2 (x0, x1))
+
+/*
+** whilelt_p9_rr_s64:
+** whilelt {p[0-9]+\.d, p[0-9]+\.d}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p9_rr_s64, int64_t,
+ p9 = svwhilelt_b64_s64_x2 (x0, x1),
+ p9 = svwhilelt_b64_x2 (x0, x1))
+
+/*
+** whilelt_p14_rr_s64:
+** whilelt {p14\.d, p15\.d}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p14_rr_s64, int64_t,
+ p14 = svwhilelt_b64_s64_x2 (x0, x1),
+ p14 = svwhilelt_b64_x2 (x0, x1))
+
+/*
+** whilelt_p4_0r_s64:
+** whilelt {p4\.d, p5\.d}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_0r_s64, int64_t,
+ p4 = svwhilelt_b64_x2 ((int64_t) 0, x1),
+ p4 = svwhilelt_b64_s64_x2 (0, x1))
+
+/*
+** whilelt_p4_5r_s64:
+** mov (x[0-9]+), #?5
+** whilelt {p4\.d, p5\.d}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_5r_s64, int64_t,
+ p4 = svwhilelt_b64_x2 ((int64_t) 5, x1),
+ p4 = svwhilelt_b64_s64_x2 (5, x1))
+
+/*
+** whilelt_p4_r0_s64:
+** whilelt {p4\.d, p5\.d}, x0, xzr
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_r0_s64, int64_t,
+ p4 = svwhilelt_b64_x2 (x0, (int64_t) 0),
+ p4 = svwhilelt_b64_s64_x2 (x0, 0))
+
+/*
+** whilelt_p14_r5_s64:
+** mov (x[0-9]+), #?5
+** whilelt {p14\.d, p15\.d}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p14_r5_s64, int64_t,
+ p14 = svwhilelt_b64_x2 (x0, (int64_t) 5),
+ p14 = svwhilelt_b64_s64_x2 (x0, 5))
+
+/*
+** whilelt_p4_rr_u64:
+** whilelo {p4\.d, p5\.d}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_rr_u64, uint64_t,
+ p4 = svwhilelt_b64_u64_x2 (x0, x1),
+ p4 = svwhilelt_b64_x2 (x0, x1))
+
+/*
+** whilelt_p4_0r_u64:
+** whilelo {p4\.d, p5\.d}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_0r_u64, uint64_t,
+ p4 = svwhilelt_b64_x2 ((uint64_t) 0, x1),
+ p4 = svwhilelt_b64_u64_x2 (0, x1))
+
+/*
+** whilelt_p4_5r_u64:
+** mov (x[0-9]+), #?5
+** whilelo {p4\.d, p5\.d}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_5r_u64, uint64_t,
+ p4 = svwhilelt_b64_x2 ((uint64_t) 5, x1),
+ p4 = svwhilelt_b64_u64_x2 (5, x1))
+
+/*
+** whilelt_p4_r5_u64:
+** mov (x[0-9]+), #?5
+** whilelo {p4\.d, p5\.d}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_r5_u64, uint64_t,
+ p4 = svwhilelt_b64_x2 (x0, (uint64_t) 5),
+ p4 = svwhilelt_b64_u64_x2 (x0, 5))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_b8.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_b8.c
new file mode 100644
index 0000000..2096655
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_b8.c
@@ -0,0 +1,119 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilelt_p1_rr_s64:
+** whilelt {p[0-9]+\.b, p[0-9]+\.b}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p1_rr_s64, int64_t,
+ p1 = svwhilelt_b8_s64_x2 (x0, x1),
+ p1 = svwhilelt_b8_x2 (x0, x1))
+
+/*
+** whilelt_p4_rr_s64:
+** whilelt {p4\.b, p5\.b}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_rr_s64, int64_t,
+ p4 = svwhilelt_b8_s64_x2 (x0, x1),
+ p4 = svwhilelt_b8_x2 (x0, x1))
+
+/*
+** whilelt_p9_rr_s64:
+** whilelt {p[0-9]+\.b, p[0-9]+\.b}, x0, x1
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p9_rr_s64, int64_t,
+ p9 = svwhilelt_b8_s64_x2 (x0, x1),
+ p9 = svwhilelt_b8_x2 (x0, x1))
+
+/*
+** whilelt_p14_rr_s64:
+** whilelt {p14\.b, p15\.b}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p14_rr_s64, int64_t,
+ p14 = svwhilelt_b8_s64_x2 (x0, x1),
+ p14 = svwhilelt_b8_x2 (x0, x1))
+
+/*
+** whilelt_p4_0r_s64:
+** whilelt {p4\.b, p5\.b}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_0r_s64, int64_t,
+ p4 = svwhilelt_b8_x2 ((int64_t) 0, x1),
+ p4 = svwhilelt_b8_s64_x2 (0, x1))
+
+/*
+** whilelt_p4_5r_s64:
+** mov (x[0-9]+), #?5
+** whilelt {p4\.b, p5\.b}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_5r_s64, int64_t,
+ p4 = svwhilelt_b8_x2 ((int64_t) 5, x1),
+ p4 = svwhilelt_b8_s64_x2 (5, x1))
+
+/*
+** whilelt_p4_r0_s64:
+** whilelt {p4\.b, p5\.b}, x0, xzr
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_r0_s64, int64_t,
+ p4 = svwhilelt_b8_x2 (x0, (int64_t) 0),
+ p4 = svwhilelt_b8_s64_x2 (x0, 0))
+
+/*
+** whilelt_p14_r5_s64:
+** mov (x[0-9]+), #?5
+** whilelt {p14\.b, p15\.b}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p14_r5_s64, int64_t,
+ p14 = svwhilelt_b8_x2 (x0, (int64_t) 5),
+ p14 = svwhilelt_b8_s64_x2 (x0, 5))
+
+/*
+** whilelt_p4_rr_u64:
+** whilelo {p4\.b, p5\.b}, x0, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_rr_u64, uint64_t,
+ p4 = svwhilelt_b8_u64_x2 (x0, x1),
+ p4 = svwhilelt_b8_x2 (x0, x1))
+
+/*
+** whilelt_p4_0r_u64:
+** whilelo {p4\.b, p5\.b}, xzr, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_0r_u64, uint64_t,
+ p4 = svwhilelt_b8_x2 ((uint64_t) 0, x1),
+ p4 = svwhilelt_b8_u64_x2 (0, x1))
+
+/*
+** whilelt_p4_5r_u64:
+** mov (x[0-9]+), #?5
+** whilelo {p4\.b, p5\.b}, \1, x1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_5r_u64, uint64_t,
+ p4 = svwhilelt_b8_x2 ((uint64_t) 5, x1),
+ p4 = svwhilelt_b8_u64_x2 (5, x1))
+
+/*
+** whilelt_p4_r5_u64:
+** mov (x[0-9]+), #?5
+** whilelo {p4\.b, p5\.b}, x0, \1
+** ret
+*/
+TEST_COMPARE_S_X2 (whilelt_p4_r5_u64, uint64_t,
+ p4 = svwhilelt_b8_x2 (x0, (uint64_t) 5),
+ p4 = svwhilelt_b8_u64_x2 (x0, 5))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_c16.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_c16.c
new file mode 100644
index 0000000..4e7ce65
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_c16.c
@@ -0,0 +1,117 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilelt_pn0_rr_2_s64:
+** whilelt pn[0-9]+\.h, x0, x1, vlx2
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn0_rr_2_s64, int64_t,
+ pn0 = svwhilelt_c16_s64 (x0, x1, 2),
+ pn0 = svwhilelt_c16 (x0, x1, 2))
+
+/*
+** whilelt_pn7_rr_4_s64:
+** whilelt pn[0-9]+\.h, x0, x1, vlx4
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn7_rr_4_s64, int64_t,
+ pn7 = svwhilelt_c16_s64 (x0, x1, 4),
+ pn7 = svwhilelt_c16 (x0, x1, 4))
+
+/*
+** whilelt_pn8_rr_2_s64:
+** whilelt pn8\.h, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_rr_2_s64, int64_t,
+ pn8 = svwhilelt_c16_s64 (x0, x1, 2),
+ pn8 = svwhilelt_c16 (x0, x1, 2))
+
+/*
+** whilelt_pn15_rr_4_s64:
+** whilelt pn15\.h, x0, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn15_rr_4_s64, int64_t,
+ pn15 = svwhilelt_c16_s64 (x0, x1, 4),
+ pn15 = svwhilelt_c16 (x0, x1, 4))
+
+/*
+** whilelt_pn8_0r_2_s64:
+** whilelt pn8\.h, xzr, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_0r_2_s64, int64_t,
+ pn8 = svwhilelt_c16 ((int64_t) 0, x1, 2),
+ pn8 = svwhilelt_c16_s64 (0, x1, 2))
+
+/*
+** whilelt_pn8_5r_4_s64:
+** mov (x[0-9]+), #?5
+** whilelt pn8\.h, \1, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_5r_4_s64, int64_t,
+ pn8 = svwhilelt_c16 ((int64_t) 5, x1, 4),
+ pn8 = svwhilelt_c16_s64 (5, x1, 4))
+
+/*
+** whilelt_pn8_r0_2_s64:
+** whilelt pn8\.h, x0, xzr, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_r0_2_s64, int64_t,
+ pn8 = svwhilelt_c16 (x0, (int64_t) 0, 2),
+ pn8 = svwhilelt_c16_s64 (x0, 0, 2))
+
+/*
+** whilelt_pn15_r5_4_s64:
+** mov (x[0-9]+), #?5
+** whilelt pn15\.h, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn15_r5_4_s64, int64_t,
+ pn15 = svwhilelt_c16 (x0, (int64_t) 5, 4),
+ pn15 = svwhilelt_c16_s64 (x0, 5, 4))
+
+/*
+** whilelt_pn8_rr_2_u64:
+** whilelo pn8\.h, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_rr_2_u64, uint64_t,
+ pn8 = svwhilelt_c16_u64 (x0, x1, 2),
+ pn8 = svwhilelt_c16 (x0, x1, 2))
+
+/*
+** whilelt_pn8_0r_4_u64:
+** whilelo pn8\.h, xzr, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_0r_4_u64, uint64_t,
+ pn8 = svwhilelt_c16 ((uint64_t) 0, x1, 4),
+ pn8 = svwhilelt_c16_u64 (0, x1, 4))
+
+/*
+** whilelt_pn8_5r_2_u64:
+** mov (x[0-9]+), #?5
+** whilelo pn8\.h, \1, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_5r_2_u64, uint64_t,
+ pn8 = svwhilelt_c16 ((uint64_t) 5, x1, 2),
+ pn8 = svwhilelt_c16_u64 (5, x1, 2))
+
+/*
+** whilelt_pn8_r5_4_u64:
+** mov (x[0-9]+), #?5
+** whilelo pn8\.h, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_r5_4_u64, uint64_t,
+ pn8 = svwhilelt_c16 (x0, (uint64_t) 5, 4),
+ pn8 = svwhilelt_c16_u64 (x0, 5, 4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_c32.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_c32.c
new file mode 100644
index 0000000..f2a63f6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_c32.c
@@ -0,0 +1,117 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilelt_pn0_rr_2_s64:
+** whilelt pn[0-9]+\.s, x0, x1, vlx2
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn0_rr_2_s64, int64_t,
+ pn0 = svwhilelt_c32_s64 (x0, x1, 2),
+ pn0 = svwhilelt_c32 (x0, x1, 2))
+
+/*
+** whilelt_pn7_rr_4_s64:
+** whilelt pn[0-9]+\.s, x0, x1, vlx4
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn7_rr_4_s64, int64_t,
+ pn7 = svwhilelt_c32_s64 (x0, x1, 4),
+ pn7 = svwhilelt_c32 (x0, x1, 4))
+
+/*
+** whilelt_pn8_rr_2_s64:
+** whilelt pn8\.s, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_rr_2_s64, int64_t,
+ pn8 = svwhilelt_c32_s64 (x0, x1, 2),
+ pn8 = svwhilelt_c32 (x0, x1, 2))
+
+/*
+** whilelt_pn15_rr_4_s64:
+** whilelt pn15\.s, x0, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn15_rr_4_s64, int64_t,
+ pn15 = svwhilelt_c32_s64 (x0, x1, 4),
+ pn15 = svwhilelt_c32 (x0, x1, 4))
+
+/*
+** whilelt_pn8_0r_2_s64:
+** whilelt pn8\.s, xzr, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_0r_2_s64, int64_t,
+ pn8 = svwhilelt_c32 ((int64_t) 0, x1, 2),
+ pn8 = svwhilelt_c32_s64 (0, x1, 2))
+
+/*
+** whilelt_pn8_5r_4_s64:
+** mov (x[0-9]+), #?5
+** whilelt pn8\.s, \1, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_5r_4_s64, int64_t,
+ pn8 = svwhilelt_c32 ((int64_t) 5, x1, 4),
+ pn8 = svwhilelt_c32_s64 (5, x1, 4))
+
+/*
+** whilelt_pn8_r0_2_s64:
+** whilelt pn8\.s, x0, xzr, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_r0_2_s64, int64_t,
+ pn8 = svwhilelt_c32 (x0, (int64_t) 0, 2),
+ pn8 = svwhilelt_c32_s64 (x0, 0, 2))
+
+/*
+** whilelt_pn15_r5_4_s64:
+** mov (x[0-9]+), #?5
+** whilelt pn15\.s, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn15_r5_4_s64, int64_t,
+ pn15 = svwhilelt_c32 (x0, (int64_t) 5, 4),
+ pn15 = svwhilelt_c32_s64 (x0, 5, 4))
+
+/*
+** whilelt_pn8_rr_2_u64:
+** whilelo pn8\.s, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_rr_2_u64, uint64_t,
+ pn8 = svwhilelt_c32_u64 (x0, x1, 2),
+ pn8 = svwhilelt_c32 (x0, x1, 2))
+
+/*
+** whilelt_pn8_0r_4_u64:
+** whilelo pn8\.s, xzr, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_0r_4_u64, uint64_t,
+ pn8 = svwhilelt_c32 ((uint64_t) 0, x1, 4),
+ pn8 = svwhilelt_c32_u64 (0, x1, 4))
+
+/*
+** whilelt_pn8_5r_2_u64:
+** mov (x[0-9]+), #?5
+** whilelo pn8\.s, \1, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_5r_2_u64, uint64_t,
+ pn8 = svwhilelt_c32 ((uint64_t) 5, x1, 2),
+ pn8 = svwhilelt_c32_u64 (5, x1, 2))
+
+/*
+** whilelt_pn8_r5_4_u64:
+** mov (x[0-9]+), #?5
+** whilelo pn8\.s, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_r5_4_u64, uint64_t,
+ pn8 = svwhilelt_c32 (x0, (uint64_t) 5, 4),
+ pn8 = svwhilelt_c32_u64 (x0, 5, 4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_c64.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_c64.c
new file mode 100644
index 0000000..3c48cd0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_c64.c
@@ -0,0 +1,117 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilelt_pn0_rr_2_s64:
+** whilelt pn[0-9]+\.d, x0, x1, vlx2
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn0_rr_2_s64, int64_t,
+ pn0 = svwhilelt_c64_s64 (x0, x1, 2),
+ pn0 = svwhilelt_c64 (x0, x1, 2))
+
+/*
+** whilelt_pn7_rr_4_s64:
+** whilelt pn[0-9]+\.d, x0, x1, vlx4
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn7_rr_4_s64, int64_t,
+ pn7 = svwhilelt_c64_s64 (x0, x1, 4),
+ pn7 = svwhilelt_c64 (x0, x1, 4))
+
+/*
+** whilelt_pn8_rr_2_s64:
+** whilelt pn8\.d, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_rr_2_s64, int64_t,
+ pn8 = svwhilelt_c64_s64 (x0, x1, 2),
+ pn8 = svwhilelt_c64 (x0, x1, 2))
+
+/*
+** whilelt_pn15_rr_4_s64:
+** whilelt pn15\.d, x0, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn15_rr_4_s64, int64_t,
+ pn15 = svwhilelt_c64_s64 (x0, x1, 4),
+ pn15 = svwhilelt_c64 (x0, x1, 4))
+
+/*
+** whilelt_pn8_0r_2_s64:
+** whilelt pn8\.d, xzr, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_0r_2_s64, int64_t,
+ pn8 = svwhilelt_c64 ((int64_t) 0, x1, 2),
+ pn8 = svwhilelt_c64_s64 (0, x1, 2))
+
+/*
+** whilelt_pn8_5r_4_s64:
+** mov (x[0-9]+), #?5
+** whilelt pn8\.d, \1, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_5r_4_s64, int64_t,
+ pn8 = svwhilelt_c64 ((int64_t) 5, x1, 4),
+ pn8 = svwhilelt_c64_s64 (5, x1, 4))
+
+/*
+** whilelt_pn8_r0_2_s64:
+** whilelt pn8\.d, x0, xzr, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_r0_2_s64, int64_t,
+ pn8 = svwhilelt_c64 (x0, (int64_t) 0, 2),
+ pn8 = svwhilelt_c64_s64 (x0, 0, 2))
+
+/*
+** whilelt_pn15_r5_4_s64:
+** mov (x[0-9]+), #?5
+** whilelt pn15\.d, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn15_r5_4_s64, int64_t,
+ pn15 = svwhilelt_c64 (x0, (int64_t) 5, 4),
+ pn15 = svwhilelt_c64_s64 (x0, 5, 4))
+
+/*
+** whilelt_pn8_rr_2_u64:
+** whilelo pn8\.d, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_rr_2_u64, uint64_t,
+ pn8 = svwhilelt_c64_u64 (x0, x1, 2),
+ pn8 = svwhilelt_c64 (x0, x1, 2))
+
+/*
+** whilelt_pn8_0r_4_u64:
+** whilelo pn8\.d, xzr, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_0r_4_u64, uint64_t,
+ pn8 = svwhilelt_c64 ((uint64_t) 0, x1, 4),
+ pn8 = svwhilelt_c64_u64 (0, x1, 4))
+
+/*
+** whilelt_pn8_5r_2_u64:
+** mov (x[0-9]+), #?5
+** whilelo pn8\.d, \1, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_5r_2_u64, uint64_t,
+ pn8 = svwhilelt_c64 ((uint64_t) 5, x1, 2),
+ pn8 = svwhilelt_c64_u64 (5, x1, 2))
+
+/*
+** whilelt_pn8_r5_4_u64:
+** mov (x[0-9]+), #?5
+** whilelo pn8\.d, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_r5_4_u64, uint64_t,
+ pn8 = svwhilelt_c64 (x0, (uint64_t) 5, 4),
+ pn8 = svwhilelt_c64_u64 (x0, 5, 4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_c8.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_c8.c
new file mode 100644
index 0000000..729c129
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/whilelt_c8.c
@@ -0,0 +1,117 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** whilelt_pn0_rr_2_s64:
+** whilelt pn[0-9]+\.b, x0, x1, vlx2
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn0_rr_2_s64, int64_t,
+ pn0 = svwhilelt_c8_s64 (x0, x1, 2),
+ pn0 = svwhilelt_c8 (x0, x1, 2))
+
+/*
+** whilelt_pn7_rr_4_s64:
+** whilelt pn[0-9]+\.b, x0, x1, vlx4
+** mov [^\n]+
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn7_rr_4_s64, int64_t,
+ pn7 = svwhilelt_c8_s64 (x0, x1, 4),
+ pn7 = svwhilelt_c8 (x0, x1, 4))
+
+/*
+** whilelt_pn8_rr_2_s64:
+** whilelt pn8\.b, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_rr_2_s64, int64_t,
+ pn8 = svwhilelt_c8_s64 (x0, x1, 2),
+ pn8 = svwhilelt_c8 (x0, x1, 2))
+
+/*
+** whilelt_pn15_rr_4_s64:
+** whilelt pn15\.b, x0, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn15_rr_4_s64, int64_t,
+ pn15 = svwhilelt_c8_s64 (x0, x1, 4),
+ pn15 = svwhilelt_c8 (x0, x1, 4))
+
+/*
+** whilelt_pn8_0r_2_s64:
+** whilelt pn8\.b, xzr, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_0r_2_s64, int64_t,
+ pn8 = svwhilelt_c8 ((int64_t) 0, x1, 2),
+ pn8 = svwhilelt_c8_s64 (0, x1, 2))
+
+/*
+** whilelt_pn8_5r_4_s64:
+** mov (x[0-9]+), #?5
+** whilelt pn8\.b, \1, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_5r_4_s64, int64_t,
+ pn8 = svwhilelt_c8 ((int64_t) 5, x1, 4),
+ pn8 = svwhilelt_c8_s64 (5, x1, 4))
+
+/*
+** whilelt_pn8_r0_2_s64:
+** whilelt pn8\.b, x0, xzr, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_r0_2_s64, int64_t,
+ pn8 = svwhilelt_c8 (x0, (int64_t) 0, 2),
+ pn8 = svwhilelt_c8_s64 (x0, 0, 2))
+
+/*
+** whilelt_pn15_r5_4_s64:
+** mov (x[0-9]+), #?5
+** whilelt pn15\.b, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn15_r5_4_s64, int64_t,
+ pn15 = svwhilelt_c8 (x0, (int64_t) 5, 4),
+ pn15 = svwhilelt_c8_s64 (x0, 5, 4))
+
+/*
+** whilelt_pn8_rr_2_u64:
+** whilelo pn8\.b, x0, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_rr_2_u64, uint64_t,
+ pn8 = svwhilelt_c8_u64 (x0, x1, 2),
+ pn8 = svwhilelt_c8 (x0, x1, 2))
+
+/*
+** whilelt_pn8_0r_4_u64:
+** whilelo pn8\.b, xzr, x1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_0r_4_u64, uint64_t,
+ pn8 = svwhilelt_c8 ((uint64_t) 0, x1, 4),
+ pn8 = svwhilelt_c8_u64 (0, x1, 4))
+
+/*
+** whilelt_pn8_5r_2_u64:
+** mov (x[0-9]+), #?5
+** whilelo pn8\.b, \1, x1, vlx2
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_5r_2_u64, uint64_t,
+ pn8 = svwhilelt_c8 ((uint64_t) 5, x1, 2),
+ pn8 = svwhilelt_c8_u64 (5, x1, 2))
+
+/*
+** whilelt_pn8_r5_4_u64:
+** mov (x[0-9]+), #?5
+** whilelo pn8\.b, x0, \1, vlx4
+** ret
+*/
+TEST_COMPARE_S_C (whilelt_pn8_r5_4_u64, uint64_t,
+ pn8 = svwhilelt_c8 (x0, (uint64_t) 5, 4),
+ pn8 = svwhilelt_c8_u64 (x0, 5, 4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za16_vg2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za16_vg2.c
new file mode 100644
index 0000000..2d18a21
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za16_vg2.c
@@ -0,0 +1,140 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_za16_s16_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0h\.h\[\1, 0:1\], {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_s16_z0_0_0, svint16x2_t,
+ svwrite_hor_za16_s16_vg2 (0, 0, z0),
+ svwrite_hor_za16_s16_vg2 (0, 0, z0))
+
+/*
+** write_za16_u16_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova za1h\.h\[\1, 0:1\], {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_u16_z4_1_1, svuint16x2_t,
+ svwrite_hor_za16_u16_vg2 (1, 1, z4),
+ svwrite_hor_za16_u16_vg2 (1, 1, z4))
+
+/*
+** write_za16_f16_z28_0_w11:
+** mov (w1[2-5]), w11
+** mova za0h\.h\[\1, 0:1\], {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_f16_z28_0_w11, svfloat16x2_t,
+ svwrite_hor_za16_f16_vg2 (0, w11, z28),
+ svwrite_hor_za16_f16_vg2 (0, w11, z28))
+
+/*
+** write_za16_bf16_z0_1_w12:
+** mova za1h\.h\[w12, 0:1\], {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_bf16_z0_1_w12, svbfloat16x2_t,
+ svwrite_hor_za16_bf16_vg2 (1, w12, z0),
+ svwrite_hor_za16_bf16_vg2 (1, w12, z0))
+
+/*
+** write_za16_u16_z18_0_w15:
+** mova za0h\.h\[w15, 0:1\], {z18\.h - z19\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_u16_z18_0_w15, svuint16x2_t,
+ svwrite_hor_za16_u16_vg2 (0, w15, z18),
+ svwrite_hor_za16_u16_vg2 (0, w15, z18))
+
+/*
+** write_za16_s16_z23_1_w12p6:
+** mov [^\n]+
+** mov [^\n]+
+** mova za1h\.h\[w12, 6:7\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za16_s16_z23_1_w12p6, svint16x2_t,
+ svwrite_hor_za16_s16_vg2 (1, w12 + 6, z23),
+ svwrite_hor_za16_s16_vg2 (1, w12 + 6, z23))
+
+/*
+** write_za16_f16_z4_0_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova za0h\.h\[\1, 0:1\], {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_f16_z4_0_w12p1, svfloat16x2_t,
+ svwrite_hor_za16_f16_vg2 (0, w12 + 1, z4),
+ svwrite_hor_za16_f16_vg2 (0, w12 + 1, z4))
+
+/*
+** write_za16_s16_z28_1_w12p2:
+** mova za1h\.h\[w12, 2:3\], {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_s16_z28_1_w12p2, svint16x2_t,
+ svwrite_hor_za16_s16_vg2 (1, w12 + 2, z28),
+ svwrite_hor_za16_s16_vg2 (1, w12 + 2, z28))
+
+/*
+** write_za16_u16_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova za0h\.h\[\1, 0:1\], {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_u16_z0_0_w15p3, svuint16x2_t,
+ svwrite_hor_za16_u16_vg2 (0, w15 + 3, z0),
+ svwrite_hor_za16_u16_vg2 (0, w15 + 3, z0))
+
+/*
+** write_za16_bf16_z4_1_w15p4:
+** mova za1h\.h\[w15, 4:5\], {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_bf16_z4_1_w15p4, svbfloat16x2_t,
+ svwrite_hor_za16_bf16_vg2 (1, w15 + 4, z4),
+ svwrite_hor_za16_bf16_vg2 (1, w15 + 4, z4))
+
+/*
+** write_za16_u16_z28_0_w12p7:
+** add (w[0-9]+), w12, #?7
+** mova za0h\.h\[\1, 0:1\], {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_u16_z28_0_w12p7, svuint16x2_t,
+ svwrite_hor_za16_u16_vg2 (0, w12 + 7, z28),
+ svwrite_hor_za16_u16_vg2 (0, w12 + 7, z28))
+
+/*
+** write_za16_s16_z0_1_w15p8:
+** add (w[0-9]+), w15, #?8
+** mova za1h\.h\[\1, 0:1\], {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_s16_z0_1_w15p8, svint16x2_t,
+ svwrite_hor_za16_s16_vg2 (1, w15 + 8, z0),
+ svwrite_hor_za16_s16_vg2 (1, w15 + 8, z0))
+
+/*
+** write_za16_u16_z4_0_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova za0h\.h\[\1, 0:1\], {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_u16_z4_0_w12m1, svuint16x2_t,
+ svwrite_hor_za16_u16_vg2 (0, w12 - 1, z4),
+ svwrite_hor_za16_u16_vg2 (0, w12 - 1, z4))
+
+/*
+** write_za16_u16_z18_1_w16:
+** mov (w1[2-5]), w16
+** mova za1h\.h\[\1, 0:1\], {z18\.h - z19\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_u16_z18_1_w16, svuint16x2_t,
+ svwrite_hor_za16_u16_vg2 (1, w16, z18),
+ svwrite_hor_za16_u16_vg2 (1, w16, z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za16_vg4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za16_vg4.c
new file mode 100644
index 0000000..34e1a22
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za16_vg4.c
@@ -0,0 +1,138 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_za16_s16_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0h\.h\[\1, 0:3\], {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_s16_z0_0_0, svint16x4_t,
+ svwrite_hor_za16_s16_vg4 (0, 0, z0),
+ svwrite_hor_za16_s16_vg4 (0, 0, z0))
+
+/*
+** write_za16_u16_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova za1h\.h\[\1, 0:3\], {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_u16_z4_1_1, svuint16x4_t,
+ svwrite_hor_za16_u16_vg4 (1, 1, z4),
+ svwrite_hor_za16_u16_vg4 (1, 1, z4))
+
+/*
+** write_za16_f16_z28_0_w11:
+** mov (w1[2-5]), w11
+** mova za0h\.h\[\1, 0:3\], {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_f16_z28_0_w11, svfloat16x4_t,
+ svwrite_hor_za16_f16_vg4 (0, w11, z28),
+ svwrite_hor_za16_f16_vg4 (0, w11, z28))
+
+/*
+** write_za16_s16_z0_1_w12:
+** mova za1h\.h\[w12, 0:3\], {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_s16_z0_1_w12, svint16x4_t,
+ svwrite_hor_za16_s16_vg4 (1, w12, z0),
+ svwrite_hor_za16_s16_vg4 (1, w12, z0))
+
+/*
+** write_za16_u16_z18_0_w15:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za0h\.h\[w15, 0:3\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za16_u16_z18_0_w15, svuint16x4_t,
+ svwrite_hor_za16_u16_vg4 (0, w15, z18),
+ svwrite_hor_za16_u16_vg4 (0, w15, z18))
+
+/*
+** write_za16_bf16_z23_1_w12p4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za1h\.h\[w12, 4:7\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za16_bf16_z23_1_w12p4, svbfloat16x4_t,
+ svwrite_hor_za16_bf16_vg4 (1, w12 + 4, z23),
+ svwrite_hor_za16_bf16_vg4 (1, w12 + 4, z23))
+
+/*
+** write_za16_u16_z4_0_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova za0h\.h\[\1, 0:3\], {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_u16_z4_0_w12p1, svuint16x4_t,
+ svwrite_hor_za16_u16_vg4 (0, w12 + 1, z4),
+ svwrite_hor_za16_u16_vg4 (0, w12 + 1, z4))
+
+/*
+** write_za16_s16_z28_1_w12p2:
+** add (w[0-9]+), w12, #?2
+** mova za1h\.h\[\1, 0:3\], {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_s16_z28_1_w12p2, svint16x4_t,
+ svwrite_hor_za16_s16_vg4 (1, w12 + 2, z28),
+ svwrite_hor_za16_s16_vg4 (1, w12 + 2, z28))
+
+/*
+** write_za16_f16_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova za0h\.h\[\1, 0:3\], {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_f16_z0_0_w15p3, svfloat16x4_t,
+ svwrite_hor_za16_f16_vg4 (0, w15 + 3, z0),
+ svwrite_hor_za16_f16_vg4 (0, w15 + 3, z0))
+
+/*
+** write_za16_u16_z28_1_w12p6:
+** add (w[0-9]+), w12, #?6
+** mova za1h\.h\[\1, 0:3\], {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_u16_z28_1_w12p6, svuint16x4_t,
+ svwrite_hor_za16_u16_vg4 (1, w12 + 6, z28),
+ svwrite_hor_za16_u16_vg4 (1, w12 + 6, z28))
+
+/*
+** write_za16_s16_z0_0_w15p8:
+** add (w[0-9]+), w15, #?8
+** mova za0h\.h\[\1, 0:3\], {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_s16_z0_0_w15p8, svint16x4_t,
+ svwrite_hor_za16_s16_vg4 (0, w15 + 8, z0),
+ svwrite_hor_za16_s16_vg4 (0, w15 + 8, z0))
+
+/*
+** write_za16_bf16_z4_1_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova za1h\.h\[\1, 0:3\], {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_bf16_z4_1_w12m1, svbfloat16x4_t,
+ svwrite_hor_za16_bf16_vg4 (1, w12 - 1, z4),
+ svwrite_hor_za16_bf16_vg4 (1, w12 - 1, z4))
+
+/*
+** write_za16_u16_z28_0_w16:
+** mov (w1[2-5]), w16
+** mova za0h\.h\[\1, 0:3\], {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_u16_z28_0_w16, svuint16x4_t,
+ svwrite_hor_za16_u16_vg4 (0, w16, z28),
+ svwrite_hor_za16_u16_vg4 (0, w16, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za32_vg2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za32_vg2.c
new file mode 100644
index 0000000..7e64839
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za32_vg2.c
@@ -0,0 +1,112 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_za32_s32_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0h\.s\[\1, 0:1\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_s32_z0_0_0, svint32x2_t,
+ svwrite_hor_za32_s32_vg2 (0, 0, z0),
+ svwrite_hor_za32_s32_vg2 (0, 0, z0))
+
+/*
+** write_za32_u32_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova za1h\.s\[\1, 0:1\], {z4\.s - z5\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_u32_z4_1_1, svuint32x2_t,
+ svwrite_hor_za32_u32_vg2 (1, 1, z4),
+ svwrite_hor_za32_u32_vg2 (1, 1, z4))
+
+/*
+** write_za32_f32_z28_2_w11:
+** mov (w1[2-5]), w11
+** mova za2h\.s\[\1, 0:1\], {z28\.s - z29\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_f32_z28_2_w11, svfloat32x2_t,
+ svwrite_hor_za32_f32_vg2 (2, w11, z28),
+ svwrite_hor_za32_f32_vg2 (2, w11, z28))
+
+/*
+** write_za32_f32_z0_3_w12:
+** mova za3h\.s\[w12, 0:1\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_f32_z0_3_w12, svfloat32x2_t,
+ svwrite_hor_za32_f32_vg2 (3, w12, z0),
+ svwrite_hor_za32_f32_vg2 (3, w12, z0))
+
+/*
+** write_za32_u32_z18_0_w15:
+** mova za0h\.s\[w15, 0:1\], {z18\.s - z19\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_u32_z18_0_w15, svuint32x2_t,
+ svwrite_hor_za32_u32_vg2 (0, w15, z18),
+ svwrite_hor_za32_u32_vg2 (0, w15, z18))
+
+/*
+** write_za32_s32_z23_1_w12p2:
+** mov [^\n]+
+** mov [^\n]+
+** mova za1h\.s\[w12, 2:3\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za32_s32_z23_1_w12p2, svint32x2_t,
+ svwrite_hor_za32_s32_vg2 (1, w12 + 2, z23),
+ svwrite_hor_za32_s32_vg2 (1, w12 + 2, z23))
+
+/*
+** write_za32_f32_z4_2_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova za2h\.s\[\1, 0:1\], {z4\.s - z5\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_f32_z4_2_w12p1, svfloat32x2_t,
+ svwrite_hor_za32_f32_vg2 (2, w12 + 1, z4),
+ svwrite_hor_za32_f32_vg2 (2, w12 + 1, z4))
+
+/*
+** write_za32_u32_z0_3_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova za3h\.s\[\1, 0:1\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_u32_z0_3_w15p3, svuint32x2_t,
+ svwrite_hor_za32_u32_vg2 (3, w15 + 3, z0),
+ svwrite_hor_za32_u32_vg2 (3, w15 + 3, z0))
+
+/*
+** write_za32_s32_z0_1_w15p4:
+** add (w[0-9]+), w15, #?4
+** mova za1h\.s\[\1, 0:1\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_s32_z0_1_w15p4, svint32x2_t,
+ svwrite_hor_za32_s32_vg2 (1, w15 + 4, z0),
+ svwrite_hor_za32_s32_vg2 (1, w15 + 4, z0))
+
+/*
+** write_za32_u32_z4_3_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova za3h\.s\[\1, 0:1\], {z4\.s - z5\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_u32_z4_3_w12m1, svuint32x2_t,
+ svwrite_hor_za32_u32_vg2 (3, w12 - 1, z4),
+ svwrite_hor_za32_u32_vg2 (3, w12 - 1, z4))
+
+/*
+** write_za32_u32_z18_1_w16:
+** mov (w1[2-5]), w16
+** mova za1h\.s\[\1, 0:1\], {z18\.s - z19\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_u32_z18_1_w16, svuint32x2_t,
+ svwrite_hor_za32_u32_vg2 (1, w16, z18),
+ svwrite_hor_za32_u32_vg2 (1, w16, z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za32_vg4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za32_vg4.c
new file mode 100644
index 0000000..da00972
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za32_vg4.c
@@ -0,0 +1,129 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_za32_s32_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0h\.s\[\1, 0:3\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_s32_z0_0_0, svint32x4_t,
+ svwrite_hor_za32_s32_vg4 (0, 0, z0),
+ svwrite_hor_za32_s32_vg4 (0, 0, z0))
+
+/*
+** write_za32_u32_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova za1h\.s\[\1, 0:3\], {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_u32_z4_1_1, svuint32x4_t,
+ svwrite_hor_za32_u32_vg4 (1, 1, z4),
+ svwrite_hor_za32_u32_vg4 (1, 1, z4))
+
+/*
+** write_za32_f32_z28_2_w11:
+** mov (w1[2-5]), w11
+** mova za2h\.s\[\1, 0:3\], {z28\.s - z31\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_f32_z28_2_w11, svfloat32x4_t,
+ svwrite_hor_za32_f32_vg4 (2, w11, z28),
+ svwrite_hor_za32_f32_vg4 (2, w11, z28))
+
+/*
+** write_za32_s32_z0_3_w12:
+** mova za3h\.s\[w12, 0:3\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_s32_z0_3_w12, svint32x4_t,
+ svwrite_hor_za32_s32_vg4 (3, w12, z0),
+ svwrite_hor_za32_s32_vg4 (3, w12, z0))
+
+/*
+** write_za32_u32_z18_0_w15:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za0h\.s\[w15, 0:3\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za32_u32_z18_0_w15, svuint32x4_t,
+ svwrite_hor_za32_u32_vg4 (0, w15, z18),
+ svwrite_hor_za32_u32_vg4 (0, w15, z18))
+
+/*
+** write_za32_f32_z23_1_w12p4:
+** add (w[0-9]+), w12, #?4
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za1h\.s\[\1, 0:3\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za32_f32_z23_1_w12p4, svfloat32x4_t,
+ svwrite_hor_za32_f32_vg4 (1, w12 + 4, z23),
+ svwrite_hor_za32_f32_vg4 (1, w12 + 4, z23))
+
+/*
+** write_za32_u32_z4_2_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova za2h\.s\[\1, 0:3\], {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_u32_z4_2_w12p1, svuint32x4_t,
+ svwrite_hor_za32_u32_vg4 (2, w12 + 1, z4),
+ svwrite_hor_za32_u32_vg4 (2, w12 + 1, z4))
+
+/*
+** write_za32_s32_z28_3_w12p2:
+** add (w[0-9]+), w12, #?2
+** mova za3h\.s\[\1, 0:3\], {z28\.s - z31\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_s32_z28_3_w12p2, svint32x4_t,
+ svwrite_hor_za32_s32_vg4 (3, w12 + 2, z28),
+ svwrite_hor_za32_s32_vg4 (3, w12 + 2, z28))
+
+/*
+** write_za32_f32_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova za0h\.s\[\1, 0:3\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_f32_z0_0_w15p3, svfloat32x4_t,
+ svwrite_hor_za32_f32_vg4 (0, w15 + 3, z0),
+ svwrite_hor_za32_f32_vg4 (0, w15 + 3, z0))
+
+/*
+** write_za32_u32_z28_1_w12p4:
+** add (w[0-9]+), w12, #?4
+** mova za1h\.s\[\1, 0:3\], {z28\.s - z31\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_u32_z28_1_w12p4, svuint32x4_t,
+ svwrite_hor_za32_u32_vg4 (1, w12 + 4, z28),
+ svwrite_hor_za32_u32_vg4 (1, w12 + 4, z28))
+
+/*
+** write_za32_f32_z4_2_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova za2h\.s\[\1, 0:3\], {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_f32_z4_2_w12m1, svfloat32x4_t,
+ svwrite_hor_za32_f32_vg4 (2, w12 - 1, z4),
+ svwrite_hor_za32_f32_vg4 (2, w12 - 1, z4))
+
+/*
+** write_za32_u32_z28_3_w16:
+** mov (w1[2-5]), w16
+** mova za3h\.s\[\1, 0:3\], {z28\.s - z31\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_u32_z28_3_w16, svuint32x4_t,
+ svwrite_hor_za32_u32_vg4 (3, w16, z28),
+ svwrite_hor_za32_u32_vg4 (3, w16, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za64_vg2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za64_vg2.c
new file mode 100644
index 0000000..57b5194
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za64_vg2.c
@@ -0,0 +1,113 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_za64_s64_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0h\.d\[\1, 0:1\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_s64_z0_0_0, svint64x2_t,
+ svwrite_hor_za64_s64_vg2 (0, 0, z0),
+ svwrite_hor_za64_s64_vg2 (0, 0, z0))
+
+/*
+** write_za64_u64_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova za1h\.d\[\1, 0:1\], {z4\.d - z5\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_u64_z4_1_1, svuint64x2_t,
+ svwrite_hor_za64_u64_vg2 (1, 1, z4),
+ svwrite_hor_za64_u64_vg2 (1, 1, z4))
+
+/*
+** write_za64_f64_z28_2_w11:
+** mov (w1[2-5]), w11
+** mova za2h\.d\[\1, 0:1\], {z28\.d - z29\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_f64_z28_2_w11, svfloat64x2_t,
+ svwrite_hor_za64_f64_vg2 (2, w11, z28),
+ svwrite_hor_za64_f64_vg2 (2, w11, z28))
+
+/*
+** write_za64_f64_z0_3_w12:
+** mova za3h\.d\[w12, 0:1\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_f64_z0_3_w12, svfloat64x2_t,
+ svwrite_hor_za64_f64_vg2 (3, w12, z0),
+ svwrite_hor_za64_f64_vg2 (3, w12, z0))
+
+/*
+** write_za64_u64_z18_4_w15:
+** mova za4h\.d\[w15, 0:1\], {z18\.d - z19\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_u64_z18_4_w15, svuint64x2_t,
+ svwrite_hor_za64_u64_vg2 (4, w15, z18),
+ svwrite_hor_za64_u64_vg2 (4, w15, z18))
+
+/*
+** write_za64_s64_z23_5_w12p2:
+** add (w[0-9]+), w12, #?2
+** mov [^\n]+
+** mov [^\n]+
+** mova za5h\.d\[\1, 0:1\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za64_s64_z23_5_w12p2, svint64x2_t,
+ svwrite_hor_za64_s64_vg2 (5, w12 + 2, z23),
+ svwrite_hor_za64_s64_vg2 (5, w12 + 2, z23))
+
+/*
+** write_za64_f64_z4_6_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova za6h\.d\[\1, 0:1\], {z4\.d - z5\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_f64_z4_6_w12p1, svfloat64x2_t,
+ svwrite_hor_za64_f64_vg2 (6, w12 + 1, z4),
+ svwrite_hor_za64_f64_vg2 (6, w12 + 1, z4))
+
+/*
+** write_za64_u64_z0_7_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova za7h\.d\[\1, 0:1\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_u64_z0_7_w15p3, svuint64x2_t,
+ svwrite_hor_za64_u64_vg2 (7, w15 + 3, z0),
+ svwrite_hor_za64_u64_vg2 (7, w15 + 3, z0))
+
+/*
+** write_za64_s64_z0_1_w15p4:
+** add (w[0-9]+), w15, #?4
+** mova za1h\.d\[\1, 0:1\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_s64_z0_1_w15p4, svint64x2_t,
+ svwrite_hor_za64_s64_vg2 (1, w15 + 4, z0),
+ svwrite_hor_za64_s64_vg2 (1, w15 + 4, z0))
+
+/*
+** write_za64_u64_z4_3_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova za3h\.d\[\1, 0:1\], {z4\.d - z5\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_u64_z4_3_w12m1, svuint64x2_t,
+ svwrite_hor_za64_u64_vg2 (3, w12 - 1, z4),
+ svwrite_hor_za64_u64_vg2 (3, w12 - 1, z4))
+
+/*
+** write_za64_u64_z18_1_w16:
+** mov (w1[2-5]), w16
+** mova za1h\.d\[\1, 0:1\], {z18\.d - z19\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_u64_z18_1_w16, svuint64x2_t,
+ svwrite_hor_za64_u64_vg2 (1, w16, z18),
+ svwrite_hor_za64_u64_vg2 (1, w16, z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za64_vg4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za64_vg4.c
new file mode 100644
index 0000000..fbe9588
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za64_vg4.c
@@ -0,0 +1,129 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_za64_s64_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0h\.d\[\1, 0:3\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_s64_z0_0_0, svint64x4_t,
+ svwrite_hor_za64_s64_vg4 (0, 0, z0),
+ svwrite_hor_za64_s64_vg4 (0, 0, z0))
+
+/*
+** write_za64_u64_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova za1h\.d\[\1, 0:3\], {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_u64_z4_1_1, svuint64x4_t,
+ svwrite_hor_za64_u64_vg4 (1, 1, z4),
+ svwrite_hor_za64_u64_vg4 (1, 1, z4))
+
+/*
+** write_za64_f64_z28_2_w11:
+** mov (w1[2-5]), w11
+** mova za2h\.d\[\1, 0:3\], {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_f64_z28_2_w11, svfloat64x4_t,
+ svwrite_hor_za64_f64_vg4 (2, w11, z28),
+ svwrite_hor_za64_f64_vg4 (2, w11, z28))
+
+/*
+** write_za64_s64_z0_3_w12:
+** mova za3h\.d\[w12, 0:3\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_s64_z0_3_w12, svint64x4_t,
+ svwrite_hor_za64_s64_vg4 (3, w12, z0),
+ svwrite_hor_za64_s64_vg4 (3, w12, z0))
+
+/*
+** write_za64_u64_z18_4_w15:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za4h\.d\[w15, 0:3\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za64_u64_z18_4_w15, svuint64x4_t,
+ svwrite_hor_za64_u64_vg4 (4, w15, z18),
+ svwrite_hor_za64_u64_vg4 (4, w15, z18))
+
+/*
+** write_za64_f64_z23_5_w12p4:
+** add (w[0-9]+), w12, #?4
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za5h\.d\[\1, 0:3\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za64_f64_z23_5_w12p4, svfloat64x4_t,
+ svwrite_hor_za64_f64_vg4 (5, w12 + 4, z23),
+ svwrite_hor_za64_f64_vg4 (5, w12 + 4, z23))
+
+/*
+** write_za64_u64_z4_6_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova za6h\.d\[\1, 0:3\], {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_u64_z4_6_w12p1, svuint64x4_t,
+ svwrite_hor_za64_u64_vg4 (6, w12 + 1, z4),
+ svwrite_hor_za64_u64_vg4 (6, w12 + 1, z4))
+
+/*
+** write_za64_s64_z28_7_w12p2:
+** add (w[0-9]+), w12, #?2
+** mova za7h\.d\[\1, 0:3\], {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_s64_z28_7_w12p2, svint64x4_t,
+ svwrite_hor_za64_s64_vg4 (7, w12 + 2, z28),
+ svwrite_hor_za64_s64_vg4 (7, w12 + 2, z28))
+
+/*
+** write_za64_f64_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova za0h\.d\[\1, 0:3\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_f64_z0_0_w15p3, svfloat64x4_t,
+ svwrite_hor_za64_f64_vg4 (0, w15 + 3, z0),
+ svwrite_hor_za64_f64_vg4 (0, w15 + 3, z0))
+
+/*
+** write_za64_u64_z28_1_w12p4:
+** add (w[0-9]+), w12, #?4
+** mova za1h\.d\[\1, 0:3\], {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_u64_z28_1_w12p4, svuint64x4_t,
+ svwrite_hor_za64_u64_vg4 (1, w12 + 4, z28),
+ svwrite_hor_za64_u64_vg4 (1, w12 + 4, z28))
+
+/*
+** write_za64_f64_z4_2_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova za2h\.d\[\1, 0:3\], {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_f64_z4_2_w12m1, svfloat64x4_t,
+ svwrite_hor_za64_f64_vg4 (2, w12 - 1, z4),
+ svwrite_hor_za64_f64_vg4 (2, w12 - 1, z4))
+
+/*
+** write_za64_u64_z28_3_w16:
+** mov (w1[2-5]), w16
+** mova za3h\.d\[\1, 0:3\], {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_u64_z28_3_w16, svuint64x4_t,
+ svwrite_hor_za64_u64_vg4 (3, w16, z28),
+ svwrite_hor_za64_u64_vg4 (3, w16, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za8_vg2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za8_vg2.c
new file mode 100644
index 0000000..a2af846
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za8_vg2.c
@@ -0,0 +1,140 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_za8_s8_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0h\.b\[\1, 0:1\], {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z0_0_0, svint8x2_t,
+ svwrite_hor_za8_s8_vg2 (0, 0, z0),
+ svwrite_hor_za8_s8_vg2 (0, 0, z0))
+
+/*
+** write_za8_u8_z4_0_1:
+** mov (w1[2-5]), #?1
+** mova za0h\.b\[\1, 0:1\], {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z4_0_1, svuint8x2_t,
+ svwrite_hor_za8_u8_vg2 (0, 1, z4),
+ svwrite_hor_za8_u8_vg2 (0, 1, z4))
+
+/*
+** write_za8_s8_z28_0_w11:
+** mov (w1[2-5]), w11
+** mova za0h\.b\[\1, 0:1\], {z28\.b - z29\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z28_0_w11, svint8x2_t,
+ svwrite_hor_za8_s8_vg2 (0, w11, z28),
+ svwrite_hor_za8_s8_vg2 (0, w11, z28))
+
+/*
+** write_za8_s8_z0_0_w12:
+** mova za0h\.b\[w12, 0:1\], {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z0_0_w12, svint8x2_t,
+ svwrite_hor_za8_s8_vg2 (0, w12, z0),
+ svwrite_hor_za8_s8_vg2 (0, w12, z0))
+
+/*
+** write_za8_u8_z18_0_w15:
+** mova za0h\.b\[w15, 0:1\], {z18\.b - z19\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z18_0_w15, svuint8x2_t,
+ svwrite_hor_za8_u8_vg2 (0, w15, z18),
+ svwrite_hor_za8_u8_vg2 (0, w15, z18))
+
+/*
+** write_za8_s8_z23_0_w12p14:
+** mov [^\n]+
+** mov [^\n]+
+** mova za0h\.b\[w12, 14:15\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z23_0_w12p14, svint8x2_t,
+ svwrite_hor_za8_s8_vg2 (0, w12 + 14, z23),
+ svwrite_hor_za8_s8_vg2 (0, w12 + 14, z23))
+
+/*
+** write_za8_u8_z4_0_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova za0h\.b\[\1, 0:1\], {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z4_0_w12p1, svuint8x2_t,
+ svwrite_hor_za8_u8_vg2 (0, w12 + 1, z4),
+ svwrite_hor_za8_u8_vg2 (0, w12 + 1, z4))
+
+/*
+** write_za8_s8_z28_0_w12p2:
+** mova za0h\.b\[w12, 2:3\], {z28\.b - z29\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z28_0_w12p2, svint8x2_t,
+ svwrite_hor_za8_s8_vg2 (0, w12 + 2, z28),
+ svwrite_hor_za8_s8_vg2 (0, w12 + 2, z28))
+
+/*
+** write_za8_u8_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova za0h\.b\[\1, 0:1\], {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z0_0_w15p3, svuint8x2_t,
+ svwrite_hor_za8_u8_vg2 (0, w15 + 3, z0),
+ svwrite_hor_za8_u8_vg2 (0, w15 + 3, z0))
+
+/*
+** write_za8_u8_z4_0_w15p12:
+** mova za0h\.b\[w15, 12:13\], {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z4_0_w15p12, svuint8x2_t,
+ svwrite_hor_za8_u8_vg2 (0, w15 + 12, z4),
+ svwrite_hor_za8_u8_vg2 (0, w15 + 12, z4))
+
+/*
+** write_za8_u8_z28_0_w12p15:
+** add (w[0-9]+), w12, #?15
+** mova za0h\.b\[\1, 0:1\], {z28\.b - z29\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z28_0_w12p15, svuint8x2_t,
+ svwrite_hor_za8_u8_vg2 (0, w12 + 15, z28),
+ svwrite_hor_za8_u8_vg2 (0, w12 + 15, z28))
+
+/*
+** write_za8_s8_z0_0_w15p16:
+** add (w[0-9]+), w15, #?16
+** mova za0h\.b\[\1, 0:1\], {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z0_0_w15p16, svint8x2_t,
+ svwrite_hor_za8_s8_vg2 (0, w15 + 16, z0),
+ svwrite_hor_za8_s8_vg2 (0, w15 + 16, z0))
+
+/*
+** write_za8_u8_z4_0_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova za0h\.b\[\1, 0:1\], {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z4_0_w12m1, svuint8x2_t,
+ svwrite_hor_za8_u8_vg2 (0, w12 - 1, z4),
+ svwrite_hor_za8_u8_vg2 (0, w12 - 1, z4))
+
+/*
+** write_za8_u8_z18_0_w16:
+** mov (w1[2-5]), w16
+** mova za0h\.b\[\1, 0:1\], {z18\.b - z19\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z18_0_w16, svuint8x2_t,
+ svwrite_hor_za8_u8_vg2 (0, w16, z18),
+ svwrite_hor_za8_u8_vg2 (0, w16, z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za8_vg4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za8_vg4.c
new file mode 100644
index 0000000..e333ce6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_hor_za8_vg4.c
@@ -0,0 +1,156 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_za8_s8_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0h\.b\[\1, 0:3\], {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z0_0_0, svint8x4_t,
+ svwrite_hor_za8_s8_vg4 (0, 0, z0),
+ svwrite_hor_za8_s8_vg4 (0, 0, z0))
+
+/*
+** write_za8_u8_z4_0_1:
+** mov (w1[2-5]), #?1
+** mova za0h\.b\[\1, 0:3\], {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z4_0_1, svuint8x4_t,
+ svwrite_hor_za8_u8_vg4 (0, 1, z4),
+ svwrite_hor_za8_u8_vg4 (0, 1, z4))
+
+/*
+** write_za8_s8_z28_0_w11:
+** mov (w1[2-5]), w11
+** mova za0h\.b\[\1, 0:3\], {z28\.b - z31\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z28_0_w11, svint8x4_t,
+ svwrite_hor_za8_s8_vg4 (0, w11, z28),
+ svwrite_hor_za8_s8_vg4 (0, w11, z28))
+
+/*
+** write_za8_s8_z0_0_w12:
+** mova za0h\.b\[w12, 0:3\], {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z0_0_w12, svint8x4_t,
+ svwrite_hor_za8_s8_vg4 (0, w12, z0),
+ svwrite_hor_za8_s8_vg4 (0, w12, z0))
+
+/*
+** write_za8_u8_z18_0_w15:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za0h\.b\[w15, 0:3\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z18_0_w15, svuint8x4_t,
+ svwrite_hor_za8_u8_vg4 (0, w15, z18),
+ svwrite_hor_za8_u8_vg4 (0, w15, z18))
+
+/*
+** write_za8_s8_z23_0_w12p12:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za0h\.b\[w12, 12:15\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z23_0_w12p12, svint8x4_t,
+ svwrite_hor_za8_s8_vg4 (0, w12 + 12, z23),
+ svwrite_hor_za8_s8_vg4 (0, w12 + 12, z23))
+
+/*
+** write_za8_u8_z4_0_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova za0h\.b\[\1, 0:3\], {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z4_0_w12p1, svuint8x4_t,
+ svwrite_hor_za8_u8_vg4 (0, w12 + 1, z4),
+ svwrite_hor_za8_u8_vg4 (0, w12 + 1, z4))
+
+/*
+** write_za8_s8_z28_0_w12p2:
+** add (w[0-9]+), w12, #?2
+** mova za0h\.b\[\1, 0:3\], {z28\.b - z31\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z28_0_w12p2, svint8x4_t,
+ svwrite_hor_za8_s8_vg4 (0, w12 + 2, z28),
+ svwrite_hor_za8_s8_vg4 (0, w12 + 2, z28))
+
+/*
+** write_za8_u8_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova za0h\.b\[\1, 0:3\], {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z0_0_w15p3, svuint8x4_t,
+ svwrite_hor_za8_u8_vg4 (0, w15 + 3, z0),
+ svwrite_hor_za8_u8_vg4 (0, w15 + 3, z0))
+
+/*
+** write_za8_u8_z0_0_w12p4:
+** mova za0h\.b\[w12, 4:7\], {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z0_0_w12p4, svuint8x4_t,
+ svwrite_hor_za8_u8_vg4 (0, w12 + 4, z0),
+ svwrite_hor_za8_u8_vg4 (0, w12 + 4, z0))
+
+/*
+** write_za8_u8_z4_0_w15p12:
+** mova za0h\.b\[w15, 12:15\], {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z4_0_w15p12, svuint8x4_t,
+ svwrite_hor_za8_u8_vg4 (0, w15 + 12, z4),
+ svwrite_hor_za8_u8_vg4 (0, w15 + 12, z4))
+
+/*
+** write_za8_u8_z28_0_w12p14:
+** add (w[0-9]+), w12, #?14
+** mova za0h\.b\[\1, 0:3\], {z28\.b - z31\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z28_0_w12p14, svuint8x4_t,
+ svwrite_hor_za8_u8_vg4 (0, w12 + 14, z28),
+ svwrite_hor_za8_u8_vg4 (0, w12 + 14, z28))
+
+/*
+** write_za8_s8_z0_0_w15p16:
+** add (w[0-9]+), w15, #?16
+** mova za0h\.b\[\1, 0:3\], {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z0_0_w15p16, svint8x4_t,
+ svwrite_hor_za8_s8_vg4 (0, w15 + 16, z0),
+ svwrite_hor_za8_s8_vg4 (0, w15 + 16, z0))
+
+/*
+** write_za8_u8_z4_0_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova za0h\.b\[\1, 0:3\], {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z4_0_w12m1, svuint8x4_t,
+ svwrite_hor_za8_u8_vg4 (0, w12 - 1, z4),
+ svwrite_hor_za8_u8_vg4 (0, w12 - 1, z4))
+
+/*
+** write_za8_u8_z28_0_w16:
+** mov (w1[2-5]), w16
+** mova za0h\.b\[\1, 0:3\], {z28\.b - z31\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z28_0_w16, svuint8x4_t,
+ svwrite_hor_za8_u8_vg4 (0, w16, z28),
+ svwrite_hor_za8_u8_vg4 (0, w16, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za16_vg2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za16_vg2.c
new file mode 100644
index 0000000..0b8dc18
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za16_vg2.c
@@ -0,0 +1,140 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_za16_s16_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0v\.h\[\1, 0:1\], {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_s16_z0_0_0, svint16x2_t,
+ svwrite_ver_za16_s16_vg2 (0, 0, z0),
+ svwrite_ver_za16_s16_vg2 (0, 0, z0))
+
+/*
+** write_za16_u16_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova za1v\.h\[\1, 0:1\], {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_u16_z4_1_1, svuint16x2_t,
+ svwrite_ver_za16_u16_vg2 (1, 1, z4),
+ svwrite_ver_za16_u16_vg2 (1, 1, z4))
+
+/*
+** write_za16_f16_z28_0_w11:
+** mov (w1[2-5]), w11
+** mova za0v\.h\[\1, 0:1\], {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_f16_z28_0_w11, svfloat16x2_t,
+ svwrite_ver_za16_f16_vg2 (0, w11, z28),
+ svwrite_ver_za16_f16_vg2 (0, w11, z28))
+
+/*
+** write_za16_bf16_z0_1_w12:
+** mova za1v\.h\[w12, 0:1\], {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_bf16_z0_1_w12, svbfloat16x2_t,
+ svwrite_ver_za16_bf16_vg2 (1, w12, z0),
+ svwrite_ver_za16_bf16_vg2 (1, w12, z0))
+
+/*
+** write_za16_u16_z18_0_w15:
+** mova za0v\.h\[w15, 0:1\], {z18\.h - z19\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_u16_z18_0_w15, svuint16x2_t,
+ svwrite_ver_za16_u16_vg2 (0, w15, z18),
+ svwrite_ver_za16_u16_vg2 (0, w15, z18))
+
+/*
+** write_za16_s16_z23_1_w12p6:
+** mov [^\n]+
+** mov [^\n]+
+** mova za1v\.h\[w12, 6:7\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za16_s16_z23_1_w12p6, svint16x2_t,
+ svwrite_ver_za16_s16_vg2 (1, w12 + 6, z23),
+ svwrite_ver_za16_s16_vg2 (1, w12 + 6, z23))
+
+/*
+** write_za16_f16_z4_0_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova za0v\.h\[\1, 0:1\], {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_f16_z4_0_w12p1, svfloat16x2_t,
+ svwrite_ver_za16_f16_vg2 (0, w12 + 1, z4),
+ svwrite_ver_za16_f16_vg2 (0, w12 + 1, z4))
+
+/*
+** write_za16_s16_z28_1_w12p2:
+** mova za1v\.h\[w12, 2:3\], {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_s16_z28_1_w12p2, svint16x2_t,
+ svwrite_ver_za16_s16_vg2 (1, w12 + 2, z28),
+ svwrite_ver_za16_s16_vg2 (1, w12 + 2, z28))
+
+/*
+** write_za16_u16_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova za0v\.h\[\1, 0:1\], {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_u16_z0_0_w15p3, svuint16x2_t,
+ svwrite_ver_za16_u16_vg2 (0, w15 + 3, z0),
+ svwrite_ver_za16_u16_vg2 (0, w15 + 3, z0))
+
+/*
+** write_za16_bf16_z4_1_w15p4:
+** mova za1v\.h\[w15, 4:5\], {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_bf16_z4_1_w15p4, svbfloat16x2_t,
+ svwrite_ver_za16_bf16_vg2 (1, w15 + 4, z4),
+ svwrite_ver_za16_bf16_vg2 (1, w15 + 4, z4))
+
+/*
+** write_za16_u16_z28_0_w12p7:
+** add (w[0-9]+), w12, #?7
+** mova za0v\.h\[\1, 0:1\], {z28\.h - z29\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_u16_z28_0_w12p7, svuint16x2_t,
+ svwrite_ver_za16_u16_vg2 (0, w12 + 7, z28),
+ svwrite_ver_za16_u16_vg2 (0, w12 + 7, z28))
+
+/*
+** write_za16_s16_z0_1_w15p8:
+** add (w[0-9]+), w15, #?8
+** mova za1v\.h\[\1, 0:1\], {z0\.h - z1\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_s16_z0_1_w15p8, svint16x2_t,
+ svwrite_ver_za16_s16_vg2 (1, w15 + 8, z0),
+ svwrite_ver_za16_s16_vg2 (1, w15 + 8, z0))
+
+/*
+** write_za16_u16_z4_0_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova za0v\.h\[\1, 0:1\], {z4\.h - z5\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_u16_z4_0_w12m1, svuint16x2_t,
+ svwrite_ver_za16_u16_vg2 (0, w12 - 1, z4),
+ svwrite_ver_za16_u16_vg2 (0, w12 - 1, z4))
+
+/*
+** write_za16_u16_z18_1_w16:
+** mov (w1[2-5]), w16
+** mova za1v\.h\[\1, 0:1\], {z18\.h - z19\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_u16_z18_1_w16, svuint16x2_t,
+ svwrite_ver_za16_u16_vg2 (1, w16, z18),
+ svwrite_ver_za16_u16_vg2 (1, w16, z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za16_vg4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za16_vg4.c
new file mode 100644
index 0000000..4326815
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za16_vg4.c
@@ -0,0 +1,138 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_za16_s16_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0v\.h\[\1, 0:3\], {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_s16_z0_0_0, svint16x4_t,
+ svwrite_ver_za16_s16_vg4 (0, 0, z0),
+ svwrite_ver_za16_s16_vg4 (0, 0, z0))
+
+/*
+** write_za16_u16_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova za1v\.h\[\1, 0:3\], {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_u16_z4_1_1, svuint16x4_t,
+ svwrite_ver_za16_u16_vg4 (1, 1, z4),
+ svwrite_ver_za16_u16_vg4 (1, 1, z4))
+
+/*
+** write_za16_f16_z28_0_w11:
+** mov (w1[2-5]), w11
+** mova za0v\.h\[\1, 0:3\], {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_f16_z28_0_w11, svfloat16x4_t,
+ svwrite_ver_za16_f16_vg4 (0, w11, z28),
+ svwrite_ver_za16_f16_vg4 (0, w11, z28))
+
+/*
+** write_za16_s16_z0_1_w12:
+** mova za1v\.h\[w12, 0:3\], {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_s16_z0_1_w12, svint16x4_t,
+ svwrite_ver_za16_s16_vg4 (1, w12, z0),
+ svwrite_ver_za16_s16_vg4 (1, w12, z0))
+
+/*
+** write_za16_u16_z18_0_w15:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za0v\.h\[w15, 0:3\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za16_u16_z18_0_w15, svuint16x4_t,
+ svwrite_ver_za16_u16_vg4 (0, w15, z18),
+ svwrite_ver_za16_u16_vg4 (0, w15, z18))
+
+/*
+** write_za16_bf16_z23_1_w12p4:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za1v\.h\[w12, 4:7\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za16_bf16_z23_1_w12p4, svbfloat16x4_t,
+ svwrite_ver_za16_bf16_vg4 (1, w12 + 4, z23),
+ svwrite_ver_za16_bf16_vg4 (1, w12 + 4, z23))
+
+/*
+** write_za16_u16_z4_0_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova za0v\.h\[\1, 0:3\], {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_u16_z4_0_w12p1, svuint16x4_t,
+ svwrite_ver_za16_u16_vg4 (0, w12 + 1, z4),
+ svwrite_ver_za16_u16_vg4 (0, w12 + 1, z4))
+
+/*
+** write_za16_s16_z28_1_w12p2:
+** add (w[0-9]+), w12, #?2
+** mova za1v\.h\[\1, 0:3\], {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_s16_z28_1_w12p2, svint16x4_t,
+ svwrite_ver_za16_s16_vg4 (1, w12 + 2, z28),
+ svwrite_ver_za16_s16_vg4 (1, w12 + 2, z28))
+
+/*
+** write_za16_f16_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova za0v\.h\[\1, 0:3\], {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_f16_z0_0_w15p3, svfloat16x4_t,
+ svwrite_ver_za16_f16_vg4 (0, w15 + 3, z0),
+ svwrite_ver_za16_f16_vg4 (0, w15 + 3, z0))
+
+/*
+** write_za16_u16_z28_1_w12p6:
+** add (w[0-9]+), w12, #?6
+** mova za1v\.h\[\1, 0:3\], {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_u16_z28_1_w12p6, svuint16x4_t,
+ svwrite_ver_za16_u16_vg4 (1, w12 + 6, z28),
+ svwrite_ver_za16_u16_vg4 (1, w12 + 6, z28))
+
+/*
+** write_za16_s16_z0_0_w15p8:
+** add (w[0-9]+), w15, #?8
+** mova za0v\.h\[\1, 0:3\], {z0\.h - z3\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_s16_z0_0_w15p8, svint16x4_t,
+ svwrite_ver_za16_s16_vg4 (0, w15 + 8, z0),
+ svwrite_ver_za16_s16_vg4 (0, w15 + 8, z0))
+
+/*
+** write_za16_bf16_z4_1_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova za1v\.h\[\1, 0:3\], {z4\.h - z7\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_bf16_z4_1_w12m1, svbfloat16x4_t,
+ svwrite_ver_za16_bf16_vg4 (1, w12 - 1, z4),
+ svwrite_ver_za16_bf16_vg4 (1, w12 - 1, z4))
+
+/*
+** write_za16_u16_z28_0_w16:
+** mov (w1[2-5]), w16
+** mova za0v\.h\[\1, 0:3\], {z28\.h - z31\.h}
+** ret
+*/
+TEST_ZA_XN (write_za16_u16_z28_0_w16, svuint16x4_t,
+ svwrite_ver_za16_u16_vg4 (0, w16, z28),
+ svwrite_ver_za16_u16_vg4 (0, w16, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za32_vg2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za32_vg2.c
new file mode 100644
index 0000000..307a2d7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za32_vg2.c
@@ -0,0 +1,112 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_za32_s32_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0v\.s\[\1, 0:1\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_s32_z0_0_0, svint32x2_t,
+ svwrite_ver_za32_s32_vg2 (0, 0, z0),
+ svwrite_ver_za32_s32_vg2 (0, 0, z0))
+
+/*
+** write_za32_u32_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova za1v\.s\[\1, 0:1\], {z4\.s - z5\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_u32_z4_1_1, svuint32x2_t,
+ svwrite_ver_za32_u32_vg2 (1, 1, z4),
+ svwrite_ver_za32_u32_vg2 (1, 1, z4))
+
+/*
+** write_za32_f32_z28_2_w11:
+** mov (w1[2-5]), w11
+** mova za2v\.s\[\1, 0:1\], {z28\.s - z29\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_f32_z28_2_w11, svfloat32x2_t,
+ svwrite_ver_za32_f32_vg2 (2, w11, z28),
+ svwrite_ver_za32_f32_vg2 (2, w11, z28))
+
+/*
+** write_za32_f32_z0_3_w12:
+** mova za3v\.s\[w12, 0:1\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_f32_z0_3_w12, svfloat32x2_t,
+ svwrite_ver_za32_f32_vg2 (3, w12, z0),
+ svwrite_ver_za32_f32_vg2 (3, w12, z0))
+
+/*
+** write_za32_u32_z18_0_w15:
+** mova za0v\.s\[w15, 0:1\], {z18\.s - z19\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_u32_z18_0_w15, svuint32x2_t,
+ svwrite_ver_za32_u32_vg2 (0, w15, z18),
+ svwrite_ver_za32_u32_vg2 (0, w15, z18))
+
+/*
+** write_za32_s32_z23_1_w12p2:
+** mov [^\n]+
+** mov [^\n]+
+** mova za1v\.s\[w12, 2:3\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za32_s32_z23_1_w12p2, svint32x2_t,
+ svwrite_ver_za32_s32_vg2 (1, w12 + 2, z23),
+ svwrite_ver_za32_s32_vg2 (1, w12 + 2, z23))
+
+/*
+** write_za32_f32_z4_2_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova za2v\.s\[\1, 0:1\], {z4\.s - z5\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_f32_z4_2_w12p1, svfloat32x2_t,
+ svwrite_ver_za32_f32_vg2 (2, w12 + 1, z4),
+ svwrite_ver_za32_f32_vg2 (2, w12 + 1, z4))
+
+/*
+** write_za32_u32_z0_3_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova za3v\.s\[\1, 0:1\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_u32_z0_3_w15p3, svuint32x2_t,
+ svwrite_ver_za32_u32_vg2 (3, w15 + 3, z0),
+ svwrite_ver_za32_u32_vg2 (3, w15 + 3, z0))
+
+/*
+** write_za32_s32_z0_1_w15p4:
+** add (w[0-9]+), w15, #?4
+** mova za1v\.s\[\1, 0:1\], {z0\.s - z1\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_s32_z0_1_w15p4, svint32x2_t,
+ svwrite_ver_za32_s32_vg2 (1, w15 + 4, z0),
+ svwrite_ver_za32_s32_vg2 (1, w15 + 4, z0))
+
+/*
+** write_za32_u32_z4_3_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova za3v\.s\[\1, 0:1\], {z4\.s - z5\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_u32_z4_3_w12m1, svuint32x2_t,
+ svwrite_ver_za32_u32_vg2 (3, w12 - 1, z4),
+ svwrite_ver_za32_u32_vg2 (3, w12 - 1, z4))
+
+/*
+** write_za32_u32_z18_1_w16:
+** mov (w1[2-5]), w16
+** mova za1v\.s\[\1, 0:1\], {z18\.s - z19\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_u32_z18_1_w16, svuint32x2_t,
+ svwrite_ver_za32_u32_vg2 (1, w16, z18),
+ svwrite_ver_za32_u32_vg2 (1, w16, z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za32_vg4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za32_vg4.c
new file mode 100644
index 0000000..0334094
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za32_vg4.c
@@ -0,0 +1,129 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_za32_s32_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0v\.s\[\1, 0:3\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_s32_z0_0_0, svint32x4_t,
+ svwrite_ver_za32_s32_vg4 (0, 0, z0),
+ svwrite_ver_za32_s32_vg4 (0, 0, z0))
+
+/*
+** write_za32_u32_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova za1v\.s\[\1, 0:3\], {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_u32_z4_1_1, svuint32x4_t,
+ svwrite_ver_za32_u32_vg4 (1, 1, z4),
+ svwrite_ver_za32_u32_vg4 (1, 1, z4))
+
+/*
+** write_za32_f32_z28_2_w11:
+** mov (w1[2-5]), w11
+** mova za2v\.s\[\1, 0:3\], {z28\.s - z31\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_f32_z28_2_w11, svfloat32x4_t,
+ svwrite_ver_za32_f32_vg4 (2, w11, z28),
+ svwrite_ver_za32_f32_vg4 (2, w11, z28))
+
+/*
+** write_za32_s32_z0_3_w12:
+** mova za3v\.s\[w12, 0:3\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_s32_z0_3_w12, svint32x4_t,
+ svwrite_ver_za32_s32_vg4 (3, w12, z0),
+ svwrite_ver_za32_s32_vg4 (3, w12, z0))
+
+/*
+** write_za32_u32_z18_0_w15:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za0v\.s\[w15, 0:3\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za32_u32_z18_0_w15, svuint32x4_t,
+ svwrite_ver_za32_u32_vg4 (0, w15, z18),
+ svwrite_ver_za32_u32_vg4 (0, w15, z18))
+
+/*
+** write_za32_f32_z23_1_w12p4:
+** add (w[0-9]+), w12, #?4
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za1v\.s\[\1, 0:3\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za32_f32_z23_1_w12p4, svfloat32x4_t,
+ svwrite_ver_za32_f32_vg4 (1, w12 + 4, z23),
+ svwrite_ver_za32_f32_vg4 (1, w12 + 4, z23))
+
+/*
+** write_za32_u32_z4_2_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova za2v\.s\[\1, 0:3\], {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_u32_z4_2_w12p1, svuint32x4_t,
+ svwrite_ver_za32_u32_vg4 (2, w12 + 1, z4),
+ svwrite_ver_za32_u32_vg4 (2, w12 + 1, z4))
+
+/*
+** write_za32_s32_z28_3_w12p2:
+** add (w[0-9]+), w12, #?2
+** mova za3v\.s\[\1, 0:3\], {z28\.s - z31\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_s32_z28_3_w12p2, svint32x4_t,
+ svwrite_ver_za32_s32_vg4 (3, w12 + 2, z28),
+ svwrite_ver_za32_s32_vg4 (3, w12 + 2, z28))
+
+/*
+** write_za32_f32_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova za0v\.s\[\1, 0:3\], {z0\.s - z3\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_f32_z0_0_w15p3, svfloat32x4_t,
+ svwrite_ver_za32_f32_vg4 (0, w15 + 3, z0),
+ svwrite_ver_za32_f32_vg4 (0, w15 + 3, z0))
+
+/*
+** write_za32_u32_z28_1_w12p4:
+** add (w[0-9]+), w12, #?4
+** mova za1v\.s\[\1, 0:3\], {z28\.s - z31\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_u32_z28_1_w12p4, svuint32x4_t,
+ svwrite_ver_za32_u32_vg4 (1, w12 + 4, z28),
+ svwrite_ver_za32_u32_vg4 (1, w12 + 4, z28))
+
+/*
+** write_za32_f32_z4_2_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova za2v\.s\[\1, 0:3\], {z4\.s - z7\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_f32_z4_2_w12m1, svfloat32x4_t,
+ svwrite_ver_za32_f32_vg4 (2, w12 - 1, z4),
+ svwrite_ver_za32_f32_vg4 (2, w12 - 1, z4))
+
+/*
+** write_za32_u32_z28_3_w16:
+** mov (w1[2-5]), w16
+** mova za3v\.s\[\1, 0:3\], {z28\.s - z31\.s}
+** ret
+*/
+TEST_ZA_XN (write_za32_u32_z28_3_w16, svuint32x4_t,
+ svwrite_ver_za32_u32_vg4 (3, w16, z28),
+ svwrite_ver_za32_u32_vg4 (3, w16, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za64_vg2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za64_vg2.c
new file mode 100644
index 0000000..9b13ea5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za64_vg2.c
@@ -0,0 +1,113 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_za64_s64_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0v\.d\[\1, 0:1\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_s64_z0_0_0, svint64x2_t,
+ svwrite_ver_za64_s64_vg2 (0, 0, z0),
+ svwrite_ver_za64_s64_vg2 (0, 0, z0))
+
+/*
+** write_za64_u64_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova za1v\.d\[\1, 0:1\], {z4\.d - z5\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_u64_z4_1_1, svuint64x2_t,
+ svwrite_ver_za64_u64_vg2 (1, 1, z4),
+ svwrite_ver_za64_u64_vg2 (1, 1, z4))
+
+/*
+** write_za64_f64_z28_2_w11:
+** mov (w1[2-5]), w11
+** mova za2v\.d\[\1, 0:1\], {z28\.d - z29\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_f64_z28_2_w11, svfloat64x2_t,
+ svwrite_ver_za64_f64_vg2 (2, w11, z28),
+ svwrite_ver_za64_f64_vg2 (2, w11, z28))
+
+/*
+** write_za64_f64_z0_3_w12:
+** mova za3v\.d\[w12, 0:1\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_f64_z0_3_w12, svfloat64x2_t,
+ svwrite_ver_za64_f64_vg2 (3, w12, z0),
+ svwrite_ver_za64_f64_vg2 (3, w12, z0))
+
+/*
+** write_za64_u64_z18_4_w15:
+** mova za4v\.d\[w15, 0:1\], {z18\.d - z19\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_u64_z18_4_w15, svuint64x2_t,
+ svwrite_ver_za64_u64_vg2 (4, w15, z18),
+ svwrite_ver_za64_u64_vg2 (4, w15, z18))
+
+/*
+** write_za64_s64_z23_5_w12p2:
+** add (w[0-9]+), w12, #?2
+** mov [^\n]+
+** mov [^\n]+
+** mova za5v\.d\[\1, 0:1\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za64_s64_z23_5_w12p2, svint64x2_t,
+ svwrite_ver_za64_s64_vg2 (5, w12 + 2, z23),
+ svwrite_ver_za64_s64_vg2 (5, w12 + 2, z23))
+
+/*
+** write_za64_f64_z4_6_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova za6v\.d\[\1, 0:1\], {z4\.d - z5\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_f64_z4_6_w12p1, svfloat64x2_t,
+ svwrite_ver_za64_f64_vg2 (6, w12 + 1, z4),
+ svwrite_ver_za64_f64_vg2 (6, w12 + 1, z4))
+
+/*
+** write_za64_u64_z0_7_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova za7v\.d\[\1, 0:1\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_u64_z0_7_w15p3, svuint64x2_t,
+ svwrite_ver_za64_u64_vg2 (7, w15 + 3, z0),
+ svwrite_ver_za64_u64_vg2 (7, w15 + 3, z0))
+
+/*
+** write_za64_s64_z0_1_w15p4:
+** add (w[0-9]+), w15, #?4
+** mova za1v\.d\[\1, 0:1\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_s64_z0_1_w15p4, svint64x2_t,
+ svwrite_ver_za64_s64_vg2 (1, w15 + 4, z0),
+ svwrite_ver_za64_s64_vg2 (1, w15 + 4, z0))
+
+/*
+** write_za64_u64_z4_3_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova za3v\.d\[\1, 0:1\], {z4\.d - z5\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_u64_z4_3_w12m1, svuint64x2_t,
+ svwrite_ver_za64_u64_vg2 (3, w12 - 1, z4),
+ svwrite_ver_za64_u64_vg2 (3, w12 - 1, z4))
+
+/*
+** write_za64_u64_z18_1_w16:
+** mov (w1[2-5]), w16
+** mova za1v\.d\[\1, 0:1\], {z18\.d - z19\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_u64_z18_1_w16, svuint64x2_t,
+ svwrite_ver_za64_u64_vg2 (1, w16, z18),
+ svwrite_ver_za64_u64_vg2 (1, w16, z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za64_vg4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za64_vg4.c
new file mode 100644
index 0000000..a6d091a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za64_vg4.c
@@ -0,0 +1,129 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_za64_s64_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0v\.d\[\1, 0:3\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_s64_z0_0_0, svint64x4_t,
+ svwrite_ver_za64_s64_vg4 (0, 0, z0),
+ svwrite_ver_za64_s64_vg4 (0, 0, z0))
+
+/*
+** write_za64_u64_z4_1_1:
+** mov (w1[2-5]), #?1
+** mova za1v\.d\[\1, 0:3\], {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_u64_z4_1_1, svuint64x4_t,
+ svwrite_ver_za64_u64_vg4 (1, 1, z4),
+ svwrite_ver_za64_u64_vg4 (1, 1, z4))
+
+/*
+** write_za64_f64_z28_2_w11:
+** mov (w1[2-5]), w11
+** mova za2v\.d\[\1, 0:3\], {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_f64_z28_2_w11, svfloat64x4_t,
+ svwrite_ver_za64_f64_vg4 (2, w11, z28),
+ svwrite_ver_za64_f64_vg4 (2, w11, z28))
+
+/*
+** write_za64_s64_z0_3_w12:
+** mova za3v\.d\[w12, 0:3\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_s64_z0_3_w12, svint64x4_t,
+ svwrite_ver_za64_s64_vg4 (3, w12, z0),
+ svwrite_ver_za64_s64_vg4 (3, w12, z0))
+
+/*
+** write_za64_u64_z18_4_w15:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za4v\.d\[w15, 0:3\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za64_u64_z18_4_w15, svuint64x4_t,
+ svwrite_ver_za64_u64_vg4 (4, w15, z18),
+ svwrite_ver_za64_u64_vg4 (4, w15, z18))
+
+/*
+** write_za64_f64_z23_5_w12p4:
+** add (w[0-9]+), w12, #?4
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za5v\.d\[\1, 0:3\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za64_f64_z23_5_w12p4, svfloat64x4_t,
+ svwrite_ver_za64_f64_vg4 (5, w12 + 4, z23),
+ svwrite_ver_za64_f64_vg4 (5, w12 + 4, z23))
+
+/*
+** write_za64_u64_z4_6_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova za6v\.d\[\1, 0:3\], {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_u64_z4_6_w12p1, svuint64x4_t,
+ svwrite_ver_za64_u64_vg4 (6, w12 + 1, z4),
+ svwrite_ver_za64_u64_vg4 (6, w12 + 1, z4))
+
+/*
+** write_za64_s64_z28_7_w12p2:
+** add (w[0-9]+), w12, #?2
+** mova za7v\.d\[\1, 0:3\], {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_s64_z28_7_w12p2, svint64x4_t,
+ svwrite_ver_za64_s64_vg4 (7, w12 + 2, z28),
+ svwrite_ver_za64_s64_vg4 (7, w12 + 2, z28))
+
+/*
+** write_za64_f64_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova za0v\.d\[\1, 0:3\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_f64_z0_0_w15p3, svfloat64x4_t,
+ svwrite_ver_za64_f64_vg4 (0, w15 + 3, z0),
+ svwrite_ver_za64_f64_vg4 (0, w15 + 3, z0))
+
+/*
+** write_za64_u64_z28_1_w12p4:
+** add (w[0-9]+), w12, #?4
+** mova za1v\.d\[\1, 0:3\], {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_u64_z28_1_w12p4, svuint64x4_t,
+ svwrite_ver_za64_u64_vg4 (1, w12 + 4, z28),
+ svwrite_ver_za64_u64_vg4 (1, w12 + 4, z28))
+
+/*
+** write_za64_f64_z4_2_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova za2v\.d\[\1, 0:3\], {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_f64_z4_2_w12m1, svfloat64x4_t,
+ svwrite_ver_za64_f64_vg4 (2, w12 - 1, z4),
+ svwrite_ver_za64_f64_vg4 (2, w12 - 1, z4))
+
+/*
+** write_za64_u64_z28_3_w16:
+** mov (w1[2-5]), w16
+** mova za3v\.d\[\1, 0:3\], {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (write_za64_u64_z28_3_w16, svuint64x4_t,
+ svwrite_ver_za64_u64_vg4 (3, w16, z28),
+ svwrite_ver_za64_u64_vg4 (3, w16, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za8_vg2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za8_vg2.c
new file mode 100644
index 0000000..ce3dbdd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za8_vg2.c
@@ -0,0 +1,140 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_za8_s8_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0v\.b\[\1, 0:1\], {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z0_0_0, svint8x2_t,
+ svwrite_ver_za8_s8_vg2 (0, 0, z0),
+ svwrite_ver_za8_s8_vg2 (0, 0, z0))
+
+/*
+** write_za8_u8_z4_0_1:
+** mov (w1[2-5]), #?1
+** mova za0v\.b\[\1, 0:1\], {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z4_0_1, svuint8x2_t,
+ svwrite_ver_za8_u8_vg2 (0, 1, z4),
+ svwrite_ver_za8_u8_vg2 (0, 1, z4))
+
+/*
+** write_za8_s8_z28_0_w11:
+** mov (w1[2-5]), w11
+** mova za0v\.b\[\1, 0:1\], {z28\.b - z29\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z28_0_w11, svint8x2_t,
+ svwrite_ver_za8_s8_vg2 (0, w11, z28),
+ svwrite_ver_za8_s8_vg2 (0, w11, z28))
+
+/*
+** write_za8_s8_z0_0_w12:
+** mova za0v\.b\[w12, 0:1\], {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z0_0_w12, svint8x2_t,
+ svwrite_ver_za8_s8_vg2 (0, w12, z0),
+ svwrite_ver_za8_s8_vg2 (0, w12, z0))
+
+/*
+** write_za8_u8_z18_0_w15:
+** mova za0v\.b\[w15, 0:1\], {z18\.b - z19\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z18_0_w15, svuint8x2_t,
+ svwrite_ver_za8_u8_vg2 (0, w15, z18),
+ svwrite_ver_za8_u8_vg2 (0, w15, z18))
+
+/*
+** write_za8_s8_z23_0_w12p14:
+** mov [^\n]+
+** mov [^\n]+
+** mova za0v\.b\[w12, 14:15\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z23_0_w12p14, svint8x2_t,
+ svwrite_ver_za8_s8_vg2 (0, w12 + 14, z23),
+ svwrite_ver_za8_s8_vg2 (0, w12 + 14, z23))
+
+/*
+** write_za8_u8_z4_0_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova za0v\.b\[\1, 0:1\], {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z4_0_w12p1, svuint8x2_t,
+ svwrite_ver_za8_u8_vg2 (0, w12 + 1, z4),
+ svwrite_ver_za8_u8_vg2 (0, w12 + 1, z4))
+
+/*
+** write_za8_s8_z28_0_w12p2:
+** mova za0v\.b\[w12, 2:3\], {z28\.b - z29\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z28_0_w12p2, svint8x2_t,
+ svwrite_ver_za8_s8_vg2 (0, w12 + 2, z28),
+ svwrite_ver_za8_s8_vg2 (0, w12 + 2, z28))
+
+/*
+** write_za8_u8_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova za0v\.b\[\1, 0:1\], {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z0_0_w15p3, svuint8x2_t,
+ svwrite_ver_za8_u8_vg2 (0, w15 + 3, z0),
+ svwrite_ver_za8_u8_vg2 (0, w15 + 3, z0))
+
+/*
+** write_za8_u8_z4_0_w15p12:
+** mova za0v\.b\[w15, 12:13\], {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z4_0_w15p12, svuint8x2_t,
+ svwrite_ver_za8_u8_vg2 (0, w15 + 12, z4),
+ svwrite_ver_za8_u8_vg2 (0, w15 + 12, z4))
+
+/*
+** write_za8_u8_z28_0_w12p15:
+** add (w[0-9]+), w12, #?15
+** mova za0v\.b\[\1, 0:1\], {z28\.b - z29\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z28_0_w12p15, svuint8x2_t,
+ svwrite_ver_za8_u8_vg2 (0, w12 + 15, z28),
+ svwrite_ver_za8_u8_vg2 (0, w12 + 15, z28))
+
+/*
+** write_za8_s8_z0_0_w15p16:
+** add (w[0-9]+), w15, #?16
+** mova za0v\.b\[\1, 0:1\], {z0\.b - z1\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z0_0_w15p16, svint8x2_t,
+ svwrite_ver_za8_s8_vg2 (0, w15 + 16, z0),
+ svwrite_ver_za8_s8_vg2 (0, w15 + 16, z0))
+
+/*
+** write_za8_u8_z4_0_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova za0v\.b\[\1, 0:1\], {z4\.b - z5\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z4_0_w12m1, svuint8x2_t,
+ svwrite_ver_za8_u8_vg2 (0, w12 - 1, z4),
+ svwrite_ver_za8_u8_vg2 (0, w12 - 1, z4))
+
+/*
+** write_za8_u8_z18_0_w16:
+** mov (w1[2-5]), w16
+** mova za0v\.b\[\1, 0:1\], {z18\.b - z19\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z18_0_w16, svuint8x2_t,
+ svwrite_ver_za8_u8_vg2 (0, w16, z18),
+ svwrite_ver_za8_u8_vg2 (0, w16, z18))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za8_vg4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za8_vg4.c
new file mode 100644
index 0000000..8972fed
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_ver_za8_vg4.c
@@ -0,0 +1,156 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_za8_s8_z0_0_0:
+** mov (w1[2-5]), (?:wzr|#?0)
+** mova za0v\.b\[\1, 0:3\], {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z0_0_0, svint8x4_t,
+ svwrite_ver_za8_s8_vg4 (0, 0, z0),
+ svwrite_ver_za8_s8_vg4 (0, 0, z0))
+
+/*
+** write_za8_u8_z4_0_1:
+** mov (w1[2-5]), #?1
+** mova za0v\.b\[\1, 0:3\], {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z4_0_1, svuint8x4_t,
+ svwrite_ver_za8_u8_vg4 (0, 1, z4),
+ svwrite_ver_za8_u8_vg4 (0, 1, z4))
+
+/*
+** write_za8_s8_z28_0_w11:
+** mov (w1[2-5]), w11
+** mova za0v\.b\[\1, 0:3\], {z28\.b - z31\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z28_0_w11, svint8x4_t,
+ svwrite_ver_za8_s8_vg4 (0, w11, z28),
+ svwrite_ver_za8_s8_vg4 (0, w11, z28))
+
+/*
+** write_za8_s8_z0_0_w12:
+** mova za0v\.b\[w12, 0:3\], {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z0_0_w12, svint8x4_t,
+ svwrite_ver_za8_s8_vg4 (0, w12, z0),
+ svwrite_ver_za8_s8_vg4 (0, w12, z0))
+
+/*
+** write_za8_u8_z18_0_w15:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za0v\.b\[w15, 0:3\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z18_0_w15, svuint8x4_t,
+ svwrite_ver_za8_u8_vg4 (0, w15, z18),
+ svwrite_ver_za8_u8_vg4 (0, w15, z18))
+
+/*
+** write_za8_s8_z23_0_w12p12:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za0v\.b\[w12, 12:15\], {[^\n]+}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z23_0_w12p12, svint8x4_t,
+ svwrite_ver_za8_s8_vg4 (0, w12 + 12, z23),
+ svwrite_ver_za8_s8_vg4 (0, w12 + 12, z23))
+
+/*
+** write_za8_u8_z4_0_w12p1:
+** add (w[0-9]+), w12, #?1
+** mova za0v\.b\[\1, 0:3\], {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z4_0_w12p1, svuint8x4_t,
+ svwrite_ver_za8_u8_vg4 (0, w12 + 1, z4),
+ svwrite_ver_za8_u8_vg4 (0, w12 + 1, z4))
+
+/*
+** write_za8_s8_z28_0_w12p2:
+** add (w[0-9]+), w12, #?2
+** mova za0v\.b\[\1, 0:3\], {z28\.b - z31\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z28_0_w12p2, svint8x4_t,
+ svwrite_ver_za8_s8_vg4 (0, w12 + 2, z28),
+ svwrite_ver_za8_s8_vg4 (0, w12 + 2, z28))
+
+/*
+** write_za8_u8_z0_0_w15p3:
+** add (w[0-9]+), w15, #?3
+** mova za0v\.b\[\1, 0:3\], {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z0_0_w15p3, svuint8x4_t,
+ svwrite_ver_za8_u8_vg4 (0, w15 + 3, z0),
+ svwrite_ver_za8_u8_vg4 (0, w15 + 3, z0))
+
+/*
+** write_za8_u8_z0_0_w12p4:
+** mova za0v\.b\[w12, 4:7\], {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z0_0_w12p4, svuint8x4_t,
+ svwrite_ver_za8_u8_vg4 (0, w12 + 4, z0),
+ svwrite_ver_za8_u8_vg4 (0, w12 + 4, z0))
+
+/*
+** write_za8_u8_z4_0_w15p12:
+** mova za0v\.b\[w15, 12:15\], {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z4_0_w15p12, svuint8x4_t,
+ svwrite_ver_za8_u8_vg4 (0, w15 + 12, z4),
+ svwrite_ver_za8_u8_vg4 (0, w15 + 12, z4))
+
+/*
+** write_za8_u8_z28_0_w12p14:
+** add (w[0-9]+), w12, #?14
+** mova za0v\.b\[\1, 0:3\], {z28\.b - z31\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z28_0_w12p14, svuint8x4_t,
+ svwrite_ver_za8_u8_vg4 (0, w12 + 14, z28),
+ svwrite_ver_za8_u8_vg4 (0, w12 + 14, z28))
+
+/*
+** write_za8_s8_z0_0_w15p16:
+** add (w[0-9]+), w15, #?16
+** mova za0v\.b\[\1, 0:3\], {z0\.b - z3\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_s8_z0_0_w15p16, svint8x4_t,
+ svwrite_ver_za8_s8_vg4 (0, w15 + 16, z0),
+ svwrite_ver_za8_s8_vg4 (0, w15 + 16, z0))
+
+/*
+** write_za8_u8_z4_0_w12m1:
+** sub (w[0-9]+), w12, #?1
+** mova za0v\.b\[\1, 0:3\], {z4\.b - z7\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z4_0_w12m1, svuint8x4_t,
+ svwrite_ver_za8_u8_vg4 (0, w12 - 1, z4),
+ svwrite_ver_za8_u8_vg4 (0, w12 - 1, z4))
+
+/*
+** write_za8_u8_z28_0_w16:
+** mov (w1[2-5]), w16
+** mova za0v\.b\[\1, 0:3\], {z28\.b - z31\.b}
+** ret
+*/
+TEST_ZA_XN (write_za8_u8_z28_0_w16, svuint8x4_t,
+ svwrite_ver_za8_u8_vg4 (0, w16, z28),
+ svwrite_ver_za8_u8_vg4 (0, w16, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za16_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za16_vg1x2.c
new file mode 100644
index 0000000..2044460
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za16_vg1x2.c
@@ -0,0 +1,122 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_0_z0, svfloat16x2_t,
+ svwrite_za16_f16_vg1x2 (0, z0),
+ svwrite_za16_vg1x2 (0, z0))
+
+/*
+** write_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w0_z0, svint16x2_t,
+ svwrite_za16_s16_vg1x2 (w0, z0),
+ svwrite_za16_vg1x2 (w0, z0))
+
+/*
+** write_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w7_z0, svuint16x2_t,
+ svwrite_za16_u16_vg1x2 (w7, z0),
+ svwrite_za16_vg1x2 (w7, z0))
+
+/*
+** write_w8_z0:
+** mova za\.d\[w8, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z0, svbfloat16x2_t,
+ svwrite_za16_bf16_vg1x2 (w8, z0),
+ svwrite_za16_vg1x2 (w8, z0))
+
+/*
+** write_w11_z0:
+** mova za\.d\[w11, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w11_z0, svint16x2_t,
+ svwrite_za16_s16_vg1x2 (w11, z0),
+ svwrite_za16_vg1x2 (w11, z0))
+
+
+/*
+** write_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w12_z0, svuint16x2_t,
+ svwrite_za16_u16_vg1x2 (w12, z0),
+ svwrite_za16_vg1x2 (w12, z0))
+
+/*
+** write_w8p7_z0:
+** mova za\.d\[w8, 7, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8p7_z0, svfloat16x2_t,
+ svwrite_za16_f16_vg1x2 (w8 + 7, z0),
+ svwrite_za16_vg1x2 (w8 + 7, z0))
+
+/*
+** write_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8p8_z0, svint16x2_t,
+ svwrite_za16_s16_vg1x2 (w8 + 8, z0),
+ svwrite_za16_vg1x2 (w8 + 8, z0))
+
+/*
+** write_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8m1_z0, svuint16x2_t,
+ svwrite_za16_u16_vg1x2 (w8 - 1, z0),
+ svwrite_za16_vg1x2 (w8 - 1, z0))
+
+/*
+** write_w8_z18:
+** mova za\.d\[w8, 0, vgx2\], {z18\.d - z19\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z18, svfloat16x2_t,
+ svwrite_za16_f16_vg1x2 (w8, z18),
+ svwrite_za16_vg1x2 (w8, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** write_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mova za\.d\[w8, 0, vgx2\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (write_w8_z23, svint16x2_t,
+ svwrite_za16_s16_vg1x2 (w8, z23),
+ svwrite_za16_vg1x2 (w8, z23))
+
+/*
+** write_w8_z28:
+** mova za\.d\[w8, 0, vgx2\], {z28\.d - z29\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z28, svbfloat16x2_t,
+ svwrite_za16_bf16_vg1x2 (w8, z28),
+ svwrite_za16_vg1x2 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za16_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za16_vg1x4.c
new file mode 100644
index 0000000..e965801
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za16_vg1x4.c
@@ -0,0 +1,137 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_0_z0, svint16x4_t,
+ svwrite_za16_s16_vg1x4 (0, z0),
+ svwrite_za16_vg1x4 (0, z0))
+
+/*
+** write_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w0_z0, svuint16x4_t,
+ svwrite_za16_u16_vg1x4 (w0, z0),
+ svwrite_za16_vg1x4 (w0, z0))
+
+/*
+** write_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w7_z0, svfloat16x4_t,
+ svwrite_za16_f16_vg1x4 (w7, z0),
+ svwrite_za16_vg1x4 (w7, z0))
+
+/*
+** write_w8_z0:
+** mova za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z0, svint16x4_t,
+ svwrite_za16_s16_vg1x4 (w8, z0),
+ svwrite_za16_vg1x4 (w8, z0))
+
+/*
+** write_w11_z0:
+** mova za\.d\[w11, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w11_z0, svuint16x4_t,
+ svwrite_za16_u16_vg1x4 (w11, z0),
+ svwrite_za16_vg1x4 (w11, z0))
+
+
+/*
+** write_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w12_z0, svbfloat16x4_t,
+ svwrite_za16_bf16_vg1x4 (w12, z0),
+ svwrite_za16_vg1x4 (w12, z0))
+
+/*
+** write_w8p7_z0:
+** mova za\.d\[w8, 7, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8p7_z0, svint16x4_t,
+ svwrite_za16_s16_vg1x4 (w8 + 7, z0),
+ svwrite_za16_vg1x4 (w8 + 7, z0))
+
+/*
+** write_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8p8_z0, svuint16x4_t,
+ svwrite_za16_u16_vg1x4 (w8 + 8, z0),
+ svwrite_za16_vg1x4 (w8 + 8, z0))
+
+/*
+** write_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8m1_z0, svfloat16x4_t,
+ svwrite_za16_f16_vg1x4 (w8 - 1, z0),
+ svwrite_za16_vg1x4 (w8 - 1, z0))
+
+/*
+** write_w8_z4:
+** mova za\.d\[w8, 0, vgx4\], {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z4, svint16x4_t,
+ svwrite_za16_s16_vg1x4 (w8, z4),
+ svwrite_za16_vg1x4 (w8, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** write_w8_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za\.d\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (write_w8_z18, svuint16x4_t,
+ svwrite_za16_u16_vg1x4 (w8, z18),
+ svwrite_za16_vg1x4 (w8, z18))
+
+/*
+** write_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za\.d\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (write_w8_z23, svbfloat16x4_t,
+ svwrite_za16_bf16_vg1x4 (w8, z23),
+ svwrite_za16_vg1x4 (w8, z23))
+
+/*
+** write_w8_z28:
+** mova za\.d\[w8, 0, vgx4\], {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z28, svint16x4_t,
+ svwrite_za16_s16_vg1x4 (w8, z28),
+ svwrite_za16_vg1x4 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za32_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za32_vg1x2.c
new file mode 100644
index 0000000..9f44716
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za32_vg1x2.c
@@ -0,0 +1,122 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_0_z0, svfloat32x2_t,
+ svwrite_za32_f32_vg1x2 (0, z0),
+ svwrite_za32_vg1x2 (0, z0))
+
+/*
+** write_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w0_z0, svint32x2_t,
+ svwrite_za32_s32_vg1x2 (w0, z0),
+ svwrite_za32_vg1x2 (w0, z0))
+
+/*
+** write_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w7_z0, svuint32x2_t,
+ svwrite_za32_u32_vg1x2 (w7, z0),
+ svwrite_za32_vg1x2 (w7, z0))
+
+/*
+** write_w8_z0:
+** mova za\.d\[w8, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z0, svfloat32x2_t,
+ svwrite_za32_f32_vg1x2 (w8, z0),
+ svwrite_za32_vg1x2 (w8, z0))
+
+/*
+** write_w11_z0:
+** mova za\.d\[w11, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w11_z0, svint32x2_t,
+ svwrite_za32_s32_vg1x2 (w11, z0),
+ svwrite_za32_vg1x2 (w11, z0))
+
+
+/*
+** write_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w12_z0, svuint32x2_t,
+ svwrite_za32_u32_vg1x2 (w12, z0),
+ svwrite_za32_vg1x2 (w12, z0))
+
+/*
+** write_w8p7_z0:
+** mova za\.d\[w8, 7, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8p7_z0, svfloat32x2_t,
+ svwrite_za32_f32_vg1x2 (w8 + 7, z0),
+ svwrite_za32_vg1x2 (w8 + 7, z0))
+
+/*
+** write_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8p8_z0, svint32x2_t,
+ svwrite_za32_s32_vg1x2 (w8 + 8, z0),
+ svwrite_za32_vg1x2 (w8 + 8, z0))
+
+/*
+** write_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8m1_z0, svuint32x2_t,
+ svwrite_za32_u32_vg1x2 (w8 - 1, z0),
+ svwrite_za32_vg1x2 (w8 - 1, z0))
+
+/*
+** write_w8_z18:
+** mova za\.d\[w8, 0, vgx2\], {z18\.d - z19\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z18, svfloat32x2_t,
+ svwrite_za32_f32_vg1x2 (w8, z18),
+ svwrite_za32_vg1x2 (w8, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** write_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mova za\.d\[w8, 0, vgx2\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (write_w8_z23, svint32x2_t,
+ svwrite_za32_s32_vg1x2 (w8, z23),
+ svwrite_za32_vg1x2 (w8, z23))
+
+/*
+** write_w8_z28:
+** mova za\.d\[w8, 0, vgx2\], {z28\.d - z29\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z28, svuint32x2_t,
+ svwrite_za32_u32_vg1x2 (w8, z28),
+ svwrite_za32_vg1x2 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za32_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za32_vg1x4.c
new file mode 100644
index 0000000..d94c8d8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za32_vg1x4.c
@@ -0,0 +1,137 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_0_z0, svint32x4_t,
+ svwrite_za32_s32_vg1x4 (0, z0),
+ svwrite_za32_vg1x4 (0, z0))
+
+/*
+** write_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w0_z0, svuint32x4_t,
+ svwrite_za32_u32_vg1x4 (w0, z0),
+ svwrite_za32_vg1x4 (w0, z0))
+
+/*
+** write_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w7_z0, svfloat32x4_t,
+ svwrite_za32_f32_vg1x4 (w7, z0),
+ svwrite_za32_vg1x4 (w7, z0))
+
+/*
+** write_w8_z0:
+** mova za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z0, svint32x4_t,
+ svwrite_za32_s32_vg1x4 (w8, z0),
+ svwrite_za32_vg1x4 (w8, z0))
+
+/*
+** write_w11_z0:
+** mova za\.d\[w11, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w11_z0, svuint32x4_t,
+ svwrite_za32_u32_vg1x4 (w11, z0),
+ svwrite_za32_vg1x4 (w11, z0))
+
+
+/*
+** write_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w12_z0, svfloat32x4_t,
+ svwrite_za32_f32_vg1x4 (w12, z0),
+ svwrite_za32_vg1x4 (w12, z0))
+
+/*
+** write_w8p7_z0:
+** mova za\.d\[w8, 7, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8p7_z0, svint32x4_t,
+ svwrite_za32_s32_vg1x4 (w8 + 7, z0),
+ svwrite_za32_vg1x4 (w8 + 7, z0))
+
+/*
+** write_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8p8_z0, svuint32x4_t,
+ svwrite_za32_u32_vg1x4 (w8 + 8, z0),
+ svwrite_za32_vg1x4 (w8 + 8, z0))
+
+/*
+** write_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8m1_z0, svfloat32x4_t,
+ svwrite_za32_f32_vg1x4 (w8 - 1, z0),
+ svwrite_za32_vg1x4 (w8 - 1, z0))
+
+/*
+** write_w8_z4:
+** mova za\.d\[w8, 0, vgx4\], {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z4, svint32x4_t,
+ svwrite_za32_s32_vg1x4 (w8, z4),
+ svwrite_za32_vg1x4 (w8, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** write_w8_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za\.d\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (write_w8_z18, svuint32x4_t,
+ svwrite_za32_u32_vg1x4 (w8, z18),
+ svwrite_za32_vg1x4 (w8, z18))
+
+/*
+** write_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za\.d\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (write_w8_z23, svfloat32x4_t,
+ svwrite_za32_f32_vg1x4 (w8, z23),
+ svwrite_za32_vg1x4 (w8, z23))
+
+/*
+** write_w8_z28:
+** mova za\.d\[w8, 0, vgx4\], {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z28, svint32x4_t,
+ svwrite_za32_s32_vg1x4 (w8, z28),
+ svwrite_za32_vg1x4 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za64_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za64_vg1x2.c
new file mode 100644
index 0000000..23ff25b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za64_vg1x2.c
@@ -0,0 +1,122 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_0_z0, svfloat64x2_t,
+ svwrite_za64_f64_vg1x2 (0, z0),
+ svwrite_za64_vg1x2 (0, z0))
+
+/*
+** write_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w0_z0, svint64x2_t,
+ svwrite_za64_s64_vg1x2 (w0, z0),
+ svwrite_za64_vg1x2 (w0, z0))
+
+/*
+** write_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w7_z0, svuint64x2_t,
+ svwrite_za64_u64_vg1x2 (w7, z0),
+ svwrite_za64_vg1x2 (w7, z0))
+
+/*
+** write_w8_z0:
+** mova za\.d\[w8, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z0, svfloat64x2_t,
+ svwrite_za64_f64_vg1x2 (w8, z0),
+ svwrite_za64_vg1x2 (w8, z0))
+
+/*
+** write_w11_z0:
+** mova za\.d\[w11, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w11_z0, svint64x2_t,
+ svwrite_za64_s64_vg1x2 (w11, z0),
+ svwrite_za64_vg1x2 (w11, z0))
+
+
+/*
+** write_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w12_z0, svuint64x2_t,
+ svwrite_za64_u64_vg1x2 (w12, z0),
+ svwrite_za64_vg1x2 (w12, z0))
+
+/*
+** write_w8p7_z0:
+** mova za\.d\[w8, 7, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8p7_z0, svfloat64x2_t,
+ svwrite_za64_f64_vg1x2 (w8 + 7, z0),
+ svwrite_za64_vg1x2 (w8 + 7, z0))
+
+/*
+** write_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8p8_z0, svint64x2_t,
+ svwrite_za64_s64_vg1x2 (w8 + 8, z0),
+ svwrite_za64_vg1x2 (w8 + 8, z0))
+
+/*
+** write_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8m1_z0, svuint64x2_t,
+ svwrite_za64_u64_vg1x2 (w8 - 1, z0),
+ svwrite_za64_vg1x2 (w8 - 1, z0))
+
+/*
+** write_w8_z18:
+** mova za\.d\[w8, 0, vgx2\], {z18\.d - z19\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z18, svfloat64x2_t,
+ svwrite_za64_f64_vg1x2 (w8, z18),
+ svwrite_za64_vg1x2 (w8, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** write_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mova za\.d\[w8, 0, vgx2\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (write_w8_z23, svint64x2_t,
+ svwrite_za64_s64_vg1x2 (w8, z23),
+ svwrite_za64_vg1x2 (w8, z23))
+
+/*
+** write_w8_z28:
+** mova za\.d\[w8, 0, vgx2\], {z28\.d - z29\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z28, svuint64x2_t,
+ svwrite_za64_u64_vg1x2 (w8, z28),
+ svwrite_za64_vg1x2 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za64_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za64_vg1x4.c
new file mode 100644
index 0000000..7fcd6e5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za64_vg1x4.c
@@ -0,0 +1,137 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_0_z0, svint64x4_t,
+ svwrite_za64_s64_vg1x4 (0, z0),
+ svwrite_za64_vg1x4 (0, z0))
+
+/*
+** write_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w0_z0, svuint64x4_t,
+ svwrite_za64_u64_vg1x4 (w0, z0),
+ svwrite_za64_vg1x4 (w0, z0))
+
+/*
+** write_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w7_z0, svfloat64x4_t,
+ svwrite_za64_f64_vg1x4 (w7, z0),
+ svwrite_za64_vg1x4 (w7, z0))
+
+/*
+** write_w8_z0:
+** mova za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z0, svint64x4_t,
+ svwrite_za64_s64_vg1x4 (w8, z0),
+ svwrite_za64_vg1x4 (w8, z0))
+
+/*
+** write_w11_z0:
+** mova za\.d\[w11, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w11_z0, svuint64x4_t,
+ svwrite_za64_u64_vg1x4 (w11, z0),
+ svwrite_za64_vg1x4 (w11, z0))
+
+
+/*
+** write_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w12_z0, svfloat64x4_t,
+ svwrite_za64_f64_vg1x4 (w12, z0),
+ svwrite_za64_vg1x4 (w12, z0))
+
+/*
+** write_w8p7_z0:
+** mova za\.d\[w8, 7, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8p7_z0, svint64x4_t,
+ svwrite_za64_s64_vg1x4 (w8 + 7, z0),
+ svwrite_za64_vg1x4 (w8 + 7, z0))
+
+/*
+** write_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8p8_z0, svuint64x4_t,
+ svwrite_za64_u64_vg1x4 (w8 + 8, z0),
+ svwrite_za64_vg1x4 (w8 + 8, z0))
+
+/*
+** write_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8m1_z0, svfloat64x4_t,
+ svwrite_za64_f64_vg1x4 (w8 - 1, z0),
+ svwrite_za64_vg1x4 (w8 - 1, z0))
+
+/*
+** write_w8_z4:
+** mova za\.d\[w8, 0, vgx4\], {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z4, svint64x4_t,
+ svwrite_za64_s64_vg1x4 (w8, z4),
+ svwrite_za64_vg1x4 (w8, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** write_w8_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za\.d\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (write_w8_z18, svuint64x4_t,
+ svwrite_za64_u64_vg1x4 (w8, z18),
+ svwrite_za64_vg1x4 (w8, z18))
+
+/*
+** write_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za\.d\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (write_w8_z23, svfloat64x4_t,
+ svwrite_za64_f64_vg1x4 (w8, z23),
+ svwrite_za64_vg1x4 (w8, z23))
+
+/*
+** write_w8_z28:
+** mova za\.d\[w8, 0, vgx4\], {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z28, svint64x4_t,
+ svwrite_za64_s64_vg1x4 (w8, z28),
+ svwrite_za64_vg1x4 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za8_vg1x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za8_vg1x2.c
new file mode 100644
index 0000000..4b83a37
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za8_vg1x2.c
@@ -0,0 +1,122 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_0_z0, svint8x2_t,
+ svwrite_za8_s8_vg1x2 (0, z0),
+ svwrite_za8_vg1x2 (0, z0))
+
+/*
+** write_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w0_z0, svint8x2_t,
+ svwrite_za8_s8_vg1x2 (w0, z0),
+ svwrite_za8_vg1x2 (w0, z0))
+
+/*
+** write_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w7_z0, svuint8x2_t,
+ svwrite_za8_u8_vg1x2 (w7, z0),
+ svwrite_za8_vg1x2 (w7, z0))
+
+/*
+** write_w8_z0:
+** mova za\.d\[w8, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z0, svint8x2_t,
+ svwrite_za8_s8_vg1x2 (w8, z0),
+ svwrite_za8_vg1x2 (w8, z0))
+
+/*
+** write_w11_z0:
+** mova za\.d\[w11, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w11_z0, svint8x2_t,
+ svwrite_za8_s8_vg1x2 (w11, z0),
+ svwrite_za8_vg1x2 (w11, z0))
+
+
+/*
+** write_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w12_z0, svuint8x2_t,
+ svwrite_za8_u8_vg1x2 (w12, z0),
+ svwrite_za8_vg1x2 (w12, z0))
+
+/*
+** write_w8p7_z0:
+** mova za\.d\[w8, 7, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8p7_z0, svint8x2_t,
+ svwrite_za8_s8_vg1x2 (w8 + 7, z0),
+ svwrite_za8_vg1x2 (w8 + 7, z0))
+
+/*
+** write_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8p8_z0, svint8x2_t,
+ svwrite_za8_s8_vg1x2 (w8 + 8, z0),
+ svwrite_za8_vg1x2 (w8 + 8, z0))
+
+/*
+** write_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** mova za\.d\[\1, 0, vgx2\], {z0\.d - z1\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8m1_z0, svuint8x2_t,
+ svwrite_za8_u8_vg1x2 (w8 - 1, z0),
+ svwrite_za8_vg1x2 (w8 - 1, z0))
+
+/*
+** write_w8_z18:
+** mova za\.d\[w8, 0, vgx2\], {z18\.d - z19\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z18, svuint8x2_t,
+ svwrite_za8_u8_vg1x2 (w8, z18),
+ svwrite_za8_vg1x2 (w8, z18))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** write_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mova za\.d\[w8, 0, vgx2\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (write_w8_z23, svint8x2_t,
+ svwrite_za8_s8_vg1x2 (w8, z23),
+ svwrite_za8_vg1x2 (w8, z23))
+
+/*
+** write_w8_z28:
+** mova za\.d\[w8, 0, vgx2\], {z28\.d - z29\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z28, svuint8x2_t,
+ svwrite_za8_u8_vg1x2 (w8, z28),
+ svwrite_za8_vg1x2 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za8_vg1x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za8_vg1x4.c
new file mode 100644
index 0000000..a529bf9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/write_za8_vg1x4.c
@@ -0,0 +1,137 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** write_0_z0:
+** mov (w8|w9|w10|w11), #?0
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_0_z0, svint8x4_t,
+ svwrite_za8_s8_vg1x4 (0, z0),
+ svwrite_za8_vg1x4 (0, z0))
+
+/*
+** write_w0_z0:
+** mov (w8|w9|w10|w11), w0
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w0_z0, svuint8x4_t,
+ svwrite_za8_u8_vg1x4 (w0, z0),
+ svwrite_za8_vg1x4 (w0, z0))
+
+/*
+** write_w7_z0:
+** mov (w8|w9|w10|w11), w7
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w7_z0, svint8x4_t,
+ svwrite_za8_s8_vg1x4 (w7, z0),
+ svwrite_za8_vg1x4 (w7, z0))
+
+/*
+** write_w8_z0:
+** mova za\.d\[w8, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z0, svint8x4_t,
+ svwrite_za8_s8_vg1x4 (w8, z0),
+ svwrite_za8_vg1x4 (w8, z0))
+
+/*
+** write_w11_z0:
+** mova za\.d\[w11, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w11_z0, svuint8x4_t,
+ svwrite_za8_u8_vg1x4 (w11, z0),
+ svwrite_za8_vg1x4 (w11, z0))
+
+
+/*
+** write_w12_z0:
+** mov (w8|w9|w10|w11), w12
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w12_z0, svint8x4_t,
+ svwrite_za8_s8_vg1x4 (w12, z0),
+ svwrite_za8_vg1x4 (w12, z0))
+
+/*
+** write_w8p7_z0:
+** mova za\.d\[w8, 7, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8p7_z0, svint8x4_t,
+ svwrite_za8_s8_vg1x4 (w8 + 7, z0),
+ svwrite_za8_vg1x4 (w8 + 7, z0))
+
+/*
+** write_w8p8_z0:
+** add (w8|w9|w10|w11), w8, #?8
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8p8_z0, svuint8x4_t,
+ svwrite_za8_u8_vg1x4 (w8 + 8, z0),
+ svwrite_za8_vg1x4 (w8 + 8, z0))
+
+/*
+** write_w8m1_z0:
+** sub (w8|w9|w10|w11), w8, #?1
+** mova za\.d\[\1, 0, vgx4\], {z0\.d - z3\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8m1_z0, svint8x4_t,
+ svwrite_za8_s8_vg1x4 (w8 - 1, z0),
+ svwrite_za8_vg1x4 (w8 - 1, z0))
+
+/*
+** write_w8_z4:
+** mova za\.d\[w8, 0, vgx4\], {z4\.d - z7\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z4, svint8x4_t,
+ svwrite_za8_s8_vg1x4 (w8, z4),
+ svwrite_za8_vg1x4 (w8, z4))
+
+/* Leave the assembler to check for correctness for misaligned registers. */
+
+/*
+** write_w8_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za\.d\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (write_w8_z18, svuint8x4_t,
+ svwrite_za8_u8_vg1x4 (w8, z18),
+ svwrite_za8_vg1x4 (w8, z18))
+
+/*
+** write_w8_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mova za\.d\[w8, 0, vgx4\], [^\n]+
+** ret
+*/
+TEST_ZA_XN (write_w8_z23, svuint8x4_t,
+ svwrite_za8_u8_vg1x4 (w8, z23),
+ svwrite_za8_vg1x4 (w8, z23))
+
+/*
+** write_w8_z28:
+** mova za\.d\[w8, 0, vgx4\], {z28\.d - z31\.d}
+** ret
+*/
+TEST_ZA_XN (write_w8_z28, svint8x4_t,
+ svwrite_za8_s8_vg1x4 (w8, z28),
+ svwrite_za8_vg1x4 (w8, z28))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zero_zt.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zero_zt.c
new file mode 100644
index 0000000..eec298f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zero_zt.c
@@ -0,0 +1,12 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#define STREAMING_COMPATIBLE
+#define SHARED_ZT0
+#include "test_sme2_acle.h"
+
+/*
+** zero_zt0:
+** zero { zt0 }
+** ret
+*/
+PROTO (zero_zt0, void, ()) { svzero_zt (0); }
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_bf16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_bf16_x2.c
new file mode 100644
index 0000000..53fb2fa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_bf16_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.h - z1\.h}, z0\.h, z1\.h
+** ret
+*/
+TEST_XN (zip_z0_z0, svbfloat16x2_t, z0,
+ svzip_bf16_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.h - z1\.h}, z4\.h, z5\.h
+** ret
+*/
+TEST_XN (zip_z0_z4, svbfloat16x2_t, z0,
+ svzip_bf16_x2 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** zip {z4\.h - z5\.h}, z18\.h, z19\.h
+** ret
+*/
+TEST_XN (zip_z4_z18, svbfloat16x2_t, z4,
+ svzip_bf16_x2 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** zip {z18\.h - z19\.h}, z23\.h, z24\.h
+** ret
+*/
+TEST_XN (zip_z18_z23, svbfloat16x2_t, z18,
+ svzip_bf16_x2 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, z28\.h, z29\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svbfloat16x2_t, z23,
+ svzip_bf16_x2 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.h - z29\.h}, z0\.h, z1\.h
+** ret
+*/
+TEST_XN (zip_z28_z0, svbfloat16x2_t, z28,
+ svzip_bf16_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z28_z0_z23:
+** zip {z28\.h - z29\.h}, z0\.h, z23\.h
+** ret
+*/
+TEST_XN (zip_z28_z0_z23, svbfloat16x2_t, z28,
+ svzip_bf16_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzip (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zip_z28_z5_z19:
+** zip {z28\.h - z29\.h}, z5\.h, z19\.h
+** ret
+*/
+TEST_XN (zip_z28_z5_z19, svbfloat16x2_t, z28,
+ svzip_bf16_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzip (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_bf16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_bf16_x4.c
new file mode 100644
index 0000000..7e532f3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_bf16_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (zip_z0_z0, svbfloat16x4_t, z0,
+ svzip_bf16_x4 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (zip_z0_z4, svbfloat16x4_t, z0,
+ svzip_bf16_x4 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.h - z7\.h}, [^\n]+
+** ret
+*/
+TEST_XN (zip_z4_z18, svbfloat16x4_t, z4,
+ svzip_bf16_x4 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z18_z23, svbfloat16x4_t, z18,
+ svzip_bf16_x4 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svbfloat16x4_t, z23,
+ svzip_bf16_x4 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (zip_z28_z0, svbfloat16x4_t, z28,
+ svzip_bf16_x4 (z0),
+ svzip (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f16_x2.c
new file mode 100644
index 0000000..c404cfa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f16_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.h - z1\.h}, z0\.h, z1\.h
+** ret
+*/
+TEST_XN (zip_z0_z0, svfloat16x2_t, z0,
+ svzip_f16_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.h - z1\.h}, z4\.h, z5\.h
+** ret
+*/
+TEST_XN (zip_z0_z4, svfloat16x2_t, z0,
+ svzip_f16_x2 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** zip {z4\.h - z5\.h}, z18\.h, z19\.h
+** ret
+*/
+TEST_XN (zip_z4_z18, svfloat16x2_t, z4,
+ svzip_f16_x2 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** zip {z18\.h - z19\.h}, z23\.h, z24\.h
+** ret
+*/
+TEST_XN (zip_z18_z23, svfloat16x2_t, z18,
+ svzip_f16_x2 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, z28\.h, z29\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svfloat16x2_t, z23,
+ svzip_f16_x2 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.h - z29\.h}, z0\.h, z1\.h
+** ret
+*/
+TEST_XN (zip_z28_z0, svfloat16x2_t, z28,
+ svzip_f16_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z28_z0_z23:
+** zip {z28\.h - z29\.h}, z0\.h, z23\.h
+** ret
+*/
+TEST_XN (zip_z28_z0_z23, svfloat16x2_t, z28,
+ svzip_f16_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzip (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zip_z28_z5_z19:
+** zip {z28\.h - z29\.h}, z5\.h, z19\.h
+** ret
+*/
+TEST_XN (zip_z28_z5_z19, svfloat16x2_t, z28,
+ svzip_f16_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzip (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f16_x4.c
new file mode 100644
index 0000000..3159d1e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f16_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (zip_z0_z0, svfloat16x4_t, z0,
+ svzip_f16_x4 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (zip_z0_z4, svfloat16x4_t, z0,
+ svzip_f16_x4 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.h - z7\.h}, [^\n]+
+** ret
+*/
+TEST_XN (zip_z4_z18, svfloat16x4_t, z4,
+ svzip_f16_x4 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z18_z23, svfloat16x4_t, z18,
+ svzip_f16_x4 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svfloat16x4_t, z23,
+ svzip_f16_x4 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (zip_z28_z0, svfloat16x4_t, z28,
+ svzip_f16_x4 (z0),
+ svzip (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f32_x2.c
new file mode 100644
index 0000000..24f1900
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f32_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.s - z1\.s}, z0\.s, z1\.s
+** ret
+*/
+TEST_XN (zip_z0_z0, svfloat32x2_t, z0,
+ svzip_f32_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.s - z1\.s}, z4\.s, z5\.s
+** ret
+*/
+TEST_XN (zip_z0_z4, svfloat32x2_t, z0,
+ svzip_f32_x2 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** zip {z4\.s - z5\.s}, z18\.s, z19\.s
+** ret
+*/
+TEST_XN (zip_z4_z18, svfloat32x2_t, z4,
+ svzip_f32_x2 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** zip {z18\.s - z19\.s}, z23\.s, z24\.s
+** ret
+*/
+TEST_XN (zip_z18_z23, svfloat32x2_t, z18,
+ svzip_f32_x2 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, z28\.s, z29\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svfloat32x2_t, z23,
+ svzip_f32_x2 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.s - z29\.s}, z0\.s, z1\.s
+** ret
+*/
+TEST_XN (zip_z28_z0, svfloat32x2_t, z28,
+ svzip_f32_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z28_z0_z23:
+** zip {z28\.s - z29\.s}, z0\.s, z23\.s
+** ret
+*/
+TEST_XN (zip_z28_z0_z23, svfloat32x2_t, z28,
+ svzip_f32_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzip (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zip_z28_z5_z19:
+** zip {z28\.s - z29\.s}, z5\.s, z19\.s
+** ret
+*/
+TEST_XN (zip_z28_z5_z19, svfloat32x2_t, z28,
+ svzip_f32_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzip (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f32_x4.c
new file mode 100644
index 0000000..2f4fd53
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f32_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.s - z3\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (zip_z0_z0, svfloat32x4_t, z0,
+ svzip_f32_x4 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (zip_z0_z4, svfloat32x4_t, z0,
+ svzip_f32_x4 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.s - z7\.s}, [^\n]+
+** ret
+*/
+TEST_XN (zip_z4_z18, svfloat32x4_t, z4,
+ svzip_f32_x4 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z18_z23, svfloat32x4_t, z18,
+ svzip_f32_x4 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svfloat32x4_t, z23,
+ svzip_f32_x4 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.s - z31\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (zip_z28_z0, svfloat32x4_t, z28,
+ svzip_f32_x4 (z0),
+ svzip (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f64_x2.c
new file mode 100644
index 0000000..6c27bdb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f64_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.d - z1\.d}, z0\.d, z1\.d
+** ret
+*/
+TEST_XN (zip_z0_z0, svfloat64x2_t, z0,
+ svzip_f64_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.d - z1\.d}, z4\.d, z5\.d
+** ret
+*/
+TEST_XN (zip_z0_z4, svfloat64x2_t, z0,
+ svzip_f64_x2 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** zip {z4\.d - z5\.d}, z18\.d, z19\.d
+** ret
+*/
+TEST_XN (zip_z4_z18, svfloat64x2_t, z4,
+ svzip_f64_x2 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** zip {z18\.d - z19\.d}, z23\.d, z24\.d
+** ret
+*/
+TEST_XN (zip_z18_z23, svfloat64x2_t, z18,
+ svzip_f64_x2 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, z28\.d, z29\.d
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svfloat64x2_t, z23,
+ svzip_f64_x2 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.d - z29\.d}, z0\.d, z1\.d
+** ret
+*/
+TEST_XN (zip_z28_z0, svfloat64x2_t, z28,
+ svzip_f64_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z28_z0_z23:
+** zip {z28\.d - z29\.d}, z0\.d, z23\.d
+** ret
+*/
+TEST_XN (zip_z28_z0_z23, svfloat64x2_t, z28,
+ svzip_f64_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzip (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zip_z28_z5_z19:
+** zip {z28\.d - z29\.d}, z5\.d, z19\.d
+** ret
+*/
+TEST_XN (zip_z28_z5_z19, svfloat64x2_t, z28,
+ svzip_f64_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzip (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f64_x4.c
new file mode 100644
index 0000000..ae10276
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_f64_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.d - z3\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (zip_z0_z0, svfloat64x4_t, z0,
+ svzip_f64_x4 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (zip_z0_z4, svfloat64x4_t, z0,
+ svzip_f64_x4 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.d - z7\.d}, [^\n]+
+** ret
+*/
+TEST_XN (zip_z4_z18, svfloat64x4_t, z4,
+ svzip_f64_x4 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z18_z23, svfloat64x4_t, z18,
+ svzip_f64_x4 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svfloat64x4_t, z23,
+ svzip_f64_x4 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.d - z31\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (zip_z28_z0, svfloat64x4_t, z28,
+ svzip_f64_x4 (z0),
+ svzip (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s16_x2.c
new file mode 100644
index 0000000..ebe26f1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s16_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.h - z1\.h}, z0\.h, z1\.h
+** ret
+*/
+TEST_XN (zip_z0_z0, svint16x2_t, z0,
+ svzip_s16_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.h - z1\.h}, z4\.h, z5\.h
+** ret
+*/
+TEST_XN (zip_z0_z4, svint16x2_t, z0,
+ svzip_s16_x2 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** zip {z4\.h - z5\.h}, z18\.h, z19\.h
+** ret
+*/
+TEST_XN (zip_z4_z18, svint16x2_t, z4,
+ svzip_s16_x2 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** zip {z18\.h - z19\.h}, z23\.h, z24\.h
+** ret
+*/
+TEST_XN (zip_z18_z23, svint16x2_t, z18,
+ svzip_s16_x2 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, z28\.h, z29\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svint16x2_t, z23,
+ svzip_s16_x2 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.h - z29\.h}, z0\.h, z1\.h
+** ret
+*/
+TEST_XN (zip_z28_z0, svint16x2_t, z28,
+ svzip_s16_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z28_z0_z23:
+** zip {z28\.h - z29\.h}, z0\.h, z23\.h
+** ret
+*/
+TEST_XN (zip_z28_z0_z23, svint16x2_t, z28,
+ svzip_s16_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzip (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zip_z28_z5_z19:
+** zip {z28\.h - z29\.h}, z5\.h, z19\.h
+** ret
+*/
+TEST_XN (zip_z28_z5_z19, svint16x2_t, z28,
+ svzip_s16_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzip (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s16_x4.c
new file mode 100644
index 0000000..bfb42a0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s16_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (zip_z0_z0, svint16x4_t, z0,
+ svzip_s16_x4 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (zip_z0_z4, svint16x4_t, z0,
+ svzip_s16_x4 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.h - z7\.h}, [^\n]+
+** ret
+*/
+TEST_XN (zip_z4_z18, svint16x4_t, z4,
+ svzip_s16_x4 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z18_z23, svint16x4_t, z18,
+ svzip_s16_x4 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svint16x4_t, z23,
+ svzip_s16_x4 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (zip_z28_z0, svint16x4_t, z28,
+ svzip_s16_x4 (z0),
+ svzip (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s32_x2.c
new file mode 100644
index 0000000..8969a89
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s32_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.s - z1\.s}, z0\.s, z1\.s
+** ret
+*/
+TEST_XN (zip_z0_z0, svint32x2_t, z0,
+ svzip_s32_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.s - z1\.s}, z4\.s, z5\.s
+** ret
+*/
+TEST_XN (zip_z0_z4, svint32x2_t, z0,
+ svzip_s32_x2 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** zip {z4\.s - z5\.s}, z18\.s, z19\.s
+** ret
+*/
+TEST_XN (zip_z4_z18, svint32x2_t, z4,
+ svzip_s32_x2 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** zip {z18\.s - z19\.s}, z23\.s, z24\.s
+** ret
+*/
+TEST_XN (zip_z18_z23, svint32x2_t, z18,
+ svzip_s32_x2 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, z28\.s, z29\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svint32x2_t, z23,
+ svzip_s32_x2 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.s - z29\.s}, z0\.s, z1\.s
+** ret
+*/
+TEST_XN (zip_z28_z0, svint32x2_t, z28,
+ svzip_s32_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z28_z0_z23:
+** zip {z28\.s - z29\.s}, z0\.s, z23\.s
+** ret
+*/
+TEST_XN (zip_z28_z0_z23, svint32x2_t, z28,
+ svzip_s32_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzip (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zip_z28_z5_z19:
+** zip {z28\.s - z29\.s}, z5\.s, z19\.s
+** ret
+*/
+TEST_XN (zip_z28_z5_z19, svint32x2_t, z28,
+ svzip_s32_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzip (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s32_x4.c
new file mode 100644
index 0000000..5c2c393
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s32_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.s - z3\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (zip_z0_z0, svint32x4_t, z0,
+ svzip_s32_x4 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (zip_z0_z4, svint32x4_t, z0,
+ svzip_s32_x4 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.s - z7\.s}, [^\n]+
+** ret
+*/
+TEST_XN (zip_z4_z18, svint32x4_t, z4,
+ svzip_s32_x4 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z18_z23, svint32x4_t, z18,
+ svzip_s32_x4 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svint32x4_t, z23,
+ svzip_s32_x4 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.s - z31\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (zip_z28_z0, svint32x4_t, z28,
+ svzip_s32_x4 (z0),
+ svzip (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s64_x2.c
new file mode 100644
index 0000000..68ca3fa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s64_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.d - z1\.d}, z0\.d, z1\.d
+** ret
+*/
+TEST_XN (zip_z0_z0, svint64x2_t, z0,
+ svzip_s64_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.d - z1\.d}, z4\.d, z5\.d
+** ret
+*/
+TEST_XN (zip_z0_z4, svint64x2_t, z0,
+ svzip_s64_x2 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** zip {z4\.d - z5\.d}, z18\.d, z19\.d
+** ret
+*/
+TEST_XN (zip_z4_z18, svint64x2_t, z4,
+ svzip_s64_x2 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** zip {z18\.d - z19\.d}, z23\.d, z24\.d
+** ret
+*/
+TEST_XN (zip_z18_z23, svint64x2_t, z18,
+ svzip_s64_x2 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, z28\.d, z29\.d
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svint64x2_t, z23,
+ svzip_s64_x2 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.d - z29\.d}, z0\.d, z1\.d
+** ret
+*/
+TEST_XN (zip_z28_z0, svint64x2_t, z28,
+ svzip_s64_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z28_z0_z23:
+** zip {z28\.d - z29\.d}, z0\.d, z23\.d
+** ret
+*/
+TEST_XN (zip_z28_z0_z23, svint64x2_t, z28,
+ svzip_s64_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzip (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zip_z28_z5_z19:
+** zip {z28\.d - z29\.d}, z5\.d, z19\.d
+** ret
+*/
+TEST_XN (zip_z28_z5_z19, svint64x2_t, z28,
+ svzip_s64_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzip (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s64_x4.c
new file mode 100644
index 0000000..9a63b6d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s64_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.d - z3\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (zip_z0_z0, svint64x4_t, z0,
+ svzip_s64_x4 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (zip_z0_z4, svint64x4_t, z0,
+ svzip_s64_x4 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.d - z7\.d}, [^\n]+
+** ret
+*/
+TEST_XN (zip_z4_z18, svint64x4_t, z4,
+ svzip_s64_x4 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z18_z23, svint64x4_t, z18,
+ svzip_s64_x4 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svint64x4_t, z23,
+ svzip_s64_x4 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.d - z31\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (zip_z28_z0, svint64x4_t, z28,
+ svzip_s64_x4 (z0),
+ svzip (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s8_x2.c
new file mode 100644
index 0000000..2c98222
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s8_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.b - z1\.b}, z0\.b, z1\.b
+** ret
+*/
+TEST_XN (zip_z0_z0, svint8x2_t, z0,
+ svzip_s8_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.b - z1\.b}, z4\.b, z5\.b
+** ret
+*/
+TEST_XN (zip_z0_z4, svint8x2_t, z0,
+ svzip_s8_x2 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** zip {z4\.b - z5\.b}, z18\.b, z19\.b
+** ret
+*/
+TEST_XN (zip_z4_z18, svint8x2_t, z4,
+ svzip_s8_x2 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** zip {z18\.b - z19\.b}, z23\.b, z24\.b
+** ret
+*/
+TEST_XN (zip_z18_z23, svint8x2_t, z18,
+ svzip_s8_x2 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, z28\.b, z29\.b
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svint8x2_t, z23,
+ svzip_s8_x2 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.b - z29\.b}, z0\.b, z1\.b
+** ret
+*/
+TEST_XN (zip_z28_z0, svint8x2_t, z28,
+ svzip_s8_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z28_z0_z23:
+** zip {z28\.b - z29\.b}, z0\.b, z23\.b
+** ret
+*/
+TEST_XN (zip_z28_z0_z23, svint8x2_t, z28,
+ svzip_s8_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzip (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zip_z28_z5_z19:
+** zip {z28\.b - z29\.b}, z5\.b, z19\.b
+** ret
+*/
+TEST_XN (zip_z28_z5_z19, svint8x2_t, z28,
+ svzip_s8_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzip (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s8_x4.c
new file mode 100644
index 0000000..6f2a2d2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_s8_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.b - z3\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_XN (zip_z0_z0, svint8x4_t, z0,
+ svzip_s8_x4 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_XN (zip_z0_z4, svint8x4_t, z0,
+ svzip_s8_x4 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.b - z7\.b}, [^\n]+
+** ret
+*/
+TEST_XN (zip_z4_z18, svint8x4_t, z4,
+ svzip_s8_x4 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z18_z23, svint8x4_t, z18,
+ svzip_s8_x4 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, {z28\.b - z31\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svint8x4_t, z23,
+ svzip_s8_x4 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.b - z31\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_XN (zip_z28_z0, svint8x4_t, z28,
+ svzip_s8_x4 (z0),
+ svzip (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u16_x2.c
new file mode 100644
index 0000000..8ed76db
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u16_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.h - z1\.h}, z0\.h, z1\.h
+** ret
+*/
+TEST_XN (zip_z0_z0, svuint16x2_t, z0,
+ svzip_u16_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.h - z1\.h}, z4\.h, z5\.h
+** ret
+*/
+TEST_XN (zip_z0_z4, svuint16x2_t, z0,
+ svzip_u16_x2 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** zip {z4\.h - z5\.h}, z18\.h, z19\.h
+** ret
+*/
+TEST_XN (zip_z4_z18, svuint16x2_t, z4,
+ svzip_u16_x2 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** zip {z18\.h - z19\.h}, z23\.h, z24\.h
+** ret
+*/
+TEST_XN (zip_z18_z23, svuint16x2_t, z18,
+ svzip_u16_x2 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, z28\.h, z29\.h
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svuint16x2_t, z23,
+ svzip_u16_x2 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.h - z29\.h}, z0\.h, z1\.h
+** ret
+*/
+TEST_XN (zip_z28_z0, svuint16x2_t, z28,
+ svzip_u16_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z28_z0_z23:
+** zip {z28\.h - z29\.h}, z0\.h, z23\.h
+** ret
+*/
+TEST_XN (zip_z28_z0_z23, svuint16x2_t, z28,
+ svzip_u16_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzip (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zip_z28_z5_z19:
+** zip {z28\.h - z29\.h}, z5\.h, z19\.h
+** ret
+*/
+TEST_XN (zip_z28_z5_z19, svuint16x2_t, z28,
+ svzip_u16_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzip (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u16_x4.c
new file mode 100644
index 0000000..2f69393
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u16_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.h - z3\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (zip_z0_z0, svuint16x4_t, z0,
+ svzip_u16_x4 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.h - z3\.h}, {z4\.h - z7\.h}
+** ret
+*/
+TEST_XN (zip_z0_z4, svuint16x4_t, z0,
+ svzip_u16_x4 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.h - z7\.h}, [^\n]+
+** ret
+*/
+TEST_XN (zip_z4_z18, svuint16x4_t, z4,
+ svzip_u16_x4 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z18_z23, svuint16x4_t, z18,
+ svzip_u16_x4 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, {z28\.h - z31\.h}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svuint16x4_t, z23,
+ svzip_u16_x4 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.h - z31\.h}, {z0\.h - z3\.h}
+** ret
+*/
+TEST_XN (zip_z28_z0, svuint16x4_t, z28,
+ svzip_u16_x4 (z0),
+ svzip (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u32_x2.c
new file mode 100644
index 0000000..3970d3e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u32_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.s - z1\.s}, z0\.s, z1\.s
+** ret
+*/
+TEST_XN (zip_z0_z0, svuint32x2_t, z0,
+ svzip_u32_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.s - z1\.s}, z4\.s, z5\.s
+** ret
+*/
+TEST_XN (zip_z0_z4, svuint32x2_t, z0,
+ svzip_u32_x2 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** zip {z4\.s - z5\.s}, z18\.s, z19\.s
+** ret
+*/
+TEST_XN (zip_z4_z18, svuint32x2_t, z4,
+ svzip_u32_x2 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** zip {z18\.s - z19\.s}, z23\.s, z24\.s
+** ret
+*/
+TEST_XN (zip_z18_z23, svuint32x2_t, z18,
+ svzip_u32_x2 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, z28\.s, z29\.s
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svuint32x2_t, z23,
+ svzip_u32_x2 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.s - z29\.s}, z0\.s, z1\.s
+** ret
+*/
+TEST_XN (zip_z28_z0, svuint32x2_t, z28,
+ svzip_u32_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z28_z0_z23:
+** zip {z28\.s - z29\.s}, z0\.s, z23\.s
+** ret
+*/
+TEST_XN (zip_z28_z0_z23, svuint32x2_t, z28,
+ svzip_u32_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzip (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zip_z28_z5_z19:
+** zip {z28\.s - z29\.s}, z5\.s, z19\.s
+** ret
+*/
+TEST_XN (zip_z28_z5_z19, svuint32x2_t, z28,
+ svzip_u32_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzip (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u32_x4.c
new file mode 100644
index 0000000..bba01ad
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u32_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.s - z3\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (zip_z0_z0, svuint32x4_t, z0,
+ svzip_u32_x4 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.s - z3\.s}, {z4\.s - z7\.s}
+** ret
+*/
+TEST_XN (zip_z0_z4, svuint32x4_t, z0,
+ svzip_u32_x4 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.s - z7\.s}, [^\n]+
+** ret
+*/
+TEST_XN (zip_z4_z18, svuint32x4_t, z4,
+ svzip_u32_x4 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z18_z23, svuint32x4_t, z18,
+ svzip_u32_x4 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, {z28\.s - z31\.s}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svuint32x4_t, z23,
+ svzip_u32_x4 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.s - z31\.s}, {z0\.s - z3\.s}
+** ret
+*/
+TEST_XN (zip_z28_z0, svuint32x4_t, z28,
+ svzip_u32_x4 (z0),
+ svzip (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u64_x2.c
new file mode 100644
index 0000000..7aa1d60
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u64_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.d - z1\.d}, z0\.d, z1\.d
+** ret
+*/
+TEST_XN (zip_z0_z0, svuint64x2_t, z0,
+ svzip_u64_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.d - z1\.d}, z4\.d, z5\.d
+** ret
+*/
+TEST_XN (zip_z0_z4, svuint64x2_t, z0,
+ svzip_u64_x2 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** zip {z4\.d - z5\.d}, z18\.d, z19\.d
+** ret
+*/
+TEST_XN (zip_z4_z18, svuint64x2_t, z4,
+ svzip_u64_x2 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** zip {z18\.d - z19\.d}, z23\.d, z24\.d
+** ret
+*/
+TEST_XN (zip_z18_z23, svuint64x2_t, z18,
+ svzip_u64_x2 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, z28\.d, z29\.d
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svuint64x2_t, z23,
+ svzip_u64_x2 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.d - z29\.d}, z0\.d, z1\.d
+** ret
+*/
+TEST_XN (zip_z28_z0, svuint64x2_t, z28,
+ svzip_u64_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z28_z0_z23:
+** zip {z28\.d - z29\.d}, z0\.d, z23\.d
+** ret
+*/
+TEST_XN (zip_z28_z0_z23, svuint64x2_t, z28,
+ svzip_u64_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzip (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zip_z28_z5_z19:
+** zip {z28\.d - z29\.d}, z5\.d, z19\.d
+** ret
+*/
+TEST_XN (zip_z28_z5_z19, svuint64x2_t, z28,
+ svzip_u64_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzip (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u64_x4.c
new file mode 100644
index 0000000..ed0c1db
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u64_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.d - z3\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (zip_z0_z0, svuint64x4_t, z0,
+ svzip_u64_x4 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.d - z3\.d}, {z4\.d - z7\.d}
+** ret
+*/
+TEST_XN (zip_z0_z4, svuint64x4_t, z0,
+ svzip_u64_x4 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.d - z7\.d}, [^\n]+
+** ret
+*/
+TEST_XN (zip_z4_z18, svuint64x4_t, z4,
+ svzip_u64_x4 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z18_z23, svuint64x4_t, z18,
+ svzip_u64_x4 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, {z28\.d - z31\.d}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svuint64x4_t, z23,
+ svzip_u64_x4 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.d - z31\.d}, {z0\.d - z3\.d}
+** ret
+*/
+TEST_XN (zip_z28_z0, svuint64x4_t, z28,
+ svzip_u64_x4 (z0),
+ svzip (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u8_x2.c
new file mode 100644
index 0000000..716edb4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u8_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.b - z1\.b}, z0\.b, z1\.b
+** ret
+*/
+TEST_XN (zip_z0_z0, svuint8x2_t, z0,
+ svzip_u8_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.b - z1\.b}, z4\.b, z5\.b
+** ret
+*/
+TEST_XN (zip_z0_z4, svuint8x2_t, z0,
+ svzip_u8_x2 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** zip {z4\.b - z5\.b}, z18\.b, z19\.b
+** ret
+*/
+TEST_XN (zip_z4_z18, svuint8x2_t, z4,
+ svzip_u8_x2 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** zip {z18\.b - z19\.b}, z23\.b, z24\.b
+** ret
+*/
+TEST_XN (zip_z18_z23, svuint8x2_t, z18,
+ svzip_u8_x2 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, z28\.b, z29\.b
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svuint8x2_t, z23,
+ svzip_u8_x2 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.b - z29\.b}, z0\.b, z1\.b
+** ret
+*/
+TEST_XN (zip_z28_z0, svuint8x2_t, z28,
+ svzip_u8_x2 (z0),
+ svzip (z0))
+
+/*
+** zip_z28_z0_z23:
+** zip {z28\.b - z29\.b}, z0\.b, z23\.b
+** ret
+*/
+TEST_XN (zip_z28_z0_z23, svuint8x2_t, z28,
+ svzip_u8_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzip (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zip_z28_z5_z19:
+** zip {z28\.b - z29\.b}, z5\.b, z19\.b
+** ret
+*/
+TEST_XN (zip_z28_z5_z19, svuint8x2_t, z28,
+ svzip_u8_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzip (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u8_x4.c
new file mode 100644
index 0000000..c13ad57
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zip_u8_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zip_z0_z0:
+** zip {z0\.b - z3\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_XN (zip_z0_z0, svuint8x4_t, z0,
+ svzip_u8_x4 (z0),
+ svzip (z0))
+
+/*
+** zip_z0_z4:
+** zip {z0\.b - z3\.b}, {z4\.b - z7\.b}
+** ret
+*/
+TEST_XN (zip_z0_z4, svuint8x4_t, z0,
+ svzip_u8_x4 (z4),
+ svzip (z4))
+
+/*
+** zip_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.b - z7\.b}, [^\n]+
+** ret
+*/
+TEST_XN (zip_z4_z18, svuint8x4_t, z4,
+ svzip_u8_x4 (z18),
+ svzip (z18))
+
+/*
+** zip_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z18_z23, svuint8x4_t, z18,
+ svzip_u8_x4 (z23),
+ svzip (z23))
+
+/*
+** zip_z23_z28:
+** zip [^\n]+, {z28\.b - z31\.b}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zip_z23_z28, svuint8x4_t, z23,
+ svzip_u8_x4 (z28),
+ svzip (z28))
+
+/*
+** zip_z28_z0:
+** zip {z28\.b - z31\.b}, {z0\.b - z3\.b}
+** ret
+*/
+TEST_XN (zip_z28_z0, svuint8x4_t, z28,
+ svzip_u8_x4 (z0),
+ svzip (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_bf16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_bf16_x2.c
new file mode 100644
index 0000000..d9432cb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_bf16_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z0_z0, svbfloat16x2_t, z0,
+ svzipq_bf16_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (zipq_z0_z4, svbfloat16x2_t, z0,
+ svzipq_bf16_x2 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** zip {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z4_z18, svbfloat16x2_t, z4,
+ svzipq_bf16_x2 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** zip {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (zipq_z18_z23, svbfloat16x2_t, z18,
+ svzipq_bf16_x2 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svbfloat16x2_t, z23,
+ svzipq_bf16_x2 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0, svbfloat16x2_t, z28,
+ svzipq_bf16_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z28_z0_z23:
+** zip {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0_z23, svbfloat16x2_t, z28,
+ svzipq_bf16_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzipq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zipq_z28_z5_z19:
+** zip {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z28_z5_z19, svbfloat16x2_t, z28,
+ svzipq_bf16_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzipq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_bf16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_bf16_x4.c
new file mode 100644
index 0000000..db27bed
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_bf16_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z0, svbfloat16x4_t, z0,
+ svzipq_bf16_x4 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z4, svbfloat16x4_t, z0,
+ svzipq_bf16_x4 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (zipq_z4_z18, svbfloat16x4_t, z4,
+ svzipq_bf16_x4 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z18_z23, svbfloat16x4_t, z18,
+ svzipq_bf16_x4 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svbfloat16x4_t, z23,
+ svzipq_bf16_x4 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z28_z0, svbfloat16x4_t, z28,
+ svzipq_bf16_x4 (z0),
+ svzipq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f16_x2.c
new file mode 100644
index 0000000..928ec54
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f16_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z0_z0, svfloat16x2_t, z0,
+ svzipq_f16_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (zipq_z0_z4, svfloat16x2_t, z0,
+ svzipq_f16_x2 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** zip {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z4_z18, svfloat16x2_t, z4,
+ svzipq_f16_x2 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** zip {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (zipq_z18_z23, svfloat16x2_t, z18,
+ svzipq_f16_x2 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svfloat16x2_t, z23,
+ svzipq_f16_x2 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0, svfloat16x2_t, z28,
+ svzipq_f16_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z28_z0_z23:
+** zip {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0_z23, svfloat16x2_t, z28,
+ svzipq_f16_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzipq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zipq_z28_z5_z19:
+** zip {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z28_z5_z19, svfloat16x2_t, z28,
+ svzipq_f16_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzipq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f16_x4.c
new file mode 100644
index 0000000..cfd5ce7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f16_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z0, svfloat16x4_t, z0,
+ svzipq_f16_x4 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z4, svfloat16x4_t, z0,
+ svzipq_f16_x4 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (zipq_z4_z18, svfloat16x4_t, z4,
+ svzipq_f16_x4 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z18_z23, svfloat16x4_t, z18,
+ svzipq_f16_x4 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svfloat16x4_t, z23,
+ svzipq_f16_x4 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z28_z0, svfloat16x4_t, z28,
+ svzipq_f16_x4 (z0),
+ svzipq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f32_x2.c
new file mode 100644
index 0000000..7cf9b43
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f32_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z0_z0, svfloat32x2_t, z0,
+ svzipq_f32_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (zipq_z0_z4, svfloat32x2_t, z0,
+ svzipq_f32_x2 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** zip {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z4_z18, svfloat32x2_t, z4,
+ svzipq_f32_x2 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** zip {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (zipq_z18_z23, svfloat32x2_t, z18,
+ svzipq_f32_x2 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svfloat32x2_t, z23,
+ svzipq_f32_x2 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0, svfloat32x2_t, z28,
+ svzipq_f32_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z28_z0_z23:
+** zip {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0_z23, svfloat32x2_t, z28,
+ svzipq_f32_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzipq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zipq_z28_z5_z19:
+** zip {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z28_z5_z19, svfloat32x2_t, z28,
+ svzipq_f32_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzipq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f32_x4.c
new file mode 100644
index 0000000..fd6f6e4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f32_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z0, svfloat32x4_t, z0,
+ svzipq_f32_x4 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z4, svfloat32x4_t, z0,
+ svzipq_f32_x4 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (zipq_z4_z18, svfloat32x4_t, z4,
+ svzipq_f32_x4 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z18_z23, svfloat32x4_t, z18,
+ svzipq_f32_x4 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svfloat32x4_t, z23,
+ svzipq_f32_x4 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z28_z0, svfloat32x4_t, z28,
+ svzipq_f32_x4 (z0),
+ svzipq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f64_x2.c
new file mode 100644
index 0000000..3360f0a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f64_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z0_z0, svfloat64x2_t, z0,
+ svzipq_f64_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (zipq_z0_z4, svfloat64x2_t, z0,
+ svzipq_f64_x2 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** zip {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z4_z18, svfloat64x2_t, z4,
+ svzipq_f64_x2 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** zip {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (zipq_z18_z23, svfloat64x2_t, z18,
+ svzipq_f64_x2 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svfloat64x2_t, z23,
+ svzipq_f64_x2 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0, svfloat64x2_t, z28,
+ svzipq_f64_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z28_z0_z23:
+** zip {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0_z23, svfloat64x2_t, z28,
+ svzipq_f64_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzipq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zipq_z28_z5_z19:
+** zip {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z28_z5_z19, svfloat64x2_t, z28,
+ svzipq_f64_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzipq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f64_x4.c
new file mode 100644
index 0000000..a51b1ff
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_f64_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z0, svfloat64x4_t, z0,
+ svzipq_f64_x4 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z4, svfloat64x4_t, z0,
+ svzipq_f64_x4 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (zipq_z4_z18, svfloat64x4_t, z4,
+ svzipq_f64_x4 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z18_z23, svfloat64x4_t, z18,
+ svzipq_f64_x4 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svfloat64x4_t, z23,
+ svzipq_f64_x4 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z28_z0, svfloat64x4_t, z28,
+ svzipq_f64_x4 (z0),
+ svzipq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s16_x2.c
new file mode 100644
index 0000000..130c094
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s16_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z0_z0, svint16x2_t, z0,
+ svzipq_s16_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (zipq_z0_z4, svint16x2_t, z0,
+ svzipq_s16_x2 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** zip {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z4_z18, svint16x2_t, z4,
+ svzipq_s16_x2 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** zip {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (zipq_z18_z23, svint16x2_t, z18,
+ svzipq_s16_x2 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svint16x2_t, z23,
+ svzipq_s16_x2 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0, svint16x2_t, z28,
+ svzipq_s16_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z28_z0_z23:
+** zip {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0_z23, svint16x2_t, z28,
+ svzipq_s16_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzipq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zipq_z28_z5_z19:
+** zip {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z28_z5_z19, svint16x2_t, z28,
+ svzipq_s16_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzipq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s16_x4.c
new file mode 100644
index 0000000..f2784f3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s16_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z0, svint16x4_t, z0,
+ svzipq_s16_x4 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z4, svint16x4_t, z0,
+ svzipq_s16_x4 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (zipq_z4_z18, svint16x4_t, z4,
+ svzipq_s16_x4 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z18_z23, svint16x4_t, z18,
+ svzipq_s16_x4 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svint16x4_t, z23,
+ svzipq_s16_x4 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z28_z0, svint16x4_t, z28,
+ svzipq_s16_x4 (z0),
+ svzipq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s32_x2.c
new file mode 100644
index 0000000..cb353cb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s32_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z0_z0, svint32x2_t, z0,
+ svzipq_s32_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (zipq_z0_z4, svint32x2_t, z0,
+ svzipq_s32_x2 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** zip {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z4_z18, svint32x2_t, z4,
+ svzipq_s32_x2 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** zip {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (zipq_z18_z23, svint32x2_t, z18,
+ svzipq_s32_x2 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svint32x2_t, z23,
+ svzipq_s32_x2 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0, svint32x2_t, z28,
+ svzipq_s32_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z28_z0_z23:
+** zip {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0_z23, svint32x2_t, z28,
+ svzipq_s32_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzipq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zipq_z28_z5_z19:
+** zip {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z28_z5_z19, svint32x2_t, z28,
+ svzipq_s32_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzipq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s32_x4.c
new file mode 100644
index 0000000..109af86
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s32_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z0, svint32x4_t, z0,
+ svzipq_s32_x4 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z4, svint32x4_t, z0,
+ svzipq_s32_x4 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (zipq_z4_z18, svint32x4_t, z4,
+ svzipq_s32_x4 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z18_z23, svint32x4_t, z18,
+ svzipq_s32_x4 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svint32x4_t, z23,
+ svzipq_s32_x4 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z28_z0, svint32x4_t, z28,
+ svzipq_s32_x4 (z0),
+ svzipq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s64_x2.c
new file mode 100644
index 0000000..8791a66
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s64_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z0_z0, svint64x2_t, z0,
+ svzipq_s64_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (zipq_z0_z4, svint64x2_t, z0,
+ svzipq_s64_x2 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** zip {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z4_z18, svint64x2_t, z4,
+ svzipq_s64_x2 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** zip {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (zipq_z18_z23, svint64x2_t, z18,
+ svzipq_s64_x2 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svint64x2_t, z23,
+ svzipq_s64_x2 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0, svint64x2_t, z28,
+ svzipq_s64_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z28_z0_z23:
+** zip {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0_z23, svint64x2_t, z28,
+ svzipq_s64_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzipq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zipq_z28_z5_z19:
+** zip {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z28_z5_z19, svint64x2_t, z28,
+ svzipq_s64_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzipq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s64_x4.c
new file mode 100644
index 0000000..71bbd18
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s64_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z0, svint64x4_t, z0,
+ svzipq_s64_x4 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z4, svint64x4_t, z0,
+ svzipq_s64_x4 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (zipq_z4_z18, svint64x4_t, z4,
+ svzipq_s64_x4 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z18_z23, svint64x4_t, z18,
+ svzipq_s64_x4 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svint64x4_t, z23,
+ svzipq_s64_x4 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z28_z0, svint64x4_t, z28,
+ svzipq_s64_x4 (z0),
+ svzipq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s8_x2.c
new file mode 100644
index 0000000..83a1756
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s8_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z0_z0, svint8x2_t, z0,
+ svzipq_s8_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (zipq_z0_z4, svint8x2_t, z0,
+ svzipq_s8_x2 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** zip {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z4_z18, svint8x2_t, z4,
+ svzipq_s8_x2 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** zip {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (zipq_z18_z23, svint8x2_t, z18,
+ svzipq_s8_x2 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svint8x2_t, z23,
+ svzipq_s8_x2 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0, svint8x2_t, z28,
+ svzipq_s8_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z28_z0_z23:
+** zip {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0_z23, svint8x2_t, z28,
+ svzipq_s8_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzipq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zipq_z28_z5_z19:
+** zip {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z28_z5_z19, svint8x2_t, z28,
+ svzipq_s8_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzipq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s8_x4.c
new file mode 100644
index 0000000..05a5813
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_s8_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z0, svint8x4_t, z0,
+ svzipq_s8_x4 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z4, svint8x4_t, z0,
+ svzipq_s8_x4 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (zipq_z4_z18, svint8x4_t, z4,
+ svzipq_s8_x4 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z18_z23, svint8x4_t, z18,
+ svzipq_s8_x4 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svint8x4_t, z23,
+ svzipq_s8_x4 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z28_z0, svint8x4_t, z28,
+ svzipq_s8_x4 (z0),
+ svzipq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u16_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u16_x2.c
new file mode 100644
index 0000000..e45ba03
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u16_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z0_z0, svuint16x2_t, z0,
+ svzipq_u16_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (zipq_z0_z4, svuint16x2_t, z0,
+ svzipq_u16_x2 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** zip {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z4_z18, svuint16x2_t, z4,
+ svzipq_u16_x2 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** zip {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (zipq_z18_z23, svuint16x2_t, z18,
+ svzipq_u16_x2 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svuint16x2_t, z23,
+ svzipq_u16_x2 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0, svuint16x2_t, z28,
+ svzipq_u16_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z28_z0_z23:
+** zip {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0_z23, svuint16x2_t, z28,
+ svzipq_u16_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzipq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zipq_z28_z5_z19:
+** zip {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z28_z5_z19, svuint16x2_t, z28,
+ svzipq_u16_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzipq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u16_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u16_x4.c
new file mode 100644
index 0000000..f562a6d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u16_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z0, svuint16x4_t, z0,
+ svzipq_u16_x4 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z4, svuint16x4_t, z0,
+ svzipq_u16_x4 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (zipq_z4_z18, svuint16x4_t, z4,
+ svzipq_u16_x4 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z18_z23, svuint16x4_t, z18,
+ svzipq_u16_x4 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svuint16x4_t, z23,
+ svzipq_u16_x4 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z28_z0, svuint16x4_t, z28,
+ svzipq_u16_x4 (z0),
+ svzipq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u32_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u32_x2.c
new file mode 100644
index 0000000..893b956
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u32_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z0_z0, svuint32x2_t, z0,
+ svzipq_u32_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (zipq_z0_z4, svuint32x2_t, z0,
+ svzipq_u32_x2 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** zip {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z4_z18, svuint32x2_t, z4,
+ svzipq_u32_x2 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** zip {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (zipq_z18_z23, svuint32x2_t, z18,
+ svzipq_u32_x2 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svuint32x2_t, z23,
+ svzipq_u32_x2 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0, svuint32x2_t, z28,
+ svzipq_u32_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z28_z0_z23:
+** zip {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0_z23, svuint32x2_t, z28,
+ svzipq_u32_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzipq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zipq_z28_z5_z19:
+** zip {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z28_z5_z19, svuint32x2_t, z28,
+ svzipq_u32_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzipq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u32_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u32_x4.c
new file mode 100644
index 0000000..c7c52e1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u32_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z0, svuint32x4_t, z0,
+ svzipq_u32_x4 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z4, svuint32x4_t, z0,
+ svzipq_u32_x4 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (zipq_z4_z18, svuint32x4_t, z4,
+ svzipq_u32_x4 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z18_z23, svuint32x4_t, z18,
+ svzipq_u32_x4 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svuint32x4_t, z23,
+ svzipq_u32_x4 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z28_z0, svuint32x4_t, z28,
+ svzipq_u32_x4 (z0),
+ svzipq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u64_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u64_x2.c
new file mode 100644
index 0000000..4cbf69a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u64_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z0_z0, svuint64x2_t, z0,
+ svzipq_u64_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (zipq_z0_z4, svuint64x2_t, z0,
+ svzipq_u64_x2 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** zip {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z4_z18, svuint64x2_t, z4,
+ svzipq_u64_x2 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** zip {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (zipq_z18_z23, svuint64x2_t, z18,
+ svzipq_u64_x2 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svuint64x2_t, z23,
+ svzipq_u64_x2 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0, svuint64x2_t, z28,
+ svzipq_u64_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z28_z0_z23:
+** zip {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0_z23, svuint64x2_t, z28,
+ svzipq_u64_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzipq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zipq_z28_z5_z19:
+** zip {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z28_z5_z19, svuint64x2_t, z28,
+ svzipq_u64_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzipq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u64_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u64_x4.c
new file mode 100644
index 0000000..780e3a4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u64_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z0, svuint64x4_t, z0,
+ svzipq_u64_x4 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z4, svuint64x4_t, z0,
+ svzipq_u64_x4 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (zipq_z4_z18, svuint64x4_t, z4,
+ svzipq_u64_x4 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z18_z23, svuint64x4_t, z18,
+ svzipq_u64_x4 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svuint64x4_t, z23,
+ svzipq_u64_x4 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z28_z0, svuint64x4_t, z28,
+ svzipq_u64_x4 (z0),
+ svzipq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u8_x2.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u8_x2.c
new file mode 100644
index 0000000..1e1ee8b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u8_x2.c
@@ -0,0 +1,77 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z1\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z0_z0, svuint8x2_t, z0,
+ svzipq_u8_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z1\.q}, z4\.q, z5\.q
+** ret
+*/
+TEST_XN (zipq_z0_z4, svuint8x2_t, z0,
+ svzipq_u8_x2 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** zip {z4\.q - z5\.q}, z18\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z4_z18, svuint8x2_t, z4,
+ svzipq_u8_x2 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** zip {z18\.q - z19\.q}, z23\.q, z24\.q
+** ret
+*/
+TEST_XN (zipq_z18_z23, svuint8x2_t, z18,
+ svzipq_u8_x2 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, z28\.q, z29\.q
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svuint8x2_t, z23,
+ svzipq_u8_x2 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z29\.q}, z0\.q, z1\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0, svuint8x2_t, z28,
+ svzipq_u8_x2 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z28_z0_z23:
+** zip {z28\.q - z29\.q}, z0\.q, z23\.q
+** ret
+*/
+TEST_XN (zipq_z28_z0_z23, svuint8x2_t, z28,
+ svzipq_u8_x2 (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))),
+ svzipq (svcreate2 (svget2 (z0, 0), svget2 (z23, 0))))
+
+/*
+** zipq_z28_z5_z19:
+** zip {z28\.q - z29\.q}, z5\.q, z19\.q
+** ret
+*/
+TEST_XN (zipq_z28_z5_z19, svuint8x2_t, z28,
+ svzipq_u8_x2 (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))),
+ svzipq (svcreate2 (svget2 (z4, 1), svget2 (z18, 1))))
diff --git a/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u8_x4.c b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u8_x4.c
new file mode 100644
index 0000000..20344d0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sme2/acle-asm/zipq_u8_x4.c
@@ -0,0 +1,73 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sme2_acle.h"
+
+/*
+** zipq_z0_z0:
+** zip {z0\.q - z3\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z0, svuint8x4_t, z0,
+ svzipq_u8_x4 (z0),
+ svzipq (z0))
+
+/*
+** zipq_z0_z4:
+** zip {z0\.q - z3\.q}, {z4\.q - z7\.q}
+** ret
+*/
+TEST_XN (zipq_z0_z4, svuint8x4_t, z0,
+ svzipq_u8_x4 (z4),
+ svzipq (z4))
+
+/*
+** zipq_z4_z18:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z4\.q - z7\.q}, [^\n]+
+** ret
+*/
+TEST_XN (zipq_z4_z18, svuint8x4_t, z4,
+ svzipq_u8_x4 (z18),
+ svzipq (z18))
+
+/*
+** zipq_z18_z23:
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** zip {z[^\n]+}, {z[^\n]+}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z18_z23, svuint8x4_t, z18,
+ svzipq_u8_x4 (z23),
+ svzipq (z23))
+
+/*
+** zipq_z23_z28:
+** zip [^\n]+, {z28\.q - z31\.q}
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** mov [^\n]+
+** ret
+*/
+TEST_XN (zipq_z23_z28, svuint8x4_t, z23,
+ svzipq_u8_x4 (z28),
+ svzipq (z28))
+
+/*
+** zipq_z28_z0:
+** zip {z28\.q - z31\.q}, {z0\.q - z3\.q}
+** ret
+*/
+TEST_XN (zipq_z28_z0, svuint8x4_t, z28,
+ svzipq_u8_x4 (z0),
+ svzipq (z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/aarch64-sve-acle-asm.exp b/gcc/testsuite/gcc.target/aarch64/sve/acle/aarch64-sve-acle-asm.exp
index ba4704e..eee7c42 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/aarch64-sve-acle-asm.exp
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/aarch64-sve-acle-asm.exp
@@ -50,6 +50,7 @@ if { [info exists gcc_runtest_parallelize_limit_minor] } {
torture-init
set-torture-options {
"-std=c90 -O0 -g"
+ "-std=c90 -O0 -DSTREAMING_COMPATIBLE"
"-std=c90 -O1 -g"
"-std=c99 -O2 -g"
"-std=c11 -O3 -g"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adda_f16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adda_f16.c
index 642c45a..d381d88 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adda_f16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adda_f16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adda_f32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adda_f32.c
index 79bdd3d..e0b9088 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adda_f32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adda_f32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adda_f64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adda_f64.c
index c8f5677..fd730c8 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adda_f64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adda_f64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adrb.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adrb.c
index a61eec9..5dcdc54 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adrb.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adrb.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adrd.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adrd.c
index 970485b..d9d16ce 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adrd.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adrd.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adrh.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adrh.c
index d06f51f..a358c24 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adrh.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adrh.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adrw.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adrw.c
index b23f25a..bd1e9af 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adrw.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/adrw.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/bfmmla_f32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/bfmmla_f32.c
index b1d98fb..4bb2912 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/bfmmla_f32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/bfmmla_f32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-additional-options "-march=armv8.2-a+sve+bf16" } */
/* { dg-require-effective-target aarch64_asm_bf16_ok } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/cntb.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/cntb.c
index 8b8fe8e..a22d8a2 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/cntb.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/cntb.c
@@ -51,19 +51,24 @@ PROTO (cntb_15, uint64_t, ()) { return svcntb () * 15; }
*/
PROTO (cntb_16, uint64_t, ()) { return svcntb () * 16; }
-/* Other sequences would be OK. */
/*
** cntb_17:
-** cntb x0, all, mul #16
-** incb x0
+** rdvl x0, #17
** ret
*/
PROTO (cntb_17, uint64_t, ()) { return svcntb () * 17; }
/*
+** cntb_31:
+** rdvl x0, #31
+** ret
+*/
+PROTO (cntb_31, uint64_t, ()) { return svcntb () * 31; }
+
+/*
** cntb_32:
-** cntd (x[0-9]+)
-** lsl x0, \1, 8
+** cntb (x[0-9]+)
+** lsl x0, \1, 5
** ret
*/
PROTO (cntb_32, uint64_t, ()) { return svcntb () * 32; }
@@ -80,16 +85,16 @@ PROTO (cntb_33, uint64_t, ()) { return svcntb () * 33; }
/*
** cntb_64:
-** cntd (x[0-9]+)
-** lsl x0, \1, 9
+** cntb (x[0-9]+)
+** lsl x0, \1, 6
** ret
*/
PROTO (cntb_64, uint64_t, ()) { return svcntb () * 64; }
/*
** cntb_128:
-** cntd (x[0-9]+)
-** lsl x0, \1, 10
+** cntb (x[0-9]+)
+** lsl x0, \1, 7
** ret
*/
PROTO (cntb_128, uint64_t, ()) { return svcntb () * 128; }
@@ -106,47 +111,71 @@ PROTO (cntb_129, uint64_t, ()) { return svcntb () * 129; }
/*
** cntb_m1:
-** cntb (x[0-9]+)
-** neg x0, \1
+** rdvl x0, #-1
** ret
*/
PROTO (cntb_m1, uint64_t, ()) { return -svcntb (); }
/*
** cntb_m13:
-** cntb (x[0-9]+), all, mul #13
-** neg x0, \1
+** rdvl x0, #-13
** ret
*/
PROTO (cntb_m13, uint64_t, ()) { return -svcntb () * 13; }
/*
** cntb_m15:
-** cntb (x[0-9]+), all, mul #15
-** neg x0, \1
+** rdvl x0, #-15
** ret
*/
PROTO (cntb_m15, uint64_t, ()) { return -svcntb () * 15; }
/*
** cntb_m16:
-** cntb (x[0-9]+), all, mul #16
-** neg x0, \1
+** rdvl x0, #-16
** ret
*/
PROTO (cntb_m16, uint64_t, ()) { return -svcntb () * 16; }
-/* Other sequences would be OK. */
/*
** cntb_m17:
-** cntb x0, all, mul #16
-** incb x0
-** neg x0, x0
+** rdvl x0, #-17
** ret
*/
PROTO (cntb_m17, uint64_t, ()) { return -svcntb () * 17; }
/*
+** cntb_m32:
+** rdvl x0, #-32
+** ret
+*/
+PROTO (cntb_m32, uint64_t, ()) { return -svcntb () * 32; }
+
+/*
+** cntb_m33:
+** rdvl x0, #-32
+** decb x0
+** ret
+*/
+PROTO (cntb_m33, uint64_t, ()) { return -svcntb () * 33; }
+
+/*
+** cntb_m34:
+** rdvl (x[0-9]+), #-17
+** lsl x0, \1, #?1
+** ret
+*/
+PROTO (cntb_m34, uint64_t, ()) { return -svcntb () * 34; }
+
+/*
+** cntb_m64:
+** rdvl (x[0-9]+), #-1
+** lsl x0, \1, #?6
+** ret
+*/
+PROTO (cntb_m64, uint64_t, ()) { return -svcntb () * 64; }
+
+/*
** incb_1:
** incb x0
** ret
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/cntd.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/cntd.c
index 0d0ed48..090a643 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/cntd.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/cntd.c
@@ -54,8 +54,8 @@ PROTO (cntd_16, uint64_t, ()) { return svcntd () * 16; }
/* Other sequences would be OK. */
/*
** cntd_17:
-** cntb x0, all, mul #2
-** incd x0
+** rdvl (x[0-9]+), #17
+** asr x0, \1, 3
** ret
*/
PROTO (cntd_17, uint64_t, ()) { return svcntd () * 17; }
@@ -107,8 +107,7 @@ PROTO (cntd_m15, uint64_t, ()) { return -svcntd () * 15; }
/*
** cntd_m16:
-** cntb (x[0-9]+), all, mul #2
-** neg x0, \1
+** rdvl x0, #-2
** ret
*/
PROTO (cntd_m16, uint64_t, ()) { return -svcntd () * 16; }
@@ -116,9 +115,8 @@ PROTO (cntd_m16, uint64_t, ()) { return -svcntd () * 16; }
/* Other sequences would be OK. */
/*
** cntd_m17:
-** cntb x0, all, mul #2
-** incd x0
-** neg x0, x0
+** rdvl (x[0-9]+), #-17
+** asr x0, \1, 3
** ret
*/
PROTO (cntd_m17, uint64_t, ()) { return -svcntd () * 17; }
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/cnth.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/cnth.c
index c29930f..1a4e7dc 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/cnth.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/cnth.c
@@ -54,8 +54,8 @@ PROTO (cnth_16, uint64_t, ()) { return svcnth () * 16; }
/* Other sequences would be OK. */
/*
** cnth_17:
-** cntb x0, all, mul #8
-** inch x0
+** rdvl (x[0-9]+), #17
+** asr x0, \1, 1
** ret
*/
PROTO (cnth_17, uint64_t, ()) { return svcnth () * 17; }
@@ -69,16 +69,16 @@ PROTO (cnth_32, uint64_t, ()) { return svcnth () * 32; }
/*
** cnth_64:
-** cntd (x[0-9]+)
-** lsl x0, \1, 8
+** cntb (x[0-9]+)
+** lsl x0, \1, 5
** ret
*/
PROTO (cnth_64, uint64_t, ()) { return svcnth () * 64; }
/*
** cnth_128:
-** cntd (x[0-9]+)
-** lsl x0, \1, 9
+** cntb (x[0-9]+)
+** lsl x0, \1, 6
** ret
*/
PROTO (cnth_128, uint64_t, ()) { return svcnth () * 128; }
@@ -109,8 +109,7 @@ PROTO (cnth_m15, uint64_t, ()) { return -svcnth () * 15; }
/*
** cnth_m16:
-** cntb (x[0-9]+), all, mul #8
-** neg x0, \1
+** rdvl x0, #-8
** ret
*/
PROTO (cnth_m16, uint64_t, ()) { return -svcnth () * 16; }
@@ -118,9 +117,8 @@ PROTO (cnth_m16, uint64_t, ()) { return -svcnth () * 16; }
/* Other sequences would be OK. */
/*
** cnth_m17:
-** cntb x0, all, mul #8
-** inch x0
-** neg x0, x0
+** rdvl (x[0-9]+), #-17
+** asr x0, \1, 1
** ret
*/
PROTO (cnth_m17, uint64_t, ()) { return -svcnth () * 17; }
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/cntw.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/cntw.c
index e26cc67..9d16976 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/cntw.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/cntw.c
@@ -54,8 +54,8 @@ PROTO (cntw_16, uint64_t, ()) { return svcntw () * 16; }
/* Other sequences would be OK. */
/*
** cntw_17:
-** cntb x0, all, mul #4
-** incw x0
+** rdvl (x[0-9]+), #17
+** asr x0, \1, 2
** ret
*/
PROTO (cntw_17, uint64_t, ()) { return svcntw () * 17; }
@@ -76,8 +76,8 @@ PROTO (cntw_64, uint64_t, ()) { return svcntw () * 64; }
/*
** cntw_128:
-** cntd (x[0-9]+)
-** lsl x0, \1, 8
+** cntb (x[0-9]+)
+** lsl x0, \1, 5
** ret
*/
PROTO (cntw_128, uint64_t, ()) { return svcntw () * 128; }
@@ -108,8 +108,7 @@ PROTO (cntw_m15, uint64_t, ()) { return -svcntw () * 15; }
/*
** cntw_m16:
-** cntb (x[0-9]+), all, mul #4
-** neg x0, \1
+** rdvl (x[0-9]+), #-4
** ret
*/
PROTO (cntw_m16, uint64_t, ()) { return -svcntw () * 16; }
@@ -117,9 +116,8 @@ PROTO (cntw_m16, uint64_t, ()) { return -svcntw () * 16; }
/* Other sequences would be OK. */
/*
** cntw_m17:
-** cntb x0, all, mul #4
-** incw x0
-** neg x0, x0
+** rdvl (x[0-9]+), #-17
+** asr x0, \1, 2
** ret
*/
PROTO (cntw_m17, uint64_t, ()) { return -svcntw () * 17; }
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_f32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_f32.c
index 2e80d68..d261ec0 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_f32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_f32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_f64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_f64.c
index e0bc33e..024b051 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_f64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_f64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_s32.c
index e463498..0b32dfb 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_s64.c
index 71cb97b..38688db 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_u32.c
index 954329a..a3e89cc 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_u64.c
index ec66484..602ab04 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/compact_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/create2_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/create2_1.c
index e9158ed..3b9245e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/create2_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/create2_1.c
@@ -121,3 +121,21 @@ TEST_CREATE (create2_u64, svuint64x2_t, svuint64_t,
TEST_CREATE (create2_f64, svfloat64x2_t, svfloat64_t,
z0 = svcreate2_f64 (z5, z4),
z0 = svcreate2 (z5, z4))
+
+/*
+** create2_b_0:
+** ret
+*/
+TEST_CREATE_B (create2_b_0, svboolx2_t,
+ p0_res = svcreate2_b (p0, p1),
+ p0_res = svcreate2 (p0, p1))
+
+/*
+** create2_b_1:
+** mov p0\.b, p2\.b
+** mov p1\.b, p3\.b
+** ret
+*/
+TEST_CREATE_B (create2_b_1, svboolx2_t,
+ p0_res = svcreate2_b (p2, p3),
+ p0_res = svcreate2 (p2, p3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/expa_f16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/expa_f16.c
index 5a5411e..87c26e6 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/expa_f16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/expa_f16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/expa_f32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/expa_f32.c
index 4ded1c5..5e98395 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/expa_f32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/expa_f32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/expa_f64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/expa_f64.c
index c31f9cc..b117df2 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/expa_f64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/expa_f64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/get2_b.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/get2_b.c
new file mode 100644
index 0000000..f54feea
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/get2_b.c
@@ -0,0 +1,55 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** get2_b_p0_0:
+** mov p0\.b, p4\.b
+** ret
+*/
+TEST_GET_B (get2_b_p0_0, svboolx2_t,
+ p0 = svget2_b (p4, 0),
+ p0 = svget2 (p4, 0))
+
+/*
+** get2_b_p0_1:
+** mov p0\.b, p5\.b
+** ret
+*/
+TEST_GET_B (get2_b_p0_1, svboolx2_t,
+ p0 = svget2_b (p4, 1),
+ p0 = svget2 (p4, 1))
+
+/*
+** get2_b_p4_0:
+** ret
+*/
+TEST_GET_B (get2_b_p4_0, svboolx2_t,
+ p4_res = svget2_b (p4, 0),
+ p4_res = svget2 (p4, 0))
+
+/*
+** get2_b_p4_1:
+** mov p4\.b, p5\.b
+** ret
+*/
+TEST_GET_B (get2_b_p4_1, svboolx2_t,
+ p4_res = svget2_b (p4, 1),
+ p4_res = svget2 (p4, 1))
+
+/*
+** get2_b_p5_0:
+** mov p5\.b, p4\.b
+** ret
+*/
+TEST_GET_B (get2_b_p5_0, svboolx2_t,
+ p5_res = svget2_b (p4, 0),
+ p5_res = svget2 (p4, 0))
+
+/*
+** get2_b_p5_1:
+** ret
+*/
+TEST_GET_B (get2_b_p5_1, svboolx2_t,
+ p5_res = svget2_b (p4, 1),
+ p5_res = svget2 (p4, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_f32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_f32.c
index 00b68ff..8b972f6 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_f32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_f32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_f64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_f64.c
index 4712796..413d4d6 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_f64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_f64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_s32.c
index 9b63355..b3df7d1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_s64.c
index c9cea3a..0da1e52 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_u32.c
index 2cccc8d..a3304c4 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_u64.c
index 6ee1d48..73ef948 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1_gather_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_bf16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_bf16.c
index cb18017..fe909b6 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_bf16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_bf16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
/* { dg-additional-options "-march=armv8.6-a+f64mm" } */
/* { dg-require-effective-target aarch64_asm_f64mm_ok } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_f16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_f16.c
index 86081ed..30ba306 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_f16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_f16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
/* { dg-additional-options "-march=armv8.6-a+f64mm" } */
/* { dg-require-effective-target aarch64_asm_f64mm_ok } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_f32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_f32.c
index c8df00f..cf62fad 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_f32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_f32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
/* { dg-additional-options "-march=armv8.6-a+f64mm" } */
/* { dg-require-effective-target aarch64_asm_f64mm_ok } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_f64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_f64.c
index 2fb9d5b..b9fde4d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_f64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_f64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
/* { dg-additional-options "-march=armv8.6-a+f64mm" } */
/* { dg-require-effective-target aarch64_asm_f64mm_ok } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_s16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_s16.c
index 3cd211b..35b7dd1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_s16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_s16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
/* { dg-additional-options "-march=armv8.6-a+f64mm" } */
/* { dg-require-effective-target aarch64_asm_f64mm_ok } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_s32.c
index 44b16ed..57b6a65 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
/* { dg-additional-options "-march=armv8.6-a+f64mm" } */
/* { dg-require-effective-target aarch64_asm_f64mm_ok } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_s64.c
index 3aa9a15..bd7e284 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
/* { dg-additional-options "-march=armv8.6-a+f64mm" } */
/* { dg-require-effective-target aarch64_asm_f64mm_ok } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_s8.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_s8.c
index 49aff51..1438000 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_s8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_s8.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
/* { dg-additional-options "-march=armv8.6-a+f64mm" } */
/* { dg-require-effective-target aarch64_asm_f64mm_ok } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_u16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_u16.c
index 00bf9e12..145b0b7 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_u16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_u16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
/* { dg-additional-options "-march=armv8.6-a+f64mm" } */
/* { dg-require-effective-target aarch64_asm_f64mm_ok } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_u32.c
index 9e9b329..9f15063 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
/* { dg-additional-options "-march=armv8.6-a+f64mm" } */
/* { dg-require-effective-target aarch64_asm_f64mm_ok } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_u64.c
index 64ec628..8dd75d1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
/* { dg-additional-options "-march=armv8.6-a+f64mm" } */
/* { dg-require-effective-target aarch64_asm_f64mm_ok } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_u8.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_u8.c
index 2270132..f154545 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_u8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_u8.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
/* { dg-additional-options "-march=armv8.6-a+f64mm" } */
/* { dg-require-effective-target aarch64_asm_f64mm_ok } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sb_gather_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sb_gather_s32.c
index 16a5316..06249ad 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sb_gather_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sb_gather_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sb_gather_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sb_gather_s64.c
index 3f95324..8d141e1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sb_gather_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sb_gather_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sb_gather_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sb_gather_u32.c
index 424de65..77836cb 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sb_gather_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sb_gather_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sb_gather_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sb_gather_u64.c
index aa375be..f4b24ab 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sb_gather_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sb_gather_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sh_gather_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sh_gather_s32.c
index ed07b4d..1b97823 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sh_gather_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sh_gather_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sh_gather_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sh_gather_s64.c
index 20ca427..2009dec 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sh_gather_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sh_gather_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sh_gather_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sh_gather_u32.c
index e3a85a2..0e1d489 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sh_gather_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sh_gather_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sh_gather_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sh_gather_u64.c
index 3a0094f..115d7d3 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sh_gather_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sh_gather_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sw_gather_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sw_gather_s64.c
index 4d076b4..5dc4442 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sw_gather_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sw_gather_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sw_gather_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sw_gather_u64.c
index ffa85eb..fac4ec4 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sw_gather_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1sw_gather_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ub_gather_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ub_gather_s32.c
index a9c4182..f57df422 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ub_gather_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ub_gather_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ub_gather_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ub_gather_s64.c
index 99af86d..0c069fa 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ub_gather_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ub_gather_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ub_gather_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ub_gather_u32.c
index 77c7e0a..98102e0 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ub_gather_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ub_gather_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ub_gather_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ub_gather_u64.c
index b605f8b..f86a34d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ub_gather_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ub_gather_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uh_gather_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uh_gather_s32.c
index 84fb5c3..1393718 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uh_gather_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uh_gather_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uh_gather_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uh_gather_s64.c
index 4470017..f0338aa 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uh_gather_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uh_gather_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uh_gather_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uh_gather_u32.c
index 09d3cc8..5810bc0 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uh_gather_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uh_gather_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uh_gather_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uh_gather_u64.c
index f3dcf03..52e95ab 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uh_gather_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uh_gather_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uw_gather_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uw_gather_s64.c
index f4e9d5d..0889eef 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uw_gather_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uw_gather_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uw_gather_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uw_gather_u64.c
index 854d192..fb144d7 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uw_gather_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1uw_gather_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_bf16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_bf16.c
index 80f6468..1f99748 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_bf16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_bf16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_f16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_f16.c
index 13ce863..60405d0 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_f16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_f16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_f32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_f32.c
index 2fcc633..225e996 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_f32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_f32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_f64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_f64.c
index cc15b92..366e36a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_f64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_f64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_f32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_f32.c
index 7e330c0..b84b9bc 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_f32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_f32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_f64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_f64.c
index d0e47f0..e779b07 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_f64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_f64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_s32.c
index 66bf0f7..17e0f9a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_s64.c
index faf71bf..030f187 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_u32.c
index 41c7dc9..fb86530 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_u64.c
index 8b53ce9..5be30a2 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_gather_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_s16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_s16.c
index 1d5fde0..61d242c 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_s16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_s16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_s32.c
index 97a36e8..afe748e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_s64.c
index c018a4c..bee2228 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_s8.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_s8.c
index cf620d1..ccaac2c 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_s8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_s8.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_u16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_u16.c
index 1fa8192..c8416f9 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_u16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_u16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_u32.c
index 5224ec4..ec26a82 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_u64.c
index 18e87f2..e211f17 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_u8.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_u8.c
index 83883fc..24dfe45 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_u8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1_u8.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_s32.c
index c2a6768..f7e3977 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_s64.c
index 2f2a04d..7f2a829 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_u32.c
index e3e83a2..685f628 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_u64.c
index 769f2c2..49a7a85 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_gather_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_s16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_s16.c
index e0a748c..1d30c7b 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_s16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_s16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_s32.c
index 86716da..c2b3f42 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_s64.c
index e7a4aa6..585a624 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_u16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_u16.c
index 69ba96d..ebb2f0f 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_u16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_u16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_u32.c
index e1a1873..f4ea96c 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_u64.c
index 0a49cbc..e373523 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sb_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_s32.c
index b633335d..67e7036 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_s64.c
index 32a4309..5755c79 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_u32.c
index 73a9be8..a584899 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_u64.c
index 94ea73b..b187512 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_gather_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_s32.c
index 81b64e8..bffac93 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_s64.c
index 453b3ff..a4acb1e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_u32.c
index bbbed79..828288c 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_u64.c
index 5430e25..e3432c4 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sh_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sw_gather_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sw_gather_s64.c
index e5da8a8..78aa34e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sw_gather_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sw_gather_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sw_gather_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sw_gather_u64.c
index 4114287..9dad121 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sw_gather_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sw_gather_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sw_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sw_s64.c
index d795ace..33b6c10 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sw_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sw_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sw_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sw_u64.c
index 6caf2f5..e8c9c84 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sw_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1sw_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_s32.c
index af0be08..b1c9c81 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_s64.c
index 43124dd..9ab776a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_u32.c
index 90c4e58..745740df 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_u64.c
index 302623a..3a7bd6a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_gather_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_s16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_s16.c
index 88ad2d1..ade0704 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_s16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_s16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_s32.c
index e8e0641..5d3e0ce 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_s64.c
index 21d02dd..08ae802 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_u16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_u16.c
index 904cb02..d8dc5e1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_u16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_u16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_u32.c
index a400123..042ae5a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_u64.c
index a9a98a6..d0844fa 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1ub_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_s32.c
index d02e443..1246010 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_s64.c
index 663a73d..5363313 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_u32.c
index 5e0ef06..602e6a6 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_u64.c
index 1cfae1b..4b307b3 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_gather_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_s32.c
index abb3d76..db205b1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_s64.c
index 6e330e8..0eac877 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_u32.c
index 4eb5323..266ecf1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_u64.c
index ebac26e..bdd725e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uh_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uw_gather_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uw_gather_s64.c
index 6c0daea..ab2c79d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uw_gather_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uw_gather_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uw_gather_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uw_gather_u64.c
index 0e400c6..361d7de 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uw_gather_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uw_gather_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uw_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uw_s64.c
index ac97798..8adcec3 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uw_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uw_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uw_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uw_u64.c
index c7ab061..781fc1a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uw_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldff1uw_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_bf16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_bf16.c
index 947a896..93b4425 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_bf16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_bf16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_f16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_f16.c
index cf01786..d47d748 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_f16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_f16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_f32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_f32.c
index 83b73ec..e390d68 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_f32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_f32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_f64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_f64.c
index 778096e..97a0e39 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_f64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_f64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_s16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_s16.c
index 592c823..21008d7 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_s16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_s16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_s32.c
index 634092a..8a3d795 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_s64.c
index 4a03f66..c0b57a2 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_s8.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_s8.c
index 162ee17..6714152 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_s8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_s8.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_u16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_u16.c
index e920ac4..3df404d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_u16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_u16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_u32.c
index 65e28c5..e899a4a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_u64.c
index 70d3f27..ab69656 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_u8.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_u8.c
index 5c29f1d..5d7b074 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_u8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_u8.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_s16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_s16.c
index e04b9a7..5b53c88 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_s16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_s16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_s32.c
index 0553fc9..992eba7 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_s64.c
index 61a474f..99e0f8b 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_u16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_u16.c
index be63d8b..fe23913 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_u16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_u16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_u32.c
index 4f52490..6deb397 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_u64.c
index 73f50d1..e76457d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sb_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sh_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sh_s32.c
index 08c7dc6..e49a7f8 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sh_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sh_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sh_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sh_s64.c
index 6a41bc2..00b4028 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sh_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sh_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sh_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sh_u32.c
index 2f77187..41560af 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sh_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sh_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sh_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sh_u64.c
index d7f1a68..0acf4b3 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sh_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sh_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sw_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sw_s64.c
index 5b483e4..5782128 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sw_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sw_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sw_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sw_u64.c
index 62121ce..8249c4c 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sw_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1sw_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_s16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_s16.c
index 8fe1341..e59c451 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_s16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_s16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_s32.c
index 50122e3..d788576 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_s64.c
index d7cce11..b21fdb9 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_u16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_u16.c
index 7bf82c3..1ae41b0 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_u16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_u16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_u32.c
index e2fef06..e3d8fb3 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_u64.c
index 57c61e1..df9a0c0 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1ub_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uh_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uh_s32.c
index ed9686c..c3467d8 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uh_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uh_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uh_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uh_s64.c
index a3107f5..bf3355e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uh_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uh_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uh_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uh_u32.c
index 93d5aba..bcc3eb3 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uh_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uh_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uh_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uh_u64.c
index 32d36a8..4c01c13 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uh_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uh_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uw_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uw_s64.c
index 3739227..3c65565 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uw_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uw_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uw_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uw_u64.c
index b3c3be1..b222a0d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uw_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1uw_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/mmla_f32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/mmla_f32.c
index f66dbf3..e1c7f47 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/mmla_f32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/mmla_f32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-require-effective-target aarch64_asm_f32mm_ok } */
/* { dg-additional-options "-march=armv8.2-a+f32mm" } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/mmla_f64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/mmla_f64.c
index 49dc060..c45caa7 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/mmla_f64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/mmla_f64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-require-effective-target aarch64_asm_f64mm_ok } */
/* { dg-additional-options "-march=armv8.2-a+f64mm" } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/mmla_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/mmla_s32.c
index e7ce009..dc15546 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/mmla_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/mmla_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-require-effective-target aarch64_asm_i8mm_ok } */
/* { dg-additional-options "-march=armv8.2-a+sve+i8mm" } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/mmla_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/mmla_u32.c
index 81f5166..43d601a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/mmla_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/mmla_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-require-effective-target aarch64_asm_i8mm_ok } */
/* { dg-additional-options "-march=armv8.2-a+sve+i8mm" } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfb.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfb.c
index c90730a..94cd3a0 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfb.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfb.c
@@ -218,8 +218,8 @@ TEST_PREFETCH (prfb_vnum_31, uint16_t,
/*
** prfb_vnum_32:
-** cntd (x[0-9]+)
-** lsl (x[0-9]+), \1, #?8
+** cntb (x[0-9]+)
+** lsl (x[0-9]+), \1, #?5
** add (x[0-9]+), (\2, x0|x0, \2)
** prfb pldl1keep, p0, \[\3\]
** ret
@@ -240,7 +240,7 @@ TEST_PREFETCH (prfb_vnum_m32, uint16_t,
/*
** prfb_vnum_m33:
** ...
-** prfb pldl1keep, p0, \[x[0-9]+\]
+** prfb pldl1keep, p0, \[x[0-9]+(, x[0-9]+)?\]
** ret
*/
TEST_PREFETCH (prfb_vnum_m33, uint16_t,
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfb_gather.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfb_gather.c
index c4bfbbb..f32cfbf 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfb_gather.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfb_gather.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfd.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfd.c
index 869ef3d..b7a116c 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfd.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfd.c
@@ -218,8 +218,8 @@ TEST_PREFETCH (prfd_vnum_31, uint16_t,
/*
** prfd_vnum_32:
-** cntd (x[0-9]+)
-** lsl (x[0-9]+), \1, #?8
+** cntb (x[0-9]+)
+** lsl (x[0-9]+), \1, #?5
** add (x[0-9]+), (\2, x0|x0, \2)
** prfd pldl1keep, p0, \[\3\]
** ret
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfd_gather.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfd_gather.c
index a84acb1..8a4293b 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfd_gather.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfd_gather.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfh.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfh.c
index 45a735e..9d3df6b 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfh.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfh.c
@@ -218,8 +218,8 @@ TEST_PREFETCH (prfh_vnum_31, uint16_t,
/*
** prfh_vnum_32:
-** cntd (x[0-9]+)
-** lsl (x[0-9]+), \1, #?8
+** cntb (x[0-9]+)
+** lsl (x[0-9]+), \1, #?5
** add (x[0-9]+), (\2, x0|x0, \2)
** prfh pldl1keep, p0, \[\3\]
** ret
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfh_gather.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfh_gather.c
index 04b7a15..6beca4b 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfh_gather.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfh_gather.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfw.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfw.c
index 444187f..6962aba 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfw.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfw.c
@@ -218,8 +218,8 @@ TEST_PREFETCH (prfw_vnum_31, uint16_t,
/*
** prfw_vnum_32:
-** cntd (x[0-9]+)
-** lsl (x[0-9]+), \1, #?8
+** cntb (x[0-9]+)
+** lsl (x[0-9]+), \1, #?5
** add (x[0-9]+), (\2, x0|x0, \2)
** prfw pldl1keep, p0, \[\3\]
** ret
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfw_gather.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfw_gather.c
index 2bbae1b..6af44ac 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfw_gather.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/prfw_gather.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/rdffr_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/rdffr_1.c
index 5564e96..7e28ef6 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/rdffr_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/rdffr_1.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_b.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_b.c
new file mode 100644
index 0000000..57736ec
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_b.c
@@ -0,0 +1,20 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** reinterpret_b_c_tied1:
+** ret
+*/
+TEST_DUAL_P_REV (reinterpret_b_c_tied1, svbool_t, svcount_t,
+ p0_res = svreinterpret_b_c (p0),
+ p0_res = svreinterpret_b (p0))
+
+/*
+** reinterpret_b_c_untied:
+** mov p0\.b, p2\.b
+** ret
+*/
+TEST_DUAL_P (reinterpret_b_c_untied, svbool_t, svcount_t,
+ p0 = svreinterpret_b_c (p2),
+ p0 = svreinterpret_b (p2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_bf16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_bf16.c
index 2d2c2a7..dd0daf2 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_bf16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_bf16.c
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_bf16_u64_tied1, svbfloat16_t, svuint64_t,
TEST_DUAL_Z (reinterpret_bf16_u64_untied, svbfloat16_t, svuint64_t,
z0 = svreinterpret_bf16_u64 (z4),
z0 = svreinterpret_bf16 (z4))
+
+/*
+** reinterpret_bf16_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_bf16_bf16_x2_tied1, svbfloat16x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_bf16_bf16_x2 (z0),
+ z0_res = svreinterpret_bf16 (z0))
+
+/*
+** reinterpret_bf16_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_bf16_f32_x2_untied, svbfloat16x2_t, svfloat32x2_t, z0,
+ svreinterpret_bf16_f32_x2 (z4),
+ svreinterpret_bf16 (z4))
+
+/*
+** reinterpret_bf16_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_bf16_s64_x3_tied1, svbfloat16x3_t, svint64x3_t,
+ z0_res = svreinterpret_bf16_s64_x3 (z0),
+ z0_res = svreinterpret_bf16 (z0))
+
+/*
+** reinterpret_bf16_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_bf16_u8_x3_untied, svbfloat16x3_t, svuint8x3_t, z18,
+ svreinterpret_bf16_u8_x3 (z23),
+ svreinterpret_bf16 (z23))
+
+/*
+** reinterpret_bf16_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_bf16_u32_x4_tied1, svbfloat16x4_t, svuint32x4_t,
+ z0_res = svreinterpret_bf16_u32_x4 (z0),
+ z0_res = svreinterpret_bf16 (z0))
+
+/*
+** reinterpret_bf16_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_bf16_f64_x4_untied, svbfloat16x4_t, svfloat64x4_t, z28,
+ svreinterpret_bf16_f64_x4 (z4),
+ svreinterpret_bf16 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f16.c
index 60705e6..9b6f822 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f16.c
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_f16_u64_tied1, svfloat16_t, svuint64_t,
TEST_DUAL_Z (reinterpret_f16_u64_untied, svfloat16_t, svuint64_t,
z0 = svreinterpret_f16_u64 (z4),
z0 = svreinterpret_f16 (z4))
+
+/*
+** reinterpret_f16_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f16_bf16_x2_tied1, svfloat16x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_f16_bf16_x2 (z0),
+ z0_res = svreinterpret_f16 (z0))
+
+/*
+** reinterpret_f16_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_f16_f32_x2_untied, svfloat16x2_t, svfloat32x2_t, z0,
+ svreinterpret_f16_f32_x2 (z4),
+ svreinterpret_f16 (z4))
+
+/*
+** reinterpret_f16_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f16_s64_x3_tied1, svfloat16x3_t, svint64x3_t,
+ z0_res = svreinterpret_f16_s64_x3 (z0),
+ z0_res = svreinterpret_f16 (z0))
+
+/*
+** reinterpret_f16_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_f16_u8_x3_untied, svfloat16x3_t, svuint8x3_t, z18,
+ svreinterpret_f16_u8_x3 (z23),
+ svreinterpret_f16 (z23))
+
+/*
+** reinterpret_f16_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f16_u32_x4_tied1, svfloat16x4_t, svuint32x4_t,
+ z0_res = svreinterpret_f16_u32_x4 (z0),
+ z0_res = svreinterpret_f16 (z0))
+
+/*
+** reinterpret_f16_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_f16_f64_x4_untied, svfloat16x4_t, svfloat64x4_t, z28,
+ svreinterpret_f16_f64_x4 (z4),
+ svreinterpret_f16 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f32.c
index 06fc46f..ce981fc 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f32.c
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_f32_u64_tied1, svfloat32_t, svuint64_t,
TEST_DUAL_Z (reinterpret_f32_u64_untied, svfloat32_t, svuint64_t,
z0 = svreinterpret_f32_u64 (z4),
z0 = svreinterpret_f32 (z4))
+
+/*
+** reinterpret_f32_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f32_bf16_x2_tied1, svfloat32x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_f32_bf16_x2 (z0),
+ z0_res = svreinterpret_f32 (z0))
+
+/*
+** reinterpret_f32_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_f32_f32_x2_untied, svfloat32x2_t, svfloat32x2_t, z0,
+ svreinterpret_f32_f32_x2 (z4),
+ svreinterpret_f32 (z4))
+
+/*
+** reinterpret_f32_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f32_s64_x3_tied1, svfloat32x3_t, svint64x3_t,
+ z0_res = svreinterpret_f32_s64_x3 (z0),
+ z0_res = svreinterpret_f32 (z0))
+
+/*
+** reinterpret_f32_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_f32_u8_x3_untied, svfloat32x3_t, svuint8x3_t, z18,
+ svreinterpret_f32_u8_x3 (z23),
+ svreinterpret_f32 (z23))
+
+/*
+** reinterpret_f32_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f32_u32_x4_tied1, svfloat32x4_t, svuint32x4_t,
+ z0_res = svreinterpret_f32_u32_x4 (z0),
+ z0_res = svreinterpret_f32 (z0))
+
+/*
+** reinterpret_f32_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_f32_f64_x4_untied, svfloat32x4_t, svfloat64x4_t, z28,
+ svreinterpret_f32_f64_x4 (z4),
+ svreinterpret_f32 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f64.c
index 003ee3f..4f51824 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_f64.c
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_f64_u64_tied1, svfloat64_t, svuint64_t,
TEST_DUAL_Z (reinterpret_f64_u64_untied, svfloat64_t, svuint64_t,
z0 = svreinterpret_f64_u64 (z4),
z0 = svreinterpret_f64 (z4))
+
+/*
+** reinterpret_f64_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f64_bf16_x2_tied1, svfloat64x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_f64_bf16_x2 (z0),
+ z0_res = svreinterpret_f64 (z0))
+
+/*
+** reinterpret_f64_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_f64_f32_x2_untied, svfloat64x2_t, svfloat32x2_t, z0,
+ svreinterpret_f64_f32_x2 (z4),
+ svreinterpret_f64 (z4))
+
+/*
+** reinterpret_f64_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f64_s64_x3_tied1, svfloat64x3_t, svint64x3_t,
+ z0_res = svreinterpret_f64_s64_x3 (z0),
+ z0_res = svreinterpret_f64 (z0))
+
+/*
+** reinterpret_f64_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_f64_u8_x3_untied, svfloat64x3_t, svuint8x3_t, z18,
+ svreinterpret_f64_u8_x3 (z23),
+ svreinterpret_f64 (z23))
+
+/*
+** reinterpret_f64_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_f64_u32_x4_tied1, svfloat64x4_t, svuint32x4_t,
+ z0_res = svreinterpret_f64_u32_x4 (z0),
+ z0_res = svreinterpret_f64 (z0))
+
+/*
+** reinterpret_f64_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_f64_f64_x4_untied, svfloat64x4_t, svfloat64x4_t, z28,
+ svreinterpret_f64_f64_x4 (z4),
+ svreinterpret_f64 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s16.c
index d62817c..7e15f3e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s16.c
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_s16_u64_tied1, svint16_t, svuint64_t,
TEST_DUAL_Z (reinterpret_s16_u64_untied, svint16_t, svuint64_t,
z0 = svreinterpret_s16_u64 (z4),
z0 = svreinterpret_s16 (z4))
+
+/*
+** reinterpret_s16_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s16_bf16_x2_tied1, svint16x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_s16_bf16_x2 (z0),
+ z0_res = svreinterpret_s16 (z0))
+
+/*
+** reinterpret_s16_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s16_f32_x2_untied, svint16x2_t, svfloat32x2_t, z0,
+ svreinterpret_s16_f32_x2 (z4),
+ svreinterpret_s16 (z4))
+
+/*
+** reinterpret_s16_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s16_s64_x3_tied1, svint16x3_t, svint64x3_t,
+ z0_res = svreinterpret_s16_s64_x3 (z0),
+ z0_res = svreinterpret_s16 (z0))
+
+/*
+** reinterpret_s16_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s16_u8_x3_untied, svint16x3_t, svuint8x3_t, z18,
+ svreinterpret_s16_u8_x3 (z23),
+ svreinterpret_s16 (z23))
+
+/*
+** reinterpret_s16_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s16_u32_x4_tied1, svint16x4_t, svuint32x4_t,
+ z0_res = svreinterpret_s16_u32_x4 (z0),
+ z0_res = svreinterpret_s16 (z0))
+
+/*
+** reinterpret_s16_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s16_f64_x4_untied, svint16x4_t, svfloat64x4_t, z28,
+ svreinterpret_s16_f64_x4 (z4),
+ svreinterpret_s16 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s32.c
index e1068f2..60da8ae 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s32.c
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_s32_u64_tied1, svint32_t, svuint64_t,
TEST_DUAL_Z (reinterpret_s32_u64_untied, svint32_t, svuint64_t,
z0 = svreinterpret_s32_u64 (z4),
z0 = svreinterpret_s32 (z4))
+
+/*
+** reinterpret_s32_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s32_bf16_x2_tied1, svint32x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_s32_bf16_x2 (z0),
+ z0_res = svreinterpret_s32 (z0))
+
+/*
+** reinterpret_s32_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s32_f32_x2_untied, svint32x2_t, svfloat32x2_t, z0,
+ svreinterpret_s32_f32_x2 (z4),
+ svreinterpret_s32 (z4))
+
+/*
+** reinterpret_s32_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s32_s64_x3_tied1, svint32x3_t, svint64x3_t,
+ z0_res = svreinterpret_s32_s64_x3 (z0),
+ z0_res = svreinterpret_s32 (z0))
+
+/*
+** reinterpret_s32_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s32_u8_x3_untied, svint32x3_t, svuint8x3_t, z18,
+ svreinterpret_s32_u8_x3 (z23),
+ svreinterpret_s32 (z23))
+
+/*
+** reinterpret_s32_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s32_u32_x4_tied1, svint32x4_t, svuint32x4_t,
+ z0_res = svreinterpret_s32_u32_x4 (z0),
+ z0_res = svreinterpret_s32 (z0))
+
+/*
+** reinterpret_s32_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s32_f64_x4_untied, svint32x4_t, svfloat64x4_t, z28,
+ svreinterpret_s32_f64_x4 (z4),
+ svreinterpret_s32 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s64.c
index cada753..d705c60 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s64.c
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_s64_u64_tied1, svint64_t, svuint64_t,
TEST_DUAL_Z (reinterpret_s64_u64_untied, svint64_t, svuint64_t,
z0 = svreinterpret_s64_u64 (z4),
z0 = svreinterpret_s64 (z4))
+
+/*
+** reinterpret_s64_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s64_bf16_x2_tied1, svint64x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_s64_bf16_x2 (z0),
+ z0_res = svreinterpret_s64 (z0))
+
+/*
+** reinterpret_s64_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s64_f32_x2_untied, svint64x2_t, svfloat32x2_t, z0,
+ svreinterpret_s64_f32_x2 (z4),
+ svreinterpret_s64 (z4))
+
+/*
+** reinterpret_s64_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s64_s64_x3_tied1, svint64x3_t, svint64x3_t,
+ z0_res = svreinterpret_s64_s64_x3 (z0),
+ z0_res = svreinterpret_s64 (z0))
+
+/*
+** reinterpret_s64_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s64_u8_x3_untied, svint64x3_t, svuint8x3_t, z18,
+ svreinterpret_s64_u8_x3 (z23),
+ svreinterpret_s64 (z23))
+
+/*
+** reinterpret_s64_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s64_u32_x4_tied1, svint64x4_t, svuint32x4_t,
+ z0_res = svreinterpret_s64_u32_x4 (z0),
+ z0_res = svreinterpret_s64 (z0))
+
+/*
+** reinterpret_s64_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s64_f64_x4_untied, svint64x4_t, svfloat64x4_t, z28,
+ svreinterpret_s64_f64_x4 (z4),
+ svreinterpret_s64 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s8.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s8.c
index 23a40d0..ab90a54 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_s8.c
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_s8_u64_tied1, svint8_t, svuint64_t,
TEST_DUAL_Z (reinterpret_s8_u64_untied, svint8_t, svuint64_t,
z0 = svreinterpret_s8_u64 (z4),
z0 = svreinterpret_s8 (z4))
+
+/*
+** reinterpret_s8_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s8_bf16_x2_tied1, svint8x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_s8_bf16_x2 (z0),
+ z0_res = svreinterpret_s8 (z0))
+
+/*
+** reinterpret_s8_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s8_f32_x2_untied, svint8x2_t, svfloat32x2_t, z0,
+ svreinterpret_s8_f32_x2 (z4),
+ svreinterpret_s8 (z4))
+
+/*
+** reinterpret_s8_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s8_s64_x3_tied1, svint8x3_t, svint64x3_t,
+ z0_res = svreinterpret_s8_s64_x3 (z0),
+ z0_res = svreinterpret_s8 (z0))
+
+/*
+** reinterpret_s8_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s8_u8_x3_untied, svint8x3_t, svuint8x3_t, z18,
+ svreinterpret_s8_u8_x3 (z23),
+ svreinterpret_s8 (z23))
+
+/*
+** reinterpret_s8_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_s8_u32_x4_tied1, svint8x4_t, svuint32x4_t,
+ z0_res = svreinterpret_s8_u32_x4 (z0),
+ z0_res = svreinterpret_s8 (z0))
+
+/*
+** reinterpret_s8_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_s8_f64_x4_untied, svint8x4_t, svfloat64x4_t, z28,
+ svreinterpret_s8_f64_x4 (z4),
+ svreinterpret_s8 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u16.c
index 48e8eca..fcfc0eb 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u16.c
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_u16_u64_tied1, svuint16_t, svuint64_t,
TEST_DUAL_Z (reinterpret_u16_u64_untied, svuint16_t, svuint64_t,
z0 = svreinterpret_u16_u64 (z4),
z0 = svreinterpret_u16 (z4))
+
+/*
+** reinterpret_u16_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u16_bf16_x2_tied1, svuint16x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_u16_bf16_x2 (z0),
+ z0_res = svreinterpret_u16 (z0))
+
+/*
+** reinterpret_u16_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u16_f32_x2_untied, svuint16x2_t, svfloat32x2_t, z0,
+ svreinterpret_u16_f32_x2 (z4),
+ svreinterpret_u16 (z4))
+
+/*
+** reinterpret_u16_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u16_s64_x3_tied1, svuint16x3_t, svint64x3_t,
+ z0_res = svreinterpret_u16_s64_x3 (z0),
+ z0_res = svreinterpret_u16 (z0))
+
+/*
+** reinterpret_u16_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u16_u8_x3_untied, svuint16x3_t, svuint8x3_t, z18,
+ svreinterpret_u16_u8_x3 (z23),
+ svreinterpret_u16 (z23))
+
+/*
+** reinterpret_u16_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u16_u32_x4_tied1, svuint16x4_t, svuint32x4_t,
+ z0_res = svreinterpret_u16_u32_x4 (z0),
+ z0_res = svreinterpret_u16 (z0))
+
+/*
+** reinterpret_u16_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u16_f64_x4_untied, svuint16x4_t, svfloat64x4_t, z28,
+ svreinterpret_u16_f64_x4 (z4),
+ svreinterpret_u16 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u32.c
index 1d4e857..6d7e058 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u32.c
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_u32_u64_tied1, svuint32_t, svuint64_t,
TEST_DUAL_Z (reinterpret_u32_u64_untied, svuint32_t, svuint64_t,
z0 = svreinterpret_u32_u64 (z4),
z0 = svreinterpret_u32 (z4))
+
+/*
+** reinterpret_u32_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u32_bf16_x2_tied1, svuint32x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_u32_bf16_x2 (z0),
+ z0_res = svreinterpret_u32 (z0))
+
+/*
+** reinterpret_u32_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u32_f32_x2_untied, svuint32x2_t, svfloat32x2_t, z0,
+ svreinterpret_u32_f32_x2 (z4),
+ svreinterpret_u32 (z4))
+
+/*
+** reinterpret_u32_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u32_s64_x3_tied1, svuint32x3_t, svint64x3_t,
+ z0_res = svreinterpret_u32_s64_x3 (z0),
+ z0_res = svreinterpret_u32 (z0))
+
+/*
+** reinterpret_u32_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u32_u8_x3_untied, svuint32x3_t, svuint8x3_t, z18,
+ svreinterpret_u32_u8_x3 (z23),
+ svreinterpret_u32 (z23))
+
+/*
+** reinterpret_u32_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u32_u32_x4_tied1, svuint32x4_t, svuint32x4_t,
+ z0_res = svreinterpret_u32_u32_x4 (z0),
+ z0_res = svreinterpret_u32 (z0))
+
+/*
+** reinterpret_u32_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u32_f64_x4_untied, svuint32x4_t, svfloat64x4_t, z28,
+ svreinterpret_u32_f64_x4 (z4),
+ svreinterpret_u32 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u64.c
index 07af69d..55c0bae 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u64.c
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_u64_u64_tied1, svuint64_t, svuint64_t,
TEST_DUAL_Z (reinterpret_u64_u64_untied, svuint64_t, svuint64_t,
z0 = svreinterpret_u64_u64 (z4),
z0 = svreinterpret_u64 (z4))
+
+/*
+** reinterpret_u64_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u64_bf16_x2_tied1, svuint64x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_u64_bf16_x2 (z0),
+ z0_res = svreinterpret_u64 (z0))
+
+/*
+** reinterpret_u64_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u64_f32_x2_untied, svuint64x2_t, svfloat32x2_t, z0,
+ svreinterpret_u64_f32_x2 (z4),
+ svreinterpret_u64 (z4))
+
+/*
+** reinterpret_u64_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u64_s64_x3_tied1, svuint64x3_t, svint64x3_t,
+ z0_res = svreinterpret_u64_s64_x3 (z0),
+ z0_res = svreinterpret_u64 (z0))
+
+/*
+** reinterpret_u64_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u64_u8_x3_untied, svuint64x3_t, svuint8x3_t, z18,
+ svreinterpret_u64_u8_x3 (z23),
+ svreinterpret_u64 (z23))
+
+/*
+** reinterpret_u64_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u64_u32_x4_tied1, svuint64x4_t, svuint32x4_t,
+ z0_res = svreinterpret_u64_u32_x4 (z0),
+ z0_res = svreinterpret_u64 (z0))
+
+/*
+** reinterpret_u64_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u64_f64_x4_untied, svuint64x4_t, svfloat64x4_t, z28,
+ svreinterpret_u64_f64_x4 (z4),
+ svreinterpret_u64 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u8.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u8.c
index a4c7f4c..f730219 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/reinterpret_u8.c
@@ -205,3 +205,65 @@ TEST_DUAL_Z_REV (reinterpret_u8_u64_tied1, svuint8_t, svuint64_t,
TEST_DUAL_Z (reinterpret_u8_u64_untied, svuint8_t, svuint64_t,
z0 = svreinterpret_u8_u64 (z4),
z0 = svreinterpret_u8 (z4))
+
+/*
+** reinterpret_u8_bf16_x2_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u8_bf16_x2_tied1, svuint8x2_t, svbfloat16x2_t,
+ z0_res = svreinterpret_u8_bf16_x2 (z0),
+ z0_res = svreinterpret_u8 (z0))
+
+/*
+** reinterpret_u8_f32_x2_untied:
+** (
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** |
+** mov z0\.d, z4\.d
+** mov z1\.d, z5\.d
+** )
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u8_f32_x2_untied, svuint8x2_t, svfloat32x2_t, z0,
+ svreinterpret_u8_f32_x2 (z4),
+ svreinterpret_u8 (z4))
+
+/*
+** reinterpret_u8_s64_x3_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u8_s64_x3_tied1, svuint8x3_t, svint64x3_t,
+ z0_res = svreinterpret_u8_s64_x3 (z0),
+ z0_res = svreinterpret_u8 (z0))
+
+/*
+** reinterpret_u8_u8_x3_untied:
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** mov (z18|z19|z20)\.d, (z23|z24|z25)\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u8_u8_x3_untied, svuint8x3_t, svuint8x3_t, z18,
+ svreinterpret_u8_u8_x3 (z23),
+ svreinterpret_u8 (z23))
+
+/*
+** reinterpret_u8_u32_x4_tied1:
+** ret
+*/
+TEST_DUAL_Z_REV (reinterpret_u8_u32_x4_tied1, svuint8x4_t, svuint32x4_t,
+ z0_res = svreinterpret_u8_u32_x4 (z0),
+ z0_res = svreinterpret_u8 (z0))
+
+/*
+** reinterpret_u8_f64_x4_untied:
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** mov (z28|z29|z30|z31)\.d, z[4-7]\.d
+** ret
+*/
+TEST_DUAL_XN (reinterpret_u8_f64_x4_untied, svuint8x4_t, svfloat64x4_t, z28,
+ svreinterpret_u8_f64_x4 (z4),
+ svreinterpret_u8 (z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/set2_b.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/set2_b.c
new file mode 100644
index 0000000..30afb6a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/set2_b.c
@@ -0,0 +1,41 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** set2_b_p8_0:
+** mov p9\.b, p5\.b
+** mov p8\.b, p0\.b
+** ret
+*/
+TEST_SET_B (set2_b_p8_0, svboolx2_t,
+ p8 = svset2_b (p4, 0, p0),
+ p8 = svset2 (p4, 0, p0))
+
+/*
+** set2_b_p8_1:
+** mov p8\.b, p4\.b
+** mov p9\.b, p0\.b
+** ret
+*/
+TEST_SET_B (set2_b_p8_1, svboolx2_t,
+ p8 = svset2_b (p4, 1, p0),
+ p8 = svset2 (p4, 1, p0))
+
+/*
+** set2_b_p4_0:
+** mov p4\.b, p12\.b
+** ret
+*/
+TEST_SET_B (set2_b_p4_0, svboolx2_t,
+ p4 = svset2_b (p4, 0, p12),
+ p4 = svset2 (p4, 0, p12))
+
+/*
+** set2_b_p4_1:
+** mov p5\.b, p13\.b
+** ret
+*/
+TEST_SET_B (set2_b_p4_1, svboolx2_t,
+ p4 = svset2_b (p4, 1, p13),
+ p4 = svset2 (p4, 1, p13))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_f32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_f32.c
index cb6774a..1efd434 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_f32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_f32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_f64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_f64.c
index fe978bb..f50c43e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_f64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_f64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_s32.c
index d244e70..bb6fb10 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_s64.c
index 5c4ebf4..19ec78e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_u32.c
index fe3f725..57fbb91 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_u64.c
index 2321235..60018be 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1_scatter_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1b_scatter_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1b_scatter_s32.c
index d590333..fb1bb29 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1b_scatter_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1b_scatter_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1b_scatter_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1b_scatter_s64.c
index c7a35f1..65ee9a0 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1b_scatter_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1b_scatter_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1b_scatter_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1b_scatter_u32.c
index e098cb9..ceec619 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1b_scatter_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1b_scatter_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1b_scatter_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1b_scatter_u64.c
index 058d131..aeedbc6 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1b_scatter_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1b_scatter_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1h_scatter_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1h_scatter_s32.c
index 2a23d41..2d69d08 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1h_scatter_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1h_scatter_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1h_scatter_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1h_scatter_s64.c
index 6a1adb0..3e5733e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1h_scatter_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1h_scatter_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1h_scatter_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1h_scatter_u32.c
index 1219731..5cd330a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1h_scatter_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1h_scatter_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1h_scatter_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1h_scatter_u64.c
index 7021ea6..0ee9948 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1h_scatter_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1h_scatter_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1w_scatter_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1w_scatter_s64.c
index 2363f59..f18bedc 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1w_scatter_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1w_scatter_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1w_scatter_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1w_scatter_u64.c
index 767c009..6850865 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1w_scatter_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/st1w_scatter_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/test_sve_acle.h b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/test_sve_acle.h
index fbf392b..756fe4d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/test_sve_acle.h
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/test_sve_acle.h
@@ -11,10 +11,33 @@
#error "Please define -DTEST_OVERLOADS or -DTEST_FULL"
#endif
+#ifdef STREAMING_COMPATIBLE
+#define SM_ATTR __arm_streaming_compatible
+#elif defined(STREAMING)
+#define SM_ATTR __arm_streaming
+#else
+#define SM_ATTR
+#endif
+
+#ifdef SHARED_ZA
+#define ZA_ATTR __arm_inout("za")
+#else
+#define ZA_ATTR
+#endif
+
+#ifdef SHARED_ZT0
+#define ZT0_ATTR __arm_inout("zt0")
+#else
+#define ZT0_ATTR
+#endif
+
+#define ATTR SM_ATTR ZA_ATTR ZT0_ATTR
+
#ifdef __cplusplus
-#define PROTO(NAME, RET, ARGS) extern "C" RET NAME ARGS; RET NAME ARGS
+#define PROTO(NAME, RET, ARGS) \
+ extern "C" RET NAME ARGS ATTR; RET NAME ARGS ATTR
#else
-#define PROTO(NAME, RET, ARGS) RET NAME ARGS
+#define PROTO(NAME, RET, ARGS) RET NAME ARGS ATTR
#endif
#define TEST_UNIFORM_Z(NAME, TYPE, CODE1, CODE2) \
@@ -68,6 +91,21 @@
return z0_res; \
}
+#define TEST_DUAL_P(NAME, TYPE1, TYPE2, CODE1, CODE2) \
+ PROTO (NAME, TYPE1, (TYPE1 p0, TYPE1 p1, TYPE2 p2, TYPE2 p3)) \
+ { \
+ INVOKE (CODE1, CODE2); \
+ return p0; \
+ }
+
+#define TEST_DUAL_P_REV(NAME, TYPE1, TYPE2, CODE1, CODE2) \
+ PROTO (NAME, TYPE1, (TYPE2 p0, TYPE2 p1, TYPE1 p2, TYPE1 p3)) \
+ { \
+ TYPE1 p0_res; \
+ INVOKE (CODE1, CODE2); \
+ return p0_res; \
+ }
+
#define TEST_TRIPLE_Z(NAME, TYPE1, TYPE2, TYPE3, CODE1, CODE2) \
PROTO (NAME, TYPE1, (TYPE1 z0, TYPE1 z1, TYPE2 z2, TYPE2 z3, \
TYPE3 z4, TYPE3 z5, \
@@ -200,6 +238,24 @@
return z0; \
}
+#define TEST_LOAD_COUNT(NAME, TTYPE, STYPE, CODE1, CODE2) \
+ PROTO (NAME, void, (const STYPE *x0, intptr_t x1)) \
+ { \
+ register svcount_t pn0 __asm ("pn0"); \
+ register svcount_t pn7 __asm ("pn7"); \
+ register svcount_t pn8 __asm ("pn8"); \
+ register svcount_t pn15 __asm ("pn15"); \
+ register TTYPE z0 __asm ("z0"); \
+ register TTYPE z17 __asm ("z17"); \
+ register TTYPE z22 __asm ("z22"); \
+ register TTYPE z28 __asm ("z28"); \
+ __asm volatile ("" : "=Upa" (pn0), "=Upa" (pn7), \
+ "=Upa" (pn8), "=Upa" (pn15)); \
+ INVOKE (CODE1, CODE2); \
+ __asm volatile ("" :: "w" (z0), "w" (z17), \
+ "w" (z22), "w" (z28)); \
+ }
+
#define TEST_LOAD_GATHER_SZ(NAME, RES_TYPE, STYPE, ZTYPE, CODE1, CODE2) \
PROTO (NAME, RES_TYPE, (ZTYPE z0, ZTYPE z1, svbool_t p0, \
const STYPE *x0)) \
@@ -246,6 +302,24 @@
INVOKE (CODE1, CODE2); \
}
+#define TEST_STORE_COUNT(NAME, TTYPE, STYPE, CODE1, CODE2) \
+ PROTO (NAME, void, (STYPE *x0, intptr_t x1)) \
+ { \
+ register svcount_t pn0 __asm ("pn0"); \
+ register svcount_t pn7 __asm ("pn7"); \
+ register svcount_t pn8 __asm ("pn8"); \
+ register svcount_t pn15 __asm ("pn15"); \
+ register TTYPE z0 __asm ("z0"); \
+ register TTYPE z17 __asm ("z17"); \
+ register TTYPE z22 __asm ("z22"); \
+ register TTYPE z28 __asm ("z28"); \
+ __asm volatile ("" : "=Upa" (pn0), "=Upa" (pn7), \
+ "=Upa" (pn8), "=Upa" (pn15), \
+ "=w" (z0), "=w" (z17), "=w" (z22), \
+ "=w" (z28)); \
+ INVOKE (CODE1, CODE2); \
+ }
+
#define TEST_STORE_SCATTER_SZ(NAME, DATA_TYPE, STYPE, ZTYPE, CODE1, CODE2) \
PROTO (NAME, void, (DATA_TYPE z0, ZTYPE z1, svbool_t p0, \
STYPE *x0)) \
@@ -276,6 +350,79 @@
return x0; \
}
+#define TEST_PN(NAME, CODE1, CODE2) \
+ PROTO (NAME, void, (void)) \
+ { \
+ register svcount_t pn0 __asm("pn0"); \
+ register svcount_t pn7 __asm("pn7"); \
+ register svcount_t pn8 __asm("pn8"); \
+ register svcount_t pn15 __asm("pn15"); \
+ INVOKE (CODE1, CODE2); \
+ __asm volatile ("" :: "Upa" (pn0), "Upa" (pn7), \
+ "Upa" (pn8), "Upa" (pn15)); \
+ }
+
+#define TEST_COUNT_PN(NAME, CODE1, CODE2) \
+ PROTO (NAME, void, (void)) \
+ { \
+ register svcount_t pn0 __asm ("pn0"); \
+ register svcount_t pn7 __asm ("pn7"); \
+ register svcount_t pn8 __asm ("pn8"); \
+ register svcount_t pn15 __asm ("pn15"); \
+ register uint64_t x0 __asm ("x0"); \
+ register uint64_t x15 __asm ("x15"); \
+ register uint64_t x17 __asm ("x17"); \
+ __asm volatile ("" : "=Upa" (pn0), "=Upa" (pn7), \
+ "=Upa" (pn8), "=Upa" (pn15)); \
+ INVOKE (CODE1, CODE2); \
+ __asm volatile ("" :: "r" (x0), "r" (x15), \
+ "r" (x17)); \
+ }
+
+#define TEST_EXTRACT_PN(NAME, TYPE, CODE1, CODE2) \
+ PROTO (NAME, void, (void)) \
+ { \
+ register svcount_t pn0 __asm ("pn0"); \
+ register TYPE p2 __asm ("p2"); \
+ register TYPE p5 __asm ("p5"); \
+ register svcount_t pn7 __asm ("pn7"); \
+ register svcount_t pn8 __asm ("pn8"); \
+ register TYPE p9 __asm ("p9"); \
+ register svcount_t pn11 __asm ("pn11"); \
+ register TYPE p12 __asm ("p12"); \
+ register svcount_t pn15 __asm ("pn15"); \
+ __asm volatile ("" : "=Upa" (pn0), "=Upa" (pn7), \
+ "=Upa" (pn8), "=Upa" (pn11), \
+ "=Upa" (pn15)); \
+ INVOKE (CODE1, CODE2); \
+ __asm volatile ("" :: "Upa" (p2), "Upa" (p5), \
+ "Upa" (p9), "Upa" (p12)); \
+ }
+
+#define TEST_SELECT_P(NAME, TYPE, CODE1, CODE2) \
+ PROTO (NAME, void, (void)) \
+ { \
+ register TYPE p0 __asm ("p0"); \
+ register TYPE p2 __asm ("p2"); \
+ register svbool_t p7 __asm ("p7"); \
+ register svbool_t p8 __asm ("p8"); \
+ register TYPE p13 __asm ("p13"); \
+ register svbool_t p15 __asm ("p15"); \
+ register int32_t w11 __asm ("w11"); \
+ register int32_t w12 __asm ("w12"); \
+ register int32_t w15 __asm ("w15"); \
+ register int32_t w16 __asm ("w16"); \
+ __asm volatile ("" : "=Upa" (p0), "=Upa" (p2), \
+ "=Upa" (p7), "=Upa" (p8), \
+ "=Upa" (p13), "=Upa" (p15), \
+ "=r" (w11), "=r" (w12), \
+ "=r" (w15), "=r" (w16)); \
+ INVOKE (CODE1, CODE2); \
+ __asm volatile ("" :: "Upa" (p0), "Upa" (p2), \
+ "Upa" (p7), "Upa" (p8), \
+ "Upa" (p13), "Upa" (p15)); \
+ }
+
#define TEST_COMPARE_S(NAME, TYPE, CODE1, CODE2) \
PROTO (NAME, svbool_t, (TYPE x0, TYPE x1)) \
{ \
@@ -284,6 +431,30 @@
return p0; \
}
+#define TEST_COMPARE_S_X2(NAME, TYPE, CODE1, CODE2) \
+ PROTO (NAME, void, (TYPE x0, TYPE x1)) \
+ { \
+ register svboolx2_t p1 __asm("p1"); \
+ register svboolx2_t p4 __asm("p4"); \
+ register svboolx2_t p9 __asm("p9"); \
+ register svboolx2_t p14 __asm("p14"); \
+ INVOKE (CODE1, CODE2); \
+ __asm volatile ("" :: "Upa" (p1), "Upa" (p4), \
+ "Upa" (p9), "Upa" (p14)); \
+ }
+
+#define TEST_COMPARE_S_C(NAME, TYPE, CODE1, CODE2) \
+ PROTO (NAME, void, (TYPE x0, TYPE x1)) \
+ { \
+ register svcount_t pn0 __asm("pn0"); \
+ register svcount_t pn7 __asm("pn7"); \
+ register svcount_t pn8 __asm("pn8"); \
+ register svcount_t pn15 __asm("pn15"); \
+ INVOKE (CODE1, CODE2); \
+ __asm volatile ("" :: "Upa" (pn0), "Upa" (pn7), \
+ "Upa" (pn8), "Upa" (pn15)); \
+ }
+
#define TEST_COMPARE_Z(NAME, TYPE, CODE1, CODE2) \
PROTO (NAME, svbool_t, (TYPE z0, TYPE z1, \
svbool_t p0, svbool_t p1)) \
@@ -382,6 +553,15 @@
return z0; \
}
+#define TEST_CREATE_B(NAME, TTYPE, CODE1, CODE2) \
+ PROTO (NAME, TTYPE, (svbool_t p0, svbool_t p1, \
+ svbool_t p2, svbool_t p3)) \
+ { \
+ TTYPE p0_res; \
+ INVOKE (CODE1, CODE2); \
+ return p0_res; \
+ }
+
#define TEST_GET(NAME, TTYPE, ZTYPE, CODE1, CODE2) \
PROTO (NAME, void, (ZTYPE unused0, ZTYPE unused1, \
ZTYPE unused2, ZTYPE unused3, TTYPE z4)) \
@@ -396,6 +576,22 @@
"w" (z6_res), "w" (z7_res)); \
}
+#define TEST_GET_B(NAME, TTYPE, CODE1, CODE2) \
+ PROTO (NAME, void, (void)) \
+ { \
+ register svbool_t p0 __asm ("p0"); \
+ register TTYPE p4 __asm ("p4"); \
+ register svbool_t p4_res __asm ("p4"); \
+ register svbool_t p5_res __asm ("p5"); \
+ register svbool_t p6_res __asm ("p6"); \
+ register svbool_t p7_res __asm ("p7"); \
+ __asm volatile ("" : "=Upa" (p0), "=Upa" (p4)); \
+ INVOKE (CODE1, CODE2); \
+ __asm volatile ("" :: "Upa" (p0), "Upa" (p4_res), \
+ "Upa" (p5_res), "Upa" (p6_res), \
+ "Upa" (p7_res)); \
+ }
+
#define TEST_SET(NAME, TTYPE, ZTYPE, CODE1, CODE2) \
PROTO (NAME, void, (ZTYPE z0, ZTYPE z1, ZTYPE z2, ZTYPE z3, \
TTYPE z4)) \
@@ -405,6 +601,20 @@
__asm volatile ("" :: "w" (z4), "w" (z24)); \
}
+#define TEST_SET_B(NAME, TTYPE, CODE1, CODE2) \
+ PROTO (NAME, void, (void)) \
+ { \
+ register svbool_t p0 __asm ("p0"); \
+ register TTYPE p4 __asm ("p4"); \
+ register TTYPE p8 __asm ("p8"); \
+ register svbool_t p12 __asm ("p12"); \
+ register svbool_t p13 __asm ("p13"); \
+ __asm volatile ("" : "=Upa" (p0), "=Upa" (p4), \
+ "=Upa" (p12), "=Upa" (p13)); \
+ INVOKE (CODE1, CODE2); \
+ __asm volatile ("" :: "Upa" (p4), "Upa" (p8)); \
+ }
+
#define TEST_TBL2(NAME, TTYPE, ZTYPE, UTYPE, CODE1, CODE2) \
PROTO (NAME, ZTYPE, (TTYPE z0, TTYPE z2, UTYPE z4)) \
{ \
@@ -421,4 +631,107 @@
return z0_res; \
}
+#define TEST_XN(NAME, TTYPE, RES, CODE1, CODE2) \
+ PROTO (NAME, void, ()) \
+ { \
+ register TTYPE z0 __asm ("z0"); \
+ register TTYPE z4 __asm ("z4"); \
+ register TTYPE z18 __asm ("z18"); \
+ register TTYPE z23 __asm ("z23"); \
+ register TTYPE z28 __asm ("z28"); \
+ register svcount_t pn0 __asm ("pn0"); \
+ register svcount_t pn7 __asm ("pn7"); \
+ register svcount_t pn8 __asm ("pn8"); \
+ register svcount_t pn15 __asm ("pn15"); \
+ __asm volatile ("" : "=w" (z0), "=w" (z4), "=w" (z18), \
+ "=w" (z23), "=w" (z28), "=Upa" (pn0), \
+ "=Upa" (pn7), "=Upa" (pn8), "=Upa" (pn15)); \
+ INVOKE (RES = CODE1, RES = CODE2); \
+ __asm volatile ("" :: "w" (RES)); \
+ }
+
+#define TEST_DUAL_XN(NAME, TTYPE1, TTYPE2, RES, CODE1, CODE2) \
+ PROTO (NAME, void, ()) \
+ { \
+ register TTYPE1 z0 __asm ("z0"); \
+ register TTYPE2 z4 __asm ("z4"); \
+ register TTYPE1 z18 __asm ("z18"); \
+ register TTYPE2 z23 __asm ("z23"); \
+ register TTYPE1 z28 __asm ("z28"); \
+ __asm volatile ("" : "=w" (z0), "=w" (z4), "=w" (z18), \
+ "=w" (z23), "=w" (z28)); \
+ INVOKE (RES = CODE1, RES = CODE2); \
+ __asm volatile ("" :: "w" (RES)); \
+ }
+
+#define TEST_XN_SINGLE(NAME, TTYPE, ZTYPE, RES, CODE1, CODE2) \
+ PROTO (NAME, void, ()) \
+ { \
+ register ZTYPE z0 __asm ("z0"); \
+ register TTYPE z1 __asm ("z1"); \
+ register ZTYPE z5 __asm ("z5"); \
+ register ZTYPE z7 __asm ("z7"); \
+ register ZTYPE z16 __asm ("z16"); \
+ register TTYPE z18 __asm ("z18"); \
+ register ZTYPE z23 __asm ("z23"); \
+ register TTYPE z24 __asm ("z24"); \
+ register TTYPE z28 __asm ("z28"); \
+ __asm volatile ("" : "=w" (z0), "=w" (z1), "=w" (z5), \
+ "=w" (z7), "=w" (z16), "=w" (z18), \
+ "=w" (z23), "=w" (z24), "=w" (z28)); \
+ INVOKE (RES = CODE1, RES = CODE2); \
+ __asm volatile ("" :: "w" (RES)); \
+ }
+
+#define TEST_XN_SINGLE_Z15(NAME, TTYPE, ZTYPE, CODE1, CODE2) \
+ PROTO (NAME, TTYPE, (TTYPE z0)) \
+ { \
+ register ZTYPE z15 __asm ("z15"); \
+ __asm volatile ("" : "=w" (z15)); \
+ INVOKE (CODE1, CODE2); \
+ return z0; \
+ }
+
+#define TEST_XN_SINGLE_AWKWARD(NAME, TTYPE, ZTYPE, CODE1, CODE2) \
+ PROTO (NAME, TTYPE, (ZTYPE z0, TTYPE z1, ZTYPE zn)) \
+ { \
+ TTYPE z0_res; \
+ INVOKE (CODE1, CODE2); \
+ return z0_res; \
+ }
+
+#define TEST_X2_NARROW(NAME, TTYPE, ZTYPE, CODE1, CODE2) \
+ PROTO (NAME, void, ()) \
+ { \
+ register TTYPE z0 __asm ("z0"); \
+ register ZTYPE z5 __asm ("z5"); \
+ register TTYPE z6 __asm ("z6"); \
+ register TTYPE z16 __asm ("z16"); \
+ register ZTYPE z22 __asm ("z22"); \
+ register TTYPE z29 __asm ("z29"); \
+ register ZTYPE z0_res __asm ("z0"); \
+ __asm volatile ("" : "=w" (z0), "=w" (z5), "=w" (z6), \
+ "=w" (z16), "=w" (z22), "=w" (z29)); \
+ INVOKE (CODE1, CODE2); \
+ __asm volatile ("" :: "w" (z0_res), "w" (z5), "w" (z22)); \
+ }
+
+#define TEST_X4_NARROW(NAME, TTYPE, ZTYPE, CODE1, CODE2) \
+ PROTO (NAME, void, ()) \
+ { \
+ register TTYPE z0 __asm ("z0"); \
+ register TTYPE z4 __asm ("z4"); \
+ register TTYPE z16 __asm ("z16"); \
+ register TTYPE z21 __asm ("z21"); \
+ register ZTYPE z25 __asm ("z25"); \
+ register TTYPE z26 __asm ("z26"); \
+ register ZTYPE z0_res __asm ("z0"); \
+ register ZTYPE z22_res __asm ("z22"); \
+ __asm volatile ("" : "=w" (z0), "=w" (z4), "=w" (z16), \
+ "=w" (z21), "=w" (z26)); \
+ INVOKE (CODE1, CODE2); \
+ __asm volatile ("" :: "w" (z0_res), "w" (z22_res), \
+ "w" (z25)); \
+ }
+
#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tmad_f16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tmad_f16.c
index 3a00716..c0b03a0 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tmad_f16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tmad_f16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tmad_f32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tmad_f32.c
index b73d420..8eef8a1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tmad_f32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tmad_f32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tmad_f64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tmad_f64.c
index fc31928..5c96c55 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tmad_f64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tmad_f64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tsmul_f16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tsmul_f16.c
index 94bc696..9deed66 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tsmul_f16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tsmul_f16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tsmul_f32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tsmul_f32.c
index d0ec918..749ea86 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tsmul_f32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tsmul_f32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tsmul_f64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tsmul_f64.c
index 23e0da3..053abcb 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tsmul_f64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tsmul_f64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tssel_f16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tssel_f16.c
index e7c3ea0..3ab251f 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tssel_f16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tssel_f16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tssel_f32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tssel_f32.c
index 022573a..6c6471c 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tssel_f32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tssel_f32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tssel_f64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tssel_f64.c
index ffcdf42..9559e0f 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tssel_f64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/tssel_f64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/usmmla_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/usmmla_s32.c
index 9440f3f..a0dd7e3 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/usmmla_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/usmmla_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-require-effective-target aarch64_asm_i8mm_ok } */
/* { dg-additional-options "-march=armv8.2-a+sve+i8mm" } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/adr_index_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/adr_index_1.c
index 714265e..a17e99f 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/adr_index_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/adr_index_1.c
@@ -10,14 +10,14 @@ f1 (svbool_t pg, uint32_t *u32_ptr, svuint8_t u8, svuint16_t u16,
{
svadrh_index (u32); /* { dg-error {too few arguments to function 'svadrh_index'} } */
svadrh_index (u32, u32, u32); /* { dg-error {too many arguments to function 'svadrh_index'} } */
- svadrh_index (u32_ptr, s32); /* { dg-error {passing '[^']*\*'[^\n]* to argument 1 of 'svadrh_index', which expects an SVE vector type} } */
- svadrh_index (0, s32); /* { dg-error {passing 'int' to argument 1 of 'svadrh_index', which expects an SVE vector type} } */
+ svadrh_index (u32_ptr, s32); /* { dg-error {passing '[^']*\*'[^\n]* to argument 1 of 'svadrh_index', which expects an SVE type} } */
+ svadrh_index (0, s32); /* { dg-error {passing 'int' to argument 1 of 'svadrh_index', which expects an SVE type rather than a scalar} } */
svadrh_index (u16, u16); /* { dg-error {passing 'svuint16_t' to argument 1 of 'svadrh_index', which expects 'svuint32_t' or 'svuint64_t'} } */
svadrh_index (s32, s32); /* { dg-error {passing 'svint32_t' to argument 1 of 'svadrh_index', which expects 'svuint32_t' or 'svuint64_t'} } */
svadrh_index (f32, s32); /* { dg-error {passing 'svfloat32_t' to argument 1 of 'svadrh_index', which expects 'svuint32_t' or 'svuint64_t'} } */
svadrh_index (pg, s32); /* { dg-error {passing 'svbool_t' to argument 1 of 'svadrh_index', which expects 'svuint32_t' or 'svuint64_t'} } */
- svadrh_index (u32, 0); /* { dg-error {passing 'int' to argument 2 of 'svadrh_index', which expects an SVE vector type} } */
+ svadrh_index (u32, 0); /* { dg-error {passing 'int' to argument 2 of 'svadrh_index', which expects an SVE type rather than a scalar} } */
svadrh_index (u32, u8); /* { dg-error {passing 'svuint8_t' to argument 2 of 'svadrh_index', which expects a vector of 32-bit or 64-bit integers} } */
svadrh_index (u32, u16); /* { dg-error {passing 'svuint16_t' to argument 2 of 'svadrh_index', which expects a vector of 32-bit or 64-bit integers} } */
svadrh_index (u32, pg); /* { dg-error {passing 'svbool_t' to argument 2 of 'svadrh_index', which expects a vector of integers} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/adr_offset_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/adr_offset_1.c
index 528d7ac..627ae8a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/adr_offset_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/adr_offset_1.c
@@ -10,14 +10,14 @@ f1 (svbool_t pg, uint32_t *u32_ptr, svuint8_t u8, svuint16_t u16,
{
svadrb_offset (u32); /* { dg-error {too few arguments to function 'svadrb_offset'} } */
svadrb_offset (u32, u32, u32); /* { dg-error {too many arguments to function 'svadrb_offset'} } */
- svadrb_offset (u32_ptr, s32); /* { dg-error {passing '[^']*\*'[^\n]* to argument 1 of 'svadrb_offset', which expects an SVE vector type} } */
- svadrb_offset (0, s32); /* { dg-error {passing 'int' to argument 1 of 'svadrb_offset', which expects an SVE vector type} } */
+ svadrb_offset (u32_ptr, s32); /* { dg-error {passing '[^']*\*'[^\n]* to argument 1 of 'svadrb_offset', which expects an SVE type} } */
+ svadrb_offset (0, s32); /* { dg-error {passing 'int' to argument 1 of 'svadrb_offset', which expects an SVE type rather than a scalar} } */
svadrb_offset (u16, u16); /* { dg-error {passing 'svuint16_t' to argument 1 of 'svadrb_offset', which expects 'svuint32_t' or 'svuint64_t'} } */
svadrb_offset (s32, s32); /* { dg-error {passing 'svint32_t' to argument 1 of 'svadrb_offset', which expects 'svuint32_t' or 'svuint64_t'} } */
svadrb_offset (f32, s32); /* { dg-error {passing 'svfloat32_t' to argument 1 of 'svadrb_offset', which expects 'svuint32_t' or 'svuint64_t'} } */
svadrb_offset (pg, s32); /* { dg-error {passing 'svbool_t' to argument 1 of 'svadrb_offset', which expects 'svuint32_t' or 'svuint64_t'} } */
- svadrb_offset (u32, 0); /* { dg-error {passing 'int' to argument 2 of 'svadrb_offset', which expects an SVE vector type} } */
+ svadrb_offset (u32, 0); /* { dg-error {passing 'int' to argument 2 of 'svadrb_offset', which expects an SVE type rather than a scalar} } */
svadrb_offset (u32, u8); /* { dg-error {passing 'svuint8_t' to argument 2 of 'svadrb_offset', which expects a vector of 32-bit or 64-bit integers} } */
svadrb_offset (u32, u16); /* { dg-error {passing 'svuint16_t' to argument 2 of 'svadrb_offset', which expects a vector of 32-bit or 64-bit integers} } */
svadrb_offset (u32, pg); /* { dg-error {passing 'svbool_t' to argument 2 of 'svadrb_offset', which expects a vector of integers} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_1.c
index 8ce89fa..2e919d2 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_1.c
@@ -7,8 +7,8 @@ f1 (svbool_t pg, svuint8_t u8, svint16_t s16)
{
svzip1 (pg); /* { dg-error {too few arguments to function 'svzip1'} } */
svzip1 (pg, u8, u8); /* { dg-error {too many arguments to function 'svzip1'} } */
- svzip1 (pg, u8); /* { dg-error {passing 'svuint8_t' to argument 2 of 'svzip1', but previous arguments had type 'svbool_t'} } */
- svzip1 (u8, pg); /* { dg-error {passing 'svbool_t' to argument 2 of 'svzip1', but previous arguments had type 'svuint8_t'} } */
- svzip1 (u8, s16); /* { dg-error {passing 'svint16_t' to argument 2 of 'svzip1', but previous arguments had type 'svuint8_t'} } */
- svzip1 (u8, 0); /* { dg-error {passing 'int' to argument 2 of 'svzip1', which expects an SVE vector type} } */
+ svzip1 (pg, u8); /* { dg-error {passing 'svuint8_t' to argument 2 of 'svzip1', but argument 1 had type 'svbool_t'} } */
+ svzip1 (u8, pg); /* { dg-error {passing 'svbool_t' to argument 2 of 'svzip1', but argument 1 had type 'svuint8_t'} } */
+ svzip1 (u8, s16); /* { dg-error {passing 'svint16_t' to argument 2 of 'svzip1', but argument 1 had type 'svuint8_t'} } */
+ svzip1 (u8, 0); /* { dg-error {passing 'int' to argument 2 of 'svzip1', which expects an SVE type rather than a scalar} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_int_opt_n.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_int_opt_n.c
index 965e9a1..9902379 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_int_opt_n.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_int_opt_n.c
@@ -11,7 +11,7 @@ f1 (svbool_t pg, svfloat16_t f16, svint16_t s16, svuint16_t u16,
svscale_x (s32, f16, s32); /* { dg-error {passing 'svint32_t' to argument 1 of 'svscale_x', which expects 'svbool_t'} } */
svscale_x (1, f16, s32); /* { dg-error {passing 'int' to argument 1 of 'svscale_x', which expects 'svbool_t'} } */
svscale_x (pg, pg, s16); /* { dg-error {'svscale_x' has no form that takes 'svbool_t' arguments} } */
- svscale_x (pg, 1, s16); /* { dg-error {passing 'int' to argument 2 of 'svscale_x', which expects an SVE vector type} } */
+ svscale_x (pg, 1, s16); /* { dg-error {passing 'int' to argument 2 of 'svscale_x', which expects an SVE type rather than a scalar} } */
svscale_x (pg, f16, s16);
svscale_x (pg, f16, u16); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svscale_x', which expects a vector of signed integers} } */
svscale_x (pg, f16, f16); /* { dg-error {passing 'svfloat16_t' to argument 3 of 'svscale_x', which expects a vector of signed integers} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_int_opt_single_n_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_int_opt_single_n_1.c
new file mode 100644
index 0000000..f0b2dbb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_int_opt_single_n_1.c
@@ -0,0 +1,35 @@
+/* { dg-do compile } */
+
+#pragma GCC target "+sve2"
+
+#include <arm_sve.h>
+
+void
+f1 (svbool_t pg, svfloat16_t f16, svint16_t s16, svuint16_t u16,
+ svfloat32_t f32, svint32_t s32, svuint32_t u32, svint32x2_t s32x2,
+ svuint32x2_t u32x2)
+{
+ svrshl_x (pg, s16); /* { dg-error {too few arguments to function 'svrshl_x'} } */
+ svrshl_x (pg, s16, s16, s16); /* { dg-error {too many arguments to function 'svrshl_x'} } */
+ svrshl_x (s32, s16, s32); /* { dg-error {passing 'svint32_t' to argument 1 of 'svrshl_x', which expects 'svbool_t'} } */
+ svrshl_x (1, s16, s32); /* { dg-error {passing 'int' to argument 1 of 'svrshl_x', which expects 'svbool_t'} } */
+ svrshl_x (pg, pg, s16); /* { dg-error {'svrshl_x' has no form that takes 'svbool_t' arguments} } */
+ svrshl_x (pg, 1, s16); /* { dg-error {passing 'int' to argument 2 of 'svrshl_x', which expects an SVE type rather than a scalar} } */
+ svrshl_x (pg, s16, s16);
+ svrshl_x (pg, s16, u16); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svrshl_x', which expects a vector of signed integers} } */
+ svrshl_x (pg, s16, f16); /* { dg-error {passing 'svfloat16_t' to argument 3 of 'svrshl_x', which expects a vector of signed integers} } */
+ svrshl_x (pg, s16, s32); /* { dg-error {arguments 2 and 3 of 'svrshl_x' must have the same element size, but the values passed here have type 'svint16_t' and 'svint32_t' respectively} } */
+ svrshl_x (pg, s16, u32); /* { dg-error {passing 'svuint32_t' to argument 3 of 'svrshl_x', which expects a vector of signed integers} } */
+ svrshl_x (pg, s16, f32); /* { dg-error {passing 'svfloat32_t' to argument 3 of 'svrshl_x', which expects a vector of signed integers} } */
+ svrshl_x (pg, s16, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svrshl_x', which expects a vector of signed integers} } */
+ svrshl_x (pg, s16, 0);
+ svrshl_x (pg, f16, s16); /* { dg-error {'svrshl_x' has no form that takes 'svfloat16_t' arguments} } */
+ svrshl_x (pg, f16, u16); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svrshl_x', which expects a vector of signed integers} } */
+ svrshl_x (pg, f16, s32); /* { dg-error {'svrshl_x' has no form that takes 'svfloat16_t' arguments} } */
+ svrshl_x (pg, f16, u32); /* { dg-error {passing 'svuint32_t' to argument 3 of 'svrshl_x', which expects a vector of signed integers} } */
+ svrshl_x (pg, u16, s16);
+
+ svrshl_x (pg, s32x2, s32x2); /* { dg-error {'svrshl_x' has no form that takes 'svint32x2_t' arguments} } */
+ svrshl_x (pg, s32x2, u32x2); /* { dg-error {passing 'svuint32x2_t' to argument 3 of 'svrshl_x', which expects vectors of signed integers} } */
+ svrshl_x (pg, s32x2, s32); /* { dg-error {'svrshl_x' has no form that takes 'svint32x2_t' arguments} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_int_opt_single_n_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_int_opt_single_n_2.c
new file mode 100644
index 0000000..976d5af
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_int_opt_single_n_2.c
@@ -0,0 +1,36 @@
+/* { dg-do compile } */
+
+#pragma GCC target "+sme2"
+
+#include <arm_sve.h>
+
+void
+f1 (svbool_t pg, svfloat16x2_t f16x2, svint16x2_t s16x2, svuint16x2_t u16x2,
+ svfloat32x2_t f32x2, svint32x2_t s32x2, svuint32x2_t u32x2,
+ svint16_t s16, svuint16_t u16, svint32_t s32, svuint32_t u32,
+ svfloat32_t f32)
+ __arm_streaming
+{
+ svrshl (s16x2); /* { dg-error {too few arguments to function 'svrshl'} } */
+ svrshl (s16x2, s16x2, s16x2); /* { dg-error {too many arguments to function 'svrshl'} } */
+ svrshl (pg, s16x2); /* { dg-error {'svrshl' has no form that takes 'svbool_t' arguments} } */
+ svrshl (1, s16x2); /* { dg-error {passing 'int' to argument 1 of 'svrshl', which expects an SVE type rather than a scalar} } */
+ svrshl (s16, s16); /* { dg-error {'svrshl' has no form that takes 'svint16_t' arguments} } */
+ svrshl (s16x2, s16x2);
+ svrshl (s16x2, u16x2); /* { dg-error {passing 'svuint16x2_t' to argument 2 of 'svrshl', which expects vectors of signed integers} } */
+ svrshl (s16x2, f16x2); /* { dg-error {passing 'svfloat16x2_t' to argument 2 of 'svrshl', which expects vectors of signed integers} } */
+ svrshl (s16x2, s32x2); /* { dg-error {arguments 1 and 2 of 'svrshl' must have the same element size, but the values passed here have type 'svint16x2_t' and 'svint32x2_t' respectively} } */
+ svrshl (s32x2, s16); /* { dg-error {arguments 1 and 2 of 'svrshl' must have the same element size, but the values passed here have type 'svint32x2_t' and 'svint16_t' respectively} } */
+ svrshl (s32x2, u16); /* { dg-error {passing 'svuint16_t' to argument 2 of 'svrshl', which expects a vector of signed integers} } */
+ svrshl (s32x2, s32);
+ svrshl (s32x2, u32); /* { dg-error {passing 'svuint32_t' to argument 2 of 'svrshl', which expects a vector of signed integers} } */
+ svrshl (s32x2, f32); /* { dg-error {passing 'svfloat32_t' to argument 2 of 'svrshl', which expects a vector of signed integers} } */
+ svrshl (s16x2, u32x2); /* { dg-error {passing 'svuint32x2_t' to argument 2 of 'svrshl', which expects vectors of signed integers} } */
+ svrshl (s16x2, f32x2); /* { dg-error {passing 'svfloat32x2_t' to argument 2 of 'svrshl', which expects vectors of signed integers} } */
+ svrshl (s16x2, pg); /* { dg-error {passing 'svbool_t' to argument 2 of 'svrshl', which expects a vector of signed integers} } */
+ svrshl (s16x2, 0); /* { dg-error {passing 'int' to argument 2 of 'svrshl', which expects an SVE type rather than a scalar type} } */
+ svrshl (f16x2, s16x2); /* { dg-error {'svrshl' has no form that takes 'svfloat16x2_t' arguments} } */
+ svrshl (f16x2, u16x2); /* { dg-error {passing 'svuint16x2_t' to argument 2 of 'svrshl', which expects vectors of signed integers} } */
+ svrshl (f16x2, s32x2); /* { dg-error {'svrshl' has no form that takes 'svfloat16x2_t' arguments} } */
+ svrshl (u16x2, s16x2);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_lane_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_lane_1.c
index 3913ff6..81533b2 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_lane_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_lane_1.c
@@ -10,9 +10,9 @@ f1 (svbool_t pg, svfloat16_t f16, svfloat32_t f32, svfloat64_t f64,
svmul_lane (f32, f32, 0, 0); /* { dg-error {too many arguments to function 'svmul_lane'} } */
svmul_lane (pg, pg, 0); /* { dg-error {'svmul_lane' has no form that takes 'svbool_t' arguments} } */
svmul_lane (s32, s32, 0); /* { dg-error {ACLE function 'svmul_lane_s32' requires ISA extension 'sve2'} "" { xfail aarch64_sve2 } } */
- svmul_lane (1, f32, 0); /* { dg-error {passing 'int' to argument 1 of 'svmul_lane', which expects an SVE vector type} } */
- svmul_lane (f32, 1, 0); /* { dg-error {passing 'int' to argument 2 of 'svmul_lane', which expects an SVE vector type} } */
- svmul_lane (f32, f64, 0); /* { dg-error {passing 'svfloat64_t' to argument 2 of 'svmul_lane', but previous arguments had type 'svfloat32_t'} } */
+ svmul_lane (1, f32, 0); /* { dg-error {passing 'int' to argument 1 of 'svmul_lane', which expects an SVE type rather than a scalar} } */
+ svmul_lane (f32, 1, 0); /* { dg-error {passing 'int' to argument 2 of 'svmul_lane', which expects an SVE type rather than a scalar} } */
+ svmul_lane (f32, f64, 0); /* { dg-error {passing 'svfloat64_t' to argument 2 of 'svmul_lane', but argument 1 had type 'svfloat32_t'} } */
svmul_lane (f32, f32, s32); /* { dg-error {argument 3 of 'svmul_lane' must be an integer constant expression} } */
svmul_lane (f32, f32, i); /* { dg-error {argument 3 of 'svmul_lane' must be an integer constant expression} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_long_lane_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_long_lane_1.c
index bfe7808..25b6208 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_long_lane_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_long_lane_1.c
@@ -19,9 +19,9 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8, svint16_t s16, svuint16_t u16,
svmullb_lane (f16, f16, 0); /* { dg-error {'svmullb_lane' has no form that takes 'svfloat16_t' arguments} } */
svmullb_lane (f32, f32, 0); /* { dg-error {'svmullb_lane' has no form that takes 'svfloat32_t' arguments} } */
svmullb_lane (f64, f64, 0); /* { dg-error {'svmullb_lane' has no form that takes 'svfloat64_t' arguments} } */
- svmullb_lane (1, u32, 0); /* { dg-error {passing 'int' to argument 1 of 'svmullb_lane', which expects an SVE vector type} } */
- svmullb_lane (u32, 1, 0); /* { dg-error {passing 'int' to argument 2 of 'svmullb_lane', which expects an SVE vector type} } */
- svmullb_lane (u32, s32, 0); /* { dg-error {passing 'svint32_t' to argument 2 of 'svmullb_lane', but previous arguments had type 'svuint32_t'} } */
+ svmullb_lane (1, u32, 0); /* { dg-error {passing 'int' to argument 1 of 'svmullb_lane', which expects an SVE type rather than a scalar} } */
+ svmullb_lane (u32, 1, 0); /* { dg-error {passing 'int' to argument 2 of 'svmullb_lane', which expects an SVE type rather than a scalar} } */
+ svmullb_lane (u32, s32, 0); /* { dg-error {passing 'svint32_t' to argument 2 of 'svmullb_lane', but argument 1 had type 'svuint32_t'} } */
svmullb_lane (u32, u32, s32); /* { dg-error {argument 3 of 'svmullb_lane' must be an integer constant expression} } */
svmullb_lane (u32, u32, i); /* { dg-error {argument 3 of 'svmullb_lane' must be an integer constant expression} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_long_opt_n_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_long_opt_n_1.c
index 27893c6..1f513dd 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_long_opt_n_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_long_opt_n_1.c
@@ -23,11 +23,11 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8,
svaddlb (u64, u64); /* { dg-error {'svaddlb' has no form that takes 'svuint64_t' arguments} } */
svaddlb (s64, s64); /* { dg-error {'svaddlb' has no form that takes 'svint64_t' arguments} } */
svaddlb (f16, f16); /* { dg-error {'svaddlb' has no form that takes 'svfloat16_t' arguments} } */
- svaddlb (1, u8); /* { dg-error {passing 'int' to argument 1 of 'svaddlb', which expects an SVE vector type} } */
- svaddlb (u8, s8); /* { dg-error {passing 'svint8_t' to argument 2 of 'svaddlb', but previous arguments had type 'svuint8_t'} } */
- svaddlb (u8, s16); /* { dg-error {passing 'svint16_t' to argument 2 of 'svaddlb', but previous arguments had type 'svuint8_t'} } */
- svaddlb (u8, u16); /* { dg-error {passing 'svuint16_t' to argument 2 of 'svaddlb', but previous arguments had type 'svuint8_t'} } */
- svaddlb (u16, pg); /* { dg-error {passing 'svbool_t' to argument 2 of 'svaddlb', but previous arguments had type 'svuint16_t'} } */
+ svaddlb (1, u8); /* { dg-error {passing 'int' to argument 1 of 'svaddlb', which expects an SVE type rather than a scalar} } */
+ svaddlb (u8, s8); /* { dg-error {passing 'svint8_t' to argument 2 of 'svaddlb', but argument 1 had type 'svuint8_t'} } */
+ svaddlb (u8, s16); /* { dg-error {passing 'svint16_t' to argument 2 of 'svaddlb', but argument 1 had type 'svuint8_t'} } */
+ svaddlb (u8, u16); /* { dg-error {passing 'svuint16_t' to argument 2 of 'svaddlb', but argument 1 had type 'svuint8_t'} } */
+ svaddlb (u16, pg); /* { dg-error {passing 'svbool_t' to argument 2 of 'svaddlb', but argument 1 had type 'svuint16_t'} } */
svaddlb (u8, 0);
svaddlb (u16, 0);
svaddlb (u32, 0);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_n_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_n_1.c
index 0c69e66..ff4f0ff 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_n_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_n_1.c
@@ -7,7 +7,7 @@ f1 (svbool_t pg, svuint8_t u8, svfloat16_t f16, int i, float f)
{
svinsr (u8); /* { dg-error {too few arguments to function 'svinsr'} } */
svinsr (u8, 0, 0); /* { dg-error {too many arguments to function 'svinsr'} } */
- svinsr (0, 0); /* { dg-error {passing 'int' to argument 1 of 'svinsr', which expects an SVE vector type} } */
+ svinsr (0, 0); /* { dg-error {passing 'int' to argument 1 of 'svinsr', which expects an SVE type rather than a scalar} } */
svinsr (u8, 0);
svinsr (u8, -1);
svinsr (u8, i);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_narrowb_opt_n_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_narrowb_opt_n_1.c
index 920cbd1..4a29b5c 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_narrowb_opt_n_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_narrowb_opt_n_1.c
@@ -23,11 +23,11 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8,
svaddhnb (u64, u64);
svaddhnb (s64, s64);
svaddhnb (f32, f32); /* { dg-error {'svaddhnb' has no form that takes 'svfloat32_t' arguments} } */
- svaddhnb (1, u16); /* { dg-error {passing 'int' to argument 1 of 'svaddhnb', which expects an SVE vector type} } */
- svaddhnb (u16, s8); /* { dg-error {passing 'svint8_t' to argument 2 of 'svaddhnb', but previous arguments had type 'svuint16_t'} } */
- svaddhnb (u16, s16); /* { dg-error {passing 'svint16_t' to argument 2 of 'svaddhnb', but previous arguments had type 'svuint16_t'} } */
- svaddhnb (u16, u32); /* { dg-error {passing 'svuint32_t' to argument 2 of 'svaddhnb', but previous arguments had type 'svuint16_t'} } */
- svaddhnb (u16, pg); /* { dg-error {passing 'svbool_t' to argument 2 of 'svaddhnb', but previous arguments had type 'svuint16_t'} } */
+ svaddhnb (1, u16); /* { dg-error {passing 'int' to argument 1 of 'svaddhnb', which expects an SVE type rather than a scalar} } */
+ svaddhnb (u16, s8); /* { dg-error {passing 'svint8_t' to argument 2 of 'svaddhnb', but argument 1 had type 'svuint16_t'} } */
+ svaddhnb (u16, s16); /* { dg-error {passing 'svint16_t' to argument 2 of 'svaddhnb', but argument 1 had type 'svuint16_t'} } */
+ svaddhnb (u16, u32); /* { dg-error {passing 'svuint32_t' to argument 2 of 'svaddhnb', but argument 1 had type 'svuint16_t'} } */
+ svaddhnb (u16, pg); /* { dg-error {passing 'svbool_t' to argument 2 of 'svaddhnb', but argument 1 had type 'svuint16_t'} } */
svaddhnb (u8, 0); /* { dg-error {'svaddhnb' has no form that takes 'svuint8_t' arguments} } */
svaddhnb (u16, 0);
svaddhnb (u32, 0);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_narrowt_opt_n_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_narrowt_opt_n_1.c
index eb70d05..4a44261 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_narrowt_opt_n_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_narrowt_opt_n_1.c
@@ -26,12 +26,12 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8,
svaddhnt (u32, u64, u64);
svaddhnt (s32, s64, s64);
svaddhnt (f16, f32, f32); /* { dg-error {'svaddhnt' has no form that takes 'svfloat32_t' arguments} } */
- svaddhnt (1, u16, u16); /* { dg-error {passing 'int' to argument 1 of 'svaddhnt', which expects an SVE vector type} } */
- svaddhnt (u8, 1, u16); /* { dg-error {passing 'int' to argument 2 of 'svaddhnt', which expects an SVE vector type} } */
- svaddhnt (u8, u16, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svaddhnt', but previous arguments had type 'svuint16_t'} } */
- svaddhnt (u8, u16, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svaddhnt', but previous arguments had type 'svuint16_t'} } */
- svaddhnt (u8, u16, u32); /* { dg-error {passing 'svuint32_t' to argument 3 of 'svaddhnt', but previous arguments had type 'svuint16_t'} } */
- svaddhnt (u8, u16, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svaddhnt', but previous arguments had type 'svuint16_t'} } */
+ svaddhnt (1, u16, u16); /* { dg-error {passing 'int' to argument 1 of 'svaddhnt', which expects an SVE type rather than a scalar} } */
+ svaddhnt (u8, 1, u16); /* { dg-error {passing 'int' to argument 2 of 'svaddhnt', which expects an SVE type rather than a scalar} } */
+ svaddhnt (u8, u16, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svaddhnt', but argument 2 had type 'svuint16_t'} } */
+ svaddhnt (u8, u16, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svaddhnt', but argument 2 had type 'svuint16_t'} } */
+ svaddhnt (u8, u16, u32); /* { dg-error {passing 'svuint32_t' to argument 3 of 'svaddhnt', but argument 2 had type 'svuint16_t'} } */
+ svaddhnt (u8, u16, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svaddhnt', but argument 2 had type 'svuint16_t'} } */
svaddhnt (u8, u8, 0); /* { dg-error {'svaddhnt' has no form that takes 'svuint8_t' arguments} } */
svaddhnt (u16, u16, 0); /* { dg-error {passing 'svuint16_t' instead of the expected 'svuint8_t' to argument 1 of 'svaddhnt', after passing 'svuint16_t' to argument 2} } */
svaddhnt (s8, u16, 0); /* { dg-error {arguments 1 and 2 of 'svaddhnt' must have the same signedness, but the values passed here have type 'svint8_t' and 'svuint16_t' respectively} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_opt_n_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_opt_n_2.c
index 9fa83ca..40447cf 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_opt_n_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_opt_n_2.c
@@ -10,17 +10,17 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8,
svadd_x (pg, u8, u8, u8); /* { dg-error {too many arguments to function 'svadd_x'} } */
svadd_x (u8, u8, u8); /* { dg-error {passing 'svuint8_t' to argument 1 of 'svadd_x', which expects 'svbool_t'} } */
svadd_x (pg, pg, pg); /* { dg-error {'svadd_x' has no form that takes 'svbool_t' arguments} } */
- svadd_x (pg, 1, u8); /* { dg-error {passing 'int' to argument 2 of 'svadd_x', which expects an SVE vector type} } */
- svadd_x (pg, u8, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svadd_x', but previous arguments had type 'svuint8_t'} } */
+ svadd_x (pg, 1, u8); /* { dg-error {passing 'int' to argument 2 of 'svadd_x', which expects an SVE type rather than a scalar} } */
+ svadd_x (pg, u8, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svadd_x', but argument 2 had type 'svuint8_t'} } */
svadd_x (pg, u8, u8);
- svadd_x (pg, u8, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svadd_x', but previous arguments had type 'svuint8_t'} } */
- svadd_x (pg, u8, u16); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svadd_x', but previous arguments had type 'svuint8_t'} } */
- svadd_x (pg, u8, f16); /* { dg-error {passing 'svfloat16_t' to argument 3 of 'svadd_x', but previous arguments had type 'svuint8_t'} } */
- svadd_x (pg, u8, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svadd_x', but previous arguments had type 'svuint8_t'} } */
+ svadd_x (pg, u8, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svadd_x', but argument 2 had type 'svuint8_t'} } */
+ svadd_x (pg, u8, u16); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svadd_x', but argument 2 had type 'svuint8_t'} } */
+ svadd_x (pg, u8, f16); /* { dg-error {passing 'svfloat16_t' to argument 3 of 'svadd_x', but argument 2 had type 'svuint8_t'} } */
+ svadd_x (pg, u8, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svadd_x', but argument 2 had type 'svuint8_t'} } */
svadd_x (pg, u8, 0);
- svadd_x (pg, f16, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svadd_x', but previous arguments had type 'svfloat16_t'} } */
- svadd_x (pg, f16, u16); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svadd_x', but previous arguments had type 'svfloat16_t'} } */
+ svadd_x (pg, f16, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svadd_x', but argument 2 had type 'svfloat16_t'} } */
+ svadd_x (pg, f16, u16); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svadd_x', but argument 2 had type 'svfloat16_t'} } */
svadd_x (pg, f16, f16);
svadd_x (pg, f16, 1);
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_opt_n_3.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_opt_n_3.c
index 4d0b253..94e20bc 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_opt_n_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_opt_n_3.c
@@ -10,20 +10,20 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8,
svand_z (pg, u8, u8, u8); /* { dg-error {too many arguments to function 'svand_z'} } */
svand_z (u8, u8, u8); /* { dg-error {passing 'svuint8_t' to argument 1 of 'svand_z', which expects 'svbool_t'} } */
svand_z (pg, pg, pg);
- svand_z (pg, 1, u8); /* { dg-error {passing 'int' to argument 2 of 'svand_z', which expects an SVE vector type} } */
- svand_z (pg, u8, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svand_z', but previous arguments had type 'svuint8_t'} } */
+ svand_z (pg, 1, u8); /* { dg-error {passing 'int' to argument 2 of 'svand_z', which expects an SVE type rather than a scalar} } */
+ svand_z (pg, u8, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svand_z', but argument 2 had type 'svuint8_t'} } */
svand_z (pg, u8, u8);
- svand_z (pg, u8, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svand_z', but previous arguments had type 'svuint8_t'} } */
- svand_z (pg, u8, u16); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svand_z', but previous arguments had type 'svuint8_t'} } */
- svand_z (pg, u8, f16); /* { dg-error {passing 'svfloat16_t' to argument 3 of 'svand_z', but previous arguments had type 'svuint8_t'} } */
- svand_z (pg, u8, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svand_z', but previous arguments had type 'svuint8_t'} } */
+ svand_z (pg, u8, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svand_z', but argument 2 had type 'svuint8_t'} } */
+ svand_z (pg, u8, u16); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svand_z', but argument 2 had type 'svuint8_t'} } */
+ svand_z (pg, u8, f16); /* { dg-error {passing 'svfloat16_t' to argument 3 of 'svand_z', but argument 2 had type 'svuint8_t'} } */
+ svand_z (pg, u8, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svand_z', but argument 2 had type 'svuint8_t'} } */
svand_z (pg, u8, 0);
- svand_z (pg, pg, u8); /* { dg-error {passing 'svuint8_t' to argument 3 of 'svand_z', but previous arguments had type 'svbool_t'} } */
+ svand_z (pg, pg, u8); /* { dg-error {passing 'svuint8_t' to argument 3 of 'svand_z', but argument 2 had type 'svbool_t'} } */
svand_z (pg, pg, 0); /* { dg-error {passing 'int' to argument 3 of 'svand_z', but its 'svbool_t' form does not accept scalars} } */
- svand_z (pg, f16, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svand_z', but previous arguments had type 'svfloat16_t'} } */
- svand_z (pg, f16, u16); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svand_z', but previous arguments had type 'svfloat16_t'} } */
+ svand_z (pg, f16, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svand_z', but argument 2 had type 'svfloat16_t'} } */
+ svand_z (pg, f16, u16); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svand_z', but argument 2 had type 'svfloat16_t'} } */
svand_z (pg, f16, f16); /* { dg-error {'svand_z' has no form that takes 'svfloat16_t' arguments} } */
svand_z (pg, f16, 1); /* { dg-error {'svand_z' has no form that takes 'svfloat16_t' arguments} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_opt_single_n_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_opt_single_n_1.c
new file mode 100644
index 0000000..9676de7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_opt_single_n_1.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+
+#pragma GCC target "+sve2"
+
+#include <arm_sve.h>
+
+void
+f1 (svbool_t pg, svfloat16_t f16, svint16_t s16, svuint16_t u16,
+ svfloat32_t f32, svint32_t s32, svuint32_t u32, svint32x2_t s32x2,
+ svuint32x2_t u32x2)
+{
+ svqdmulh (s16); /* { dg-error {too few arguments to function 'svqdmulh'} } */
+ svqdmulh (s16, s16, s16); /* { dg-error {too many arguments to function 'svqdmulh'} } */
+ svqdmulh (pg, pg); /* { dg-error {'svqdmulh' has no form that takes 'svbool_t' arguments} } */
+ svqdmulh (1, s16); /* { dg-error {passing 'int' to argument 1 of 'svqdmulh', which expects an SVE type rather than a scalar} } */
+ svqdmulh (s16, s16);
+ svqdmulh (s16, u16); /* { dg-error {passing 'svuint16_t' to argument 2 of 'svqdmulh', but argument 1 had type 'svint16_t'} } */
+ svqdmulh (s16, f16); /* { dg-error {passing 'svfloat16_t' to argument 2 of 'svqdmulh', but argument 1 had type 'svint16_t'} } */
+ svqdmulh (s16, s32); /* { dg-error {passing 'svint32_t' to argument 2 of 'svqdmulh', but argument 1 had type 'svint16_t'} } */
+ svqdmulh (s32, s32x2); /* { dg-error {passing tuple 'svint32x2_t' to argument 2 of 'svqdmulh' after passing single vector 'svint32_t' to argument 1} } */
+ svqdmulh (s16, 0);
+ svqdmulh (f16, f16); /* { dg-error {'svqdmulh' has no form that takes 'svfloat16_t' arguments} } */
+ svqdmulh (u16, u16); /* { dg-error {'svqdmulh' has no form that takes 'svuint16_t' arguments} } */
+
+ svqdmulh (s32x2, s32x2); /* { dg-error {ACLE function 'svqdmulh_s32_x2' can only be called when SME streaming mode is enabled} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_opt_single_n_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_opt_single_n_2.c
new file mode 100644
index 0000000..5cc8a4c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_opt_single_n_2.c
@@ -0,0 +1,38 @@
+/* { dg-do compile } */
+
+#pragma GCC target "+sme2"
+
+#include <arm_sve.h>
+
+void
+f1 (svbool_t pg, svfloat16x2_t f16x2, svint16x2_t s16x2, svuint16x2_t u16x2,
+ svfloat32x2_t f32x2, svint32x2_t s32x2, svuint32x2_t u32x2,
+ svint32x3_t s32x3, svint32x4_t s32x4,
+ svint16_t s16, svuint16_t u16, svint32_t s32, svuint32_t u32,
+ svfloat32_t f32)
+ __arm_streaming
+{
+ svqdmulh (s16x2); /* { dg-error {too few arguments to function 'svqdmulh'} } */
+ svqdmulh (s16x2, s16x2, s16x2); /* { dg-error {too many arguments to function 'svqdmulh'} } */
+ svqdmulh (pg, s16x2); /* { dg-error {'svqdmulh' has no form that takes 'svbool_t' arguments} } */
+ svqdmulh (1, s16x2); /* { dg-error {passing 'int' to argument 1 of 'svqdmulh', which expects an SVE type rather than a scalar} } */
+ svqdmulh (s16, s16);
+ svqdmulh (s16x2, s16x2);
+ svqdmulh (s16x2, u16x2); /* { dg-error {passing 'svuint16x2_t' to argument 2 of 'svqdmulh', but argument 1 had type 'svint16x2_t'} } */
+ svqdmulh (s16x2, f16x2); /* { dg-error {passing 'svfloat16x2_t' to argument 2 of 'svqdmulh', but argument 1 had type 'svint16x2_t'} } */
+ svqdmulh (s32x2, s16); /* { dg-error {passing 'svint16_t' to argument 2 of 'svqdmulh', but argument 1 was a tuple of 'svint32_t'} } */
+ svqdmulh (s32x2, u16); /* { dg-error {passing 'svuint16_t' to argument 2 of 'svqdmulh', but argument 1 was a tuple of 'svint32_t'} } */
+ svqdmulh (s32x2, s32);
+ svqdmulh (s32x2, s32x3); /* { dg-error {passing mismatched tuple types 'svint32x2_t' and 'svint32x3_t' to arguments 1 and 2 of 'svqdmulh'} } */
+ svqdmulh (s32x2, s32x4); /* { dg-error {passing mismatched tuple types 'svint32x2_t' and 'svint32x4_t' to arguments 1 and 2 of 'svqdmulh'} } */
+ svqdmulh (s32x3, s32x2); /* { dg-error {'svqdmulh' has no form that takes 'svint32x3_t' arguments} } */
+ svqdmulh (s32x3, s32x3); /* { dg-error {'svqdmulh' has no form that takes 'svint32x3_t' arguments} } */
+ svqdmulh (s32x4, s32x2); /* { dg-error {passing mismatched tuple types 'svint32x4_t' and 'svint32x2_t' to arguments 1 and 2 of 'svqdmulh'} } */
+ svqdmulh (s32x4, s32x3); /* { dg-error {passing mismatched tuple types 'svint32x4_t' and 'svint32x3_t' to arguments 1 and 2 of 'svqdmulh'} } */
+ svqdmulh (s32x4, s32x4);
+ svqdmulh (u32x2, u32x2); /* { dg-error {'svqdmulh' has no form that takes 'svuint32x2_t' arguments} } */
+ svqdmulh (u32x2, u32); /* { dg-error {'svqdmulh' has no form that takes 'svuint32x2_t' arguments} } */
+
+ svqdmulh (s16x2, pg); /* { dg-error {passing 'svbool_t' to argument 2 of 'svqdmulh', but argument 1 was a tuple of 'svint16_t'} } */
+ svqdmulh (s16x2, 0); /* { dg-error {passing 'int' to argument 2 of 'svqdmulh', which expects an SVE type rather than a scalar type} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_rotate_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_rotate_1.c
index 8ffe91b..8939ce2 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_rotate_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_rotate_1.c
@@ -10,9 +10,9 @@ f1 (svbool_t pg, svfloat32_t f32, svfloat64_t f64, svint32_t s32, int i)
svcadd_x (f32, f32, f32, 90); /* { dg-error {passing 'svfloat32_t' to argument 1 of 'svcadd_x', which expects 'svbool_t'} } */
svcadd_x (pg, pg, pg, 90); /* { dg-error {'svcadd_x' has no form that takes 'svbool_t' arguments} } */
svcadd_x (pg, s32, s32, 90); /* { dg-error {'svcadd_x' has no form that takes 'svint32_t' arguments} } */
- svcadd_x (pg, 1, f32, 90); /* { dg-error {passing 'int' to argument 2 of 'svcadd_x', which expects an SVE vector type} } */
- svcadd_x (pg, f32, 1, 90); /* { dg-error {passing 'int' to argument 3 of 'svcadd_x', which expects an SVE vector type} } */
- svcadd_x (pg, f32, f64, 90); /* { dg-error {passing 'svfloat64_t' to argument 3 of 'svcadd_x', but previous arguments had type 'svfloat32_t'} } */
+ svcadd_x (pg, 1, f32, 90); /* { dg-error {passing 'int' to argument 2 of 'svcadd_x', which expects an SVE type rather than a scalar} } */
+ svcadd_x (pg, f32, 1, 90); /* { dg-error {passing 'int' to argument 3 of 'svcadd_x', which expects an SVE type rather than a scalar} } */
+ svcadd_x (pg, f32, f64, 90); /* { dg-error {passing 'svfloat64_t' to argument 3 of 'svcadd_x', but argument 2 had type 'svfloat32_t'} } */
svcadd_x (pg, f32, f32, s32); /* { dg-error {argument 4 of 'svcadd_x' must be an integer constant expression} } */
svcadd_x (pg, f32, f32, i); /* { dg-error {argument 4 of 'svcadd_x' must be an integer constant expression} } */
svcadd_x (pg, f32, f32, -90); /* { dg-error {passing -90 to argument 4 of 'svcadd_x', which expects either 90 or 270} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_single_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_single_1.c
new file mode 100644
index 0000000..aa7633b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_single_1.c
@@ -0,0 +1,34 @@
+/* { dg-do compile } */
+
+#pragma GCC target "+sme2"
+
+#include <arm_sve.h>
+
+void
+f1 (svbool_t pg, svfloat16x2_t f16x2, svint16x2_t s16x2, svuint16x2_t u16x2,
+ svfloat32x2_t f32x2, svint32x2_t s32x2, svuint32x2_t u32x2,
+ svint32x3_t s32x3, svint32x4_t s32x4,
+ svint16_t s16, svuint16_t u16, svfloat16_t f16, svint32_t s32,
+ svuint32_t u32, svfloat32_t f32)
+ __arm_streaming
+{
+ svadd (s16x2); /* { dg-error {too few arguments to function 'svadd'} } */
+ svadd (s16x2, s16x2, s16x2); /* { dg-error {too many arguments to function 'svadd'} } */
+ svadd (pg, s16x2); /* { dg-error {passing 'svint16x2_t' to argument 2 of 'svadd', which expects a single SVE vector rather than a tuple} } */
+ svadd (1, s16x2); /* { dg-error {passing 'int' to argument 1 of 'svadd', which expects an SVE type rather than a scalar} } */
+ svadd (s16, s16); /* { dg-error {'svadd' has no form that takes 'svint16_t' arguments} } */
+ svadd (s16x2, s16x2); /* { dg-error {passing 'svint16x2_t' to argument 2 of 'svadd', which expects a single SVE vector rather than a tuple} } */
+ svadd (s16x2, u16x2); /* { dg-error {passing 'svuint16x2_t' to argument 2 of 'svadd', which expects a single SVE vector rather than a tuple} } */
+ svadd (s16x2, s16);
+ svadd (s16x2, u16); /* { dg-error {passing 'svuint16_t' to argument 2 of 'svadd', but argument 1 was a tuple of 'svint16_t'} } */
+ svadd (s16x2, f16); /* { dg-error {passing 'svfloat16_t' to argument 2 of 'svadd', but argument 1 was a tuple of 'svint16_t'} } */
+ svadd (s32x2, s16); /* { dg-error {passing 'svint16_t' to argument 2 of 'svadd', but argument 1 was a tuple of 'svint32_t'} } */
+ svadd (s32x2, u16); /* { dg-error {passing 'svuint16_t' to argument 2 of 'svadd', but argument 1 was a tuple of 'svint32_t'} } */
+ svadd (s32x2, s32);
+ svadd (s32x3, s32); /* { dg-error {'svadd' has no form that takes 'svint32x3_t' arguments} } */
+ svadd (s32x4, s32x2); /* { dg-error {passing 'svint32x2_t' to argument 2 of 'svadd', which expects a single SVE vector rather than a tuple} } */
+ svadd (f32x2, f32); /* { dg-error {'svadd' has no form that takes 'svfloat32x2_t' arguments} } */
+
+ svadd (s16x2, pg); /* { dg-error {passing 'svbool_t' to argument 2 of 'svadd', but argument 1 was a tuple of 'svint16_t'} } */
+ svadd (s16x2, 0); /* { dg-error {passing 'int' to argument 2 of 'svadd', which expects an SVE type rather than a scalar type} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_to_uint_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_to_uint_1.c
index 213defc..2c3fe5d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_to_uint_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_to_uint_1.c
@@ -11,9 +11,9 @@ f1 (svbool_t pg, svint32_t s32, svuint32_t u32)
svhistcnt_z (pg, s32, s32, 0); /* { dg-error {too many arguments to function 'svhistcnt_z'} } */
svhistcnt_z (0, s32, s32); /* { dg-error {passing 'int' to argument 1 of 'svhistcnt_z', which expects 'svbool_t'} } */
svhistcnt_z (s32, s32, s32); /* { dg-error {passing 'svint32_t' to argument 1 of 'svhistcnt_z', which expects 'svbool_t'} } */
- svhistcnt_z (pg, 0, s32); /* { dg-error {passing 'int' to argument 2 of 'svhistcnt_z', which expects an SVE vector type} } */
- svhistcnt_z (pg, pg, s32); /* { dg-error {passing 'svint32_t' to argument 3 of 'svhistcnt_z', but previous arguments had type 'svbool_t'} } */
- svhistcnt_z (pg, s32, u32); /* { dg-error {passing 'svuint32_t' to argument 3 of 'svhistcnt_z', but previous arguments had type 'svint32_t'} } */
- svhistcnt_z (pg, s32, 0); /* { dg-error {passing 'int' to argument 3 of 'svhistcnt_z', which expects an SVE vector type} } */
+ svhistcnt_z (pg, 0, s32); /* { dg-error {passing 'int' to argument 2 of 'svhistcnt_z', which expects an SVE type rather than a scalar} } */
+ svhistcnt_z (pg, pg, s32); /* { dg-error {passing 'svint32_t' to argument 3 of 'svhistcnt_z', but argument 2 had type 'svbool_t'} } */
+ svhistcnt_z (pg, s32, u32); /* { dg-error {passing 'svuint32_t' to argument 3 of 'svhistcnt_z', but argument 2 had type 'svint32_t'} } */
+ svhistcnt_z (pg, s32, 0); /* { dg-error {passing 'int' to argument 3 of 'svhistcnt_z', which expects an SVE type rather than a scalar} } */
svhistcnt_z (pg, pg, pg); /* { dg-error {'svhistcnt_z' has no form that takes 'svbool_t' arguments} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint64_n_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint64_n_1.c
index c8ca5f7..207552a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint64_n_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint64_n_1.c
@@ -7,7 +7,7 @@ f1 (svbool_t pg, svuint8_t u8, int i, float f)
{
svdupq_lane (u8); /* { dg-error {too few arguments to function 'svdupq_lane'} } */
svdupq_lane (u8, 0, 0); /* { dg-error {too many arguments to function 'svdupq_lane'} } */
- svdupq_lane (0, 0); /* { dg-error {passing 'int' to argument 1 of 'svdupq_lane', which expects an SVE vector type} } */
+ svdupq_lane (0, 0); /* { dg-error {passing 'int' to argument 1 of 'svdupq_lane', which expects an SVE type rather than a scalar} } */
svdupq_lane (u8, 0);
svdupq_lane (u8, -1);
svdupq_lane (u8, i);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint64_opt_n_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint64_opt_n_2.c
index be21739..c661a66 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint64_opt_n_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint64_opt_n_2.c
@@ -8,7 +8,7 @@ f1 (svbool_t pg, svuint8_t u8, svuint64_t u64)
svlsl_wide_x (pg, u8); /* { dg-error {too few arguments to function 'svlsl_wide_x'} } */
svlsl_wide_x (pg, u8, u8, u8); /* { dg-error {too many arguments to function 'svlsl_wide_x'} } */
svlsl_wide_x (u8, u8, u64); /* { dg-error {passing 'svuint8_t' to argument 1 of 'svlsl_wide_x', which expects 'svbool_t'} } */
- svlsl_wide_x (pg, 1, u64); /* { dg-error {passing 'int' to argument 2 of 'svlsl_wide_x', which expects an SVE vector type} } */
+ svlsl_wide_x (pg, 1, u64); /* { dg-error {passing 'int' to argument 2 of 'svlsl_wide_x', which expects an SVE type rather than a scalar} } */
svlsl_wide_x (pg, u8, u8); /* { dg-error {passing 'svuint8_t' to argument 3 of 'svlsl_wide_x', which expects 'svuint64_t'} } */
svlsl_wide_x (pg, u64, u64); /* { dg-error {'svlsl_wide_x' has no form that takes 'svuint64_t' arguments} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint_1.c
index 8f86c50..8493d5d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint_1.c
@@ -11,7 +11,7 @@ f1 (svbool_t pg, svuint8_t u8, svint8_t s8, svuint16_t u16, svint16_t s16,
svtbl (pg, pg); /* { dg-error {passing 'svbool_t' to argument 2 of 'svtbl', which expects a vector of unsigned integers} } */
svtbl (pg, u8); /* { dg-error {'svtbl' has no form that takes 'svbool_t' arguments} } */
- svtbl (u8, 0); /* { dg-error {passing 'int' to argument 2 of 'svtbl', which expects an SVE vector type} } */
+ svtbl (u8, 0); /* { dg-error {passing 'int' to argument 2 of 'svtbl', which expects an SVE type rather than a scalar} } */
svtbl (u8, u8);
svtbl (u8, s8); /* { dg-error {passing 'svint8_t' to argument 2 of 'svtbl', which expects a vector of unsigned integers} } */
svtbl (u8, u16); /* { dg-error {arguments 1 and 2 of 'svtbl' must have the same element size, but the values passed here have type 'svuint8_t' and 'svuint16_t' respectively} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint_n_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint_n_1.c
index 36a902e..d74cb46 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint_n_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint_n_1.c
@@ -7,7 +7,7 @@ f1 (svbool_t pg, svuint8_t u8, int i, float f)
{
svdup_lane (u8); /* { dg-error {too few arguments to function 'svdup_lane'} } */
svdup_lane (u8, 0, 0); /* { dg-error {too many arguments to function 'svdup_lane'} } */
- svdup_lane (0, 0); /* { dg-error {passing 'int' to argument 1 of 'svdup_lane', which expects an SVE vector type} } */
+ svdup_lane (0, 0); /* { dg-error {passing 'int' to argument 1 of 'svdup_lane', which expects an SVE type rather than a scalar} } */
svdup_lane (u8, 0);
svdup_lane (u8, -1);
svdup_lane (u8, i);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint_opt_n_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint_opt_n_1.c
index b162ab4..f44d7a9 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint_opt_n_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_uint_opt_n_1.c
@@ -11,7 +11,7 @@ f1 (svbool_t pg, svfloat16_t f16, svint16_t s16, svuint16_t u16,
svlsl_x (s32, s32, u32); /* { dg-error {passing 'svint32_t' to argument 1 of 'svlsl_x', which expects 'svbool_t'} } */
svlsl_x (1, s32, u32); /* { dg-error {passing 'int' to argument 1 of 'svlsl_x', which expects 'svbool_t'} } */
svlsl_x (pg, pg, u16); /* { dg-error {'svlsl_x' has no form that takes 'svbool_t' arguments} } */
- svlsl_x (pg, 1, s16); /* { dg-error {passing 'int' to argument 2 of 'svlsl_x', which expects an SVE vector type} } */
+ svlsl_x (pg, 1, s16); /* { dg-error {passing 'int' to argument 2 of 'svlsl_x', which expects an SVE type rather than a scalar} } */
svlsl_x (pg, s16, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svlsl_x', which expects a vector of unsigned integers} } */
svlsl_x (pg, s16, u16);
svlsl_x (pg, s16, f16); /* { dg-error {passing 'svfloat16_t' to argument 3 of 'svlsl_x', which expects a vector of unsigned integers} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_wide_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_wide_1.c
index f58ab75..ba38361 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_wide_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_wide_1.c
@@ -30,8 +30,8 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8,
svadalp_m (pg, s16, s8);
svadalp_m (pg, f32, f16); /* { dg-error {'svadalp_m' has no form that takes 'svfloat32_t' arguments} } */
svadalp_m (pg, f16, f32); /* { dg-error {'svadalp_m' has no form that takes 'svfloat16_t' arguments} } */
- svadalp_m (pg, 0, u32); /* { dg-error {passing 'int' to argument 2 of 'svadalp_m', which expects an SVE vector type} } */
- svadalp_m (pg, 0, u64); /* { dg-error {passing 'int' to argument 2 of 'svadalp_m', which expects an SVE vector type} } */
- svadalp_m (pg, u8, 0); /* { dg-error {passing 'int' to argument 3 of 'svadalp_m', which expects an SVE vector type} } */
- svadalp_m (pg, u16, 0); /* { dg-error {passing 'int' to argument 3 of 'svadalp_m', which expects an SVE vector type} } */
+ svadalp_m (pg, 0, u32); /* { dg-error {passing 'int' to argument 2 of 'svadalp_m', which expects an SVE type rather than a scalar} } */
+ svadalp_m (pg, 0, u64); /* { dg-error {passing 'int' to argument 2 of 'svadalp_m', which expects an SVE type rather than a scalar} } */
+ svadalp_m (pg, u8, 0); /* { dg-error {passing 'int' to argument 3 of 'svadalp_m', which expects an SVE type rather than a scalar} } */
+ svadalp_m (pg, u16, 0); /* { dg-error {passing 'int' to argument 3 of 'svadalp_m', which expects an SVE type rather than a scalar} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_wide_opt_n_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_wide_opt_n_1.c
index 5a58211..fd27d85 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_wide_opt_n_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_wide_opt_n_1.c
@@ -27,8 +27,8 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8,
svaddwb (s16, s8);
svaddwb (f32, f16); /* { dg-error {'svaddwb' has no form that takes 'svfloat32_t' arguments} } */
svaddwb (f16, f32); /* { dg-error {'svaddwb' has no form that takes 'svfloat16_t' arguments} } */
- svaddwb (0, u32); /* { dg-error {passing 'int' to argument 1 of 'svaddwb', which expects an SVE vector type} } */
- svaddwb (0, u64); /* { dg-error {passing 'int' to argument 1 of 'svaddwb', which expects an SVE vector type} } */
+ svaddwb (0, u32); /* { dg-error {passing 'int' to argument 1 of 'svaddwb', which expects an SVE type rather than a scalar} } */
+ svaddwb (0, u64); /* { dg-error {passing 'int' to argument 1 of 'svaddwb', which expects an SVE type rather than a scalar} } */
svaddwb (u8, 0); /* { dg-error {'svaddwb' has no form that takes 'svuint8_t' arguments} } */
svaddwb (u16, 0);
svaddwb (u32, 0);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_int_m_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_int_m_1.c
new file mode 100644
index 0000000..fce1ef1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_int_m_1.c
@@ -0,0 +1,50 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target ("arch=armv9-a+sme")
+
+void
+f1 (svbool_t pg, svint8_t s8, svuint8_t u8,
+ svint16_t s16, svuint16_t u16, svfloat16_t f16, uint32_t tile)
+ __arm_streaming __arm_inout("za")
+{
+ svusmopa_za32_m (0, pg, pg, u8); /* { dg-error {too few arguments to function 'svusmopa_za32_m'} } */
+ svusmopa_za32_m (0, pg, pg, u8, s8, 0); /* { dg-error {too many arguments to function 'svusmopa_za32_m'} } */
+ svusmopa_za32_m (tile, pg, pg, u8, s8); /* { dg-error {argument 1 of 'svusmopa_za32_m' must be an integer constant expression} } */
+ svusmopa_za32_m (-1, pg, pg, u8, s8); /* { dg-error {passing -1 to argument 1 of 'svusmopa_za32_m', which expects a value in the range \[0, 3\]} } */
+ svusmopa_za32_m (4, pg, pg, u8, s8); /* { dg-error {passing 4 to argument 1 of 'svusmopa_za32_m', which expects a value in the range \[0, 3\]} } */
+ svusmopa_za32_m (0, u8, pg, u8, s8); /* { dg-error {passing 'svuint8_t' to argument 2 of 'svusmopa_za32_m', which expects 'svbool_t'} } */
+ svusmopa_za32_m (0, pg, u8, u8, s8); /* { dg-error {passing 'svuint8_t' to argument 3 of 'svusmopa_za32_m', which expects 'svbool_t'} } */
+ svusmopa_za32_m (0, pg, pg, tile, s8); /* { dg-error {passing 'uint32_t'.* to argument 4 of 'svusmopa_za32_m', which expects an SVE type} } */
+ svusmopa_za32_m (0, pg, pg, s8, s8); /* { dg-error {'svusmopa_za32_m' has no form that takes 'svint8_t' arguments} } */
+ svusmopa_za32_m (0, pg, pg, pg, s8); /* { dg-error {'svusmopa_za32_m' has no form that takes 'svbool_t' arguments} } */
+ svusmopa_za32_m (0, pg, pg, f16, s8); /* { dg-error {'svusmopa_za32_m' has no form that takes 'svfloat16_t' arguments} } */
+ svusmopa_za32_m (0, pg, pg, u8, u8); /* { dg-error {passing 'svuint8_t' to argument 5 of 'svusmopa_za32_m', which expects a vector of signed integers} } */
+ svusmopa_za32_m (0, pg, pg, u8, s16); /* { dg-error {arguments 4 and 5 of 'svusmopa_za32_m' must have the same element size, but the values passed here have type 'svuint8_t' and 'svint16_t' respectively} } */
+ svusmopa_za32_m (0, pg, pg, u16, s16); /* { dg-error {'svusmopa_za32_m' has no form that takes 'svuint16_t' arguments} } */
+
+ svusmopa_za64_m (0, pg, pg, u16, s16); /* { dg-error {ACLE function 'svusmopa_za64_u16_m' requires ISA extension 'sme-i16i64'} } */
+}
+
+void
+f2 (svbool_t pg, svint8_t s8, svuint8_t u8) __arm_streaming
+{
+ svusmopa_za32_m (0, pg, pg, u8, s8); /* { dg-error {ACLE function 'svusmopa_za32_u8_m' can only be called from a function that has 'za' state} } */
+}
+
+void
+f3 (svbool_t pg, svint8_t s8, svuint8_t u8) __arm_inout("za")
+{
+ svusmopa_za32_m (0, pg, pg, u8, s8); /* { dg-error {ACLE function 'svusmopa_za32_u8_m' can only be called when SME streaming mode is enabled} } */
+}
+
+#pragma GCC target ("arch=armv9-a+sme-i16i64")
+
+void
+f4 (svbool_t pg, svint16_t s16, svuint16_t u16)
+ __arm_streaming __arm_inout("za")
+{
+ svusmopa_za64_m (-1, pg, pg, u16, s16); /* { dg-error {passing -1 to argument 1 of 'svusmopa_za64_m', which expects a value in the range \[0, 7\]} } */
+ svusmopa_za64_m (8, pg, pg, u16, s16); /* { dg-error {passing 8 to argument 1 of 'svusmopa_za64_m', which expects a value in the range \[0, 7\]} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_m_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_m_1.c
new file mode 100644
index 0000000..44c3e48
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_m_1.c
@@ -0,0 +1,48 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target ("arch=armv9-a+sme")
+
+void
+f1 (svbool_t pg, svint8_t s8, svuint8_t u8, svint16_t s16, svint32_t s32,
+ svfloat16_t f16, svfloat32_t f32, svfloat64_t f64, uint32_t tile)
+ __arm_streaming __arm_inout("za")
+{
+ svmopa_za32_m (0, pg, pg, s8); /* { dg-error {too few arguments to function 'svmopa_za32_m'} } */
+ svmopa_za32_m (0, pg, pg, s8, s8, 0); /* { dg-error {too many arguments to function 'svmopa_za32_m'} } */
+ svmopa_za32_m (tile, pg, pg, s8, s8); /* { dg-error {argument 1 of 'svmopa_za32_m' must be an integer constant expression} } */
+ svmopa_za32_m (-1, pg, pg, s8, s8); /* { dg-error {passing -1 to argument 1 of 'svmopa_za32_m', which expects a value in the range \[0, 3\]} } */
+ svmopa_za32_m (4, pg, pg, s8, s8); /* { dg-error {passing 4 to argument 1 of 'svmopa_za32_m', which expects a value in the range \[0, 3\]} } */
+ svmopa_za32_m (0, u8, pg, s8, s8); /* { dg-error {passing 'svuint8_t' to argument 2 of 'svmopa_za32_m', which expects 'svbool_t'} } */
+ svmopa_za32_m (0, pg, u8, s8, s8); /* { dg-error {passing 'svuint8_t' to argument 3 of 'svmopa_za32_m', which expects 'svbool_t'} } */
+ svmopa_za32_m (0, pg, pg, tile, s8); /* { dg-error {passing 'uint32_t'.* to argument 4 of 'svmopa_za32_m', which expects an SVE type} } */
+ svmopa_za32_m (0, pg, pg, u8, s8); /* { dg-error {passing 'svint8_t'.* to argument 5 of 'svmopa_za32_m', but argument 4 had type 'svuint8_t'} } */
+ svmopa_za32_m (0, pg, pg, s8, f16); /* { dg-error {passing 'svfloat16_t'.* to argument 5 of 'svmopa_za32_m', but argument 4 had type 'svint8_t'} } */
+ svmopa_za32_m (0, pg, pg, pg, pg); /* { dg-error {'svmopa_za32_m' has no form that takes 'svbool_t' arguments} } */
+ svmopa_za32_m (0, pg, pg, s32, s32); /* { dg-error {'svmopa_za32_m' has no form that takes 'svint32_t' arguments} } */
+ svmopa_za32_m (0, pg, pg, f64, f64); /* { dg-error {'svmopa_za32_m' has no form that takes 'svfloat64_t' arguments} } */
+
+ svmopa_za64_m (0, pg, pg, s16, s16); /* { dg-error {ACLE function 'svmopa_za64_s16_m' requires ISA extension 'sme-i16i64'} } */
+}
+
+void
+f2 (svbool_t pg, svint8_t s8) __arm_streaming
+{
+ svmopa_za32_m (0, pg, pg, s8, s8); /* { dg-error {ACLE function 'svmopa_za32_s8_m' can only be called from a function that has 'za' state} } */
+}
+
+void
+f3 (svbool_t pg, svint8_t s8) __arm_inout("za")
+{
+ svmopa_za32_m (0, pg, pg, s8, s8); /* { dg-error {ACLE function 'svmopa_za32_s8_m' can only be called when SME streaming mode is enabled} } */
+}
+
+#pragma GCC target ("arch=armv9-a+sme-i16i64")
+
+void
+f4 (svbool_t pg, svint16_t s16) __arm_streaming __arm_inout("za")
+{
+ svmopa_za64_m (-1, pg, pg, s16, s16); /* { dg-error {passing -1 to argument 1 of 'svmopa_za64_m', which expects a value in the range \[0, 7\]} } */
+ svmopa_za64_m (8, pg, pg, s16, s16); /* { dg-error {passing 8 to argument 1 of 'svmopa_za64_m', which expects a value in the range \[0, 7\]} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_m_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_m_2.c
new file mode 100644
index 0000000..dfc1b73
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_m_2.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target ("arch=armv9-a+sme")
+
+void
+f1 (svbool_t pg, svfloat64_t f64) __arm_streaming __arm_inout("za")
+{
+ svmopa_za64_m (0, pg, pg, f64, f64); /* { dg-error {ACLE function 'svmopa_za64_f64_m' requires ISA extension 'sme-f64f64'} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_int_opt_single_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_int_opt_single_1.c
new file mode 100644
index 0000000..01cd88f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_int_opt_single_1.c
@@ -0,0 +1,61 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target ("+sme2")
+
+void
+f1 (svbool_t pg, svint16_t s16, svint8_t s8, svuint8_t u8,
+ svint16x2_t s16x2, svuint16x2_t u16x2, svint8x2_t s8x2, svuint8x2_t u8x2,
+ svint8x3_t s8x3, svuint8x3_t u8x3,
+ svint8x4_t s8x4, svuint8x4_t u8x4,
+ svint64x2_t s64x2, svuint64x2_t u64x2,
+ float f, double d)
+ __arm_streaming __arm_inout("za")
+{
+ svusdot_za32_vg1x2 (1, u8x2); /* { dg-error {too few arguments to function 'svusdot_za32_vg1x2'} } */
+ svusdot_za32_vg1x2 (1, u8x2, s8x2, s8x2); /* { dg-error {too many arguments to function 'svusdot_za32_vg1x2'} } */
+
+ svusdot_za32_vg1x2 (s8x2, u8x2, s8x2); /* { dg-error {passing 'svint8x2_t' to argument 1 of 'svusdot_za32_vg1x2', which expects 'uint32_t'} } */
+ svusdot_za32_vg1x2 (f, u8x2, s8x2);
+ svusdot_za32_vg1x2 (d, u8x2, s8x2);
+ svusdot_za32_vg1x2 (pg, u8x2, s8x2); /* { dg-error {passing 'svbool_t' to argument 1 of 'svusdot_za32_vg1x2', which expects 'uint32_t'} } */
+
+ svusdot_za32_vg1x2 (1, 1, s8x2); /* { dg-error {passing 'int' to argument 2 of 'svusdot_za32_vg1x2', which expects an SVE type rather than a scalar type} } */
+ svusdot_za32_vg1x2 (1, pg, s8x2); /* { dg-error {passing 'svbool_t' to argument 2 of 'svusdot_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svusdot_za32_vg1x2 (1, s8, s8x2); /* { dg-error {passing single vector 'svint8_t' to argument 2 of 'svusdot_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svusdot_za32_vg1x2 (1, u8x3, s8x3); /* { dg-error {passing 'svuint8x3_t' to argument 2 of 'svusdot_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svusdot_za32_vg1x2 (1, u8x4, s8x4); /* { dg-error {passing 'svuint8x4_t' to argument 2 of 'svusdot_za32_vg1x2', which expects a tuple of 2 vectors} } */
+
+ svusdot_za32_vg1x2 (1, u8x2, 1); /* { dg-error {passing 'int' to argument 3 of 'svusdot_za32_vg1x2', which expects an SVE type rather than a scalar type} } */
+ svusdot_za32_vg1x2 (1, u8x2, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svusdot_za32_vg1x2', which expects a vector of signed integers} } */
+ svusdot_za32_vg1x2 (1, u8x2, s16); /* { dg-error {arguments 2 and 3 of 'svusdot_za32_vg1x2' must have the same element size, but the values passed here have type 'svuint8x2_t' and 'svint16_t' respectively} } */
+ svusdot_za32_vg1x2 (1, u8x2, s16x2); /* { dg-error {arguments 2 and 3 of 'svusdot_za32_vg1x2' must have the same element size, but the values passed here have type 'svuint8x2_t' and 'svint16x2_t' respectively} } */
+ svusdot_za32_vg1x2 (1, u8x2, s8);
+ svusdot_za32_vg1x2 (1, u8x2, s8x2);
+ svusdot_za32_vg1x2 (1, u8x2, s8x3); /* { dg-error {passing 'svint8x3_t' to argument 3 of 'svusdot_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svusdot_za32_vg1x2 (1, u8x2, s8x4); /* { dg-error {passing 'svint8x4_t' to argument 3 of 'svusdot_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svusdot_za32_vg1x2 (1, u8x2, u8); /* { dg-error {passing 'svuint8_t' to argument 3 of 'svusdot_za32_vg1x2', which expects a vector of signed integers} } */
+ svusdot_za32_vg1x2 (1, u8x2, u8x2); /* { dg-error {passing 'svuint8x2_t' to argument 3 of 'svusdot_za32_vg1x2', which expects vectors of signed integers} } */
+ svusdot_za32_vg1x2 (1, u8x2, s8x3); /* { dg-error {passing 'svint8x3_t' to argument 3 of 'svusdot_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svusdot_za32_vg1x2 (1, u8x2, s8x4); /* { dg-error {passing 'svint8x4_t' to argument 3 of 'svusdot_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svusdot_za32_vg1x2 (1, s8x2, s8); /* { dg-error {'svusdot_za32_vg1x2' has no form that takes 'svint8x2_t' arguments} } */
+ svusdot_za32_vg1x2 (1, s8x2, s8x2); /* { dg-error {'svusdot_za32_vg1x2' has no form that takes 'svint8x2_t' arguments} } */
+
+ svusdot_za32_vg1x2 (1, u16x2, s16); /* { dg-error {'svusdot_za32_vg1x2' has no form that takes 'svuint16x2_t' arguments} } */
+ svusdot_za32_vg1x2 (1, u16x2, s16x2); /* { dg-error {'svusdot_za32_vg1x2' has no form that takes 'svuint16x2_t' arguments} } */
+ svusdot_za32_vg1x2 (1, s64x2, s64x2); /* { dg-error {'svusdot_za32_vg1x2' has no form that takes 'svint64x2_t' arguments} } */
+ svusdot_za32_vg1x2 (1, u64x2, s64x2); /* { dg-error {'svusdot_za32_vg1x2' has no form that takes 'svuint64x2_t' arguments} } */
+}
+
+void
+f2 (svint8x2_t s8x2, svuint8x2_t u8x2) __arm_streaming
+{
+ svusdot_za32_vg1x2 (0, u8x2, s8x2); /* { dg-error {ACLE function 'svusdot_za32_u8_vg1x2' can only be called from a function that has 'za' state} } */
+}
+
+void
+f3 (svint8x2_t s8x2, svuint8x2_t u8x2) __arm_inout("za")
+{
+ svusdot_za32_vg1x2 (0, u8x2, s8x2); /* { dg-error {ACLE function 'svusdot_za32_u8_vg1x2' can only be called when SME streaming mode is enabled} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_lane_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_lane_1.c
new file mode 100644
index 0000000..937d992
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_lane_1.c
@@ -0,0 +1,73 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target ("+sme2")
+
+void
+f1 (svbool_t pg, svint16_t s16, svuint16_t u16, svint32_t s32, svuint32_t u32,
+ svint16x2_t s16x2, svuint16x2_t u16x2,
+ svint32x2_t s32x2, svuint32x2_t u32x2,
+ svint16x3_t s16x3, svuint16x3_t u16x3,
+ float f, double d)
+ __arm_streaming __arm_inout("za")
+{
+ svmla_lane_za32_vg2x1 (0, s16, s16); /* { dg-error {too few arguments to function 'svmla_lane_za32_vg2x1'} } */
+ svmla_lane_za32_vg2x1 (0, s16, s16, 0, 0); /* { dg-error {too many arguments to function 'svmla_lane_za32_vg2x1'} } */
+
+ svmla_lane_za32_vg2x1 (s16, s16, s16, 0); /* { dg-error {passing 'svint16_t' to argument 1 of 'svmla_lane_za32_vg2x1', which expects 'uint32_t'} } */
+ svmla_lane_za32_vg2x1 (f, s16, s16, 0);
+ svmla_lane_za32_vg2x1 (d, s16, s16, 0);
+ svmla_lane_za32_vg2x1 (pg, s16, s16, 0); /* { dg-error {passing 'svbool_t' to argument 1 of 'svmla_lane_za32_vg2x1', which expects 'uint32_t'} } */
+
+ svmla_lane_za32_vg2x1 (0, 1, s16, 0); /* { dg-error {passing 'int' to argument 2 of 'svmla_lane_za32_vg2x1', which expects an SVE type rather than a scalar type} } */
+ svmla_lane_za32_vg2x1 (0, pg, s16, 0); /* { dg-error {passing 'svint16_t' to argument 3 of 'svmla_lane_za32_vg2x1', but argument 2 had type 'svbool_t'} } */
+ svmla_lane_za32_vg2x1 (0, s16x2, s16, 0); /* { dg-error {passing 'svint16x2_t' to argument 2 of 'svmla_lane_za32_vg2x1', which expects a single SVE vector rather than a tuple} } */
+ svmla_lane_za32_vg2x1 (0, s16x3, s16, 0); /* { dg-error {passing 'svint16x3_t' to argument 2 of 'svmla_lane_za32_vg2x1', which expects a single SVE vector rather than a tuple} } */
+
+ svmla_lane_za32_vg2x1 (0, s16, 1, 0); /* { dg-error {passing 'int' to argument 3 of 'svmla_lane_za32_vg2x1', which expects an SVE type rather than a scalar type} } */
+ svmla_lane_za32_vg2x1 (0, s16, pg, 0); /* { dg-error {passing 'svbool_t' to argument 3 of 'svmla_lane_za32_vg2x1', but argument 2 had type 'svint16_t'} } */
+ svmla_lane_za32_vg2x1 (0, s16, u16, 0); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svmla_lane_za32_vg2x1', but argument 2 had type 'svint16_t'} } */
+ svmla_lane_za32_vg2x1 (0, s16, s32, 0); /* { dg-error {passing 'svint32_t' to argument 3 of 'svmla_lane_za32_vg2x1', but argument 2 had type 'svint16_t'} } */
+ svmla_lane_za32_vg2x1 (0, s16, s16x2, 0); /* { dg-error {passing 'svint16x2_t' to argument 3 of 'svmla_lane_za32_vg2x1', which expects a single SVE vector rather than a tuple} } */
+ svmla_lane_za32_vg2x1 (0, u16, u16, 0);
+ svmla_lane_za32_vg2x1 (0, u16, s16, 0); /* { dg-error {passing 'svint16_t' to argument 3 of 'svmla_lane_za32_vg2x1', but argument 2 had type 'svuint16_t'} } */
+ svmla_lane_za32_vg2x1 (0, s32, s32, 0); /* { dg-error {'svmla_lane_za32_vg2x1' has no form that takes 'svint32_t' arguments} } */
+ svmla_lane_za32_vg2x1 (0, u32, u32, 0); /* { dg-error {'svmla_lane_za32_vg2x1' has no form that takes 'svuint32_t' arguments} } */
+
+ svmla_lane_za32_vg2x1 (0, s16, s16, -1); /* { dg-error {passing -1 to argument 4 of 'svmla_lane_za32_vg2x1', which expects a value in the range \[0, 7\]} } */
+ svmla_lane_za32_vg2x1 (0, s16, s16, 7);
+ svmla_lane_za32_vg2x1 (0, s16, s16, 8); /* { dg-error {passing 8 to argument 4 of 'svmla_lane_za32_vg2x1', which expects a value in the range \[0, 7\]} } */
+ svmla_lane_za32_vg2x1 (0, s16, s16, f); /* { dg-error {argument 4 of 'svmla_lane_za32_vg2x1' must be an integer constant expression} } */
+}
+
+void
+f2 (svint16x2_t s16x2, svint16_t s16) __arm_streaming
+{
+ svmla_lane_za32_vg2x1 (0, s16, s16, 0); /* { dg-error {ACLE function 'svmla_lane_za32_s16_vg2x1' can only be called from a function that has 'za' state} } */
+}
+
+void
+f3 (svint16x2_t s16x2, svint16_t s16) __arm_inout("za")
+{
+ svmla_lane_za32_vg2x1 (0, s16, s16, 0); /* { dg-error {ACLE function 'svmla_lane_za32_s16_vg2x1' can only be called when SME streaming mode is enabled} } */
+}
+
+#pragma GCC target ("+sme-i16i64")
+
+void
+f4 (svint16_t s16, svuint16_t u16,
+ svint32_t s32, svuint32_t u32,
+ svint64_t s64, svuint64_t u64)
+ __arm_streaming __arm_inout("za")
+{
+ svmla_lane_za64_vg4x1 (0, s16, s16, 0);
+ svmla_lane_za64_vg4x1 (0, u16, u16, 0);
+ svmla_lane_za64_vg4x1 (0, s16, s16, -1); /* { dg-error {passing -1 to argument 4 of 'svmla_lane_za64_vg4x1', which expects a value in the range \[0, 7\]} } */
+ svmla_lane_za64_vg4x1 (0, s16, s16, 7);
+ svmla_lane_za64_vg4x1 (0, u16, u16, 8); /* { dg-error {passing 8 to argument 4 of 'svmla_lane_za64_vg4x1', which expects a value in the range \[0, 7\]} } */
+ svmla_lane_za64_vg4x1 (0, s32, s32, 0); /* { dg-error {'svmla_lane_za64_vg4x1' has no form that takes 'svint32_t' arguments} } */
+ svmla_lane_za64_vg4x1 (0, u32, u32, 0); /* { dg-error {'svmla_lane_za64_vg4x1' has no form that takes 'svuint32_t' arguments} } */
+ svmla_lane_za64_vg4x1 (0, s64, s64, 0); /* { dg-error {'svmla_lane_za64_vg4x1' has no form that takes 'svint64_t' arguments} } */
+ svmla_lane_za64_vg4x1 (0, u64, u64, 0); /* { dg-error {'svmla_lane_za64_vg4x1' has no form that takes 'svuint64_t' arguments} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_lane_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_lane_2.c
new file mode 100644
index 0000000..126a764
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_lane_2.c
@@ -0,0 +1,78 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target ("+sme2")
+
+void
+f1 (svbool_t pg, svint16_t s16, svuint16_t u16, svint32_t s32, svuint32_t u32,
+ svint16x2_t s16x2, svuint16x2_t u16x2,
+ svint32x2_t s32x2, svuint32x2_t u32x2,
+ svint16x3_t s16x3, svuint16x3_t u16x3,
+ svint16x4_t s16x4, svuint16x4_t u16x4,
+ float f, double d)
+ __arm_streaming __arm_inout("za")
+{
+ svmla_lane_za32_vg2x2 (0, s16x2, s16); /* { dg-error {too few arguments to function 'svmla_lane_za32_vg2x2'} } */
+ svmla_lane_za32_vg2x2 (0, s16x2, s16, 0, 0); /* { dg-error {too many arguments to function 'svmla_lane_za32_vg2x2'} } */
+
+ svmla_lane_za32_vg2x2 (s16x2, s16x2, s16, 0); /* { dg-error {passing 'svint16x2_t' to argument 1 of 'svmla_lane_za32_vg2x2', which expects 'uint32_t'} } */
+ svmla_lane_za32_vg2x2 (f, s16x2, s16, 0);
+ svmla_lane_za32_vg2x2 (d, s16x2, s16, 0);
+ svmla_lane_za32_vg2x2 (pg, s16x2, s16, 0); /* { dg-error {passing 'svbool_t' to argument 1 of 'svmla_lane_za32_vg2x2', which expects 'uint32_t'} } */
+
+ svmla_lane_za32_vg2x2 (0, 1, s16, 0); /* { dg-error {passing 'int' to argument 2 of 'svmla_lane_za32_vg2x2', which expects an SVE type rather than a scalar type} } */
+ svmla_lane_za32_vg2x2 (0, pg, s16, 0); /* { dg-error {passing 'svbool_t' to argument 2 of 'svmla_lane_za32_vg2x2', which expects a tuple of 2 vectors} } */
+ svmla_lane_za32_vg2x2 (0, s16, s16, 0); /* { dg-error {passing single vector 'svint16_t' to argument 2 of 'svmla_lane_za32_vg2x2', which expects a tuple of 2 vectors} } */
+ svmla_lane_za32_vg2x2 (0, s16x3, s16, 0); /* { dg-error {passing 'svint16x3_t' to argument 2 of 'svmla_lane_za32_vg2x2', which expects a tuple of 2 vectors} } */
+ svmla_lane_za32_vg2x2 (0, s16x4, s16, 0); /* { dg-error {passing 'svint16x4_t' to argument 2 of 'svmla_lane_za32_vg2x2', which expects a tuple of 2 vectors} } */
+
+ svmla_lane_za32_vg2x2 (0, s16x2, 1, 0); /* { dg-error {passing 'int' to argument 3 of 'svmla_lane_za32_vg2x2', which expects an SVE type rather than a scalar type} } */
+ svmla_lane_za32_vg2x2 (0, s16x2, pg, 0); /* { dg-error {passing 'svbool_t' to argument 3 of 'svmla_lane_za32_vg2x2', but argument 2 was a tuple of 'svint16_t'} } */
+ svmla_lane_za32_vg2x2 (0, s16x2, u16, 0); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svmla_lane_za32_vg2x2', but argument 2 was a tuple of 'svint16_t'} } */
+ svmla_lane_za32_vg2x2 (0, s16x2, s32, 0); /* { dg-error {passing 'svint32_t' to argument 3 of 'svmla_lane_za32_vg2x2', but argument 2 was a tuple of 'svint16_t'} } */
+ svmla_lane_za32_vg2x2 (0, s16x2, s16x2, 0); /* { dg-error {passing 'svint16x2_t' to argument 3 of 'svmla_lane_za32_vg2x2', which expects a single SVE vector rather than a tuple} } */
+ svmla_lane_za32_vg2x2 (0, u16x2, u16, 0);
+ svmla_lane_za32_vg2x2 (0, u16x2, s16, 0); /* { dg-error {passing 'svint16_t' to argument 3 of 'svmla_lane_za32_vg2x2', but argument 2 was a tuple of 'svuint16_t'} } */
+ svmla_lane_za32_vg2x2 (0, s32x2, s32, 0); /* { dg-error {'svmla_lane_za32_vg2x2' has no form that takes 'svint32x2_t' arguments} } */
+ svmla_lane_za32_vg2x2 (0, u32x2, u32, 0); /* { dg-error {'svmla_lane_za32_vg2x2' has no form that takes 'svuint32x2_t' arguments} } */
+
+ svmla_lane_za32_vg2x2 (0, s16x2, s16, -1); /* { dg-error {passing -1 to argument 4 of 'svmla_lane_za32_vg2x2', which expects a value in the range \[0, 7\]} } */
+ svmla_lane_za32_vg2x2 (0, s16x2, s16, 7);
+ svmla_lane_za32_vg2x2 (0, s16x2, s16, 8); /* { dg-error {passing 8 to argument 4 of 'svmla_lane_za32_vg2x2', which expects a value in the range \[0, 7\]} } */
+ svmla_lane_za32_vg2x2 (0, s16x2, s16, f); /* { dg-error {argument 4 of 'svmla_lane_za32_vg2x2' must be an integer constant expression} } */
+}
+
+void
+f2 (svint16x2_t s16x2, svint16_t s16) __arm_streaming
+{
+ svmla_lane_za32_vg2x2 (0, s16x2, s16, 0); /* { dg-error {ACLE function 'svmla_lane_za32_s16_vg2x2' can only be called from a function that has 'za' state} } */
+}
+
+void
+f3 (svint16x2_t s16x2, svint16_t s16) __arm_inout("za")
+{
+ svmla_lane_za32_vg2x2 (0, s16x2, s16, 0); /* { dg-error {ACLE function 'svmla_lane_za32_s16_vg2x2' can only be called when SME streaming mode is enabled} } */
+}
+
+#pragma GCC target ("+sme-i16i64")
+
+void
+f4 (svint16_t s16, svuint16_t u16,
+ svint16x2_t s16x2, svuint16x2_t u16x2,
+ svint32_t s32, svuint32_t u32,
+ svint32x2_t s32x2, svuint32x2_t u32x2,
+ svint64_t s64, svuint64_t u64,
+ svint64x2_t s64x2, svuint64x2_t u64x2)
+ __arm_streaming __arm_inout("za")
+{
+ svmla_lane_za64_vg4x2 (0, s16x2, s16, 0);
+ svmla_lane_za64_vg4x2 (0, u16x2, u16, 0);
+ svmla_lane_za64_vg4x2 (0, s16x2, s16, -1); /* { dg-error {passing -1 to argument 4 of 'svmla_lane_za64_vg4x2', which expects a value in the range \[0, 7\]} } */
+ svmla_lane_za64_vg4x2 (0, s16x2, s16, 7);
+ svmla_lane_za64_vg4x2 (0, u16x2, u16, 8); /* { dg-error {passing 8 to argument 4 of 'svmla_lane_za64_vg4x2', which expects a value in the range \[0, 7\]} } */
+ svmla_lane_za64_vg4x2 (0, s32x2, s32, 0); /* { dg-error {'svmla_lane_za64_vg4x2' has no form that takes 'svint32x2_t' arguments} } */
+ svmla_lane_za64_vg4x2 (0, u32x2, u32, 0); /* { dg-error {'svmla_lane_za64_vg4x2' has no form that takes 'svuint32x2_t' arguments} } */
+ svmla_lane_za64_vg4x2 (0, s64x2, s64, 0); /* { dg-error {'svmla_lane_za64_vg4x2' has no form that takes 'svint64x2_t' arguments} } */
+ svmla_lane_za64_vg4x2 (0, u64x2, u64, 0); /* { dg-error {'svmla_lane_za64_vg4x2' has no form that takes 'svuint64x2_t' arguments} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_lane_3.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_lane_3.c
new file mode 100644
index 0000000..17bed0c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_lane_3.c
@@ -0,0 +1,78 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target ("+sme2")
+
+void
+f1 (svbool_t pg, svint16_t s16, svuint16_t u16, svint32_t s32, svuint32_t u32,
+ svint16x2_t s16x2, svuint16x2_t u16x2,
+ svint16x3_t s16x3, svuint16x3_t u16x3,
+ svint16x4_t s16x4, svuint16x4_t u16x4,
+ svint32x4_t s32x4, svuint32x4_t u32x4,
+ float f, double d)
+ __arm_streaming __arm_inout("za")
+{
+ svmla_lane_za32_vg2x4 (0, s16x4, s16); /* { dg-error {too few arguments to function 'svmla_lane_za32_vg2x4'} } */
+ svmla_lane_za32_vg2x4 (0, s16x4, s16, 0, 0); /* { dg-error {too many arguments to function 'svmla_lane_za32_vg2x4'} } */
+
+ svmla_lane_za32_vg2x4 (s16x4, s16x4, s16, 0); /* { dg-error {passing 'svint16x4_t' to argument 1 of 'svmla_lane_za32_vg2x4', which expects 'uint32_t'} } */
+ svmla_lane_za32_vg2x4 (f, s16x4, s16, 0);
+ svmla_lane_za32_vg2x4 (d, s16x4, s16, 0);
+ svmla_lane_za32_vg2x4 (pg, s16x4, s16, 0); /* { dg-error {passing 'svbool_t' to argument 1 of 'svmla_lane_za32_vg2x4', which expects 'uint32_t'} } */
+
+ svmla_lane_za32_vg2x4 (0, 1, s16, 0); /* { dg-error {passing 'int' to argument 2 of 'svmla_lane_za32_vg2x4', which expects an SVE type rather than a scalar type} } */
+ svmla_lane_za32_vg2x4 (0, pg, s16, 0); /* { dg-error {passing 'svbool_t' to argument 2 of 'svmla_lane_za32_vg2x4', which expects a tuple of 4 vectors} } */
+ svmla_lane_za32_vg2x4 (0, s16, s16, 0); /* { dg-error {passing single vector 'svint16_t' to argument 2 of 'svmla_lane_za32_vg2x4', which expects a tuple of 4 vectors} } */
+ svmla_lane_za32_vg2x4 (0, s16x2, s16, 0); /* { dg-error {passing 'svint16x2_t' to argument 2 of 'svmla_lane_za32_vg2x4', which expects a tuple of 4 vectors} } */
+ svmla_lane_za32_vg2x4 (0, s16x3, s16, 0); /* { dg-error {passing 'svint16x3_t' to argument 2 of 'svmla_lane_za32_vg2x4', which expects a tuple of 4 vectors} } */
+
+ svmla_lane_za32_vg2x4 (0, s16x4, 1, 0); /* { dg-error {passing 'int' to argument 3 of 'svmla_lane_za32_vg2x4', which expects an SVE type rather than a scalar type} } */
+ svmla_lane_za32_vg2x4 (0, s16x4, pg, 0); /* { dg-error {passing 'svbool_t' to argument 3 of 'svmla_lane_za32_vg2x4', but argument 2 was a tuple of 'svint16_t'} } */
+ svmla_lane_za32_vg2x4 (0, s16x4, u16, 0); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svmla_lane_za32_vg2x4', but argument 2 was a tuple of 'svint16_t'} } */
+ svmla_lane_za32_vg2x4 (0, s16x4, s32, 0); /* { dg-error {passing 'svint32_t' to argument 3 of 'svmla_lane_za32_vg2x4', but argument 2 was a tuple of 'svint16_t'} } */
+ svmla_lane_za32_vg2x4 (0, s16x4, s16x4, 0); /* { dg-error {passing 'svint16x4_t' to argument 3 of 'svmla_lane_za32_vg2x4', which expects a single SVE vector rather than a tuple} } */
+ svmla_lane_za32_vg2x4 (0, u16x4, u16, 0);
+ svmla_lane_za32_vg2x4 (0, u16x4, s16, 0); /* { dg-error {passing 'svint16_t' to argument 3 of 'svmla_lane_za32_vg2x4', but argument 2 was a tuple of 'svuint16_t'} } */
+ svmla_lane_za32_vg2x4 (0, s32x4, s32, 0); /* { dg-error {'svmla_lane_za32_vg2x4' has no form that takes 'svint32x4_t' arguments} } */
+ svmla_lane_za32_vg2x4 (0, u32x4, u32, 0); /* { dg-error {'svmla_lane_za32_vg2x4' has no form that takes 'svuint32x4_t' arguments} } */
+
+ svmla_lane_za32_vg2x4 (0, s16x4, s16, -1); /* { dg-error {passing -1 to argument 4 of 'svmla_lane_za32_vg2x4', which expects a value in the range \[0, 7\]} } */
+ svmla_lane_za32_vg2x4 (0, s16x4, s16, 7);
+ svmla_lane_za32_vg2x4 (0, s16x4, s16, 8); /* { dg-error {passing 8 to argument 4 of 'svmla_lane_za32_vg2x4', which expects a value in the range \[0, 7\]} } */
+ svmla_lane_za32_vg2x4 (0, s16x4, s16, f); /* { dg-error {argument 4 of 'svmla_lane_za32_vg2x4' must be an integer constant expression} } */
+}
+
+void
+f2 (svint16x4_t s16x4, svint16_t s16) __arm_streaming
+{
+ svmla_lane_za32_vg2x4 (0, s16x4, s16, 0); /* { dg-error {ACLE function 'svmla_lane_za32_s16_vg2x4' can only be called from a function that has 'za' state} } */
+}
+
+void
+f3 (svint16x4_t s16x4, svint16_t s16) __arm_inout("za")
+{
+ svmla_lane_za32_vg2x4 (0, s16x4, s16, 0); /* { dg-error {ACLE function 'svmla_lane_za32_s16_vg2x4' can only be called when SME streaming mode is enabled} } */
+}
+
+#pragma GCC target ("+sme-i16i64")
+
+void
+f4 (svint16_t s16, svuint16_t u16,
+ svint16x4_t s16x4, svuint16x4_t u16x4,
+ svint32_t s32, svuint32_t u32,
+ svint32x4_t s32x4, svuint32x4_t u32x4,
+ svint64_t s64, svuint64_t u64,
+ svint64x4_t s64x4, svuint64x4_t u64x4)
+ __arm_streaming __arm_inout("za")
+{
+ svmla_lane_za64_vg4x4 (0, s16x4, s16, 0);
+ svmla_lane_za64_vg4x4 (0, u16x4, u16, 0);
+ svmla_lane_za64_vg4x4 (0, s16x4, s16, -1); /* { dg-error {passing -1 to argument 4 of 'svmla_lane_za64_vg4x4', which expects a value in the range \[0, 7\]} } */
+ svmla_lane_za64_vg4x4 (0, s16x4, s16, 7);
+ svmla_lane_za64_vg4x4 (0, u16x4, u16, 8); /* { dg-error {passing 8 to argument 4 of 'svmla_lane_za64_vg4x4', which expects a value in the range \[0, 7\]} } */
+ svmla_lane_za64_vg4x4 (0, s32x4, s32, 0); /* { dg-error {'svmla_lane_za64_vg4x4' has no form that takes 'svint32x4_t' arguments} } */
+ svmla_lane_za64_vg4x4 (0, u32x4, u32, 0); /* { dg-error {'svmla_lane_za64_vg4x4' has no form that takes 'svuint32x4_t' arguments} } */
+ svmla_lane_za64_vg4x4 (0, s64x4, s64, 0); /* { dg-error {'svmla_lane_za64_vg4x4' has no form that takes 'svint64x4_t' arguments} } */
+ svmla_lane_za64_vg4x4 (0, u64x4, u64, 0); /* { dg-error {'svmla_lane_za64_vg4x4' has no form that takes 'svuint64x4_t' arguments} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_lane_4.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_lane_4.c
new file mode 100644
index 0000000..d2a67c6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_lane_4.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target ("+sme2")
+
+void
+f1 (svint8_t s8, svuint8_t u8, svint16_t s16, svuint16_t u16,
+ svint32_t s32, svuint32_t u32, svfloat32_t f32,
+ svint32x2_t s32x2, svuint32x2_t u32x2, svfloat32x2_t f32x2, int i)
+ __arm_streaming __arm_inout("za")
+{
+ svmla_lane_za32_vg4x1 (0, s8, s8, -1); /* { dg-error {passing -1 to argument 4 of 'svmla_lane_za32_vg4x1', which expects a value in the range \[0, 15\]} } */
+ svmla_lane_za32_vg4x1 (0, u8, u8, 0);
+ svmla_lane_za32_vg4x1 (0, s8, s8, 15);
+ svmla_lane_za32_vg4x1 (0, u8, u8, 16); /* { dg-error {passing 16 to argument 4 of 'svmla_lane_za32_vg4x1', which expects a value in the range \[0, 15\]} } */
+ svmla_lane_za32_vg4x1 (0, s16, s16, 0); /* { dg-error {'svmla_lane_za32_vg4x1' has no form that takes 'svint16_t' arguments} } */
+ svmla_lane_za32_vg4x1 (0, u16, u16, 0); /* { dg-error {'svmla_lane_za32_vg4x1' has no form that takes 'svuint16_t' arguments} } */
+
+ svmla_lane_za32_vg1x2 (0, s32x2, s32, 0); /* { dg-error {'svmla_lane_za32_vg1x2' has no form that takes 'svint32x2_t' arguments} } */
+ svmla_lane_za32_vg1x2 (0, u32x2, u32, 0); /* { dg-error {'svmla_lane_za32_vg1x2' has no form that takes 'svuint32x2_t' arguments} } */
+ svmla_lane_za32_vg1x2 (0, f32x2, f32, 0);
+ svmla_lane_za32_vg1x2 (0, f32x2, f32, -1); /* { dg-error {passing -1 to argument 4 of 'svmla_lane_za32_vg1x2', which expects a value in the range \[0, 3\]} } */
+ svmla_lane_za32_vg1x2 (0, f32x2, f32, 4); /* { dg-error {passing 4 to argument 4 of 'svmla_lane_za32_vg1x2', which expects a value in the range \[0, 3\]} } */
+ svmla_lane_za32_vg1x2 (0, f32x2, f32, i); /* { dg-error {argument 4 of 'svmla_lane_za32_vg1x2' must be an integer constant expression} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_opt_single_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_opt_single_1.c
new file mode 100644
index 0000000..8307a28
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_opt_single_1.c
@@ -0,0 +1,76 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target ("+sme2")
+
+void
+f1 (svbool_t pg, svint16_t s16, svint32_t s32, svuint32_t u32,
+ svint16x2_t s16x2, svint32x2_t s32x2, svuint32x2_t u32x2,
+ svint32x3_t s32x3, svuint32x3_t u32x3,
+ svint32x4_t s32x4, svuint32x4_t u32x4,
+ svint64x2_t s64x2, svuint64x2_t u64x2,
+ float f, double d)
+ __arm_streaming __arm_inout("za")
+{
+ svadd_write_za32_vg1x2 (1, s32x2); /* { dg-error {too few arguments to function 'svadd_write_za32_vg1x2'} } */
+ svadd_write_za32_vg1x2 (1, s32x2, s32x2, s32x2); /* { dg-error {too many arguments to function 'svadd_write_za32_vg1x2'} } */
+
+ svadd_write_za32_vg1x2 (s32x2, s32x2, s32x2); /* { dg-error {passing 'svint32x2_t' to argument 1 of 'svadd_write_za32_vg1x2', which expects 'uint32_t'} } */
+ svadd_write_za32_vg1x2 (f, s32x2, s32x2);
+ svadd_write_za32_vg1x2 (d, s32x2, s32x2);
+ svadd_write_za32_vg1x2 (pg, s32x2, s32x2); /* { dg-error {passing 'svbool_t' to argument 1 of 'svadd_write_za32_vg1x2', which expects 'uint32_t'} } */
+
+ svadd_write_za32_vg1x2 (1, 1, s32x2); /* { dg-error {passing 'int' to argument 2 of 'svadd_write_za32_vg1x2', which expects an SVE type rather than a scalar type} } */
+ svadd_write_za32_vg1x2 (1, pg, s32x2); /* { dg-error {passing 'svbool_t' to argument 2 of 'svadd_write_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svadd_write_za32_vg1x2 (1, s32, s32x2); /* { dg-error {passing single vector 'svint32_t' to argument 2 of 'svadd_write_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svadd_write_za32_vg1x2 (1, s32x3, s32x3); /* { dg-error {passing 'svint32x3_t' to argument 2 of 'svadd_write_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svadd_write_za32_vg1x2 (1, s32x4, s32x4); /* { dg-error {passing 'svint32x4_t' to argument 2 of 'svadd_write_za32_vg1x2', which expects a tuple of 2 vectors} } */
+
+ svadd_write_za32_vg1x2 (1, s32x2, 1); /* { dg-error {passing 'int' to argument 3 of 'svadd_write_za32_vg1x2', which expects an SVE type rather than a scalar type} } */
+ svadd_write_za32_vg1x2 (1, s32x2, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svadd_write_za32_vg1x2', but argument 2 was a tuple of 'svint32_t'} } */
+ svadd_write_za32_vg1x2 (1, s32x2, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svadd_write_za32_vg1x2', but argument 2 was a tuple of 'svint32_t'} } */
+ svadd_write_za32_vg1x2 (1, s32x2, s16x2); /* { dg-error {passing 'svint16x2_t' to argument 3 of 'svadd_write_za32_vg1x2', but argument 2 had type 'svint32x2_t'} } */
+ svadd_write_za32_vg1x2 (1, s32x2, s32);
+ svadd_write_za32_vg1x2 (1, s32x2, s32x2);
+ svadd_write_za32_vg1x2 (1, s32x2, s32x3); /* { dg-error {passing 'svint32x3_t' to argument 3 of 'svadd_write_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svadd_write_za32_vg1x2 (1, s32x2, s32x4); /* { dg-error {passing 'svint32x4_t' to argument 3 of 'svadd_write_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svadd_write_za32_vg1x2 (1, s32x2, u32); /* { dg-error {passing 'svuint32_t' to argument 3 of 'svadd_write_za32_vg1x2', but argument 2 was a tuple of 'svint32_t'} } */
+ svadd_write_za32_vg1x2 (1, s32x2, u32x2); /* { dg-error {passing 'svuint32x2_t' to argument 3 of 'svadd_write_za32_vg1x2', but argument 2 had type 'svint32x2_t'} } */
+ svadd_write_za32_vg1x2 (1, s32x2, u32x3); /* { dg-error {passing 'svuint32x3_t' to argument 3 of 'svadd_write_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svadd_write_za32_vg1x2 (1, s32x2, u32x4); /* { dg-error {passing 'svuint32x4_t' to argument 3 of 'svadd_write_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svadd_write_za32_vg1x2 (1, u32x2, s32); /* { dg-error {passing 'svint32_t' to argument 3 of 'svadd_write_za32_vg1x2', but argument 2 was a tuple of 'svuint32_t'} } */
+ svadd_write_za32_vg1x2 (1, u32x2, s32x2); /* { dg-error {passing 'svint32x2_t' to argument 3 of 'svadd_write_za32_vg1x2', but argument 2 had type 'svuint32x2_t'} } */
+ svadd_write_za32_vg1x2 (1, u32x2, u32);
+ svadd_write_za32_vg1x2 (1, u32x2, u32x2);
+
+ svadd_write_za32_vg1x2 (1, s16x2, s16); /* { dg-error {'svadd_write_za32_vg1x2' has no form that takes 'svint16x2_t' arguments} } */
+ svadd_write_za32_vg1x2 (1, s16x2, s16x2); /* { dg-error {'svadd_write_za32_vg1x2' has no form that takes 'svint16x2_t' arguments} } */
+ svadd_write_za32_vg1x2 (1, s64x2, s64x2); /* { dg-error {'svadd_write_za32_vg1x2' has no form that takes 'svint64x2_t' arguments} } */
+ svadd_write_za32_vg1x2 (1, u64x2, u64x2); /* { dg-error {'svadd_write_za32_vg1x2' has no form that takes 'svuint64x2_t' arguments} } */
+}
+
+void
+f2 (svint32x2_t s32x2) __arm_streaming
+{
+ svadd_write_za32_vg1x2 (0, s32x2, s32x2); /* { dg-error {ACLE function 'svadd_write_za32_s32_vg1x2' can only be called from a function that has 'za' state} } */
+}
+
+void
+f3 (svint32x2_t s32x2) __arm_inout("za")
+{
+ svadd_write_za32_vg1x2 (0, s32x2, s32x2); /* { dg-error {ACLE function 'svadd_write_za32_s32_vg1x2' can only be called when SME streaming mode is enabled} } */
+}
+
+#pragma GCC target ("+sme-i16i64")
+
+void
+f4 (svint32x2_t s32x2, svuint32x2_t u32x2,
+ svint64x2_t s64x2, svuint64x2_t u64x2)
+ __arm_streaming __arm_inout("za")
+{
+ svadd_write_za64_vg1x2 (1, s32x2, s32x2); /* { dg-error {'svadd_write_za64_vg1x2' has no form that takes 'svint32x2_t' arguments} } */
+ svadd_write_za64_vg1x2 (1, u32x2, u32x2); /* { dg-error {'svadd_write_za64_vg1x2' has no form that takes 'svuint32x2_t' arguments} } */
+ svadd_write_za64_vg1x2 (1, s64x2, s64x2);
+ svadd_write_za64_vg1x2 (1, u64x2, u64x2);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_opt_single_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_opt_single_2.c
new file mode 100644
index 0000000..181f509
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_opt_single_2.c
@@ -0,0 +1,29 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target ("+sme2")
+
+void
+f1 (svbool_t pg, svint16_t s16, svint32_t s32, svuint32_t u32,
+ svint16x2_t s16x2, svint32x2_t s32x2, svuint32x2_t u32x2,
+ svint32x3_t s32x3, svuint32x3_t u32x3,
+ svint32x4_t s32x4, svuint32x4_t u32x4,
+ svint64x2_t s64x2, svuint64x2_t u64x2,
+ float f, double d)
+ __arm_streaming __arm_inout("za")
+{
+ svadd_write_za32_vg1x4 (1, s32x4); /* { dg-error {too few arguments to function 'svadd_write_za32_vg1x4'} } */
+ svadd_write_za32_vg1x4 (1, s32x4, s32x4, s32x4); /* { dg-error {too many arguments to function 'svadd_write_za32_vg1x4'} } */
+
+ svadd_write_za32_vg1x4 (s32x4, s32x4, s32x4); /* { dg-error {passing 'svint32x4_t' to argument 1 of 'svadd_write_za32_vg1x4', which expects 'uint32_t'} } */
+ svadd_write_za32_vg1x4 (f, s32x4, s32x4);
+ svadd_write_za32_vg1x4 (d, s32x4, s32x4);
+ svadd_write_za32_vg1x4 (pg, s32x4, s32x4); /* { dg-error {passing 'svbool_t' to argument 1 of 'svadd_write_za32_vg1x4', which expects 'uint32_t'} } */
+
+ svadd_write_za32_vg1x4 (1, 1, s32x4); /* { dg-error {passing 'int' to argument 2 of 'svadd_write_za32_vg1x4', which expects an SVE type rather than a scalar} } */
+ svadd_write_za32_vg1x4 (1, pg, s32x4); /* { dg-error {passing 'svbool_t' to argument 2 of 'svadd_write_za32_vg1x4', which expects a tuple of 4 vectors} } */
+ svadd_write_za32_vg1x4 (1, s32, s32x4); /* { dg-error {passing single vector 'svint32_t' to argument 2 of 'svadd_write_za32_vg1x4', which expects a tuple of 4 vectors} } */
+ svadd_write_za32_vg1x4 (1, s32x2, s32x2); /* { dg-error {passing 'svint32x2_t' to argument 2 of 'svadd_write_za32_vg1x4', which expects a tuple of 4 vectors} } */
+ svadd_write_za32_vg1x4 (1, s32x3, s32x3); /* { dg-error {passing 'svint32x3_t' to argument 2 of 'svadd_write_za32_vg1x4', which expects a tuple of 4 vectors} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_opt_single_3.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_opt_single_3.c
new file mode 100644
index 0000000..8c8414e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_opt_single_3.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target ("+sme2+nosme-i16i64")
+
+void
+f1 (svint32x2_t s32x2, svuint32x2_t u32x2,
+ svint64x2_t s64x2, svuint64x2_t u64x2)
+ __arm_streaming __arm_inout("za")
+{
+ svadd_write_za64_vg1x2 (1, s32x2, s32x2); /* { dg-error {'svadd_write_za64_vg1x2' has no form that takes 'svint32x2_t' arguments} } */
+ svadd_write_za64_vg1x2 (1, u32x2, u32x2); /* { dg-error {'svadd_write_za64_vg1x2' has no form that takes 'svuint32x2_t' arguments} } */
+ svadd_write_za64_vg1x2 (1, s64x2, s64x2); /* { dg-error {ACLE function 'svadd_write_za64_s64_vg1x2' requires ISA extension 'sme-i16i64'} } */
+ svadd_write_za64_vg1x2 (1, u64x2, u64x2);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_uint_opt_single_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_uint_opt_single_1.c
new file mode 100644
index 0000000..b00c043
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_slice_uint_opt_single_1.c
@@ -0,0 +1,61 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target ("+sme2")
+
+void
+f1 (svbool_t pg, svuint16_t u16, svint8_t s8, svuint8_t u8,
+ svint16x2_t s16x2, svuint16x2_t u16x2, svint8x2_t s8x2, svuint8x2_t u8x2,
+ svint8x3_t s8x3, svuint8x3_t u8x3,
+ svint8x4_t s8x4, svuint8x4_t u8x4,
+ svint64x2_t s64x2, svuint64x2_t u64x2,
+ float f, double d)
+ __arm_streaming __arm_inout("za")
+{
+ svsudot_za32_vg1x2 (1, s8x2); /* { dg-error {too few arguments to function 'svsudot_za32_vg1x2'} } */
+ svsudot_za32_vg1x2 (1, s8x2, u8x2, u8x2); /* { dg-error {too many arguments to function 'svsudot_za32_vg1x2'} } */
+
+ svsudot_za32_vg1x2 (s8x2, s8x2, u8x2); /* { dg-error {passing 'svint8x2_t' to argument 1 of 'svsudot_za32_vg1x2', which expects 'uint32_t'} } */
+ svsudot_za32_vg1x2 (f, s8x2, u8x2);
+ svsudot_za32_vg1x2 (d, s8x2, u8x2);
+ svsudot_za32_vg1x2 (pg, s8x2, u8x2); /* { dg-error {passing 'svbool_t' to argument 1 of 'svsudot_za32_vg1x2', which expects 'uint32_t'} } */
+
+ svsudot_za32_vg1x2 (1, 1, u8x2); /* { dg-error {passing 'int' to argument 2 of 'svsudot_za32_vg1x2', which expects an SVE type rather than a scalar type} } */
+ svsudot_za32_vg1x2 (1, pg, u8x2); /* { dg-error {passing 'svbool_t' to argument 2 of 'svsudot_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svsudot_za32_vg1x2 (1, s8, u8x2); /* { dg-error {passing single vector 'svint8_t' to argument 2 of 'svsudot_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svsudot_za32_vg1x2 (1, s8x3, u8x3); /* { dg-error {passing 'svint8x3_t' to argument 2 of 'svsudot_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svsudot_za32_vg1x2 (1, s8x4, u8x4); /* { dg-error {passing 'svint8x4_t' to argument 2 of 'svsudot_za32_vg1x2', which expects a tuple of 2 vectors} } */
+
+ svsudot_za32_vg1x2 (1, s8x2, 1); /* { dg-error {passing 'int' to argument 3 of 'svsudot_za32_vg1x2', which expects an SVE type rather than a scalar type} } */
+ svsudot_za32_vg1x2 (1, s8x2, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svsudot_za32_vg1x2', which expects a vector of unsigned integers} } */
+ svsudot_za32_vg1x2 (1, s8x2, u16); /* { dg-error {arguments 2 and 3 of 'svsudot_za32_vg1x2' must have the same element size, but the values passed here have type 'svint8x2_t' and 'svuint16_t' respectively} } */
+ svsudot_za32_vg1x2 (1, s8x2, u16x2); /* { dg-error {arguments 2 and 3 of 'svsudot_za32_vg1x2' must have the same element size, but the values passed here have type 'svint8x2_t' and 'svuint16x2_t' respectively} } */
+ svsudot_za32_vg1x2 (1, s8x2, u8);
+ svsudot_za32_vg1x2 (1, s8x2, u8x2);
+ svsudot_za32_vg1x2 (1, s8x2, u8x3); /* { dg-error {passing 'svuint8x3_t' to argument 3 of 'svsudot_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svsudot_za32_vg1x2 (1, s8x2, u8x4); /* { dg-error {passing 'svuint8x4_t' to argument 3 of 'svsudot_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svsudot_za32_vg1x2 (1, s8x2, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svsudot_za32_vg1x2', which expects a vector of unsigned integers} } */
+ svsudot_za32_vg1x2 (1, s8x2, s8x2); /* { dg-error {passing 'svint8x2_t' to argument 3 of 'svsudot_za32_vg1x2', which expects vectors of unsigned integers} } */
+ svsudot_za32_vg1x2 (1, s8x2, u8x3); /* { dg-error {passing 'svuint8x3_t' to argument 3 of 'svsudot_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svsudot_za32_vg1x2 (1, s8x2, u8x4); /* { dg-error {passing 'svuint8x4_t' to argument 3 of 'svsudot_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svsudot_za32_vg1x2 (1, u8x2, u8); /* { dg-error {'svsudot_za32_vg1x2' has no form that takes 'svuint8x2_t' arguments} } */
+ svsudot_za32_vg1x2 (1, u8x2, u8x2); /* { dg-error {'svsudot_za32_vg1x2' has no form that takes 'svuint8x2_t' arguments} } */
+
+ svsudot_za32_vg1x2 (1, s16x2, u16); /* { dg-error {'svsudot_za32_vg1x2' has no form that takes 'svint16x2_t' arguments} } */
+ svsudot_za32_vg1x2 (1, s16x2, u16x2); /* { dg-error {'svsudot_za32_vg1x2' has no form that takes 'svint16x2_t' arguments} } */
+ svsudot_za32_vg1x2 (1, s64x2, u64x2); /* { dg-error {'svsudot_za32_vg1x2' has no form that takes 'svint64x2_t' arguments} } */
+ svsudot_za32_vg1x2 (1, u64x2, u64x2); /* { dg-error {'svsudot_za32_vg1x2' has no form that takes 'svuint64x2_t' arguments} } */
+}
+
+void
+f2 (svint8x2_t s8x2, svuint8x2_t u8x2) __arm_streaming
+{
+ svsudot_za32_vg1x2 (0, s8x2, u8x2); /* { dg-error {ACLE function 'svsudot_za32_s8_vg1x2' can only be called from a function that has 'za' state} } */
+}
+
+void
+f3 (svint8x2_t s8x2, svuint8x2_t u8x2) __arm_inout("za")
+{
+ svsudot_za32_vg1x2 (0, s8x2, u8x2); /* { dg-error {ACLE function 'svsudot_za32_s8_vg1x2' can only be called when SME streaming mode is enabled} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_uint_m_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_uint_m_1.c
new file mode 100644
index 0000000..555f95a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binary_za_uint_m_1.c
@@ -0,0 +1,50 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target ("arch=armv9-a+sme")
+
+void
+f1 (svbool_t pg, svint8_t s8, svuint8_t u8,
+ svint16_t s16, svuint16_t u16, svfloat16_t f16, uint32_t tile)
+ __arm_streaming __arm_inout("za")
+{
+ svsumopa_za32_m (0, pg, pg, s8); /* { dg-error {too few arguments to function 'svsumopa_za32_m'} } */
+ svsumopa_za32_m (0, pg, pg, s8, u8, 0); /* { dg-error {too many arguments to function 'svsumopa_za32_m'} } */
+ svsumopa_za32_m (tile, pg, pg, s8, u8); /* { dg-error {argument 1 of 'svsumopa_za32_m' must be an integer constant expression} } */
+ svsumopa_za32_m (-1, pg, pg, s8, u8); /* { dg-error {passing -1 to argument 1 of 'svsumopa_za32_m', which expects a value in the range \[0, 3\]} } */
+ svsumopa_za32_m (4, pg, pg, s8, u8); /* { dg-error {passing 4 to argument 1 of 'svsumopa_za32_m', which expects a value in the range \[0, 3\]} } */
+ svsumopa_za32_m (0, u8, pg, s8, u8); /* { dg-error {passing 'svuint8_t' to argument 2 of 'svsumopa_za32_m', which expects 'svbool_t'} } */
+ svsumopa_za32_m (0, pg, u8, s8, u8); /* { dg-error {passing 'svuint8_t' to argument 3 of 'svsumopa_za32_m', which expects 'svbool_t'} } */
+ svsumopa_za32_m (0, pg, pg, tile, s8); /* { dg-error {passing 'uint32_t'.* to argument 4 of 'svsumopa_za32_m', which expects an SVE type} } */
+ svsumopa_za32_m (0, pg, pg, u8, u8); /* { dg-error {'svsumopa_za32_m' has no form that takes 'svuint8_t' arguments} } */
+ svsumopa_za32_m (0, pg, pg, pg, u8); /* { dg-error {'svsumopa_za32_m' has no form that takes 'svbool_t' arguments} } */
+ svsumopa_za32_m (0, pg, pg, f16, u8); /* { dg-error {'svsumopa_za32_m' has no form that takes 'svfloat16_t' arguments} } */
+ svsumopa_za32_m (0, pg, pg, s8, s8); /* { dg-error {passing 'svint8_t' to argument 5 of 'svsumopa_za32_m', which expects a vector of unsigned integers} } */
+ svsumopa_za32_m (0, pg, pg, s8, u16); /* { dg-error {arguments 4 and 5 of 'svsumopa_za32_m' must have the same element size, but the values passed here have type 'svint8_t' and 'svuint16_t' respectively} } */
+ svsumopa_za32_m (0, pg, pg, s16, u16); /* { dg-error {'svsumopa_za32_m' has no form that takes 'svint16_t' arguments} } */
+
+ svsumopa_za64_m (0, pg, pg, s16, u16); /* { dg-error {ACLE function 'svsumopa_za64_s16_m' requires ISA extension 'sme-i16i64'} } */
+}
+
+void
+f2 (svbool_t pg, svint8_t s8, svuint8_t u8) __arm_streaming
+{
+ svsumopa_za32_m (0, pg, pg, s8, u8); /* { dg-error {ACLE function 'svsumopa_za32_s8_m' can only be called from a function that has 'za' state} } */
+}
+
+void
+f3 (svbool_t pg, svint8_t s8, svuint8_t u8) __arm_inout("za")
+{
+ svsumopa_za32_m (0, pg, pg, s8, u8); /* { dg-error {ACLE function 'svsumopa_za32_s8_m' can only be called when SME streaming mode is enabled} } */
+}
+
+#pragma GCC target ("arch=armv9-a+sme-i16i64")
+
+void
+f4 (svbool_t pg, svint16_t s16, svuint16_t u16)
+ __arm_streaming __arm_inout("za")
+{
+ svsumopa_za64_m (-1, pg, pg, s16, u16); /* { dg-error {passing -1 to argument 1 of 'svsumopa_za64_m', which expects a value in the range \[0, 7\]} } */
+ svsumopa_za64_m (8, pg, pg, s16, u16); /* { dg-error {passing 8 to argument 1 of 'svsumopa_za64_m', which expects a value in the range \[0, 7\]} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binaryxn_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binaryxn_1.c
new file mode 100644
index 0000000..98b2433
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binaryxn_1.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+
+#include <arm_sve.h>
+
+void
+f1 (svbool_t pg, svcount_t pn, svuint8_t u8, svint16_t s16,
+ svuint8x2_t u8x2, svuint8x3_t u8x3, svuint8x4_t u8x4)
+{
+ svsel (pg, u8); /* { dg-error {too few arguments to function 'svsel'} } */
+ svsel (pg, u8, u8, u8); /* { dg-error {too many arguments to function 'svsel'} } */
+ svsel (0, u8, u8); /* { dg-error {passing 'int' to argument 1 of 'svsel', which expects an 'svbool_t' or 'svcount_t'} } */
+ svsel (u8, u8, u8); /* { dg-error {passing 'svuint8_t' to argument 1 of 'svsel', which expects an 'svbool_t' or 'svcount_t'} } */
+ svsel (pn, u8, u8); /* { dg-error {operations on single vectors must be predicated by 'svbool_t' rather than 'svcount_t'} } */
+ svsel (pg, pg, u8); /* { dg-error {passing 'svuint8_t' to argument 3 of 'svsel', but argument 2 had type 'svbool_t'} } */
+ svsel (pg, u8, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svsel', but argument 2 had type 'svuint8_t'} } */
+ svsel (pg, u8, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svsel', but argument 2 had type 'svuint8_t'} } */
+ svsel (pg, u8, 0); /* { dg-error {passing 'int' to argument 3 of 'svsel', which expects an SVE type rather than a scalar} } */
+ svsel (pg, pg, pg);
+ svsel (pg, u8, u8);
+ svsel (pg, u8, u8x2); /* { dg-error {passing tuple 'svuint8x2_t' to argument 3 of 'svsel' after passing single vector 'svuint8_t' to argument 2} } */
+ svsel (pg, u8, u8x3); /* { dg-error {passing tuple 'svuint8x3_t' to argument 3 of 'svsel' after passing single vector 'svuint8_t' to argument 2} } */
+ svsel (pg, u8, u8x4); /* { dg-error {passing tuple 'svuint8x4_t' to argument 3 of 'svsel' after passing single vector 'svuint8_t' to argument 2} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binaryxn_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binaryxn_2.c
new file mode 100644
index 0000000..600b7fc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/binaryxn_2.c
@@ -0,0 +1,33 @@
+/* { dg-do compile } */
+
+#include <arm_sve.h>
+
+#pragma GCC target "+sme2"
+
+void
+f1 (svbool_t pg, svcount_t pn, svuint8_t u8, svint16_t s16,
+ svint8x2_t s8x2, svint8x3_t s8x3, svint8x4_t s8x4,
+ svuint8x2_t u8x2, svuint8x3_t u8x3, svuint8x4_t u8x4,
+ svuint16x2_t u16x2) __arm_streaming
+{
+ svsel (pn, u8x2); /* { dg-error {too few arguments to function 'svsel'} } */
+ svsel (pn, u8x2, u8x2, u8x2); /* { dg-error {too many arguments to function 'svsel'} } */
+ svsel (0, u8x2, u8x2); /* { dg-error {passing 'int' to argument 1 of 'svsel', which expects an 'svbool_t' or 'svcount_t'} } */
+ svsel (u8x2, u8x2, u8x2); /* { dg-error {passing 'svuint8x2_t' to argument 1 of 'svsel', which expects an 'svbool_t' or 'svcount_t'} } */
+ svsel (pg, u8x2, u8x2); /* { dg-error {operations on multiple vectors must be predicated by 'svcount_t' rather than 'svbool_t'} } */
+ svsel (pn, u8x2, s8x2); /* { dg-error {passing 'svint8x2_t' to argument 3 of 'svsel', but argument 2 had type 'svuint8x2_t'} } */
+ svsel (pn, u8x2, u16x2); /* { dg-error {passing 'svuint16x2_t' to argument 3 of 'svsel', but argument 2 had type 'svuint8x2_t'} } */
+ svsel (pn, u8x2, 0); /* { dg-error {passing 'int' to argument 3 of 'svsel', which expects an SVE type rather than a scalar} } */
+ svsel (pn, u8x2, u8); /* { dg-error {passing single vector 'svuint8_t' to argument 3 of 'svsel' after passing tuple 'svuint8x2_t' to argument 2} } */
+ svsel (pn, u8x2, u8x2);
+ svsel (pn, u8x2, u8x3); /* { dg-error {passing mismatched tuple types 'svuint8x2_t' and 'svuint8x3_t' to arguments 2 and 3 of 'svsel'} } */
+ svsel (pn, u8x2, s8x3); /* { dg-error {passing mismatched tuple types 'svuint8x2_t' and 'svint8x3_t' to arguments 2 and 3 of 'svsel'} } */
+ svsel (pn, u8x2, u8x4); /* { dg-error {passing mismatched tuple types 'svuint8x2_t' and 'svuint8x4_t' to arguments 2 and 3 of 'svsel'} } */
+ svsel (pn, s8x4, s8x2); /* { dg-error {passing mismatched tuple types 'svint8x4_t' and 'svint8x2_t' to arguments 2 and 3 of 'svsel'} } */
+}
+
+void
+f2 (svcount_t pn, svuint8x2_t u8x2)
+{
+ svsel (pn, u8x2, u8x2); /* { dg-error {ACLE function 'svsel_u8_x2' can only be called when SME streaming mode is enabled} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/clamp_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/clamp_1.c
new file mode 100644
index 0000000..342bebc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/clamp_1.c
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+
+#include <arm_sve.h>
+
+#pragma GCC target "+sme2"
+
+void
+f1 (svcount_t pn, svfloat16_t f16, svint16_t s16, svfloat32_t f32,
+ svfloat16x2_t f16x2, svfloat16x3_t f16x3, svfloat16x4_t f16x4)
+ __arm_streaming
+{
+ svclamp (f16, f16); /* { dg-error {too few arguments to function 'svclamp'} } */
+ svclamp (f16, f16, f16, f16); /* { dg-error {too many arguments to function 'svclamp'} } */
+ svclamp (0, f16, f16); /* { dg-error {passing 'int' to argument 1 of 'svclamp', which expects an SVE type rather than a scalar type} } */
+ svclamp (f16, f16, f16);
+ svclamp (s16, s16, s16); /* { dg-error {'svclamp' has no form that takes 'svint16_t' arguments} } */
+ svclamp (pn, f16, f16); /* { dg-error {passing 'svfloat16_t' to argument 2 of 'svclamp', but argument 1 had type 'svcount_t'} } */
+ svclamp (f16, s16, f16); /* { dg-error {passing 'svint16_t' to argument 2 of 'svclamp', but argument 1 had type 'svfloat16_t'} } */
+ svclamp (f16, f32, f32); /* { dg-error {passing 'svfloat32_t' to argument 2 of 'svclamp', but argument 1 had type 'svfloat16_t'} } */
+ svclamp (f16, f16, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svclamp', but argument 1 had type 'svfloat16_t'} } */
+ svclamp (f16, f16, 0); /* { dg-error {passing 'int' to argument 3 of 'svclamp', which expects an SVE type rather than a scalar} } */
+ svclamp (f16, f16x2, f16); /* { dg-error {passing 'svfloat16x2_t' to argument 2 of 'svclamp', which expects a single SVE vector rather than a tuple} } */
+ svclamp (f16, f16x4, f16); /* { dg-error {passing 'svfloat16x4_t' to argument 2 of 'svclamp', which expects a single SVE vector rather than a tuple} } */
+ svclamp (f16, f16, f16x2); /* { dg-error {passing 'svfloat16x2_t' to argument 3 of 'svclamp', which expects a single SVE vector rather than a tuple} } */
+ svclamp (f16, f16, f16x3); /* { dg-error {passing 'svfloat16x3_t' to argument 3 of 'svclamp', which expects a single SVE vector rather than a tuple} } */
+
+ svclamp (f16x2, f16x2, f16x2); /* { dg-error {passing 'svfloat16x2_t' to argument 2 of 'svclamp', which expects a single SVE vector rather than a tuple} } */
+ svclamp (f16x2, s16, f16); /* { dg-error {passing 'svint16_t' to argument 2 of 'svclamp', but argument 1 was a tuple of 'svfloat16_t'} } */
+ svclamp (f16x2, f16, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svclamp', but argument 1 was a tuple of 'svfloat16_t'} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/clast_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/clast_1.c
index cb9ac94..47ce473 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/clast_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/clast_1.c
@@ -6,10 +6,10 @@ test (svbool_t pg, svint32_t s32, svint64_t s64, int i)
svclasta (pg, 1); /* { dg-error {too few arguments to function 'svclasta'} } */
svclasta (pg, 1, s32, 1); /* { dg-error {too many arguments to function 'svclasta'} } */
svclasta (1, 1, s32); /* { dg-error {passing 'int' to argument 1 of 'svclasta', which expects 'svbool_t'} } */
- svclasta (pg, 1, 1); /* { dg-error {passing 'int' to argument 3 of 'svclasta', which expects an SVE vector type} } */
+ svclasta (pg, 1, 1); /* { dg-error {passing 'int' to argument 3 of 'svclasta', which expects an SVE type rather than a scalar} } */
svclasta (pg, 1, pg); /* { dg-error {'svclasta' has no form that takes 'svbool_t' arguments} } */
svclasta (pg, i, s32);
- svclasta (pg, s32, 1); /* { dg-error {passing 'int' to argument 3 of 'svclasta', which expects an SVE vector type} } */
- svclasta (pg, s32, s64); /* { dg-error {passing 'svint64_t' to argument 3 of 'svclasta', but previous arguments had type 'svint32_t'} } */
+ svclasta (pg, s32, 1); /* { dg-error {passing 'int' to argument 3 of 'svclasta', which expects an SVE type rather than a scalar} } */
+ svclasta (pg, s32, s64); /* { dg-error {passing 'svint64_t' to argument 3 of 'svclasta', but argument 2 had type 'svint32_t'} } */
svclasta (pg, pg, pg); /* { dg-error {'svclasta' has no form that takes 'svbool_t' arguments} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/compare_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/compare_1.c
index 12511a8..0dd0ad9 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/compare_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/compare_1.c
@@ -12,16 +12,16 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8,
svmatch (pg, u8, u8, u8); /* { dg-error {too many arguments to function 'svmatch'} } */
svmatch (u8, u8, u8); /* { dg-error {passing 'svuint8_t' to argument 1 of 'svmatch', which expects 'svbool_t'} } */
svmatch (pg, pg, pg); /* { dg-error {'svmatch' has no form that takes 'svbool_t' arguments} } */
- svmatch (pg, 1, u8); /* { dg-error {passing 'int' to argument 2 of 'svmatch', which expects an SVE vector type} } */
- svmatch (pg, u8, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svmatch', but previous arguments had type 'svuint8_t'} } */
+ svmatch (pg, 1, u8); /* { dg-error {passing 'int' to argument 2 of 'svmatch', which expects an SVE type rather than a scalar} } */
+ svmatch (pg, u8, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svmatch', but argument 2 had type 'svuint8_t'} } */
svmatch (pg, u8, u8);
- svmatch (pg, u8, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svmatch', but previous arguments had type 'svuint8_t'} } */
- svmatch (pg, u8, u16); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svmatch', but previous arguments had type 'svuint8_t'} } */
- svmatch (pg, u8, f16); /* { dg-error {passing 'svfloat16_t' to argument 3 of 'svmatch', but previous arguments had type 'svuint8_t'} } */
- svmatch (pg, u8, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svmatch', but previous arguments had type 'svuint8_t'} } */
- svmatch (pg, u8, 0); /* { dg-error {passing 'int' to argument 3 of 'svmatch', which expects an SVE vector type} } */
+ svmatch (pg, u8, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svmatch', but argument 2 had type 'svuint8_t'} } */
+ svmatch (pg, u8, u16); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svmatch', but argument 2 had type 'svuint8_t'} } */
+ svmatch (pg, u8, f16); /* { dg-error {passing 'svfloat16_t' to argument 3 of 'svmatch', but argument 2 had type 'svuint8_t'} } */
+ svmatch (pg, u8, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svmatch', but argument 2 had type 'svuint8_t'} } */
+ svmatch (pg, u8, 0); /* { dg-error {passing 'int' to argument 3 of 'svmatch', which expects an SVE type rather than a scalar} } */
- svmatch (pg, f16, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svmatch', but previous arguments had type 'svfloat16_t'} } */
- svmatch (pg, f16, u16); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svmatch', but previous arguments had type 'svfloat16_t'} } */
+ svmatch (pg, f16, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svmatch', but argument 2 had type 'svfloat16_t'} } */
+ svmatch (pg, f16, u16); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svmatch', but argument 2 had type 'svfloat16_t'} } */
svmatch (pg, f16, f16); /* { dg-error {'svmatch' has no form that takes 'svfloat16_t' arguments} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/compare_opt_n_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/compare_opt_n_1.c
index 71c8e86..cfa50d38 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/compare_opt_n_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/compare_opt_n_1.c
@@ -10,17 +10,17 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8,
svcmpeq (pg, u8, u8, u8); /* { dg-error {too many arguments to function 'svcmpeq'} } */
svcmpeq (u8, u8, u8); /* { dg-error {passing 'svuint8_t' to argument 1 of 'svcmpeq', which expects 'svbool_t'} } */
svcmpeq (pg, pg, pg); /* { dg-error {'svcmpeq' has no form that takes 'svbool_t' arguments} } */
- svcmpeq (pg, 1, u8); /* { dg-error {passing 'int' to argument 2 of 'svcmpeq', which expects an SVE vector type} } */
- svcmpeq (pg, u8, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svcmpeq', but previous arguments had type 'svuint8_t'} } */
+ svcmpeq (pg, 1, u8); /* { dg-error {passing 'int' to argument 2 of 'svcmpeq', which expects an SVE type rather than a scalar} } */
+ svcmpeq (pg, u8, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svcmpeq', but argument 2 had type 'svuint8_t'} } */
svcmpeq (pg, u8, u8);
- svcmpeq (pg, u8, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svcmpeq', but previous arguments had type 'svuint8_t'} } */
- svcmpeq (pg, u8, u16); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svcmpeq', but previous arguments had type 'svuint8_t'} } */
- svcmpeq (pg, u8, f16); /* { dg-error {passing 'svfloat16_t' to argument 3 of 'svcmpeq', but previous arguments had type 'svuint8_t'} } */
- svcmpeq (pg, u8, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svcmpeq', but previous arguments had type 'svuint8_t'} } */
+ svcmpeq (pg, u8, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svcmpeq', but argument 2 had type 'svuint8_t'} } */
+ svcmpeq (pg, u8, u16); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svcmpeq', but argument 2 had type 'svuint8_t'} } */
+ svcmpeq (pg, u8, f16); /* { dg-error {passing 'svfloat16_t' to argument 3 of 'svcmpeq', but argument 2 had type 'svuint8_t'} } */
+ svcmpeq (pg, u8, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svcmpeq', but argument 2 had type 'svuint8_t'} } */
svcmpeq (pg, u8, 0);
- svcmpeq (pg, f16, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svcmpeq', but previous arguments had type 'svfloat16_t'} } */
- svcmpeq (pg, f16, u16); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svcmpeq', but previous arguments had type 'svfloat16_t'} } */
+ svcmpeq (pg, f16, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svcmpeq', but argument 2 had type 'svfloat16_t'} } */
+ svcmpeq (pg, f16, u16); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svcmpeq', but argument 2 had type 'svfloat16_t'} } */
svcmpeq (pg, f16, f16);
svcmpeq (pg, f16, 1);
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/compare_scalar_count_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/compare_scalar_count_1.c
new file mode 100644
index 0000000..47077f7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/compare_scalar_count_1.c
@@ -0,0 +1,55 @@
+/* { dg-do compile } */
+
+#include <arm_sve.h>
+#include <stdbool.h>
+
+#pragma GCC target "+sme2"
+
+enum signed_enum { SA = -1, SB };
+enum unsigned_enum { UA, UB };
+
+void
+test (int32_t s32, int64_t s64, uint16_t u16, uint32_t u32, uint64_t u64,
+ bool b, int *ptr, float f32, svbool_t pg, svint32_t vec)
+ __arm_streaming
+{
+ svwhilele_c8 (s64, 2); /* { dg-error {too few arguments to function 'svwhilele_c8'} } */
+ svwhilele_c8 (s64, s64, 2, 2); /* { dg-error {too many arguments to function 'svwhilele_c8'} } */
+
+ svwhilele_c8 (b, b, 2); /* { dg-error {passing '_Bool' and '_Bool' to arguments 1 and 2 of 'svwhilele_c8', which expects a pair of 64-bit integers} } */
+ svwhilele_c8 (u16, u16, 2); /* { dg-error {expects a pair of 64-bit integers} } */
+ svwhilele_c8 (ptr, ptr, 2); /* { dg-error {expects a pair of 64-bit integers} } */
+ svwhilele_c8 (f32, f32, 2); /* { dg-error {expects a pair of 64-bit integers} } */
+ svwhilele_c8 (pg, pg, 2); /* { dg-error {expects a pair of 64-bit integers} } */
+ svwhilele_c8 (vec, vec, 2); /* { dg-error {expects a pair of 64-bit integers} } */
+ svwhilele_c8 (0, 0, 2); /* { dg-error {expects a pair of 64-bit integers} } */
+ svwhilele_c8 (s32, s32, 2); /* { dg-error {expects a pair of 64-bit integers} } */
+
+ svwhilele_c8 (0, s64, 2);
+ svwhilele_c8 (0U, s64, 2);
+ svwhilele_c8 (0, u64, 2); /* { dg-error {mismatched integer types} } */
+ svwhilele_c8 (0U, u64, 2);
+
+ svwhilele_c8 (s32, s64, 2);
+ svwhilele_c8 (u32, s64, 2);
+ svwhilele_c8 (s32, u64, 2); /* { dg-error {mismatched integer types} } */
+ svwhilele_c8 (u32, u64, 2);
+
+ svwhilele_c8 (s64, s64, 2);
+ svwhilele_c8 (u64, s64, 2); /* { dg-error {mismatched integer types} } */
+ svwhilele_c8 (s64, u64, 2); /* { dg-error {mismatched integer types} } */
+ svwhilele_c8 (u64, u64, 2);
+
+ svwhilele_c8 (s64, 0, 2);
+ svwhilele_c8 (s64, 0U, 2);
+ svwhilele_c8 (u64, 0, 2); /* { dg-error {mismatched integer types} } */
+ svwhilele_c8 (u64, 0U, 2);
+
+ svwhilele_c8 (s64, s32, 2);
+ svwhilele_c8 (s64, u32, 2);
+ svwhilele_c8 (u64, s32, 2); /* { dg-error {mismatched integer types} } */
+ svwhilele_c8 (u64, u32, 2);
+
+ svwhilele_c8 (u64, u64, u64); /* { dg-error {argument 3 of 'svwhilele_c8' must be an integer constant expression} } */
+ svwhilele_c8 (u64, u64, 1); /* { dg-error {passing 1 to argument 3 of 'svwhilele_c8', which expects either 2 or 4} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/compare_wide_opt_n_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/compare_wide_opt_n_1.c
index fc5e456..655f033 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/compare_wide_opt_n_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/compare_wide_opt_n_1.c
@@ -9,7 +9,7 @@ f1 (svbool_t pg, svuint8_t u8, svint8_t s8, svint64_t s64, svuint64_t u64,
svcmpeq_wide (pg, s8); /* { dg-error {too few arguments to function 'svcmpeq_wide'} } */
svcmpeq_wide (pg, s8, s64, s8); /* { dg-error {too many arguments to function 'svcmpeq_wide'} } */
svcmpeq_wide (s8, s8, s64); /* { dg-error {passing 'svint8_t' to argument 1 of 'svcmpeq_wide', which expects 'svbool_t'} } */
- svcmpeq_wide (pg, 0, s64); /* { dg-error {passing 'int' to argument 2 of 'svcmpeq_wide', which expects an SVE vector type} } */
+ svcmpeq_wide (pg, 0, s64); /* { dg-error {passing 'int' to argument 2 of 'svcmpeq_wide', which expects an SVE type rather than a scalar} } */
svcmpeq_wide (pg, s8, 0);
svcmpeq_wide (pg, s8, x);
svcmpeq_wide (pg, s8, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svcmpeq_wide', which expects a vector of 64-bit elements} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/count_vector_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/count_vector_1.c
index daf9e0d..b57d9de 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/count_vector_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/count_vector_1.c
@@ -7,7 +7,7 @@ f1 (svbool_t pg, svuint32_t u32, svuint32x2_t u32x2)
{
svlen (); /* { dg-error {too few arguments to function 'svlen'} } */
svlen (u32, u32); /* { dg-error {too many arguments to function 'svlen'} } */
- svlen (0); /* { dg-error {passing 'int' to argument 1 of 'svlen', which expects an SVE vector type} } */
+ svlen (0); /* { dg-error {passing 'int' to argument 1 of 'svlen', which expects an SVE type rather than a scalar} } */
svlen (pg); /* { dg-error {'svlen' has no form that takes 'svbool_t' arguments} } */
svlen (u32x2); /* { dg-error {passing 'svuint32x2_t' to argument 1 of 'svlen', which expects a single SVE vector rather than a tuple} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/create_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/create_1.c
index 31321a0..22b031a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/create_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/create_1.c
@@ -10,12 +10,12 @@ f1 (svuint8x2_t *ptr, svbool_t pg, svuint8_t u8, svfloat64_t f64,
*ptr = svcreate2 (u8); /* { dg-error {too few arguments to function 'svcreate2'} } */
*ptr = svcreate2 (u8, u8, u8); /* { dg-error {too many arguments to function 'svcreate2'} } */
*ptr = svcreate2 (u8x2, u8x2); /* { dg-error {passing 'svuint8x2_t' to argument 1 of 'svcreate2', which expects a single SVE vector rather than a tuple} } */
- *ptr = svcreate2 (u8, f64); /* { dg-error {passing 'svfloat64_t' to argument 2 of 'svcreate2', but previous arguments had type 'svuint8_t'} } */
- *ptr = svcreate2 (u8, pg); /* { dg-error {passing 'svbool_t' to argument 2 of 'svcreate2', but previous arguments had type 'svuint8_t'} } */
- *ptr = svcreate2 (u8, x); /* { dg-error {passing 'int' to argument 2 of 'svcreate2', which expects an SVE vector type} } */
- *ptr = svcreate2 (x, u8); /* { dg-error {passing 'int' to argument 1 of 'svcreate2', which expects an SVE vector type} } */
- *ptr = svcreate2 (pg, u8); /* { dg-error {passing 'svuint8_t' to argument 2 of 'svcreate2', but previous arguments had type 'svbool_t'} } */
- *ptr = svcreate2 (pg, pg); /* { dg-error {'svcreate2' has no form that takes 'svbool_t' arguments} } */
+ *ptr = svcreate2 (u8, f64); /* { dg-error {passing 'svfloat64_t' to argument 2 of 'svcreate2', but argument 1 had type 'svuint8_t'} } */
+ *ptr = svcreate2 (u8, pg); /* { dg-error {passing 'svbool_t' to argument 2 of 'svcreate2', but argument 1 had type 'svuint8_t'} } */
+ *ptr = svcreate2 (u8, x); /* { dg-error {passing 'int' to argument 2 of 'svcreate2', which expects an SVE type rather than a scalar} } */
+ *ptr = svcreate2 (x, u8); /* { dg-error {passing 'int' to argument 1 of 'svcreate2', which expects an SVE type rather than a scalar} } */
+ *ptr = svcreate2 (pg, u8); /* { dg-error {passing 'svuint8_t' to argument 2 of 'svcreate2', but argument 1 had type 'svbool_t'} } */
+ *ptr = svcreate2 (pg, pg); /* { dg-error {incompatible types when assigning to type 'svuint8x2_t' from type 'svboolx2_t'} } */
*ptr = svcreate2 (u8, u8);
*ptr = svcreate2 (f64, f64); /* { dg-error {incompatible types when assigning to type 'svuint8x2_t' from type 'svfloat64x2_t'} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/create_3.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/create_3.c
index a88e56b..40f3a1f 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/create_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/create_3.c
@@ -11,11 +11,11 @@ f1 (svfloat16x3_t *ptr, svbool_t pg, svfloat16_t f16, svfloat64_t f64,
*ptr = svcreate3 (f16, f16); /* { dg-error {too few arguments to function 'svcreate3'} } */
*ptr = svcreate3 (f16, f16, f16, f16); /* { dg-error {too many arguments to function 'svcreate3'} } */
*ptr = svcreate3 (f16x3, f16x3, f16x3); /* { dg-error {passing 'svfloat16x3_t' to argument 1 of 'svcreate3', which expects a single SVE vector rather than a tuple} } */
- *ptr = svcreate3 (f16, f16, f64); /* { dg-error {passing 'svfloat64_t' to argument 3 of 'svcreate3', but previous arguments had type 'svfloat16_t'} } */
- *ptr = svcreate3 (f16, pg, f16); /* { dg-error {passing 'svbool_t' to argument 2 of 'svcreate3', but previous arguments had type 'svfloat16_t'} } */
- *ptr = svcreate3 (f16, x, f16); /* { dg-error {passing 'int' to argument 2 of 'svcreate3', which expects an SVE vector type} } */
- *ptr = svcreate3 (x, f16, f16); /* { dg-error {passing 'int' to argument 1 of 'svcreate3', which expects an SVE vector type} } */
- *ptr = svcreate3 (pg, f16, f16); /* { dg-error {passing 'svfloat16_t' to argument 2 of 'svcreate3', but previous arguments had type 'svbool_t'} } */
+ *ptr = svcreate3 (f16, f16, f64); /* { dg-error {passing 'svfloat64_t' to argument 3 of 'svcreate3', but argument 1 had type 'svfloat16_t'} } */
+ *ptr = svcreate3 (f16, pg, f16); /* { dg-error {passing 'svbool_t' to argument 2 of 'svcreate3', but argument 1 had type 'svfloat16_t'} } */
+ *ptr = svcreate3 (f16, x, f16); /* { dg-error {passing 'int' to argument 2 of 'svcreate3', which expects an SVE type rather than a scalar} } */
+ *ptr = svcreate3 (x, f16, f16); /* { dg-error {passing 'int' to argument 1 of 'svcreate3', which expects an SVE type rather than a scalar} } */
+ *ptr = svcreate3 (pg, f16, f16); /* { dg-error {passing 'svfloat16_t' to argument 2 of 'svcreate3', but argument 1 had type 'svbool_t'} } */
*ptr = svcreate3 (pg, pg, pg); /* { dg-error {'svcreate3' has no form that takes 'svbool_t' arguments} } */
*ptr = svcreate3 (f16, f16, f16);
*ptr = svcreate3 (f64, f64, f64); /* { dg-error {incompatible types when assigning to type 'svfloat16x3_t' from type 'svfloat64x3_t'} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/create_5.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/create_5.c
index fed1245..bf3dd5d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/create_5.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/create_5.c
@@ -12,11 +12,11 @@ f1 (svint32x4_t *ptr, svbool_t pg, svint32_t s32, svfloat64_t f64,
*ptr = svcreate4 (s32, s32, s32); /* { dg-error {too few arguments to function 'svcreate4'} } */
*ptr = svcreate4 (s32, s32, s32, s32, s32); /* { dg-error {too many arguments to function 'svcreate4'} } */
*ptr = svcreate4 (s32x4, s32x4, s32x4, s32x4); /* { dg-error {passing 'svint32x4_t' to argument 1 of 'svcreate4', which expects a single SVE vector rather than a tuple} } */
- *ptr = svcreate4 (s32, s32, s32, f64); /* { dg-error {passing 'svfloat64_t' to argument 4 of 'svcreate4', but previous arguments had type 'svint32_t'} } */
- *ptr = svcreate4 (s32, s32, pg, s32); /* { dg-error {passing 'svbool_t' to argument 3 of 'svcreate4', but previous arguments had type 'svint32_t'} } */
- *ptr = svcreate4 (s32, x, s32, s32); /* { dg-error {passing 'int' to argument 2 of 'svcreate4', which expects an SVE vector type} } */
- *ptr = svcreate4 (x, s32, s32, s32); /* { dg-error {passing 'int' to argument 1 of 'svcreate4', which expects an SVE vector type} } */
- *ptr = svcreate4 (pg, s32, s32, s32); /* { dg-error {passing 'svint32_t' to argument 2 of 'svcreate4', but previous arguments had type 'svbool_t'} } */
+ *ptr = svcreate4 (s32, s32, s32, f64); /* { dg-error {passing 'svfloat64_t' to argument 4 of 'svcreate4', but argument 1 had type 'svint32_t'} } */
+ *ptr = svcreate4 (s32, s32, pg, s32); /* { dg-error {passing 'svbool_t' to argument 3 of 'svcreate4', but argument 1 had type 'svint32_t'} } */
+ *ptr = svcreate4 (s32, x, s32, s32); /* { dg-error {passing 'int' to argument 2 of 'svcreate4', which expects an SVE type rather than a scalar} } */
+ *ptr = svcreate4 (x, s32, s32, s32); /* { dg-error {passing 'int' to argument 1 of 'svcreate4', which expects an SVE type rather than a scalar} } */
+ *ptr = svcreate4 (pg, s32, s32, s32); /* { dg-error {passing 'svint32_t' to argument 2 of 'svcreate4', but argument 1 had type 'svbool_t'} } */
*ptr = svcreate4 (pg, pg, pg, pg); /* { dg-error {'svcreate4' has no form that takes 'svbool_t' arguments} } */
*ptr = svcreate4 (s32, s32, s32, s32);
*ptr = svcreate4 (f64, f64, f64, f64); /* { dg-error {incompatible types when assigning to type 'svint32x4_t' from type 'svfloat64x4_t'} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/dot_za_slice_int_lane_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/dot_za_slice_int_lane_1.c
new file mode 100644
index 0000000..ca2a039
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/dot_za_slice_int_lane_1.c
@@ -0,0 +1,59 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target ("+sme2")
+
+void
+f1 (svbool_t pg, svint8_t s8, svuint8_t u8, svint16_t s16, svuint16_t u16,
+ svint32_t s32, svuint32_t u32,
+ svint8x2_t s8x2, svuint8x2_t u8x2,
+ svint8x3_t s8x3, svuint8x3_t u8x3,
+ svint8x4_t s8x4, svuint8x4_t u8x4,
+ svint16x2_t s16x2, svuint16x2_t u16x2,
+ float f, double d)
+ __arm_streaming __arm_inout("za")
+{
+ svusdot_lane_za32_vg1x2 (0, u8x2, s8); /* { dg-error {too few arguments to function 'svusdot_lane_za32_vg1x2'} } */
+ svusdot_lane_za32_vg1x2 (0, u8x2, s8, 0, 0); /* { dg-error {too many arguments to function 'svusdot_lane_za32_vg1x2'} } */
+
+ svusdot_lane_za32_vg1x2 (u8x2, u8x2, s8, 0); /* { dg-error {passing 'svuint8x2_t' to argument 1 of 'svusdot_lane_za32_vg1x2', which expects 'uint32_t'} } */
+ svusdot_lane_za32_vg1x2 (f, u8x2, s8, 0);
+ svusdot_lane_za32_vg1x2 (d, u8x2, s8, 0);
+ svusdot_lane_za32_vg1x2 (pg, u8x2, s8, 0); /* { dg-error {passing 'svbool_t' to argument 1 of 'svusdot_lane_za32_vg1x2', which expects 'uint32_t'} } */
+
+ svusdot_lane_za32_vg1x2 (0, 1, s8, 0); /* { dg-error {passing 'int' to argument 2 of 'svusdot_lane_za32_vg1x2', which expects an SVE type rather than a scalar type} } */
+ svusdot_lane_za32_vg1x2 (0, pg, s8, 0); /* { dg-error {passing 'svbool_t' to argument 2 of 'svusdot_lane_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svusdot_lane_za32_vg1x2 (0, u8, s8, 0); /* { dg-error {passing single vector 'svuint8_t' to argument 2 of 'svusdot_lane_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svusdot_lane_za32_vg1x2 (0, u8x3, s8, 0); /* { dg-error {passing 'svuint8x3_t' to argument 2 of 'svusdot_lane_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svusdot_lane_za32_vg1x2 (0, u8x4, s8, 0); /* { dg-error {passing 'svuint8x4_t' to argument 2 of 'svusdot_lane_za32_vg1x2', which expects a tuple of 2 vectors} } */
+
+ svusdot_lane_za32_vg1x2 (0, u8x2, 1, 0); /* { dg-error {passing 'int' to argument 3 of 'svusdot_lane_za32_vg1x2', which expects an SVE type rather than a scalar type} } */
+ svusdot_lane_za32_vg1x2 (0, u8x2, pg, 0); /* { dg-error {passing 'svbool_t' to argument 3 of 'svusdot_lane_za32_vg1x2', which expects a vector of signed integers} } */
+ svusdot_lane_za32_vg1x2 (0, u8x2, u8, 0); /* { dg-error {passing 'svuint8_t' to argument 3 of 'svusdot_lane_za32_vg1x2', which expects a vector of signed integers} } */
+ svusdot_lane_za32_vg1x2 (0, u8x2, s32, 0); /* { dg-error {arguments 2 and 3 of 'svusdot_lane_za32_vg1x2' must have the same element size, but the values passed here have type 'svuint8x2_t' and 'svint32_t' respectively} } */
+ svusdot_lane_za32_vg1x2 (0, u8x2, s8x2, 0); /* { dg-error {passing 'svint8x2_t' to argument 3 of 'svusdot_lane_za32_vg1x2', which expects a single SVE vector rather than a tuple} } */
+ svusdot_lane_za32_vg1x2 (0, u8x2, s8, 0);
+ svusdot_lane_za32_vg1x2 (0, s8x2, s8, 0); /* { dg-error {'svusdot_lane_za32_vg1x2' has no form that takes 'svint8x2_t' arguments} } */
+ svusdot_lane_za32_vg1x2 (0, u16x2, s16, 0); /* { dg-error {'svusdot_lane_za32_vg1x2' has no form that takes 'svuint16x2_t' arguments} } */
+
+ svusdot_lane_za32_vg1x2 (0, u8x2, s8, -1); /* { dg-error {passing -1 to argument 4 of 'svusdot_lane_za32_vg1x2', which expects a value in the range \[0, 3\]} } */
+ svusdot_lane_za32_vg1x2 (0, u8x2, s8, 3);
+ svusdot_lane_za32_vg1x2 (0, u8x2, s8, 4); /* { dg-error {passing 4 to argument 4 of 'svusdot_lane_za32_vg1x2', which expects a value in the range \[0, 3\]} } */
+ svusdot_lane_za32_vg1x2 (0, u8x2, s8, -1); /* { dg-error {passing -1 to argument 4 of 'svusdot_lane_za32_vg1x2', which expects a value in the range \[0, 3\]} } */
+ svusdot_lane_za32_vg1x2 (0, u8x2, s8, 3);
+ svusdot_lane_za32_vg1x2 (0, u8x2, s8, 4); /* { dg-error {passing 4 to argument 4 of 'svusdot_lane_za32_vg1x2', which expects a value in the range \[0, 3\]} } */
+ svusdot_lane_za32_vg1x2 (0, u8x2, s8, f); /* { dg-error {argument 4 of 'svusdot_lane_za32_vg1x2' must be an integer constant expression} } */
+}
+
+void
+f2 (svuint8x2_t u8x2, svint8_t s8) __arm_streaming
+{
+ svusdot_lane_za32_vg1x2 (0, u8x2, s8, 0); /* { dg-error {ACLE function 'svusdot_lane_za32_u8_vg1x2' can only be called from a function that has 'za' state} } */
+}
+
+void
+f3 (svuint8x2_t u8x2, svint8_t s8) __arm_inout("za")
+{
+ svusdot_lane_za32_vg1x2 (0, u8x2, s8, 0); /* { dg-error {ACLE function 'svusdot_lane_za32_u8_vg1x2' can only be called when SME streaming mode is enabled} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/dot_za_slice_lane_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/dot_za_slice_lane_1.c
new file mode 100644
index 0000000..e37d24a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/dot_za_slice_lane_1.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target ("+sme2")
+
+void
+f1 (svbool_t pg, svint8_t s8, svuint8_t u8, svint16_t s16, svuint16_t u16,
+ svint32_t s32, svuint32_t u32,
+ svint8x2_t s8x2, svuint8x2_t u8x2,
+ svint16x2_t s16x2, svuint16x2_t u16x2,
+ svint16x3_t s16x3, svuint16x3_t u16x3,
+ svint16x4_t s16x4, svuint16x4_t u16x4,
+ svint32x2_t s32x2, svuint32x2_t u32x2,
+ float f, double d)
+ __arm_streaming __arm_inout("za")
+{
+ svdot_lane_za32_vg1x2 (0, s16x2, s16); /* { dg-error {too few arguments to function 'svdot_lane_za32_vg1x2'} } */
+ svdot_lane_za32_vg1x2 (0, s16x2, s16, 0, 0); /* { dg-error {too many arguments to function 'svdot_lane_za32_vg1x2'} } */
+
+ svdot_lane_za32_vg1x2 (s16x2, s16x2, s16, 0); /* { dg-error {passing 'svint16x2_t' to argument 1 of 'svdot_lane_za32_vg1x2', which expects 'uint32_t'} } */
+ svdot_lane_za32_vg1x2 (f, s16x2, s16, 0);
+ svdot_lane_za32_vg1x2 (d, s16x2, s16, 0);
+ svdot_lane_za32_vg1x2 (pg, s16x2, s16, 0); /* { dg-error {passing 'svbool_t' to argument 1 of 'svdot_lane_za32_vg1x2', which expects 'uint32_t'} } */
+
+ svdot_lane_za32_vg1x2 (0, 1, s16, 0); /* { dg-error {passing 'int' to argument 2 of 'svdot_lane_za32_vg1x2', which expects an SVE type rather than a scalar type} } */
+ svdot_lane_za32_vg1x2 (0, pg, s16, 0); /* { dg-error {passing 'svbool_t' to argument 2 of 'svdot_lane_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svdot_lane_za32_vg1x2 (0, s16, s16, 0); /* { dg-error {passing single vector 'svint16_t' to argument 2 of 'svdot_lane_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svdot_lane_za32_vg1x2 (0, s16x3, s16, 0); /* { dg-error {passing 'svint16x3_t' to argument 2 of 'svdot_lane_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svdot_lane_za32_vg1x2 (0, s16x4, s16, 0); /* { dg-error {passing 'svint16x4_t' to argument 2 of 'svdot_lane_za32_vg1x2', which expects a tuple of 2 vectors} } */
+
+ svdot_lane_za32_vg1x2 (0, s16x2, 1, 0); /* { dg-error {passing 'int' to argument 3 of 'svdot_lane_za32_vg1x2', which expects an SVE type rather than a scalar type} } */
+ svdot_lane_za32_vg1x2 (0, s16x2, pg, 0); /* { dg-error {passing 'svbool_t' to argument 3 of 'svdot_lane_za32_vg1x2', but argument 2 was a tuple of 'svint16_t'} } */
+ svdot_lane_za32_vg1x2 (0, s16x2, u16, 0); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svdot_lane_za32_vg1x2', but argument 2 was a tuple of 'svint16_t'} } */
+ svdot_lane_za32_vg1x2 (0, s16x2, s32, 0); /* { dg-error {passing 'svint32_t' to argument 3 of 'svdot_lane_za32_vg1x2', but argument 2 was a tuple of 'svint16_t'} } */
+ svdot_lane_za32_vg1x2 (0, s16x2, s16x2, 0); /* { dg-error {passing 'svint16x2_t' to argument 3 of 'svdot_lane_za32_vg1x2', which expects a single SVE vector rather than a tuple} } */
+ svdot_lane_za32_vg1x2 (0, u16x2, u16, 0);
+ svdot_lane_za32_vg1x2 (0, u16x2, s16, 0); /* { dg-error {passing 'svint16_t' to argument 3 of 'svdot_lane_za32_vg1x2', but argument 2 was a tuple of 'svuint16_t'} } */
+ svdot_lane_za32_vg1x2 (0, s32x2, s32, 0); /* { dg-error {'svdot_lane_za32_vg1x2' has no form that takes 'svint32x2_t' arguments} } */
+ svdot_lane_za32_vg1x2 (0, u32x2, u32, 0); /* { dg-error {'svdot_lane_za32_vg1x2' has no form that takes 'svuint32x2_t' arguments} } */
+
+ svdot_lane_za32_vg1x2 (0, s8x2, s8, -1); /* { dg-error {passing -1 to argument 4 of 'svdot_lane_za32_vg1x2', which expects a value in the range \[0, 3\]} } */
+ svdot_lane_za32_vg1x2 (0, s8x2, s8, 3);
+ svdot_lane_za32_vg1x2 (0, s8x2, s8, 4); /* { dg-error {passing 4 to argument 4 of 'svdot_lane_za32_vg1x2', which expects a value in the range \[0, 3\]} } */
+ svdot_lane_za32_vg1x2 (0, s16x2, s16, -1); /* { dg-error {passing -1 to argument 4 of 'svdot_lane_za32_vg1x2', which expects a value in the range \[0, 3\]} } */
+ svdot_lane_za32_vg1x2 (0, s16x2, s16, 3);
+ svdot_lane_za32_vg1x2 (0, s16x2, s16, 4); /* { dg-error {passing 4 to argument 4 of 'svdot_lane_za32_vg1x2', which expects a value in the range \[0, 3\]} } */
+ svdot_lane_za32_vg1x2 (0, s16x2, s16, f); /* { dg-error {argument 4 of 'svdot_lane_za32_vg1x2' must be an integer constant expression} } */
+}
+
+void
+f2 (svint16x2_t s16x2, svint16_t s16) __arm_streaming
+{
+ svdot_lane_za32_vg1x2 (0, s16x2, s16, 0); /* { dg-error {ACLE function 'svdot_lane_za32_s16_vg1x2' can only be called from a function that has 'za' state} } */
+}
+
+void
+f3 (svint16x2_t s16x2, svint16_t s16) __arm_inout("za")
+{
+ svdot_lane_za32_vg1x2 (0, s16x2, s16, 0); /* { dg-error {ACLE function 'svdot_lane_za32_s16_vg1x2' can only be called when SME streaming mode is enabled} } */
+}
+
+#pragma GCC target ("+sme-i16i64")
+
+void
+f4 (svint16_t s16, svuint16_t u16,
+ svint16x2_t s16x2, svuint16x2_t u16x2,
+ svint32_t s32, svuint32_t u32,
+ svint32x2_t s32x2, svuint32x2_t u32x2,
+ svint64_t s64, svuint64_t u64,
+ svint64x2_t s64x2, svuint64x2_t u64x2)
+ __arm_streaming __arm_inout("za")
+{
+ svdot_lane_za64_vg1x2 (0, s16x2, s16, 0);
+ svdot_lane_za64_vg1x2 (0, u16x2, u16, 0);
+ svdot_lane_za64_vg1x2 (0, s16x2, s16, -1); /* { dg-error {passing -1 to argument 4 of 'svdot_lane_za64_vg1x2', which expects a value in the range \[0, 1\]} } */
+ svdot_lane_za64_vg1x2 (0, s16x2, s16, 1);
+ svdot_lane_za64_vg1x2 (0, u16x2, u16, 2); /* { dg-error {passing 2 to argument 4 of 'svdot_lane_za64_vg1x2', which expects a value in the range \[0, 1\]} } */
+ svdot_lane_za64_vg1x2 (0, s32x2, s32, 0); /* { dg-error {'svdot_lane_za64_vg1x2' has no form that takes 'svint32x2_t' arguments} } */
+ svdot_lane_za64_vg1x2 (0, u32x2, u32, 0); /* { dg-error {'svdot_lane_za64_vg1x2' has no form that takes 'svuint32x2_t' arguments} } */
+ svdot_lane_za64_vg1x2 (0, s64x2, s64, 0); /* { dg-error {'svdot_lane_za64_vg1x2' has no form that takes 'svint64x2_t' arguments} } */
+ svdot_lane_za64_vg1x2 (0, u64x2, u64, 0); /* { dg-error {'svdot_lane_za64_vg1x2' has no form that takes 'svuint64x2_t' arguments} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/dot_za_slice_lane_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/dot_za_slice_lane_2.c
new file mode 100644
index 0000000..7af3c6f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/dot_za_slice_lane_2.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target ("+sme2")
+
+void
+f1 (svbool_t pg, svint8_t s8, svuint8_t u8, svint16_t s16, svuint16_t u16,
+ svint32_t s32, svuint32_t u32,
+ svint8x4_t s8x4, svuint8x4_t u8x4,
+ svint16x2_t s16x2, svuint16x2_t u16x2,
+ svint16x3_t s16x3, svuint16x3_t u16x3,
+ svint16x4_t s16x4, svuint16x4_t u16x4,
+ svint32x4_t s32x4, svuint32x4_t u32x4,
+ float f, double d)
+ __arm_streaming __arm_inout("za")
+{
+ svdot_lane_za32_vg1x4 (0, s16x4, s16); /* { dg-error {too few arguments to function 'svdot_lane_za32_vg1x4'} } */
+ svdot_lane_za32_vg1x4 (0, s16x4, s16, 0, 0); /* { dg-error {too many arguments to function 'svdot_lane_za32_vg1x4'} } */
+
+ svdot_lane_za32_vg1x4 (s16x4, s16x4, s16, 0); /* { dg-error {passing 'svint16x4_t' to argument 1 of 'svdot_lane_za32_vg1x4', which expects 'uint32_t'} } */
+ svdot_lane_za32_vg1x4 (f, s16x4, s16, 0);
+ svdot_lane_za32_vg1x4 (d, s16x4, s16, 0);
+ svdot_lane_za32_vg1x4 (pg, s16x4, s16, 0); /* { dg-error {passing 'svbool_t' to argument 1 of 'svdot_lane_za32_vg1x4', which expects 'uint32_t'} } */
+
+ svdot_lane_za32_vg1x4 (0, 1, s16, 0); /* { dg-error {passing 'int' to argument 2 of 'svdot_lane_za32_vg1x4', which expects an SVE type rather than a scalar type} } */
+ svdot_lane_za32_vg1x4 (0, pg, s16, 0); /* { dg-error {passing 'svbool_t' to argument 2 of 'svdot_lane_za32_vg1x4', which expects a tuple of 4 vectors} } */
+ svdot_lane_za32_vg1x4 (0, s16, s16, 0); /* { dg-error {passing single vector 'svint16_t' to argument 2 of 'svdot_lane_za32_vg1x4', which expects a tuple of 4 vectors} } */
+ svdot_lane_za32_vg1x4 (0, s16x2, s16, 0); /* { dg-error {passing 'svint16x2_t' to argument 2 of 'svdot_lane_za32_vg1x4', which expects a tuple of 4 vectors} } */
+ svdot_lane_za32_vg1x4 (0, s16x3, s16, 0); /* { dg-error {passing 'svint16x3_t' to argument 2 of 'svdot_lane_za32_vg1x4', which expects a tuple of 4 vectors} } */
+
+ svdot_lane_za32_vg1x4 (0, s16x4, 1, 0); /* { dg-error {passing 'int' to argument 3 of 'svdot_lane_za32_vg1x4', which expects an SVE type rather than a scalar type} } */
+ svdot_lane_za32_vg1x4 (0, s16x4, pg, 0); /* { dg-error {passing 'svbool_t' to argument 3 of 'svdot_lane_za32_vg1x4', but argument 2 was a tuple of 'svint16_t'} } */
+ svdot_lane_za32_vg1x4 (0, s16x4, u16, 0); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svdot_lane_za32_vg1x4', but argument 2 was a tuple of 'svint16_t'} } */
+ svdot_lane_za32_vg1x4 (0, s16x4, s32, 0); /* { dg-error {passing 'svint32_t' to argument 3 of 'svdot_lane_za32_vg1x4', but argument 2 was a tuple of 'svint16_t'} } */
+ svdot_lane_za32_vg1x4 (0, s16x4, s16x4, 0); /* { dg-error {passing 'svint16x4_t' to argument 3 of 'svdot_lane_za32_vg1x4', which expects a single SVE vector rather than a tuple} } */
+ svdot_lane_za32_vg1x4 (0, u16x4, u16, 0);
+ svdot_lane_za32_vg1x4 (0, u16x4, s16, 0); /* { dg-error {passing 'svint16_t' to argument 3 of 'svdot_lane_za32_vg1x4', but argument 2 was a tuple of 'svuint16_t'} } */
+ svdot_lane_za32_vg1x4 (0, s32x4, s32, 0); /* { dg-error {'svdot_lane_za32_vg1x4' has no form that takes 'svint32x4_t' arguments} } */
+ svdot_lane_za32_vg1x4 (0, u32x4, u32, 0); /* { dg-error {'svdot_lane_za32_vg1x4' has no form that takes 'svuint32x4_t' arguments} } */
+
+ svdot_lane_za32_vg1x4 (0, s8x4, s8, -1); /* { dg-error {passing -1 to argument 4 of 'svdot_lane_za32_vg1x4', which expects a value in the range \[0, 3\]} } */
+ svdot_lane_za32_vg1x4 (0, s8x4, s8, 3);
+ svdot_lane_za32_vg1x4 (0, s8x4, s8, 4); /* { dg-error {passing 4 to argument 4 of 'svdot_lane_za32_vg1x4', which expects a value in the range \[0, 3\]} } */
+ svdot_lane_za32_vg1x4 (0, s16x4, s16, -1); /* { dg-error {passing -1 to argument 4 of 'svdot_lane_za32_vg1x4', which expects a value in the range \[0, 3\]} } */
+ svdot_lane_za32_vg1x4 (0, s16x4, s16, 3);
+ svdot_lane_za32_vg1x4 (0, s16x4, s16, 4); /* { dg-error {passing 4 to argument 4 of 'svdot_lane_za32_vg1x4', which expects a value in the range \[0, 3\]} } */
+ svdot_lane_za32_vg1x4 (0, s16x4, s16, f); /* { dg-error {argument 4 of 'svdot_lane_za32_vg1x4' must be an integer constant expression} } */
+}
+
+void
+f2 (svint16x4_t s16x4, svint16_t s16) __arm_streaming
+{
+ svdot_lane_za32_vg1x4 (0, s16x4, s16, 0); /* { dg-error {ACLE function 'svdot_lane_za32_s16_vg1x4' can only be called from a function that has 'za' state} } */
+}
+
+void
+f3 (svint16x4_t s16x4, svint16_t s16) __arm_inout("za")
+{
+ svdot_lane_za32_vg1x4 (0, s16x4, s16, 0); /* { dg-error {ACLE function 'svdot_lane_za32_s16_vg1x4' can only be called when SME streaming mode is enabled} } */
+}
+
+#pragma GCC target ("+sme-i16i64")
+
+void
+f4 (svint16_t s16, svuint16_t u16,
+ svint16x4_t s16x4, svuint16x4_t u16x4,
+ svint32_t s32, svuint32_t u32,
+ svint32x4_t s32x4, svuint32x4_t u32x4,
+ svint64_t s64, svuint64_t u64,
+ svint64x4_t s64x4, svuint64x4_t u64x4)
+ __arm_streaming __arm_inout("za")
+{
+ svdot_lane_za64_vg1x4 (0, s16x4, s16, 0);
+ svdot_lane_za64_vg1x4 (0, u16x4, u16, 0);
+ svdot_lane_za64_vg1x4 (0, s16x4, s16, -1); /* { dg-error {passing -1 to argument 4 of 'svdot_lane_za64_vg1x4', which expects a value in the range \[0, 1\]} } */
+ svdot_lane_za64_vg1x4 (0, s16x4, s16, 1);
+ svdot_lane_za64_vg1x4 (0, u16x4, u16, 2); /* { dg-error {passing 2 to argument 4 of 'svdot_lane_za64_vg1x4', which expects a value in the range \[0, 1\]} } */
+ svdot_lane_za64_vg1x4 (0, s32x4, s32, 0); /* { dg-error {'svdot_lane_za64_vg1x4' has no form that takes 'svint32x4_t' arguments} } */
+ svdot_lane_za64_vg1x4 (0, u32x4, u32, 0); /* { dg-error {'svdot_lane_za64_vg1x4' has no form that takes 'svuint32x4_t' arguments} } */
+ svdot_lane_za64_vg1x4 (0, s64x4, s64, 0); /* { dg-error {'svdot_lane_za64_vg1x4' has no form that takes 'svint64x4_t' arguments} } */
+ svdot_lane_za64_vg1x4 (0, u64x4, u64, 0); /* { dg-error {'svdot_lane_za64_vg1x4' has no form that takes 'svuint64x4_t' arguments} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/dot_za_slice_uint_lane_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/dot_za_slice_uint_lane_1.c
new file mode 100644
index 0000000..2efa2eb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/dot_za_slice_uint_lane_1.c
@@ -0,0 +1,59 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target ("+sme2")
+
+void
+f1 (svbool_t pg, svint8_t s8, svuint8_t u8, svint16_t s16, svuint16_t u16,
+ svint32_t s32, svuint32_t u32,
+ svint8x2_t s8x2, svuint8x2_t u8x2,
+ svint8x3_t s8x3, svuint8x3_t u8x3,
+ svint8x4_t s8x4, svuint8x4_t u8x4,
+ svint16x2_t s16x2, svuint16x2_t u16x2,
+ float f, double d)
+ __arm_streaming __arm_inout("za")
+{
+ svsudot_lane_za32_vg1x2 (0, s8x2, u8); /* { dg-error {too few arguments to function 'svsudot_lane_za32_vg1x2'} } */
+ svsudot_lane_za32_vg1x2 (0, s8x2, u8, 0, 0); /* { dg-error {too many arguments to function 'svsudot_lane_za32_vg1x2'} } */
+
+ svsudot_lane_za32_vg1x2 (u8x2, s8x2, u8, 0); /* { dg-error {passing 'svuint8x2_t' to argument 1 of 'svsudot_lane_za32_vg1x2', which expects 'uint32_t'} } */
+ svsudot_lane_za32_vg1x2 (f, s8x2, u8, 0);
+ svsudot_lane_za32_vg1x2 (d, s8x2, u8, 0);
+ svsudot_lane_za32_vg1x2 (pg, s8x2, u8, 0); /* { dg-error {passing 'svbool_t' to argument 1 of 'svsudot_lane_za32_vg1x2', which expects 'uint32_t'} } */
+
+ svsudot_lane_za32_vg1x2 (0, 1, u8, 0); /* { dg-error {passing 'int' to argument 2 of 'svsudot_lane_za32_vg1x2', which expects an SVE type rather than a scalar type} } */
+ svsudot_lane_za32_vg1x2 (0, pg, u8, 0); /* { dg-error {passing 'svbool_t' to argument 2 of 'svsudot_lane_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svsudot_lane_za32_vg1x2 (0, s8, u8, 0); /* { dg-error {passing single vector 'svint8_t' to argument 2 of 'svsudot_lane_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svsudot_lane_za32_vg1x2 (0, s8x3, u8, 0); /* { dg-error {passing 'svint8x3_t' to argument 2 of 'svsudot_lane_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svsudot_lane_za32_vg1x2 (0, s8x4, u8, 0); /* { dg-error {passing 'svint8x4_t' to argument 2 of 'svsudot_lane_za32_vg1x2', which expects a tuple of 2 vectors} } */
+
+ svsudot_lane_za32_vg1x2 (0, s8x2, 1, 0); /* { dg-error {passing 'int' to argument 3 of 'svsudot_lane_za32_vg1x2', which expects an SVE type rather than a scalar type} } */
+ svsudot_lane_za32_vg1x2 (0, s8x2, pg, 0); /* { dg-error {passing 'svbool_t' to argument 3 of 'svsudot_lane_za32_vg1x2', which expects a vector of unsigned integers} } */
+ svsudot_lane_za32_vg1x2 (0, s8x2, s8, 0); /* { dg-error {passing 'svint8_t' to argument 3 of 'svsudot_lane_za32_vg1x2', which expects a vector of unsigned integers} } */
+ svsudot_lane_za32_vg1x2 (0, s8x2, u32, 0); /* { dg-error {arguments 2 and 3 of 'svsudot_lane_za32_vg1x2' must have the same element size, but the values passed here have type 'svint8x2_t' and 'svuint32_t' respectively} } */
+ svsudot_lane_za32_vg1x2 (0, s8x2, u8x2, 0); /* { dg-error {passing 'svuint8x2_t' to argument 3 of 'svsudot_lane_za32_vg1x2', which expects a single SVE vector rather than a tuple} } */
+ svsudot_lane_za32_vg1x2 (0, s8x2, u8, 0);
+ svsudot_lane_za32_vg1x2 (0, u8x2, u8, 0); /* { dg-error {'svsudot_lane_za32_vg1x2' has no form that takes 'svuint8x2_t' arguments} } */
+ svsudot_lane_za32_vg1x2 (0, s16x2, u16, 0); /* { dg-error {'svsudot_lane_za32_vg1x2' has no form that takes 'svint16x2_t' arguments} } */
+
+ svsudot_lane_za32_vg1x2 (0, s8x2, u8, -1); /* { dg-error {passing -1 to argument 4 of 'svsudot_lane_za32_vg1x2', which expects a value in the range \[0, 3\]} } */
+ svsudot_lane_za32_vg1x2 (0, s8x2, u8, 3);
+ svsudot_lane_za32_vg1x2 (0, s8x2, u8, 4); /* { dg-error {passing 4 to argument 4 of 'svsudot_lane_za32_vg1x2', which expects a value in the range \[0, 3\]} } */
+ svsudot_lane_za32_vg1x2 (0, s8x2, u8, -1); /* { dg-error {passing -1 to argument 4 of 'svsudot_lane_za32_vg1x2', which expects a value in the range \[0, 3\]} } */
+ svsudot_lane_za32_vg1x2 (0, s8x2, u8, 3);
+ svsudot_lane_za32_vg1x2 (0, s8x2, u8, 4); /* { dg-error {passing 4 to argument 4 of 'svsudot_lane_za32_vg1x2', which expects a value in the range \[0, 3\]} } */
+ svsudot_lane_za32_vg1x2 (0, s8x2, u8, f); /* { dg-error {argument 4 of 'svsudot_lane_za32_vg1x2' must be an integer constant expression} } */
+}
+
+void
+f2 (svint8x2_t s8x2, svuint8_t u8) __arm_streaming
+{
+ svsudot_lane_za32_vg1x2 (0, s8x2, u8, 0); /* { dg-error {ACLE function 'svsudot_lane_za32_s8_vg1x2' can only be called from a function that has 'za' state} } */
+}
+
+void
+f3 (svint8x2_t s8x2, svuint8_t u8) __arm_inout("za")
+{
+ svsudot_lane_za32_vg1x2 (0, s8x2, u8, 0); /* { dg-error {ACLE function 'svsudot_lane_za32_s8_vg1x2' can only be called when SME streaming mode is enabled} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/fold_left_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/fold_left_1.c
index 1d29278..181d1b01 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/fold_left_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/fold_left_1.c
@@ -15,7 +15,7 @@ f1 (svbool_t pg, int i, float f, double d, void *ptr, svfloat32_t f32,
svadda (pg, ptr, f32); /* { dg-error {incompatible type for argument 2 of 'svadda_f32'} } */
svadda (pg, pg, f32); /* { dg-error {passing 'svbool_t' to argument 2 of 'svadda', which expects a scalar element} } */
svadda (pg, f32, f32); /* { dg-error {passing 'svfloat32_t' to argument 2 of 'svadda', which expects a scalar element} } */
- svadda (pg, f, f); /* { dg-error {passing 'float' to argument 3 of 'svadda', which expects an SVE vector type} } */
+ svadda (pg, f, f); /* { dg-error {passing 'float' to argument 3 of 'svadda', which expects an SVE type rather than a scalar} } */
svadda (pg, i, i32); /* { dg-error {'svadda' has no form that takes 'svint32_t' arguments} } */
- svadda (pg, i, i); /* { dg-error {passing 'int' to argument 3 of 'svadda', which expects an SVE vector type} } */
+ svadda (pg, i, i); /* { dg-error {passing 'int' to argument 3 of 'svadda', which expects an SVE type rather than a scalar} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/func_redef_4.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/func_redef_4.c
index 9591e3d..5aa0ea6 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/func_redef_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/func_redef_4.c
@@ -4,6 +4,7 @@
to be diagnosed. Any attempt to call the function before including
arm_sve.h will lead to a link failure. (Same for taking its address,
etc.) */
-extern __SVUint8_t svadd_u8_x (__SVBool_t, __SVUint8_t, __SVUint8_t);
+extern __SVUint8_t svadd_u8_x (__SVBool_t, __SVUint8_t, __SVUint8_t)
+ __arm_streaming_compatible;
#pragma GCC aarch64 "arm_sve.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/func_redef_5.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/func_redef_5.c
index 8592361..ede9a80 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/func_redef_5.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/func_redef_5.c
@@ -8,6 +8,7 @@
explicit definition "wins". This isn't supported behavior though. */
__SVUint8_t
svadd_u8_x (__SVBool_t pg, __SVUint8_t x, __SVUint8_t y)
+ __arm_streaming_compatible
{
return x;
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/inc_dec_pred_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/inc_dec_pred_1.c
index a61afcd..4de082d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/inc_dec_pred_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/inc_dec_pred_1.c
@@ -7,7 +7,7 @@ test (svbool_t pg, svint8_t s8, svuint8_t u8,
{
svqincp (s32); /* { dg-error {too few arguments to function 'svqincp'} } */
svqincp (s32, pg, pg); /* { dg-error {too many arguments to function 'svqincp'} } */
- svqincp (i, pg); /* { dg-error {passing 'int' to argument 1 of 'svqincp', which expects an SVE vector type} } */
+ svqincp (i, pg); /* { dg-error {passing 'int' to argument 1 of 'svqincp', which expects an SVE type rather than a scalar} } */
svqincp (pg, pg); /* { dg-error {'svqincp' has no form that takes 'svbool_t' arguments} } */
svqincp (s8, pg); /* { dg-error {'svqincp' has no form that takes 'svint8_t' arguments} } */
svqincp (u8, pg); /* { dg-error {'svqincp' has no form that takes 'svuint8_t' arguments} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ld1sh_gather_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ld1sh_gather_1.c
index 91f37f6..c9f49b6 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ld1sh_gather_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ld1sh_gather_1.c
@@ -11,7 +11,7 @@ f1 (svbool_t pg, short *s16_ptr, unsigned short *u16_ptr,
svint32_t s32, svuint32_t u32, svfloat32_t f32,
svint64_t s64, svuint64_t u64, svfloat64_t f64, struct s s)
{
- svld1sh_gather_index (pg, s16_ptr, s32); /* { dg-warning {implicit declaration of function 'svld1sh_gather_index'; did you mean 'svld1_gather_index'} } */
+ svld1sh_gather_index (pg, s16_ptr, s32); /* { dg-error {implicit declaration of function 'svld1sh_gather_index'; did you mean 'svld1_gather_index'} } */
svld1sh_gather_index_u32 (pg, s16_ptr); /* { dg-error {too few arguments to function 'svld1sh_gather_index_u32'} } */
svld1sh_gather_index_u32 (pg, s16_ptr, s32, 0); /* { dg-error {too many arguments to function 'svld1sh_gather_index_u32'} } */
svld1sh_gather_index_u32 (pg, u16_ptr, s32); /* { dg-warning {pointer targets in passing argument 2 of 'svld1sh_gather_s32index_u32' differ in signedness} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_1.c
index 784fdc3..564295a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_1.c
@@ -6,12 +6,14 @@
struct s { signed char x; };
svuint8_t
-f1 (svbool_t pg, signed char *s8_ptr, void *void_ptr, struct s *s_ptr,
+f1 (svbool_t pg, svcount_t pn,
+ signed char *s8_ptr, void *void_ptr, struct s *s_ptr,
float *f32_ptr, _Complex float *cf32_ptr, int **ptr_ptr)
{
svld1 (pg); /* { dg-error {too few arguments to function 'svld1'} } */
svld1 (pg, s8_ptr, 0); /* { dg-error {too many arguments to function 'svld1'} } */
svld1 (0, s8_ptr); /* { dg-error {passing 'int' to argument 1 of 'svld1', which expects 'svbool_t'} } */
+ svld1 (pn, s8_ptr); /* { dg-error {passing 'svcount_t' to argument 1 of 'svld1', which expects 'svbool_t'} } */
svld1 (pg, 0); /* { dg-error {passing 'int' to argument 2 of 'svld1', which expects a pointer type} } */
svld1 (pg, (int32_t *) 0);
svld1 (pg, void_ptr); /* { dg-error {passing 'void \*' to argument 2 of 'svld1', but 'void' is not a valid SVE element type} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_2.c
index a828876..5f4cbea 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_2.c
@@ -15,8 +15,8 @@ f1 (svbool_t pg, signed char *s8_ptr, void *void_ptr, struct s *s_ptr,
svld1_s8 (pg, 0);
svld1_s32 (pg, (int32_t *) 0);
svld1_s8 (pg, void_ptr);
- svld1_s8 (pg, s_ptr); /* { dg-warning {passing argument 2 of 'svld1_s8' from incompatible pointer type} } */
+ svld1_s8 (pg, s_ptr); /* { dg-error {passing argument 2 of 'svld1_s8' from incompatible pointer type} } */
svld1_f32 (pg, f32_ptr);
- svld1_f32 (pg, cf32_ptr); /* { dg-warning {passing argument 2 of 'svld1_f32' from incompatible pointer type} } */
+ svld1_f32 (pg, cf32_ptr); /* { dg-error {passing argument 2 of 'svld1_f32' from incompatible pointer type} } */
return svld1_s8 (pg, s8_ptr); /* { dg-error {incompatible types when returning type 'svint8_t' but 'svuint8_t' was expected} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_3.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_3.c
index 770203f..3416639 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_3.c
@@ -13,6 +13,6 @@ f1 (svbool_t pg, signed char *s8_ptr, svint8_t s8)
svld1_vnum (pg, s8_ptr, 0, 0); /* { dg-error {too many arguments to function 'svld1_vnum'} } */
svld1_vnum (0, s8_ptr, 0); /* { dg-error {passing 'int' to argument 1 of 'svld1_vnum', which expects 'svbool_t'} } */
svld1_vnum (pg, 0, 0); /* { dg-error {passing 'int' to argument 2 of 'svld1_vnum', which expects a pointer type} } */
- svld1_vnum (pg, s8_ptr, s8_ptr); /* { dg-warning "passing argument 3 of 'svld1_vnum_s8' makes integer from pointer without a cast" } */
+ svld1_vnum (pg, s8_ptr, s8_ptr); /* { dg-error "passing argument 3 of 'svld1_vnum_s8' makes integer from pointer without a cast" } */
svld1_vnum (pg, s8_ptr, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svld1_vnum', which expects 'int64_t'} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_index_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_index_1.c
index 91f37f6..c9f49b6 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_index_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_index_1.c
@@ -11,7 +11,7 @@ f1 (svbool_t pg, short *s16_ptr, unsigned short *u16_ptr,
svint32_t s32, svuint32_t u32, svfloat32_t f32,
svint64_t s64, svuint64_t u64, svfloat64_t f64, struct s s)
{
- svld1sh_gather_index (pg, s16_ptr, s32); /* { dg-warning {implicit declaration of function 'svld1sh_gather_index'; did you mean 'svld1_gather_index'} } */
+ svld1sh_gather_index (pg, s16_ptr, s32); /* { dg-error {implicit declaration of function 'svld1sh_gather_index'; did you mean 'svld1_gather_index'} } */
svld1sh_gather_index_u32 (pg, s16_ptr); /* { dg-error {too few arguments to function 'svld1sh_gather_index_u32'} } */
svld1sh_gather_index_u32 (pg, s16_ptr, s32, 0); /* { dg-error {too many arguments to function 'svld1sh_gather_index_u32'} } */
svld1sh_gather_index_u32 (pg, u16_ptr, s32); /* { dg-warning {pointer targets in passing argument 2 of 'svld1sh_gather_s32index_u32' differ in signedness} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_index_restricted_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_index_restricted_1.c
index c47e541..a2dcf9a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_index_restricted_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_index_restricted_1.c
@@ -13,7 +13,7 @@ f1 (svbool_t pg, short *s16_ptr, unsigned short *u16_ptr,
svint32_t s32, svuint32_t u32, svfloat32_t f32,
svint64_t s64, svuint64_t u64, svfloat64_t f64, struct s s)
{
- svldnt1sh_gather_index (pg, s16_ptr, s64); /* { dg-warning {implicit declaration of function 'svldnt1sh_gather_index'; did you mean 'svldnt1_gather_index'} } */
+ svldnt1sh_gather_index (pg, s16_ptr, s64); /* { dg-error {implicit declaration of function 'svldnt1sh_gather_index'; did you mean 'svldnt1_gather_index'} } */
svldnt1sh_gather_index_u64 (pg, s16_ptr); /* { dg-error {too few arguments to function 'svldnt1sh_gather_index_u64'} } */
svldnt1sh_gather_index_u64 (pg, s16_ptr, s64, 0); /* { dg-error {too many arguments to function 'svldnt1sh_gather_index_u64'} } */
svldnt1sh_gather_index_u64 (pg, u16_ptr, s64); /* { dg-warning {pointer targets in passing argument 2 of 'svldnt1sh_gather_s64index_u64' differ in signedness} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_1.c
index dae4d0c..41bf2da 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_1.c
@@ -11,10 +11,10 @@ f1 (svbool_t pg, signed char *s8_ptr, short *s16_ptr,
svint32_t s32, svuint32_t u32, svfloat32_t f32,
svint64_t s64, svuint64_t u64, svfloat64_t f64, struct s s)
{
- svld1sb_gather_offset (pg, s8_ptr, s32); /* { dg-warning {implicit declaration of function 'svld1sb_gather_offset'; did you mean 'svld1_gather_offset'} } */
+ svld1sb_gather_offset (pg, s8_ptr, s32); /* { dg-error {implicit declaration of function 'svld1sb_gather_offset'; did you mean 'svld1_gather_offset'} } */
svld1sb_gather_offset_s32 (pg, s8_ptr); /* { dg-error {too few arguments to function 'svld1sb_gather_offset_s32'} } */
svld1sb_gather_offset_s32 (pg, s8_ptr, s32, 0); /* { dg-error {too many arguments to function 'svld1sb_gather_offset_s32'} } */
- svld1sb_gather_offset_s32 (pg, s16_ptr, s32); /* { dg-warning {passing argument 2 of 'svld1sb_gather_s32offset_s32' from incompatible pointer type} } */
+ svld1sb_gather_offset_s32 (pg, s16_ptr, s32); /* { dg-error {passing argument 2 of 'svld1sb_gather_s32offset_s32' from incompatible pointer type} } */
svld1sb_gather_offset_s32 (pg, s8_ptr, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svld1sb_gather_offset_s32', which expects a vector of 32-bit integers} } */
svld1sb_gather_offset_s32 (pg, s8_ptr, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svld1sb_gather_offset_s32', which expects a vector of 32-bit integers} } */
svld1sb_gather_offset_s32 (pg, s8_ptr, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svld1sb_gather_offset_s32', which expects a vector of 32-bit integers} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_2.c
index 1bc6697..1261b49 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_2.c
@@ -11,10 +11,10 @@ f1 (svbool_t pg, signed char *s8_ptr, short *s16_ptr,
svint32_t s32, svuint32_t u32, svfloat32_t f32,
svint64_t s64, svuint64_t u64, svfloat64_t f64, struct s s)
{
- svld1sb_gather_offset (pg, s8_ptr, s32); /* { dg-warning {implicit declaration of function 'svld1sb_gather_offset'; did you mean 'svld1_gather_offset'} } */
+ svld1sb_gather_offset (pg, s8_ptr, s32); /* { dg-error {implicit declaration of function 'svld1sb_gather_offset'; did you mean 'svld1_gather_offset'} } */
svld1sb_gather_offset_u32 (pg, s8_ptr); /* { dg-error {too few arguments to function 'svld1sb_gather_offset_u32'} } */
svld1sb_gather_offset_u32 (pg, s8_ptr, s32, 0); /* { dg-error {too many arguments to function 'svld1sb_gather_offset_u32'} } */
- svld1sb_gather_offset_u32 (pg, s16_ptr, s32); /* { dg-warning {passing argument 2 of 'svld1sb_gather_s32offset_u32' from incompatible pointer type} } */
+ svld1sb_gather_offset_u32 (pg, s16_ptr, s32); /* { dg-error {passing argument 2 of 'svld1sb_gather_s32offset_u32' from incompatible pointer type} } */
svld1sb_gather_offset_u32 (pg, s8_ptr, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svld1sb_gather_offset_u32', which expects a vector of 32-bit integers} } */
svld1sb_gather_offset_u32 (pg, s8_ptr, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svld1sb_gather_offset_u32', which expects a vector of 32-bit integers} } */
svld1sb_gather_offset_u32 (pg, s8_ptr, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svld1sb_gather_offset_u32', which expects a vector of 32-bit integers} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_3.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_3.c
index 6522889..518348d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_3.c
@@ -11,10 +11,10 @@ f1 (svbool_t pg, signed char *s8_ptr, short *s16_ptr,
svint32_t s32, svuint32_t u32, svfloat32_t f32,
svint64_t s64, svuint64_t u64, svfloat64_t f64, struct s s)
{
- svld1sb_gather_offset (pg, s8_ptr, s64); /* { dg-warning {implicit declaration of function 'svld1sb_gather_offset'; did you mean 'svld1_gather_offset'} } */
+ svld1sb_gather_offset (pg, s8_ptr, s64); /* { dg-error {implicit declaration of function 'svld1sb_gather_offset'; did you mean 'svld1_gather_offset'} } */
svld1sb_gather_offset_s64 (pg, s8_ptr); /* { dg-error {too few arguments to function 'svld1sb_gather_offset_s64'} } */
svld1sb_gather_offset_s64 (pg, s8_ptr, s64, 0); /* { dg-error {too many arguments to function 'svld1sb_gather_offset_s64'} } */
- svld1sb_gather_offset_s64 (pg, s16_ptr, s64); /* { dg-warning {passing argument 2 of 'svld1sb_gather_s64offset_s64' from incompatible pointer type} } */
+ svld1sb_gather_offset_s64 (pg, s16_ptr, s64); /* { dg-error {passing argument 2 of 'svld1sb_gather_s64offset_s64' from incompatible pointer type} } */
svld1sb_gather_offset_s64 (pg, s8_ptr, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svld1sb_gather_offset_s64', which expects a vector of 64-bit integers} } */
svld1sb_gather_offset_s64 (pg, s8_ptr, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svld1sb_gather_offset_s64', which expects a vector of 64-bit integers} } */
svld1sb_gather_offset_s64 (pg, s8_ptr, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svld1sb_gather_offset_s64', which expects a vector of 64-bit integers} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_4.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_4.c
index 0256219..6086911 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_4.c
@@ -11,10 +11,10 @@ f1 (svbool_t pg, signed char *s8_ptr, short *s16_ptr,
svint32_t s32, svuint32_t u32, svfloat32_t f32,
svint64_t s64, svuint64_t u64, svfloat64_t f64, struct s s)
{
- svld1sb_gather_offset (pg, s8_ptr, s64); /* { dg-warning {implicit declaration of function 'svld1sb_gather_offset'; did you mean 'svld1_gather_offset'} } */
+ svld1sb_gather_offset (pg, s8_ptr, s64); /* { dg-error {implicit declaration of function 'svld1sb_gather_offset'; did you mean 'svld1_gather_offset'} } */
svld1sb_gather_offset_u64 (pg, s8_ptr); /* { dg-error {too few arguments to function 'svld1sb_gather_offset_u64'} } */
svld1sb_gather_offset_u64 (pg, s8_ptr, s64, 0); /* { dg-error {too many arguments to function 'svld1sb_gather_offset_u64'} } */
- svld1sb_gather_offset_u64 (pg, s16_ptr, s64); /* { dg-warning {passing argument 2 of 'svld1sb_gather_s64offset_u64' from incompatible pointer type} } */
+ svld1sb_gather_offset_u64 (pg, s16_ptr, s64); /* { dg-error {passing argument 2 of 'svld1sb_gather_s64offset_u64' from incompatible pointer type} } */
svld1sb_gather_offset_u64 (pg, s8_ptr, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svld1sb_gather_offset_u64', which expects a vector of 64-bit integers} } */
svld1sb_gather_offset_u64 (pg, s8_ptr, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svld1sb_gather_offset_u64', which expects a vector of 64-bit integers} } */
svld1sb_gather_offset_u64 (pg, s8_ptr, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svld1sb_gather_offset_u64', which expects a vector of 64-bit integers} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_5.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_5.c
index 8d57aa0..9e2ccee 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_5.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_5.c
@@ -11,10 +11,10 @@ f1 (svbool_t pg, unsigned char *s8_ptr, unsigned short *s16_ptr,
svint32_t s32, svuint32_t u32, svfloat32_t f32,
svint64_t s64, svuint64_t u64, svfloat64_t f64, struct s s)
{
- svld1ub_gather_offset (pg, s8_ptr, s32); /* { dg-warning {implicit declaration of function 'svld1ub_gather_offset'; did you mean 'svld1_gather_offset'} } */
+ svld1ub_gather_offset (pg, s8_ptr, s32); /* { dg-error {implicit declaration of function 'svld1ub_gather_offset'; did you mean 'svld1_gather_offset'} } */
svld1ub_gather_offset_s32 (pg, s8_ptr); /* { dg-error {too few arguments to function 'svld1ub_gather_offset_s32'} } */
svld1ub_gather_offset_s32 (pg, s8_ptr, s32, 0); /* { dg-error {too many arguments to function 'svld1ub_gather_offset_s32'} } */
- svld1ub_gather_offset_s32 (pg, s16_ptr, s32); /* { dg-warning {passing argument 2 of 'svld1ub_gather_s32offset_s32' from incompatible pointer type} } */
+ svld1ub_gather_offset_s32 (pg, s16_ptr, s32); /* { dg-error {passing argument 2 of 'svld1ub_gather_s32offset_s32' from incompatible pointer type} } */
svld1ub_gather_offset_s32 (pg, s8_ptr, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svld1ub_gather_offset_s32', which expects a vector of 32-bit integers} } */
svld1ub_gather_offset_s32 (pg, s8_ptr, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svld1ub_gather_offset_s32', which expects a vector of 32-bit integers} } */
svld1ub_gather_offset_s32 (pg, s8_ptr, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svld1ub_gather_offset_s32', which expects a vector of 32-bit integers} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_1.c
index 353fec2..18e1663 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_1.c
@@ -13,10 +13,10 @@ f1 (svbool_t pg, signed char *s8_ptr, short *s16_ptr,
svint32_t s32, svuint32_t u32, svfloat32_t f32,
svint64_t s64, svuint64_t u64, svfloat64_t f64, struct s s)
{
- svldnt1sb_gather_offset (pg, s8_ptr, s32); /* { dg-warning {implicit declaration of function 'svldnt1sb_gather_offset'; did you mean 'svldnt1_gather_offset'} } */
+ svldnt1sb_gather_offset (pg, s8_ptr, s32); /* { dg-error {implicit declaration of function 'svldnt1sb_gather_offset'; did you mean 'svldnt1_gather_offset'} } */
svldnt1sb_gather_offset_s32 (pg, s8_ptr); /* { dg-error {too few arguments to function 'svldnt1sb_gather_offset_s32'} } */
svldnt1sb_gather_offset_s32 (pg, s8_ptr, u32, 0); /* { dg-error {too many arguments to function 'svldnt1sb_gather_offset_s32'} } */
- svldnt1sb_gather_offset_s32 (pg, s16_ptr, u32); /* { dg-warning {passing argument 2 of 'svldnt1sb_gather_u32offset_s32' from incompatible pointer type} } */
+ svldnt1sb_gather_offset_s32 (pg, s16_ptr, u32); /* { dg-error {passing argument 2 of 'svldnt1sb_gather_u32offset_s32' from incompatible pointer type} } */
svldnt1sb_gather_offset_s32 (pg, s8_ptr, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svldnt1sb_gather_offset_s32', which expects a vector of 32-bit integers} } */
svldnt1sb_gather_offset_s32 (pg, s8_ptr, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svldnt1sb_gather_offset_s32', which expects a vector of 32-bit integers} } */
svldnt1sb_gather_offset_s32 (pg, s8_ptr, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svldnt1sb_gather_offset_s32', which expects a vector of 32-bit integers} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_2.c
index e22b3dd..6bb9998 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_2.c
@@ -13,10 +13,10 @@ f1 (svbool_t pg, signed char *s8_ptr, short *s16_ptr,
svint32_t s32, svuint32_t u32, svfloat32_t f32,
svint64_t s64, svuint64_t u64, svfloat64_t f64, struct s s)
{
- svldnt1sb_gather_offset (pg, s8_ptr, s32); /* { dg-warning {implicit declaration of function 'svldnt1sb_gather_offset'; did you mean 'svldnt1_gather_offset'} } */
+ svldnt1sb_gather_offset (pg, s8_ptr, s32); /* { dg-error {implicit declaration of function 'svldnt1sb_gather_offset'; did you mean 'svldnt1_gather_offset'} } */
svldnt1sb_gather_offset_u32 (pg, s8_ptr); /* { dg-error {too few arguments to function 'svldnt1sb_gather_offset_u32'} } */
svldnt1sb_gather_offset_u32 (pg, s8_ptr, u32, 0); /* { dg-error {too many arguments to function 'svldnt1sb_gather_offset_u32'} } */
- svldnt1sb_gather_offset_u32 (pg, s16_ptr, u32); /* { dg-warning {passing argument 2 of 'svldnt1sb_gather_u32offset_u32' from incompatible pointer type} } */
+ svldnt1sb_gather_offset_u32 (pg, s16_ptr, u32); /* { dg-error {passing argument 2 of 'svldnt1sb_gather_u32offset_u32' from incompatible pointer type} } */
svldnt1sb_gather_offset_u32 (pg, s8_ptr, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svldnt1sb_gather_offset_u32', which expects a vector of 32-bit integers} } */
svldnt1sb_gather_offset_u32 (pg, s8_ptr, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svldnt1sb_gather_offset_u32', which expects a vector of 32-bit integers} } */
svldnt1sb_gather_offset_u32 (pg, s8_ptr, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svldnt1sb_gather_offset_u32', which expects a vector of 32-bit integers} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_3.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_3.c
index 73b5715..8dc160c 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_3.c
@@ -13,10 +13,10 @@ f1 (svbool_t pg, signed char *s8_ptr, short *s16_ptr,
svint32_t s32, svuint32_t u32, svfloat32_t f32,
svint64_t s64, svuint64_t u64, svfloat64_t f64, struct s s)
{
- svldnt1sb_gather_offset (pg, s8_ptr, s64); /* { dg-warning {implicit declaration of function 'svldnt1sb_gather_offset'; did you mean 'svldnt1_gather_offset'} } */
+ svldnt1sb_gather_offset (pg, s8_ptr, s64); /* { dg-error {implicit declaration of function 'svldnt1sb_gather_offset'; did you mean 'svldnt1_gather_offset'} } */
svldnt1sb_gather_offset_s64 (pg, s8_ptr); /* { dg-error {too few arguments to function 'svldnt1sb_gather_offset_s64'} } */
svldnt1sb_gather_offset_s64 (pg, s8_ptr, s64, 0); /* { dg-error {too many arguments to function 'svldnt1sb_gather_offset_s64'} } */
- svldnt1sb_gather_offset_s64 (pg, s16_ptr, s64); /* { dg-warning {passing argument 2 of 'svldnt1sb_gather_s64offset_s64' from incompatible pointer type} } */
+ svldnt1sb_gather_offset_s64 (pg, s16_ptr, s64); /* { dg-error {passing argument 2 of 'svldnt1sb_gather_s64offset_s64' from incompatible pointer type} } */
svldnt1sb_gather_offset_s64 (pg, s8_ptr, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svldnt1sb_gather_offset_s64', which expects a vector of 64-bit integers} } */
svldnt1sb_gather_offset_s64 (pg, s8_ptr, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svldnt1sb_gather_offset_s64', which expects a vector of 64-bit integers} } */
svldnt1sb_gather_offset_s64 (pg, s8_ptr, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svldnt1sb_gather_offset_s64', which expects a vector of 64-bit integers} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_4.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_4.c
index e2ceb18..9a418f2 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_restricted_4.c
@@ -13,10 +13,10 @@ f1 (svbool_t pg, signed char *s8_ptr, short *s16_ptr,
svint32_t s32, svuint32_t u32, svfloat32_t f32,
svint64_t s64, svuint64_t u64, svfloat64_t f64, struct s s)
{
- svldnt1sb_gather_offset (pg, s8_ptr, s64); /* { dg-warning {implicit declaration of function 'svldnt1sb_gather_offset'; did you mean 'svldnt1_gather_offset'} } */
+ svldnt1sb_gather_offset (pg, s8_ptr, s64); /* { dg-error {implicit declaration of function 'svldnt1sb_gather_offset'; did you mean 'svldnt1_gather_offset'} } */
svldnt1sb_gather_offset_u64 (pg, s8_ptr); /* { dg-error {too few arguments to function 'svldnt1sb_gather_offset_u64'} } */
svldnt1sb_gather_offset_u64 (pg, s8_ptr, s64, 0); /* { dg-error {too many arguments to function 'svldnt1sb_gather_offset_u64'} } */
- svldnt1sb_gather_offset_u64 (pg, s16_ptr, s64); /* { dg-warning {passing argument 2 of 'svldnt1sb_gather_s64offset_u64' from incompatible pointer type} } */
+ svldnt1sb_gather_offset_u64 (pg, s16_ptr, s64); /* { dg-error {passing argument 2 of 'svldnt1sb_gather_s64offset_u64' from incompatible pointer type} } */
svldnt1sb_gather_offset_u64 (pg, s8_ptr, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svldnt1sb_gather_offset_u64', which expects a vector of 64-bit integers} } */
svldnt1sb_gather_offset_u64 (pg, s8_ptr, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svldnt1sb_gather_offset_u64', which expects a vector of 64-bit integers} } */
svldnt1sb_gather_offset_u64 (pg, s8_ptr, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svldnt1sb_gather_offset_u64', which expects a vector of 64-bit integers} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/mmla_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/mmla_1.c
index 5b0b00e..ca2ab8a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/mmla_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/mmla_1.c
@@ -23,34 +23,34 @@ f2 (svbool_t pg, svint8_t s8, svuint8_t u8, svuint32_t u32, svint32_t s32,
{
svmmla (s32, s8); /* { dg-error {too few arguments to function 'svmmla'} } */
svmmla (s32, s8, s8, s8); /* { dg-error {too many arguments to function 'svmmla'} } */
- svmmla (0, s8, s8); /* { dg-error {passing 'int' to argument 1 of 'svmmla', which expects an SVE vector type} } */
+ svmmla (0, s8, s8); /* { dg-error {passing 'int' to argument 1 of 'svmmla', which expects an SVE type rather than a scalar} } */
svmmla (pg, s8, s8); /* { dg-error {'svmmla' has no form that takes 'svbool_t' arguments} } */
svmmla (u8, s8, s8); /* { dg-error {'svmmla' has no form that takes 'svuint8_t' arguments} } */
- svmmla (s32, 0, s8); /* { dg-error {passing 'int' to argument 2 of 'svmmla', which expects an SVE vector type} } */
+ svmmla (s32, 0, s8); /* { dg-error {passing 'int' to argument 2 of 'svmmla', which expects an SVE type rather than a scalar} } */
svmmla (s32, u8, s8); /* { dg-error {arguments 1 and 2 of 'svmmla' must have the same signedness, but the values passed here have type 'svint32_t' and 'svuint8_t' respectively} } */
svmmla (s32, s8, u8); /* { dg-error {arguments 1 and 3 of 'svmmla' must have the same signedness, but the values passed here have type 'svint32_t' and 'svuint8_t' respectively} } */
- svmmla (s32, s8, 0); /* { dg-error {passing 'int' to argument 3 of 'svmmla', which expects an SVE vector type} } */
+ svmmla (s32, s8, 0); /* { dg-error {passing 'int' to argument 3 of 'svmmla', which expects an SVE type rather than a scalar} } */
svmmla (s32, s8, s8);
svmmla (s32, s32, s32); /* { dg-error {passing 'svint32_t' instead of the expected 'svint8_t' to argument 2 of 'svmmla', after passing 'svint32_t' to argument 1} } */
svmmla (s32, u32, u32); /* { dg-error {passing 'svuint32_t' instead of the expected 'svint8_t' to argument 2 of 'svmmla', after passing 'svint32_t' to argument 1} } */
- svmmla (u32, 0, u8); /* { dg-error {passing 'int' to argument 2 of 'svmmla', which expects an SVE vector type} } */
+ svmmla (u32, 0, u8); /* { dg-error {passing 'int' to argument 2 of 'svmmla', which expects an SVE type rather than a scalar} } */
svmmla (u32, s8, u8); /* { dg-error {arguments 1 and 2 of 'svmmla' must have the same signedness, but the values passed here have type 'svuint32_t' and 'svint8_t' respectively} } */
svmmla (u32, u8, s8); /* { dg-error {arguments 1 and 3 of 'svmmla' must have the same signedness, but the values passed here have type 'svuint32_t' and 'svint8_t' respectively} } */
- svmmla (u32, u8, 0); /* { dg-error {passing 'int' to argument 3 of 'svmmla', which expects an SVE vector type} } */
+ svmmla (u32, u8, 0); /* { dg-error {passing 'int' to argument 3 of 'svmmla', which expects an SVE type rather than a scalar} } */
svmmla (u32, u8, u8);
svmmla (u32, s32, s32); /* { dg-error {passing 'svint32_t' instead of the expected 'svuint8_t' to argument 2 of 'svmmla', after passing 'svuint32_t' to argument 1} } */
svmmla (u32, u32, u32); /* { dg-error {passing 'svuint32_t' instead of the expected 'svuint8_t' to argument 2 of 'svmmla', after passing 'svuint32_t' to argument 1} } */
svmmla (f16, s8, s8); /* { dg-error {'svmmla' has no form that takes 'svfloat16_t' arguments} } */
- svmmla (f32, s8, s8); /* { dg-error {passing 'svint8_t' to argument 2 of 'svmmla', but previous arguments had type 'svfloat32_t'} } */
- svmmla (f32, s32, s32); /* { dg-error {passing 'svint32_t' to argument 2 of 'svmmla', but previous arguments had type 'svfloat32_t'} } */
- svmmla (f32, f16, f16); /* { dg-error {passing 'svfloat16_t' to argument 2 of 'svmmla', but previous arguments had type 'svfloat32_t'} } */
- svmmla (f64, f16, f16); /* { dg-error {passing 'svfloat16_t' to argument 2 of 'svmmla', but previous arguments had type 'svfloat64_t'} } */
- svmmla (f32, f32, f16); /* { dg-error {passing 'svfloat16_t' to argument 3 of 'svmmla', but previous arguments had type 'svfloat32_t'} } */
- svmmla (f64, f32, f16); /* { dg-error {passing 'svfloat32_t' to argument 2 of 'svmmla', but previous arguments had type 'svfloat64_t'} } */
- svmmla (f64, f64, f16); /* { dg-error {passing 'svfloat16_t' to argument 3 of 'svmmla', but previous arguments had type 'svfloat64_t'} } */
+ svmmla (f32, s8, s8); /* { dg-error {passing 'svint8_t' to argument 2 of 'svmmla', but argument 1 had type 'svfloat32_t'} } */
+ svmmla (f32, s32, s32); /* { dg-error {passing 'svint32_t' to argument 2 of 'svmmla', but argument 1 had type 'svfloat32_t'} } */
+ svmmla (f32, f16, f16); /* { dg-error {passing 'svfloat16_t' to argument 2 of 'svmmla', but argument 1 had type 'svfloat32_t'} } */
+ svmmla (f64, f16, f16); /* { dg-error {passing 'svfloat16_t' to argument 2 of 'svmmla', but argument 1 had type 'svfloat64_t'} } */
+ svmmla (f32, f32, f16); /* { dg-error {passing 'svfloat16_t' to argument 3 of 'svmmla', but argument 1 had type 'svfloat32_t'} } */
+ svmmla (f64, f32, f16); /* { dg-error {passing 'svfloat32_t' to argument 2 of 'svmmla', but argument 1 had type 'svfloat64_t'} } */
+ svmmla (f64, f64, f16); /* { dg-error {passing 'svfloat16_t' to argument 3 of 'svmmla', but argument 1 had type 'svfloat64_t'} } */
svmmla (f16, f16, f16); /* { dg-error {'svmmla' has no form that takes 'svfloat16_t' arguments} } */
svmmla (f32, f32, f32);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_offset_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_offset_2.c
index b74721f..88e0c35 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_offset_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_offset_2.c
@@ -12,7 +12,7 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8,
svprfb_gather (pg, u32); /* { dg-error {too few arguments to function 'svprfb_gather'} } */
svprfb_gather (pg, u32, SV_PLDL1KEEP, 0); /* { dg-error {too many arguments to function 'svprfb_gather'} } */
svprfb_gather (0, u32, SV_PLDL1KEEP); /* { dg-error {passing 'int' to argument 1 of 'svprfb_gather', which expects 'svbool_t'} } */
- svprfb_gather (pg, 0, SV_PLDL1KEEP); /* { dg-error {passing 'int' to argument 2 of 'svprfb_gather', which expects an SVE vector type} } */
+ svprfb_gather (pg, 0, SV_PLDL1KEEP); /* { dg-error {passing 'int' to argument 2 of 'svprfb_gather', which expects an SVE type rather than a scalar} } */
svprfb_gather (pg, s8, SV_PLDL1KEEP); /* { dg-error {passing 'svint8_t' to argument 2 of 'svprfb_gather', which expects 'svuint32_t' or 'svuint64_t'} } */
svprfb_gather (pg, u8, SV_PLDL1KEEP); /* { dg-error {passing 'svuint8_t' to argument 2 of 'svprfb_gather', which expects 'svuint32_t' or 'svuint64_t'} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/read_za_m_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/read_za_m_1.c
new file mode 100644
index 0000000..421979e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/read_za_m_1.c
@@ -0,0 +1,48 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target ("arch=armv9-a+sme")
+
+void
+f1 (svbool_t pg, svint8_t s8, svint64_t s64, svuint8_t u8, svuint16_t u16,
+ svfloat32_t f32, uint32_t tile)
+ __arm_streaming __arm_inout("za")
+{
+ svread_hor_za8_m (s8, pg, 0); /* { dg-error {too few arguments to function 'svread_hor_za8_m'} } */
+ svread_hor_za8_m (s8, pg, 0, 0, 0); /* { dg-error {too many arguments to function 'svread_hor_za8_m'} } */
+ svread_hor_za8_m (tile, pg, 0, 0); /* { dg-error {passing 'uint32_t'.* to argument 1 of 'svread_hor_za8_m', which expects an SVE type} } */
+ svread_hor_za8_m (pg, pg, 0, 0); /* { dg-error {'svread_hor_za8_m' has no form that takes 'svbool_t' arguments} } */
+ svread_hor_za8_m (u16, pg, 0, 0); /* { dg-error {'svread_hor_za8_m' has no form that takes 'svuint16_t' arguments} } */
+ svread_hor_za8_m (s8, s8, 0, 0); /* { dg-error {passing 'svint8_t' to argument 2 of 'svread_hor_za8_m', which expects 'svbool_t'} } */
+ svread_hor_za8_m (s8, pg, tile, 0); /* { dg-error {argument 3 of 'svread_hor_za8_m' must be an integer constant expression} } */
+ svread_hor_za8_m (s8, pg, -1, 0); /* { dg-error {passing -1 to argument 3 of 'svread_hor_za8_m', which expects the value 0} } */
+ svread_hor_za8_m (s8, pg, 1, 0); /* { dg-error {passing 1 to argument 3 of 'svread_hor_za8_m', which expects the value 0} } */
+ svread_hor_za8_m (s8, pg, 0, u8); /* { dg-error {passing 'svuint8_t' to argument 4 of 'svread_hor_za8_m', which expects 'uint32_t'} } */
+
+ svread_hor_za16_m (u16, pg, -1, 0); /* { dg-error {passing -1 to argument 3 of 'svread_hor_za16_m', which expects a value in the range \[0, 1\]} } */
+ svread_hor_za16_m (u16, pg, 2, 0); /* { dg-error {passing 2 to argument 3 of 'svread_hor_za16_m', which expects a value in the range \[0, 1\]} } */
+
+ svread_hor_za32_m (f32, pg, -1, 0); /* { dg-error {passing -1 to argument 3 of 'svread_hor_za32_m', which expects a value in the range \[0, 3\]} } */
+ svread_hor_za32_m (f32, pg, 4, 0); /* { dg-error {passing 4 to argument 3 of 'svread_hor_za32_m', which expects a value in the range \[0, 3\]} } */
+
+ svread_hor_za64_m (s64, pg, -1, 0); /* { dg-error {passing -1 to argument 3 of 'svread_hor_za64_m', which expects a value in the range \[0, 7\]} } */
+ svread_hor_za64_m (s64, pg, 8, 0); /* { dg-error {passing 8 to argument 3 of 'svread_hor_za64_m', which expects a value in the range \[0, 7\]} } */
+
+ svread_hor_za128_m (s8, pg, -1, 0); /* { dg-error {passing -1 to argument 3 of 'svread_hor_za128_m', which expects a value in the range \[0, 15\]} } */
+ svread_hor_za128_m (s8, pg, 16, 0); /* { dg-error {passing 16 to argument 3 of 'svread_hor_za128_m', which expects a value in the range \[0, 15\]} } */
+ svread_hor_za128_m (f32, pg, -1, 0); /* { dg-error {passing -1 to argument 3 of 'svread_hor_za128_m', which expects a value in the range \[0, 15\]} } */
+ svread_hor_za128_m (f32, pg, 16, 0); /* { dg-error {passing 16 to argument 3 of 'svread_hor_za128_m', which expects a value in the range \[0, 15\]} } */
+}
+
+void
+f2 (svbool_t pg, svint8_t s8) __arm_streaming
+{
+ svread_hor_za8_m (s8, pg, 0, 0); /* { dg-error {ACLE function 'svread_hor_za8_s8_m' can only be called from a function that has 'za' state} } */
+}
+
+void
+f3 (svbool_t pg, svint8_t s8) __arm_inout("za")
+{
+ svread_hor_za8_m (s8, pg, 0, 0); /* { dg-error {ACLE function 'svread_hor_za8_s8_m' can only be called when SME streaming mode is enabled} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/reduction_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/reduction_1.c
index ab0ef30..025795e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/reduction_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/reduction_1.c
@@ -10,7 +10,7 @@ f1 (svbool_t pg, svint32_t s32, svuint32_t u32, svfloat32_t f32,
svorv (pg, u32, u32); /* { dg-error {too many arguments to function 'svorv'} } */
svorv (0, u32); /* { dg-error {passing 'int' to argument 1 of 'svorv', which expects 'svbool_t'} } */
svorv (u32, u32); /* { dg-error {passing 'svuint32_t' to argument 1 of 'svorv', which expects 'svbool_t'} } */
- svorv (pg, 0); /* { dg-error {passing 'int' to argument 2 of 'svorv', which expects an SVE vector type} } */
+ svorv (pg, 0); /* { dg-error {passing 'int' to argument 2 of 'svorv', which expects an SVE type rather than a scalar} } */
svorv (pg, pg); /* { dg-error {'svorv' has no form that takes 'svbool_t' arguments} } */
svorv (pg, s32);
svorv (pg, u32);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/reduction_wide_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/reduction_wide_1.c
index f99a288..68bacd0 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/reduction_wide_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/reduction_wide_1.c
@@ -10,7 +10,7 @@ f1 (svbool_t pg, svint32_t s32, svuint32_t u32, svfloat32_t f32,
svaddv (pg, u32, u32); /* { dg-error {too many arguments to function 'svaddv'} } */
svaddv (0, u32); /* { dg-error {passing 'int' to argument 1 of 'svaddv', which expects 'svbool_t'} } */
svaddv (u32, u32); /* { dg-error {passing 'svuint32_t' to argument 1 of 'svaddv', which expects 'svbool_t'} } */
- svaddv (pg, 0); /* { dg-error {passing 'int' to argument 2 of 'svaddv', which expects an SVE vector type} } */
+ svaddv (pg, 0); /* { dg-error {passing 'int' to argument 2 of 'svaddv', which expects an SVE type rather than a scalar} } */
svaddv (pg, pg); /* { dg-error {'svaddv' has no form that takes 'svbool_t' arguments} } */
svaddv (pg, s32);
svaddv (pg, u32);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/set_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/set_1.c
index f07c761..f2a6da5 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/set_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/set_1.c
@@ -16,8 +16,8 @@ f1 (svbool_t pg, svuint8_t u8, svuint8x2_t u8x2, svuint8x3_t u8x3, int x)
u8x2 = svset2 (u8x3, 0, u8); /* { dg-error {passing 'svuint8x3_t' to argument 1 of 'svset2', which expects a tuple of 2 vectors} } */
u8x2 = svset2 (pg, 0, u8); /* { dg-error {passing 'svbool_t' to argument 1 of 'svset2', which expects a tuple of 2 vectors} } */
u8x2 = svset2 (u8x2, 0, u8x2); /* { dg-error {passing 'svuint8x2_t' to argument 3 of 'svset2', which expects a single SVE vector rather than a tuple} } */
- u8x2 = svset2 (u8x2, 0, f64); /* { dg-error {passing 'svfloat64_t' instead of the expected 'svuint8_t' to argument 3 of 'svset2', after passing 'svuint8x2_t' to argument 1} } */
- u8x2 = svset2 (u8x2, 0, pg); /* { dg-error {passing 'svbool_t' instead of the expected 'svuint8_t' to argument 3 of 'svset2', after passing 'svuint8x2_t' to argument 1} } */
+ u8x2 = svset2 (u8x2, 0, f64); /* { dg-error {passing 'svfloat64_t' to argument 3 of 'svset2', but argument 1 was a tuple of 'svuint8_t'} } */
+ u8x2 = svset2 (u8x2, 0, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svset2', but argument 1 was a tuple of 'svuint8_t'} } */
u8x2 = svset2 (u8x2, x, u8); /* { dg-error {argument 2 of 'svset2' must be an integer constant expression} } */
u8x2 = svset2 (u8x2, 0, u8);
f64 = svset2 (u8x2, 0, u8); /* { dg-error {incompatible types when assigning to type 'svfloat64_t' from type 'svuint8x2_t'} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/set_3.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/set_3.c
index 543a1be..92b955f 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/set_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/set_3.c
@@ -17,8 +17,8 @@ f1 (svbool_t pg, svfloat16_t f16, svfloat16x3_t f16x3, svfloat16x4_t f16x4,
f16x3 = svset3 (f16x4, 0, f16); /* { dg-error {passing 'svfloat16x4_t' to argument 1 of 'svset3', which expects a tuple of 3 vectors} } */
f16x3 = svset3 (pg, 0, f16); /* { dg-error {passing 'svbool_t' to argument 1 of 'svset3', which expects a tuple of 3 vectors} } */
f16x3 = svset3 (f16x3, 0, f16x3); /* { dg-error {passing 'svfloat16x3_t' to argument 3 of 'svset3', which expects a single SVE vector rather than a tuple} } */
- f16x3 = svset3 (f16x3, 0, f64); /* { dg-error {passing 'svfloat64_t' instead of the expected 'svfloat16_t' to argument 3 of 'svset3', after passing 'svfloat16x3_t' to argument 1} } */
- f16x3 = svset3 (f16x3, 0, pg); /* { dg-error {passing 'svbool_t' instead of the expected 'svfloat16_t' to argument 3 of 'svset3', after passing 'svfloat16x3_t' to argument 1} } */
+ f16x3 = svset3 (f16x3, 0, f64); /* { dg-error {passing 'svfloat64_t' to argument 3 of 'svset3', but argument 1 was a tuple of 'svfloat16_t'} } */
+ f16x3 = svset3 (f16x3, 0, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svset3', but argument 1 was a tuple of 'svfloat16_t'} } */
f16x3 = svset3 (f16x3, x, f16); /* { dg-error {argument 2 of 'svset3' must be an integer constant expression} } */
f16x3 = svset3 (f16x3, 0, f16);
f64 = svset3 (f16x3, 0, f16); /* { dg-error {incompatible types when assigning to type 'svfloat64_t' from type 'svfloat16x3_t'} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/set_5.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/set_5.c
index be911a7..f0696fb 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/set_5.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/set_5.c
@@ -16,8 +16,8 @@ f1 (svbool_t pg, svint32_t s32, svint32x4_t s32x4, svint32x2_t s32x2, int x)
s32x4 = svset4 (s32x2, 0, s32); /* { dg-error {passing 'svint32x2_t' to argument 1 of 'svset4', which expects a tuple of 4 vectors} } */
s32x4 = svset4 (pg, 0, s32); /* { dg-error {passing 'svbool_t' to argument 1 of 'svset4', which expects a tuple of 4 vectors} } */
s32x4 = svset4 (s32x4, 0, s32x4); /* { dg-error {passing 'svint32x4_t' to argument 3 of 'svset4', which expects a single SVE vector rather than a tuple} } */
- s32x4 = svset4 (s32x4, 0, f64); /* { dg-error {passing 'svfloat64_t' instead of the expected 'svint32_t' to argument 3 of 'svset4', after passing 'svint32x4_t' to argument 1} } */
- s32x4 = svset4 (s32x4, 0, pg); /* { dg-error {passing 'svbool_t' instead of the expected 'svint32_t' to argument 3 of 'svset4', after passing 'svint32x4_t' to argument 1} } */
+ s32x4 = svset4 (s32x4, 0, f64); /* { dg-error {passing 'svfloat64_t' to argument 3 of 'svset4', but argument 1 was a tuple of 'svint32_t'} } */
+ s32x4 = svset4 (s32x4, 0, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svset4', but argument 1 was a tuple of 'svint32_t'} } */
s32x4 = svset4 (s32x4, x, s32); /* { dg-error {argument 2 of 'svset4' must be an integer constant expression} } */
s32x4 = svset4 (s32x4, 0, s32);
f64 = svset4 (s32x4, 0, s32); /* { dg-error {incompatible types when assigning to type 'svfloat64_t' from type 'svint32x4_t'} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowb_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowb_1.c
index 6536679..c5942c7 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowb_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowb_1.c
@@ -66,5 +66,5 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8,
svshrnb (f32, 1); /* { dg-error {'svshrnb' has no form that takes 'svfloat32_t' arguments} } */
- svshrnb (1, 1); /* { dg-error {passing 'int' to argument 1 of 'svshrnb', which expects an SVE vector type} } */
+ svshrnb (1, 1); /* { dg-error {passing 'int' to argument 1 of 'svshrnb', which expects an SVE type rather than a scalar} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowb_to_uint_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowb_to_uint_1.c
index 51f9388..3ecd20a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowb_to_uint_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowb_to_uint_1.c
@@ -54,5 +54,5 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8,
svqshrunb (f32, 1); /* { dg-error {'svqshrunb' has no form that takes 'svfloat32_t' arguments} } */
- svqshrunb (1, 1); /* { dg-error {passing 'int' to argument 1 of 'svqshrunb', which expects an SVE vector type} } */
+ svqshrunb (1, 1); /* { dg-error {passing 'int' to argument 1 of 'svqshrunb', which expects an SVE type rather than a scalar} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowt_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowt_1.c
index 6c31cf8..e9d1d13 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowt_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowt_1.c
@@ -76,6 +76,6 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8,
svshrnt (f32, f32, 1); /* { dg-error {'svshrnt' has no form that takes 'svfloat32_t' arguments} } */
- svshrnt (1, s32, 1); /* { dg-error {passing 'int' to argument 1 of 'svshrnt', which expects an SVE vector type} } */
- svshrnt (s32, 1, 1); /* { dg-error {passing 'int' to argument 2 of 'svshrnt', which expects an SVE vector type} } */
+ svshrnt (1, s32, 1); /* { dg-error {passing 'int' to argument 1 of 'svshrnt', which expects an SVE type rather than a scalar} } */
+ svshrnt (s32, 1, 1); /* { dg-error {passing 'int' to argument 2 of 'svshrnt', which expects an SVE type rather than a scalar} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowt_to_uint_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowt_to_uint_1.c
index 2e35ad3..7414956 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowt_to_uint_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowt_to_uint_1.c
@@ -59,6 +59,6 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8,
svqshrunt (u16, f32, 1); /* { dg-error {'svqshrunt' has no form that takes 'svfloat32_t' arguments} } */
- svqshrunt (1, u32, 1); /* { dg-error {passing 'int' to argument 1 of 'svqshrunt', which expects an SVE vector type} } */
- svqshrunt (u32, 1, 1); /* { dg-error {passing 'int' to argument 2 of 'svqshrunt', which expects an SVE vector type} } */
+ svqshrunt (1, u32, 1); /* { dg-error {passing 'int' to argument 1 of 'svqshrunt', which expects an SVE type rather than a scalar} } */
+ svqshrunt (u32, 1, 1); /* { dg-error {passing 'int' to argument 2 of 'svqshrunt', which expects an SVE type rather than a scalar} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowxn_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowxn_1.c
new file mode 100644
index 0000000..ab5602f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/shift_right_imm_narrowxn_1.c
@@ -0,0 +1,89 @@
+/* { dg-do compile } */
+
+#include <arm_sve.h>
+
+#pragma GCC target ("+sme2")
+
+void
+f1 (svboolx2_t pgx2,
+ svint8x2_t s8x2, svuint8x2_t u8x2,
+ svint8x4_t s8x4, svuint8x4_t u8x4,
+ svint16x2_t s16x2, svuint16x2_t u16x2,
+ svint16x4_t s16x4, svuint16x4_t u16x4,
+ svint32x2_t s32x2, svuint32x2_t u32x2,
+ svint32x4_t s32x4, svuint32x4_t u32x4,
+ svint64x2_t s64x2, svuint64x2_t u64x2,
+ svint64x4_t s64x4, svuint64x4_t u64x4,
+ svfloat32x2_t f32x2, int x) __arm_streaming
+{
+ const int one = 1;
+ svqrshr_u8 (u32x4); /* { dg-error {too few arguments to function 'svqrshr_u8'} } */
+ svqrshr_u8 (u32x4, 1, 1); /* { dg-error {too many arguments to function 'svqrshr_u8'} } */
+
+ svqrshr_u8 (u32x4, x); /* { dg-error {argument 2 of 'svqrshr_u8' must be an integer constant expression} } */
+ svqrshr_u8 (u32x4, one); /* { dg-error {argument 2 of 'svqrshr_u8' must be an integer constant expression} } */
+ svqrshr_u8 (u32x4, 0.4); /* { dg-error {passing 0 to argument 2 of 'svqrshr_u8', which expects a value in the range \[1, 32\]} } */
+ svqrshr_u8 (u32x4, 1.0);
+
+ svqrshr_u8 (pgx2, 1); /* { dg-error {'svqrshr_u8' has no form that takes 'svboolx2_t' arguments} } */
+ svqrshr_u8 (u8x2, 1); /* { dg-error {'svqrshr_u8' has no form that takes 'svuint8x2_t' arguments} } */
+ svqrshr_u8 (u8x4, 1); /* { dg-error {'svqrshr_u8' has no form that takes 'svuint8x4_t' arguments} } */
+ svqrshr_u8 (u16x2, 1); /* { dg-error {'svqrshr_u8' has no form that takes 'svuint16x2_t' arguments} } */
+ svqrshr_u8 (u16x4, 1); /* { dg-error {'svqrshr_u8' has no form that takes 'svuint16x4_t' arguments} } */
+ svqrshr_u8 (u32x2, 1); /* { dg-error {'svqrshr_u8' has no form that takes 'svuint32x2_t' arguments} } */
+ svqrshr_u8 (u32x4, 1);
+ svqrshr_u8 (u64x2, 1); /* { dg-error {'svqrshr_u8' has no form that takes 'svuint64x2_t' arguments} } */
+ svqrshr_u8 (u64x4, 1); /* { dg-error {'svqrshr_u8' has no form that takes 'svuint64x4_t' arguments} } */
+ svqrshr_u8 (s32x4, 1); /* { dg-error {'svqrshr_u8' has no form that takes 'svint32x4_t' arguments} } */
+
+ svqrshr_s8 (s8x2, 1); /* { dg-error {'svqrshr_s8' has no form that takes 'svint8x2_t' arguments} } */
+ svqrshr_s8 (s8x4, 1); /* { dg-error {'svqrshr_s8' has no form that takes 'svint8x4_t' arguments} } */
+ svqrshr_s8 (s16x2, 1); /* { dg-error {'svqrshr_s8' has no form that takes 'svint16x2_t' arguments} } */
+ svqrshr_s8 (s16x4, 1); /* { dg-error {'svqrshr_s8' has no form that takes 'svint16x4_t' arguments} } */
+ svqrshr_s8 (s32x2, 1); /* { dg-error {'svqrshr_s8' has no form that takes 'svint32x2_t' arguments} } */
+ svqrshr_s8 (s32x4, 1);
+ svqrshr_s8 (s64x2, 1); /* { dg-error {'svqrshr_s8' has no form that takes 'svint64x2_t' arguments} } */
+ svqrshr_s8 (s64x4, 1); /* { dg-error {'svqrshr_s8' has no form that takes 'svint64x4_t' arguments} } */
+ svqrshr_s8 (u32x4, 1); /* { dg-error {'svqrshr_s8' has no form that takes 'svuint32x4_t' arguments} } */
+
+ svqrshr_u16 (pgx2, 1); /* { dg-error {'svqrshr_u16' has no form that takes 'svboolx2_t' arguments} } */
+ svqrshr_u16 (u8x2, 1); /* { dg-error {'svqrshr_u16' has no form that takes 'svuint8x2_t' arguments} } */
+ svqrshr_u16 (u8x4, 1); /* { dg-error {'svqrshr_u16' has no form that takes 'svuint8x4_t' arguments} } */
+ svqrshr_u16 (u16x2, 1); /* { dg-error {'svqrshr_u16' has no form that takes 'svuint16x2_t' arguments} } */
+ svqrshr_u16 (u16x4, 1); /* { dg-error {'svqrshr_u16' has no form that takes 'svuint16x4_t' arguments} } */
+ svqrshr_u16 (u32x2, 1);
+ svqrshr_u16 (u32x4, 1); /* { dg-error {'svqrshr_u16' has no form that takes 'svuint32x4_t' arguments} } */
+ svqrshr_u16 (u64x2, 1); /* { dg-error {'svqrshr_u16' has no form that takes 'svuint64x2_t' arguments} } */
+ svqrshr_u16 (u64x4, 1);
+ svqrshr_u16 (s32x2, 1); /* { dg-error {'svqrshr_u16' has no form that takes 'svint32x2_t' arguments} } */
+
+ svqrshr_s16 (s8x2, 1); /* { dg-error {'svqrshr_s16' has no form that takes 'svint8x2_t' arguments} } */
+ svqrshr_s16 (s8x4, 1); /* { dg-error {'svqrshr_s16' has no form that takes 'svint8x4_t' arguments} } */
+ svqrshr_s16 (s16x2, 1); /* { dg-error {'svqrshr_s16' has no form that takes 'svint16x2_t' arguments} } */
+ svqrshr_s16 (s16x4, 1); /* { dg-error {'svqrshr_s16' has no form that takes 'svint16x4_t' arguments} } */
+ svqrshr_s16 (s32x2, 1);
+ svqrshr_s16 (s32x4, 1); /* { dg-error {'svqrshr_s16' has no form that takes 'svint32x4_t' arguments} } */
+ svqrshr_s16 (s64x2, 1); /* { dg-error {'svqrshr_s16' has no form that takes 'svint64x2_t' arguments} } */
+ svqrshr_s16 (s64x4, 1);
+ svqrshr_s16 (u32x2, 1); /* { dg-error {'svqrshr_s16' has no form that takes 'svuint32x2_t' arguments} } */
+
+ svqrshr_u8 (u32x4, -1); /* { dg-error {passing -1 to argument 2 of 'svqrshr_u8', which expects a value in the range \[1, 32\]} } */
+ svqrshr_u8 (u32x4, 0); /* { dg-error {passing 0 to argument 2 of 'svqrshr_u8', which expects a value in the range \[1, 32\]} } */
+ svqrshr_u8 (u32x4, 1);
+ svqrshr_u8 (u32x4, 32);
+ svqrshr_u8 (u32x4, 33); /* { dg-error {passing 33 to argument 2 of 'svqrshr_u8', which expects a value in the range \[1, 32\]} } */
+
+ svqrshr_u16 (u32x2, -1); /* { dg-error {passing -1 to argument 2 of 'svqrshr_u16', which expects a value in the range \[1, 16\]} } */
+ svqrshr_u16 (u32x2, 0); /* { dg-error {passing 0 to argument 2 of 'svqrshr_u16', which expects a value in the range \[1, 16\]} } */
+ svqrshr_u16 (u32x2, 1);
+ svqrshr_u16 (u32x2, 16);
+ svqrshr_u16 (u32x2, 17); /* { dg-error {passing 17 to argument 2 of 'svqrshr_u16', which expects a value in the range \[1, 16\]} } */
+
+ svqrshr_u16 (u64x4, -1); /* { dg-error {passing -1 to argument 2 of 'svqrshr_u16', which expects a value in the range \[1, 64\]} } */
+ svqrshr_u16 (u64x4, 0); /* { dg-error {passing 0 to argument 2 of 'svqrshr_u16', which expects a value in the range \[1, 64\]} } */
+ svqrshr_u16 (u64x4, 1);
+ svqrshr_u16 (u64x4, 64);
+ svqrshr_u16 (u64x4, 65); /* { dg-error {passing 65 to argument 2 of 'svqrshr_u16', which expects a value in the range \[1, 64\]} } */
+
+ svqrshr_u8 (1, 1); /* { dg-error {passing 'int' to argument 1 of 'svqrshr_u8', which expects an SVE type rather than a scalar} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/sizeless-1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/sizeless-1.c
index 01cfd14..b0389fa 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/sizeless-1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/sizeless-1.c
@@ -105,8 +105,8 @@ statements (int n)
/* Pointer assignment. */
- gnu_sc_ptr = sve_sc_ptr; /* { dg-warning {assignment to [^\n]* from incompatible pointer type} } */
- sve_sc_ptr = gnu_sc_ptr; /* { dg-warning {assignment to [^\n]* from incompatible pointer type} } */
+ gnu_sc_ptr = sve_sc_ptr; /* { dg-error {assignment to [^\n]* from incompatible pointer type} } */
+ sve_sc_ptr = gnu_sc_ptr; /* { dg-error {assignment to [^\n]* from incompatible pointer type} } */
/* Pointer arithmetic. */
@@ -153,8 +153,8 @@ statements (int n)
0 ? 0 : sve_sc1; /* { dg-error {type mismatch in conditional expression} } */
0 ?: sve_sc1; /* { dg-error {type mismatch in conditional expression} } */
0 ? sve_sc_ptr : sve_sc_ptr;
- 0 ? sve_sc_ptr : gnu_sc_ptr; /* { dg-warning {pointer type mismatch in conditional expression} } */
- 0 ? gnu_sc_ptr : sve_sc_ptr; /* { dg-warning {pointer type mismatch in conditional expression} } */
+ 0 ? sve_sc_ptr : gnu_sc_ptr; /* { dg-error {pointer type mismatch in conditional expression} } */
+ 0 ? gnu_sc_ptr : sve_sc_ptr; /* { dg-error {pointer type mismatch in conditional expression} } */
/* Generic associations. */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/sizeless-2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/sizeless-2.c
index 613b9c4..d16f40b 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/sizeless-2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/sizeless-2.c
@@ -105,8 +105,8 @@ statements (int n)
/* Pointer assignment. */
- gnu_sc_ptr = sve_sc_ptr; /* { dg-warning {incompatible pointer type} } */
- sve_sc_ptr = gnu_sc_ptr; /* { dg-warning {incompatible pointer type} } */
+ gnu_sc_ptr = sve_sc_ptr; /* { dg-error {incompatible pointer type} } */
+ sve_sc_ptr = gnu_sc_ptr; /* { dg-error {incompatible pointer type} } */
/* Pointer arithmetic. */
@@ -153,8 +153,8 @@ statements (int n)
0 ? 0 : sve_sc1; /* { dg-error {type mismatch in conditional expression} } */
0 ?: sve_sc1; /* { dg-error {type mismatch in conditional expression} } */
0 ? sve_sc_ptr : sve_sc_ptr;
- 0 ? sve_sc_ptr : gnu_sc_ptr; /* { dg-warning {pointer type mismatch} } */
- 0 ? gnu_sc_ptr : sve_sc_ptr; /* { dg-warning {pointer type mismatch} } */
+ 0 ? sve_sc_ptr : gnu_sc_ptr; /* { dg-error {pointer type mismatch} } */
+ 0 ? gnu_sc_ptr : sve_sc_ptr; /* { dg-error {pointer type mismatch} } */
/* Generic associations. */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_1.c
index 625f059..cfe6869 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_1.c
@@ -12,15 +12,15 @@ f1 (svbool_t pg, signed char *s8_ptr, void *void_ptr, struct s *s_ptr,
{
svst1 (pg, s8_ptr); /* { dg-error {too few arguments to function 'svst1'} } */
svst1 (pg, s8_ptr, s8, 0); /* { dg-error {too many arguments to function 'svst1'} } */
- svst1 (0, s8_ptr, s8); /* { dg-error {passing 'int' to argument 1 of 'svst1', which expects 'svbool_t'} } */
- svst1 (pg, void_ptr, 0); /* { dg-error {passing 'int' to argument 3 of 'svst1', which expects an SVE vector type} } */
+ svst1 (0, s8_ptr, s8); /* { dg-error {passing 'int' to argument 1 of 'svst1', which expects an 'svbool_t' or 'svcount_t'} } */
+ svst1 (pg, void_ptr, 0); /* { dg-error {passing 'int' to argument 3 of 'svst1', which expects an SVE type rather than a scalar} } */
svst1 (pg, void_ptr, pg); /* { dg-error {'svst1' has no form that takes 'svbool_t' arguments} } */
svst1 (pg, 0, s8);
- svst1 (pg, (int32_t *) 0, s8); /* { dg-warning "passing argument 2 of 'svst1_s8' from incompatible pointer type" } */
+ svst1 (pg, (int32_t *) 0, s8); /* { dg-error "passing argument 2 of 'svst1_s8' from incompatible pointer type" } */
svst1 (pg, void_ptr, s8);
- svst1 (pg, s_ptr, s8); /* { dg-warning "passing argument 2 of 'svst1_s8' from incompatible pointer type" } */
- svst1 (pg, f32_ptr, s8); /* { dg-warning "passing argument 2 of 'svst1_s8' from incompatible pointer type" } */
+ svst1 (pg, s_ptr, s8); /* { dg-error "passing argument 2 of 'svst1_s8' from incompatible pointer type" } */
+ svst1 (pg, f32_ptr, s8); /* { dg-error "passing argument 2 of 'svst1_s8' from incompatible pointer type" } */
svst1 (pg, f32_ptr, f32);
- svst1 (pg, cf32_ptr, f32); /* { dg-warning "passing argument 2 of 'svst1_f32' from incompatible pointer type" } */
+ svst1 (pg, cf32_ptr, f32); /* { dg-error "passing argument 2 of 'svst1_f32' from incompatible pointer type" } */
svst1 (pg, s, s8); /* { dg-error {passing 'struct s' to argument 2 of 'svst1', which expects a scalar pointer} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_2.c
index c718b3e..eb12cbb 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_2.c
@@ -11,17 +11,17 @@ f1 (svbool_t pg, signed char *s8_ptr, void *void_ptr, struct s *s_ptr,
{
svst1_vnum (pg, s8_ptr, 0); /* { dg-error {too few arguments to function 'svst1_vnum'} } */
svst1_vnum (pg, s8_ptr, 0, s8, 0); /* { dg-error {too many arguments to function 'svst1_vnum'} } */
- svst1_vnum (0, s8_ptr, 0, s8); /* { dg-error {passing 'int' to argument 1 of 'svst1_vnum', which expects 'svbool_t'} } */
+ svst1_vnum (0, s8_ptr, 0, s8); /* { dg-error {passing 'int' to argument 1 of 'svst1_vnum', which expects an 'svbool_t' or 'svcount_t'} } */
svst1_vnum (pg, s8_ptr, pg, s8); /* { dg-error {passing 'svbool_t' to argument 3 of 'svst1_vnum', which expects 'int64_t'} } */
svst1_vnum (pg, s8_ptr, s8, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svst1_vnum', which expects 'int64_t'} } */
- svst1_vnum (pg, s8_ptr, void_ptr, s8); /* { dg-warning "passing argument 3 of 'svst1_vnum_s8' makes integer from pointer without a cast" } */
- svst1_vnum (pg, void_ptr, 0, 0); /* { dg-error {passing 'int' to argument 4 of 'svst1_vnum', which expects an SVE vector type} } */
+ svst1_vnum (pg, s8_ptr, void_ptr, s8); /* { dg-error "passing argument 3 of 'svst1_vnum_s8' makes integer from pointer without a cast" } */
+ svst1_vnum (pg, void_ptr, 0, 0); /* { dg-error {passing 'int' to argument 4 of 'svst1_vnum', which expects an SVE type rather than a scalar} } */
svst1_vnum (pg, void_ptr, 0, pg); /* { dg-error {'svst1_vnum' has no form that takes 'svbool_t' arguments} } */
svst1_vnum (pg, 0, 0, s8);
- svst1_vnum (pg, (int32_t *) 0, 0, s8); /* { dg-warning "passing argument 2 of 'svst1_vnum_s8' from incompatible pointer type" } */
+ svst1_vnum (pg, (int32_t *) 0, 0, s8); /* { dg-error "passing argument 2 of 'svst1_vnum_s8' from incompatible pointer type" } */
svst1_vnum (pg, void_ptr, 0, s8);
- svst1_vnum (pg, s_ptr, 0, s8); /* { dg-warning "passing argument 2 of 'svst1_vnum_s8' from incompatible pointer type" } */
- svst1_vnum (pg, f32_ptr, 0, s8); /* { dg-warning "passing argument 2 of 'svst1_vnum_s8' from incompatible pointer type" } */
+ svst1_vnum (pg, s_ptr, 0, s8); /* { dg-error "passing argument 2 of 'svst1_vnum_s8' from incompatible pointer type" } */
+ svst1_vnum (pg, f32_ptr, 0, s8); /* { dg-error "passing argument 2 of 'svst1_vnum_s8' from incompatible pointer type" } */
svst1_vnum (pg, f32_ptr, 0, f32);
- svst1_vnum (pg, cf32_ptr, 0, f32); /* { dg-warning "passing argument 2 of 'svst1_vnum_f32' from incompatible pointer type" } */
+ svst1_vnum (pg, cf32_ptr, 0, f32); /* { dg-error "passing argument 2 of 'svst1_vnum_f32' from incompatible pointer type" } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_index_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_index_1.c
index 8952823..21bd93a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_index_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_index_1.c
@@ -20,13 +20,13 @@ f1 (svbool_t pg, signed char *s8_ptr, short *s16_ptr,
svst1_scatter_index (pg, 0, s32, s32);
svst1_scatter_index (pg, (int32_t *) 0, s32, s32);
svst1_scatter_index (pg, void_ptr, s32, s32);
- svst1_scatter_index (pg, s_ptr, s32, s32); /* { dg-warning "passing argument 2 of 'svst1_scatter_s32index_s32' from incompatible pointer type" } */
- svst1_scatter_index (pg, f32_ptr, s32, s32); /* { dg-warning "passing argument 2 of 'svst1_scatter_s32index_s32' from incompatible pointer type" } */
+ svst1_scatter_index (pg, s_ptr, s32, s32); /* { dg-error "passing argument 2 of 'svst1_scatter_s32index_s32' from incompatible pointer type" } */
+ svst1_scatter_index (pg, f32_ptr, s32, s32); /* { dg-error "passing argument 2 of 'svst1_scatter_s32index_s32' from incompatible pointer type" } */
svst1_scatter_index (pg, f32_ptr, s32, f32);
- svst1_scatter_index (pg, cf32_ptr, s32, f32); /* { dg-warning "passing argument 2 of 'svst1_scatter_s32index_f32' from incompatible pointer type" } */
+ svst1_scatter_index (pg, cf32_ptr, s32, f32); /* { dg-error "passing argument 2 of 'svst1_scatter_s32index_f32' from incompatible pointer type" } */
svst1_scatter_index (pg, s, s32, s32); /* { dg-error {passing 'struct s' to argument 2 of 'svst1_scatter_index', which expects a vector or pointer base address} } */
- svst1_scatter_index (pg, u32, void_ptr, s32); /* { dg-warning "passing argument 3 of 'svst1_scatter_u32base_index_s32' makes integer from pointer without a cast" } */
+ svst1_scatter_index (pg, u32, void_ptr, s32); /* { dg-error "passing argument 3 of 'svst1_scatter_u32base_index_s32' makes integer from pointer without a cast" } */
svst1_scatter_index (pg, u32, pg, s32); /* { dg-error {passing 'svbool_t' to argument 3 of 'svst1_scatter_index', which expects 'int64_t'} } */
svst1_scatter_index (pg, u32, s32, s32); /* { dg-error {passing 'svint32_t' to argument 3 of 'svst1_scatter_index', which expects 'int64_t'} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_index_restricted_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_index_restricted_1.c
index 5e31362..ec99f8a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_index_restricted_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_index_restricted_1.c
@@ -22,13 +22,13 @@ f1 (svbool_t pg, signed char *s8_ptr, short *s16_ptr,
svstnt1_scatter_index (pg, 0, s64, s64);
svstnt1_scatter_index (pg, (int64_t *) 0, s64, s64);
svstnt1_scatter_index (pg, void_ptr, s64, s64);
- svstnt1_scatter_index (pg, s_ptr, s64, s64); /* { dg-warning "passing argument 2 of 'svstnt1_scatter_s64index_s64' from incompatible pointer type" } */
- svstnt1_scatter_index (pg, f32_ptr, s64, s64); /* { dg-warning "passing argument 2 of 'svstnt1_scatter_s64index_s64' from incompatible pointer type" } */
+ svstnt1_scatter_index (pg, s_ptr, s64, s64); /* { dg-error "passing argument 2 of 'svstnt1_scatter_s64index_s64' from incompatible pointer type" } */
+ svstnt1_scatter_index (pg, f32_ptr, s64, s64); /* { dg-error "passing argument 2 of 'svstnt1_scatter_s64index_s64' from incompatible pointer type" } */
svstnt1_scatter_index (pg, f64_ptr, s64, f64);
- svstnt1_scatter_index (pg, cf64_ptr, s64, f64); /* { dg-warning "passing argument 2 of 'svstnt1_scatter_s64index_f64' from incompatible pointer type" } */
+ svstnt1_scatter_index (pg, cf64_ptr, s64, f64); /* { dg-error "passing argument 2 of 'svstnt1_scatter_s64index_f64' from incompatible pointer type" } */
svstnt1_scatter_index (pg, s, s64, s64); /* { dg-error {passing 'struct s' to argument 2 of 'svstnt1_scatter_index', which expects a vector or pointer base address} } */
- svstnt1_scatter_index (pg, u32, void_ptr, s32); /* { dg-warning "passing argument 3 of 'svstnt1_scatter_u32base_index_s32' makes integer from pointer without a cast" } */
+ svstnt1_scatter_index (pg, u32, void_ptr, s32); /* { dg-error "passing argument 3 of 'svstnt1_scatter_u32base_index_s32' makes integer from pointer without a cast" } */
svstnt1_scatter_index (pg, u32, pg, s32); /* { dg-error {passing 'svbool_t' to argument 3 of 'svstnt1_scatter_index', which expects 'int64_t'} } */
svstnt1_scatter_index (pg, u32, s32, s32); /* { dg-error {passing 'svint32_t' to argument 3 of 'svstnt1_scatter_index', which expects 'int64_t'} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_offset_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_offset_1.c
index 10abf75..3b3b562 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_offset_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_offset_1.c
@@ -13,8 +13,8 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8, svint16_t s16, svuint16_t u16,
svst1_scatter (pg, u32); /* { dg-error {too few arguments to function 'svst1_scatter'} } */
svst1_scatter (pg, u32, u32, 0); /* { dg-error {too many arguments to function 'svst1_scatter'} } */
svst1_scatter (0, u32, u32); /* { dg-error {passing 'int' to argument 1 of 'svst1_scatter', which expects 'svbool_t'} } */
- svst1_scatter (pg, 0, u32); /* { dg-error {passing 'int' to argument 2 of 'svst1_scatter', which expects an SVE vector type} } */
- svst1_scatter (pg, u32, 0); /* { dg-error {passing 'int' to argument 3 of 'svst1_scatter', which expects an SVE vector type} } */
+ svst1_scatter (pg, 0, u32); /* { dg-error {passing 'int' to argument 2 of 'svst1_scatter', which expects an SVE type rather than a scalar} } */
+ svst1_scatter (pg, u32, 0); /* { dg-error {passing 'int' to argument 3 of 'svst1_scatter', which expects an SVE type rather than a scalar} } */
svst1_scatter (pg, u32, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svst1_scatter', which expects a vector of 32-bit or 64-bit elements} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_offset_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_offset_2.c
index 4854818..318f0dd 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_offset_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_offset_2.c
@@ -20,13 +20,13 @@ f1 (svbool_t pg, signed char *s8_ptr, short *s16_ptr,
svst1_scatter_offset (pg, 0, s32, s32);
svst1_scatter_offset (pg, (int32_t *) 0, s32, s32);
svst1_scatter_offset (pg, void_ptr, s32, s32);
- svst1_scatter_offset (pg, s_ptr, s32, s32); /* { dg-warning "passing argument 2 of 'svst1_scatter_s32offset_s32' from incompatible pointer type" } */
- svst1_scatter_offset (pg, f32_ptr, s32, s32); /* { dg-warning "passing argument 2 of 'svst1_scatter_s32offset_s32' from incompatible pointer type" } */
+ svst1_scatter_offset (pg, s_ptr, s32, s32); /* { dg-error "passing argument 2 of 'svst1_scatter_s32offset_s32' from incompatible pointer type" } */
+ svst1_scatter_offset (pg, f32_ptr, s32, s32); /* { dg-error "passing argument 2 of 'svst1_scatter_s32offset_s32' from incompatible pointer type" } */
svst1_scatter_offset (pg, f32_ptr, s32, f32);
- svst1_scatter_offset (pg, cf32_ptr, s32, f32); /* { dg-warning "passing argument 2 of 'svst1_scatter_s32offset_f32' from incompatible pointer type" } */
+ svst1_scatter_offset (pg, cf32_ptr, s32, f32); /* { dg-error "passing argument 2 of 'svst1_scatter_s32offset_f32' from incompatible pointer type" } */
svst1_scatter_offset (pg, s, s32, s32); /* { dg-error {passing 'struct s' to argument 2 of 'svst1_scatter_offset', which expects a vector or pointer base address} } */
- svst1_scatter_offset (pg, u32, void_ptr, s32); /* { dg-warning "passing argument 3 of 'svst1_scatter_u32base_offset_s32' makes integer from pointer without a cast" } */
+ svst1_scatter_offset (pg, u32, void_ptr, s32); /* { dg-error "passing argument 3 of 'svst1_scatter_u32base_offset_s32' makes integer from pointer without a cast" } */
svst1_scatter_offset (pg, u32, pg, s32); /* { dg-error {passing 'svbool_t' to argument 3 of 'svst1_scatter_offset', which expects 'int64_t'} } */
svst1_scatter_offset (pg, u32, s32, s32); /* { dg-error {passing 'svint32_t' to argument 3 of 'svst1_scatter_offset', which expects 'int64_t'} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_offset_restricted_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_offset_restricted_1.c
index 100624b..74bd7ae 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_offset_restricted_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter_offset_restricted_1.c
@@ -22,13 +22,13 @@ f1 (svbool_t pg, signed char *s8_ptr, short *s16_ptr,
svstnt1_scatter_offset (pg, 0, u32, s32);
svstnt1_scatter_offset (pg, (int32_t *) 0, u32, s32);
svstnt1_scatter_offset (pg, void_ptr, u32, s32);
- svstnt1_scatter_offset (pg, s_ptr, u32, s32); /* { dg-warning "passing argument 2 of 'svstnt1_scatter_u32offset_s32' from incompatible pointer type" } */
- svstnt1_scatter_offset (pg, f32_ptr, u32, s32); /* { dg-warning "passing argument 2 of 'svstnt1_scatter_u32offset_s32' from incompatible pointer type" } */
+ svstnt1_scatter_offset (pg, s_ptr, u32, s32); /* { dg-error "passing argument 2 of 'svstnt1_scatter_u32offset_s32' from incompatible pointer type" } */
+ svstnt1_scatter_offset (pg, f32_ptr, u32, s32); /* { dg-error "passing argument 2 of 'svstnt1_scatter_u32offset_s32' from incompatible pointer type" } */
svstnt1_scatter_offset (pg, f32_ptr, u32, f32);
- svstnt1_scatter_offset (pg, cf32_ptr, u32, f32); /* { dg-warning "passing argument 2 of 'svstnt1_scatter_u32offset_f32' from incompatible pointer type" } */
+ svstnt1_scatter_offset (pg, cf32_ptr, u32, f32); /* { dg-error "passing argument 2 of 'svstnt1_scatter_u32offset_f32' from incompatible pointer type" } */
svstnt1_scatter_offset (pg, s, u32, s32); /* { dg-error {passing 'struct s' to argument 2 of 'svstnt1_scatter_offset', which expects a vector or pointer base address} } */
- svstnt1_scatter_offset (pg, u32, void_ptr, s32); /* { dg-warning "passing argument 3 of 'svstnt1_scatter_u32base_offset_s32' makes integer from pointer without a cast" } */
+ svstnt1_scatter_offset (pg, u32, void_ptr, s32); /* { dg-error "passing argument 3 of 'svstnt1_scatter_u32base_offset_s32' makes integer from pointer without a cast" } */
svstnt1_scatter_offset (pg, u32, pg, s32); /* { dg-error {passing 'svbool_t' to argument 3 of 'svstnt1_scatter_offset', which expects 'int64_t'} } */
svstnt1_scatter_offset (pg, u32, s32, s32); /* { dg-error {passing 'svint32_t' to argument 3 of 'svstnt1_scatter_offset', which expects 'int64_t'} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/storexn_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/storexn_1.c
new file mode 100644
index 0000000..7ad4ca8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/storexn_1.c
@@ -0,0 +1,33 @@
+/* { dg-do compile } */
+/* { dg-options "-std=c99" } */
+
+#include <arm_sve.h>
+
+#pragma GCC target "+sme2"
+
+struct s { signed char x; };
+
+svuint8_t
+f1 (svbool_t pg, svcount_t pn, svboolx2_t pgx2,
+ signed char *s8_ptr, void *void_ptr, struct s *s_ptr,
+ float *f32_ptr, _Complex float *cf32_ptr,
+ svint8_t s8, svint8x2_t s8x2, svint8x3_t s8x3,
+ svfloat32x4_t f32x4, struct s s) __arm_streaming
+{
+ svst1 (pn, s8_ptr); /* { dg-error {too few arguments to function 'svst1'} } */
+ svst1 (pn, s8_ptr, s8x2, 0); /* { dg-error {too many arguments to function 'svst1'} } */
+ svst1 (0, s8_ptr, s8x2); /* { dg-error {passing 'int' to argument 1 of 'svst1', which expects an 'svbool_t' or 'svcount_t'} } */
+ svst1 (pn, void_ptr, 0x2); /* { dg-error {passing 'int' to argument 3 of 'svst1', which expects an SVE type rather than a scalar} } */
+ svst1 (pn, void_ptr, pgx2); /* { dg-error {'svst1' has no form that takes 'svboolx2_t' arguments} } */
+ svst1 (pn, 0, s8); /* { dg-error {operations on single vectors must be predicated by 'svbool_t' rather than 'svcount_t'} } */
+ svst1 (pn, 0, s8x2);
+ svst1 (pg, 0, s8x2); /* { dg-error {operations on multiple vectors must be predicated by 'svcount_t' rather than 'svbool_t'} } */
+ svst1 (pn, 0, s8x3); /* { dg-error {'svst1' has no form that takes 'svint8x3_t' arguments} } */
+ svst1 (pn, (int32_t *) 0, s8x2); /* { dg-error "passing argument 2 of 'svst1_s8_x2' from incompatible pointer type" } */
+ svst1 (pn, void_ptr, s8x2);
+ svst1 (pn, s_ptr, s8x2); /* { dg-error "passing argument 2 of 'svst1_s8_x2' from incompatible pointer type" } */
+ svst1 (pn, f32_ptr, s8x2); /* { dg-error "passing argument 2 of 'svst1_s8_x2' from incompatible pointer type" } */
+ svst1 (pn, f32_ptr, f32x4);
+ svst1 (pn, cf32_ptr, f32x4); /* { dg-error "passing argument 2 of 'svst1_f32_x4' from incompatible pointer type" } */
+ svst1 (pn, s, s8x2); /* { dg-error {passing 'struct s' to argument 2 of 'svst1', which expects a scalar pointer} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/svboolx2_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/svboolx2_1.c
new file mode 100644
index 0000000..877b184
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/svboolx2_1.c
@@ -0,0 +1,135 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include <arm_sve.h>
+
+/*
+** ret_p0:
+** ret
+*/
+svboolx2_t
+ret_p0 (svboolx2_t p0)
+{
+ return p0;
+}
+
+/*
+** ret_p1:
+** mov p0\.b, p1\.b
+** mov p1\.b, p2\.b
+** ret
+*/
+svboolx2_t
+ret_p1 (svbool_t p0, svboolx2_t p1)
+{
+ return p1;
+}
+
+/*
+** ret_p2:
+** (
+** mov p0\.b, p2\.b
+** mov p1\.b, p3\.b
+** |
+** mov p1\.b, p3\.b
+** mov p0\.b, p2\.b
+** )
+** ret
+*/
+svboolx2_t
+ret_p2 (svboolx2_t p0, svboolx2_t p2)
+{
+ return p2;
+}
+
+/*
+** ret_mem:
+** (
+** ldr p0, \[x0\]
+** ldr p1, \[x0, #1, mul vl\]
+** |
+** ldr p1, \[x0, #1, mul vl\]
+** ldr p0, \[x0\]
+** )
+** ret
+*/
+svboolx2_t
+ret_mem (svboolx2_t p0, svbool_t p2, svboolx2_t mem)
+{
+ return mem;
+}
+
+/*
+** load:
+** (
+** ldr p0, \[x0\]
+** ldr p1, \[x0, #1, mul vl\]
+** |
+** ldr p1, \[x0, #1, mul vl\]
+** ldr p0, \[x0\]
+** )
+** ret
+*/
+svboolx2_t
+load (svboolx2_t *ptr)
+{
+ return *ptr;
+}
+
+/*
+** store:
+** (
+** str p1, \[x0\]
+** str p2, \[x0, #1, mul vl\]
+** |
+** str p2, \[x0, #1, mul vl\]
+** str p1, \[x0\]
+** )
+** ret
+*/
+void
+store (svbool_t p0, svboolx2_t p1, svboolx2_t *ptr)
+{
+ *ptr = p1;
+}
+
+/*
+** upa_p1:
+** ret
+*/
+void
+upa_p1 (svbool_t p0, svboolx2_t p1)
+{
+ asm volatile ("" :: "Upa" (p1));
+}
+
+/*
+** up2_p1:
+** (
+** mov p0\.b, p1\.b
+** mov p1\.b, p2\.b
+** |
+** mov p3\.b, p2\.b
+** mov p2\.b, p1\.b
+** )
+** ret
+*/
+void
+up2_p1 (svbool_t p0, svboolx2_t p1)
+{
+ asm volatile ("" :: "Up2" (p1));
+}
+
+/*
+** p1_to_p2:
+** mov p3\.b, p2\.b
+** mov p2\.b, p1\.b
+** ret
+*/
+void
+p1_to_p2 (svbool_t p0, svboolx2_t p1)
+{
+ register svboolx2_t p2 asm ("p2") = p1;
+ asm volatile ("" :: "Up2" (p2));
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/svcount_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/svcount_1.c
new file mode 100644
index 0000000..920d37e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/svcount_1.c
@@ -0,0 +1,10 @@
+#include <arm_sve.h>
+
+svbool_t f1 (svcount_t x) { return x; } /* { dg-error {incompatible types} } */
+svcount_t f2 (svbool_t x) { return x; } /* { dg-error {incompatible types} } */
+void f3 (svbool_t *p, svcount_t x) { *p = x; } /* { dg-error {incompatible types} } */
+void f4 (svcount_t *p, svbool_t x) { *p = x; } /* { dg-error {incompatible types} } */
+svbool_t *f5 (svcount_t *p) { return p; } /* { dg-error {incompatible return type} } */
+svcount_t *f6 (svbool_t *p) { return p; } /* { dg-error {incompatible return type} } */
+svbool_t f7 (svcount_t x) { return (svbool_t) x; } /* { dg-error {conversion to non-scalar} } */
+svcount_t f8 (svbool_t x) { return (svcount_t) x; } /* { dg-error {conversion to non-scalar} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_bfloat16_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_bfloat16_1.c
index a9233324..9a554f5 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_bfloat16_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_bfloat16_1.c
@@ -10,7 +10,7 @@ f1 (svbool_t pg, svuint8_t u8, svuint16_t u16, svint32_t s32,
{
svbfmmla (f32, bf16); /* { dg-error {too few arguments to function 'svbfmmla'} } */
svbfmmla (f32, bf16, bf16, 0); /* { dg-error {too many arguments to function 'svbfmmla'} } */
- svbfmmla (0, bf16, bf16); /* { dg-error {passing 'int' to argument 1 of 'svbfmmla', which expects an SVE vector type} } */
+ svbfmmla (0, bf16, bf16); /* { dg-error {passing 'int' to argument 1 of 'svbfmmla', which expects an SVE type rather than a scalar} } */
svbfmmla (pg, bf16, bf16); /* { dg-error {'svbfmmla' has no form that takes 'svbool_t' arguments} } */
svbfmmla (u8, bf16, bf16); /* { dg-error {'svbfmmla' has no form that takes 'svuint8_t' arguments} } */
svbfmmla (u16, bf16, bf16); /* { dg-error {'svbfmmla' has no form that takes 'svuint16_t' arguments} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_bfloat16_lane_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_bfloat16_lane_1.c
index 23f027f..87e74fb 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_bfloat16_lane_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_bfloat16_lane_1.c
@@ -10,7 +10,7 @@ f1 (svbool_t pg, svuint8_t u8, svuint16_t u16, svint32_t s32,
{
svbfmlalb_lane (f32, bf16, bf16); /* { dg-error {too few arguments to function 'svbfmlalb_lane'} } */
svbfmlalb_lane (f32, bf16, bf16, 0, 0); /* { dg-error {too many arguments to function 'svbfmlalb_lane'} } */
- svbfmlalb_lane (0, bf16, bf16, 0); /* { dg-error {passing 'int' to argument 1 of 'svbfmlalb_lane', which expects an SVE vector type} } */
+ svbfmlalb_lane (0, bf16, bf16, 0); /* { dg-error {passing 'int' to argument 1 of 'svbfmlalb_lane', which expects an SVE type rather than a scalar} } */
svbfmlalb_lane (pg, bf16, bf16, 0); /* { dg-error {'svbfmlalb_lane' has no form that takes 'svbool_t' arguments} } */
svbfmlalb_lane (u8, bf16, bf16, 0); /* { dg-error {'svbfmlalb_lane' has no form that takes 'svuint8_t' arguments} } */
svbfmlalb_lane (u16, bf16, bf16, 0); /* { dg-error {'svbfmlalb_lane' has no form that takes 'svuint16_t' arguments} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_bfloat16_lanex2_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_bfloat16_lanex2_1.c
index 4755ca7..ca18526 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_bfloat16_lanex2_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_bfloat16_lanex2_1.c
@@ -10,7 +10,7 @@ f1 (svbool_t pg, svuint8_t u8, svuint16_t u16, svint32_t s32,
{
svbfdot_lane (f32, bf16, bf16); /* { dg-error {too few arguments to function 'svbfdot_lane'} } */
svbfdot_lane (f32, bf16, bf16, 0, 0); /* { dg-error {too many arguments to function 'svbfdot_lane'} } */
- svbfdot_lane (0, bf16, bf16, 0); /* { dg-error {passing 'int' to argument 1 of 'svbfdot_lane', which expects an SVE vector type} } */
+ svbfdot_lane (0, bf16, bf16, 0); /* { dg-error {passing 'int' to argument 1 of 'svbfdot_lane', which expects an SVE type rather than a scalar} } */
svbfdot_lane (pg, bf16, bf16, 0); /* { dg-error {'svbfdot_lane' has no form that takes 'svbool_t' arguments} } */
svbfdot_lane (u8, bf16, bf16, 0); /* { dg-error {'svbfdot_lane' has no form that takes 'svuint8_t' arguments} } */
svbfdot_lane (u16, bf16, bf16, 0); /* { dg-error {'svbfdot_lane' has no form that takes 'svuint16_t' arguments} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_bfloat16_opt_n_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_bfloat16_opt_n_1.c
index cb0605b..d831fc9 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_bfloat16_opt_n_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_bfloat16_opt_n_1.c
@@ -10,7 +10,7 @@ f1 (svbool_t pg, svuint8_t u8, svuint16_t u16, svint32_t s32,
{
svbfdot (f32, bf16); /* { dg-error {too few arguments to function 'svbfdot'} } */
svbfdot (f32, bf16, bf16, 0); /* { dg-error {too many arguments to function 'svbfdot'} } */
- svbfdot (0, bf16, bf16); /* { dg-error {passing 'int' to argument 1 of 'svbfdot', which expects an SVE vector type} } */
+ svbfdot (0, bf16, bf16); /* { dg-error {passing 'int' to argument 1 of 'svbfdot', which expects an SVE type rather than a scalar} } */
svbfdot (pg, bf16, bf16); /* { dg-error {'svbfdot' has no form that takes 'svbool_t' arguments} } */
svbfdot (u8, bf16, bf16); /* { dg-error {'svbfdot' has no form that takes 'svuint8_t' arguments} } */
svbfdot (u16, bf16, bf16); /* { dg-error {'svbfdot' has no form that takes 'svuint16_t' arguments} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_intq_uintq_lane_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_intq_uintq_lane_1.c
index 600be05..934b7bd 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_intq_uintq_lane_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_intq_uintq_lane_1.c
@@ -10,14 +10,14 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8, svint16_t s16, svuint16_t u16,
{
svsudot_lane (s32, s8, u8); /* { dg-error {too few arguments to function 'svsudot_lane'} } */
svsudot_lane (s32, s8, u8, 0, 0); /* { dg-error {too many arguments to function 'svsudot_lane'} } */
- svsudot_lane (0, s8, u8, 0); /* { dg-error {passing 'int' to argument 1 of 'svsudot_lane', which expects an SVE vector type} } */
+ svsudot_lane (0, s8, u8, 0); /* { dg-error {passing 'int' to argument 1 of 'svsudot_lane', which expects an SVE type rather than a scalar} } */
svsudot_lane (pg, s8, u8, 0); /* { dg-error {'svsudot_lane' has no form that takes 'svbool_t' arguments} } */
svsudot_lane (u8, s8, u8, 0); /* { dg-error {'svsudot_lane' has no form that takes 'svuint8_t' arguments} } */
svsudot_lane (f32, s8, u8, 0); /* { dg-error {'svsudot_lane' has no form that takes 'svfloat32_t' arguments} } */
svsudot_lane (u32, s8, u8, 0); /* { dg-error {'svsudot_lane' has no form that takes 'svuint32_t' arguments} } */
svsudot_lane (s32, s8, u8, 0);
- svsudot_lane (s32, 0, u8, 0); /* { dg-error {passing 'int' to argument 2 of 'svsudot_lane', which expects an SVE vector type} } */
- svsudot_lane (s32, s8, 0, 0); /* { dg-error {passing 'int' to argument 3 of 'svsudot_lane', which expects an SVE vector type} } */
+ svsudot_lane (s32, 0, u8, 0); /* { dg-error {passing 'int' to argument 2 of 'svsudot_lane', which expects an SVE type rather than a scalar} } */
+ svsudot_lane (s32, s8, 0, 0); /* { dg-error {passing 'int' to argument 3 of 'svsudot_lane', which expects an SVE type rather than a scalar} } */
svsudot_lane (s32, s8, u8, 0);
svsudot_lane (s32, u8, u8, 0); /* { dg-error {passing 'svuint8_t' to argument 2 of 'svsudot_lane', which expects a vector of signed integers} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_intq_uintq_opt_n_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_intq_uintq_opt_n_1.c
index f95ac58..c481996 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_intq_uintq_opt_n_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_intq_uintq_opt_n_1.c
@@ -23,12 +23,12 @@ f2 (svbool_t pg, svint8_t s8, svuint8_t u8, svuint32_t u32,
{
svsudot (s32, s8); /* { dg-error {too few arguments to function 'svsudot'} } */
svsudot (s32, s8, u8, u8); /* { dg-error {too many arguments to function 'svsudot'} } */
- svsudot (0, s8, u8); /* { dg-error {passing 'int' to argument 1 of 'svsudot', which expects an SVE vector type} } */
+ svsudot (0, s8, u8); /* { dg-error {passing 'int' to argument 1 of 'svsudot', which expects an SVE type rather than a scalar} } */
svsudot (pg, s8, u8); /* { dg-error {'svsudot' has no form that takes 'svbool_t' arguments} } */
svsudot (u8, s8, u8); /* { dg-error {'svsudot' has no form that takes 'svuint8_t' arguments} } */
svsudot (f32, s8, u8); /* { dg-error {'svsudot' has no form that takes 'svfloat32_t' arguments} } */
svsudot (s32, s8, u8);
- svsudot (s32, 0, u8); /* { dg-error {passing 'int' to argument 2 of 'svsudot', which expects an SVE vector type} } */
+ svsudot (s32, 0, u8); /* { dg-error {passing 'int' to argument 2 of 'svsudot', which expects an SVE type rather than a scalar} } */
svsudot (s32, u8, u8); /* { dg-error {passing 'svuint8_t' to argument 2 of 'svsudot', which expects a vector of signed integers} } */
svsudot (s32, s8, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svsudot', which expects a vector of unsigned integers} } */
svsudot (s32, s8, 0);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_lane_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_lane_1.c
index d59ffab..0a67f82 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_lane_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_lane_1.c
@@ -10,11 +10,11 @@ f1 (svbool_t pg, svfloat16_t f16, svfloat32_t f32, svfloat64_t f64,
svmla_lane (f32, f32, f32, 0, 0); /* { dg-error {too many arguments to function 'svmla_lane'} } */
svmla_lane (pg, pg, pg, 0); /* { dg-error {'svmla_lane' has no form that takes 'svbool_t' arguments} } */
svmla_lane (s32, s32, s32, 0); /* { dg-error {ACLE function 'svmla_lane_s32' requires ISA extension 'sve2'} "" { xfail aarch64_sve2 } } */
- svmla_lane (1, f32, f32, 0); /* { dg-error {passing 'int' to argument 1 of 'svmla_lane', which expects an SVE vector type} } */
- svmla_lane (f32, 1, f32, 0); /* { dg-error {passing 'int' to argument 2 of 'svmla_lane', which expects an SVE vector type} } */
- svmla_lane (f32, f32, 1, 0); /* { dg-error {passing 'int' to argument 3 of 'svmla_lane', which expects an SVE vector type} } */
- svmla_lane (f32, f64, f32, 0); /* { dg-error {passing 'svfloat64_t' to argument 2 of 'svmla_lane', but previous arguments had type 'svfloat32_t'} } */
- svmla_lane (f32, f32, f64, 0); /* { dg-error {passing 'svfloat64_t' to argument 3 of 'svmla_lane', but previous arguments had type 'svfloat32_t'} } */
+ svmla_lane (1, f32, f32, 0); /* { dg-error {passing 'int' to argument 1 of 'svmla_lane', which expects an SVE type rather than a scalar} } */
+ svmla_lane (f32, 1, f32, 0); /* { dg-error {passing 'int' to argument 2 of 'svmla_lane', which expects an SVE type rather than a scalar} } */
+ svmla_lane (f32, f32, 1, 0); /* { dg-error {passing 'int' to argument 3 of 'svmla_lane', which expects an SVE type rather than a scalar} } */
+ svmla_lane (f32, f64, f32, 0); /* { dg-error {passing 'svfloat64_t' to argument 2 of 'svmla_lane', but argument 1 had type 'svfloat32_t'} } */
+ svmla_lane (f32, f32, f64, 0); /* { dg-error {passing 'svfloat64_t' to argument 3 of 'svmla_lane', but argument 1 had type 'svfloat32_t'} } */
svmla_lane (f32, f32, f32, s32); /* { dg-error {argument 4 of 'svmla_lane' must be an integer constant expression} } */
svmla_lane (f32, f32, f32, i); /* { dg-error {argument 4 of 'svmla_lane' must be an integer constant expression} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_lane_rotate_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_lane_rotate_1.c
index 68e5172..60c9c46 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_lane_rotate_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_lane_rotate_1.c
@@ -11,11 +11,11 @@ f1 (svbool_t pg, svfloat16_t f16, svfloat32_t f32, svfloat64_t f64,
svcmla_lane (pg, pg, pg, 0, 90); /* { dg-error {'svcmla_lane' has no form that takes 'svbool_t' arguments} } */
svcmla_lane (s32, s32, s32, 0, 90); /* { dg-error {ACLE function 'svcmla_lane_s32' requires ISA extension 'sve2'} "" { xfail aarch64_sve2 } } */
svcmla_lane (f64, f64, f64, 0, 90); /* { dg-error {'svcmla_lane' has no form that takes 'svfloat64_t' arguments} } */
- svcmla_lane (1, f32, f32, 0, 90); /* { dg-error {passing 'int' to argument 1 of 'svcmla_lane', which expects an SVE vector type} } */
- svcmla_lane (f32, 1, f32, 0, 90); /* { dg-error {passing 'int' to argument 2 of 'svcmla_lane', which expects an SVE vector type} } */
- svcmla_lane (f32, f32, 1, 0, 90); /* { dg-error {passing 'int' to argument 3 of 'svcmla_lane', which expects an SVE vector type} } */
- svcmla_lane (f32, f64, f32, 0, 90); /* { dg-error {passing 'svfloat64_t' to argument 2 of 'svcmla_lane', but previous arguments had type 'svfloat32_t'} } */
- svcmla_lane (f32, f32, f64, 0, 90); /* { dg-error {passing 'svfloat64_t' to argument 3 of 'svcmla_lane', but previous arguments had type 'svfloat32_t'} } */
+ svcmla_lane (1, f32, f32, 0, 90); /* { dg-error {passing 'int' to argument 1 of 'svcmla_lane', which expects an SVE type rather than a scalar} } */
+ svcmla_lane (f32, 1, f32, 0, 90); /* { dg-error {passing 'int' to argument 2 of 'svcmla_lane', which expects an SVE type rather than a scalar} } */
+ svcmla_lane (f32, f32, 1, 0, 90); /* { dg-error {passing 'int' to argument 3 of 'svcmla_lane', which expects an SVE type rather than a scalar} } */
+ svcmla_lane (f32, f64, f32, 0, 90); /* { dg-error {passing 'svfloat64_t' to argument 2 of 'svcmla_lane', but argument 1 had type 'svfloat32_t'} } */
+ svcmla_lane (f32, f32, f64, 0, 90); /* { dg-error {passing 'svfloat64_t' to argument 3 of 'svcmla_lane', but argument 1 had type 'svfloat32_t'} } */
svcmla_lane (f32, f32, f32, s32, 0); /* { dg-error {argument 4 of 'svcmla_lane' must be an integer constant expression} } */
svcmla_lane (f32, f32, f32, i, 0); /* { dg-error {argument 4 of 'svcmla_lane' must be an integer constant expression} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_long_lane_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_long_lane_1.c
index e20e1a1..dd67b4e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_long_lane_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_long_lane_1.c
@@ -11,16 +11,16 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8, svint16_t s16, svuint16_t u16,
{
svmlalb_lane (u64, u32, u32); /* { dg-error {too few arguments to function 'svmlalb_lane'} } */
svmlalb_lane (u64, u32, u32, 0, 0); /* { dg-error {too many arguments to function 'svmlalb_lane'} } */
- svmlalb_lane (0, u16, u16, 0); /* { dg-error {passing 'int' to argument 1 of 'svmlalb_lane', which expects an SVE vector type} } */
+ svmlalb_lane (0, u16, u16, 0); /* { dg-error {passing 'int' to argument 1 of 'svmlalb_lane', which expects an SVE type rather than a scalar} } */
svmlalb_lane (pg, u16, u16, 0); /* { dg-error {'svmlalb_lane' has no form that takes 'svbool_t' arguments} } */
svmlalb_lane (u8, u8, u8, 0); /* { dg-error {'svmlalb_lane' has no form that takes 'svuint8_t' arguments} } */
svmlalb_lane (u16, u8, u8, 0); /* { dg-error {'svmlalb_lane' has no form that takes 'svuint16_t' arguments} } */
svmlalb_lane (f16, u16, u16, 0); /* { dg-error {'svmlalb_lane' has no form that takes 'svfloat16_t' arguments} } */
svmlalb_lane (f32, f16, f16, 0);
svmlalb_lane (u32, u16, u16, 0);
- svmlalb_lane (u32, 0, u16, 0); /* { dg-error {passing 'int' to argument 2 of 'svmlalb_lane', which expects an SVE vector type} } */
+ svmlalb_lane (u32, 0, u16, 0); /* { dg-error {passing 'int' to argument 2 of 'svmlalb_lane', which expects an SVE type rather than a scalar} } */
svmlalb_lane (u32, s16, u16, 0); /* { dg-error {arguments 1 and 2 of 'svmlalb_lane' must have the same signedness, but the values passed here have type 'svuint32_t' and 'svint16_t' respectively} } */
- svmlalb_lane (u32, u16, 0, 0); /* { dg-error {passing 'int' to argument 3 of 'svmlalb_lane', which expects an SVE vector type} } */
+ svmlalb_lane (u32, u16, 0, 0); /* { dg-error {passing 'int' to argument 3 of 'svmlalb_lane', which expects an SVE type rather than a scalar} } */
svmlalb_lane (u32, u16, s16, 0); /* { dg-error {arguments 1 and 3 of 'svmlalb_lane' must have the same signedness, but the values passed here have type 'svuint32_t' and 'svint16_t' respectively} } */
svmlalb_lane (u32, u32, u32, 0); /* { dg-error {passing 'svuint32_t' instead of the expected 'svuint16_t' to argument 2 of 'svmlalb_lane', after passing 'svuint32_t' to argument 1} } */
svmlalb_lane (u32, u8, u16, 0); /* { dg-error {passing 'svuint8_t' instead of the expected 'svuint16_t' to argument 2 of 'svmlalb_lane', after passing 'svuint32_t' to argument 1} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_long_opt_n_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_long_opt_n_1.c
index c6718cf..157fd7c 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_long_opt_n_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_long_opt_n_1.c
@@ -10,13 +10,13 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8, svuint16_t u16, svuint32_t u32,
{
svabalb (u16, u8); /* { dg-error {too few arguments to function 'svabalb'} } */
svabalb (u16, u8, u8, u8); /* { dg-error {too many arguments to function 'svabalb'} } */
- svabalb (0, u8, u8); /* { dg-error {passing 'int' to argument 1 of 'svabalb', which expects an SVE vector type} } */
+ svabalb (0, u8, u8); /* { dg-error {passing 'int' to argument 1 of 'svabalb', which expects an SVE type rather than a scalar} } */
svabalb (pg, u8, u8); /* { dg-error {'svabalb' has no form that takes 'svbool_t' arguments} } */
svabalb (u8, u8, u8); /* { dg-error {'svabalb' has no form that takes 'svuint8_t' arguments} } */
svabalb (f16, u8, u8); /* { dg-error {'svabalb' has no form that takes 'svfloat16_t' arguments} } */
svabalb (f32, f16, f16); /* { dg-error {'svabalb' has no form that takes 'svfloat32_t' arguments} } */
svabalb (u16, u8, u8);
- svabalb (u16, 0, u8); /* { dg-error {passing 'int' to argument 2 of 'svabalb', which expects an SVE vector type} } */
+ svabalb (u16, 0, u8); /* { dg-error {passing 'int' to argument 2 of 'svabalb', which expects an SVE type rather than a scalar} } */
svabalb (u16, s8, u8); /* { dg-error {arguments 1 and 2 of 'svabalb' must have the same signedness, but the values passed here have type 'svuint16_t' and 'svint8_t' respectively} } */
svabalb (u16, u8, 0);
svabalb (u16, u8, s8); /* { dg-error {arguments 1 and 3 of 'svabalb' must have the same signedness, but the values passed here have type 'svuint16_t' and 'svint8_t' respectively} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_opt_n_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_opt_n_1.c
index c4a80e9..6ca2234 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_opt_n_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_opt_n_1.c
@@ -10,25 +10,25 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8,
svmla_x (pg, u8, u8, u8, u8); /* { dg-error {too many arguments to function 'svmla_x'} } */
svmla_x (u8, u8, u8, u8); /* { dg-error {passing 'svuint8_t' to argument 1 of 'svmla_x', which expects 'svbool_t'} } */
svmla_x (pg, pg, pg, pg); /* { dg-error {'svmla_x' has no form that takes 'svbool_t' arguments} } */
- svmla_x (pg, 1, u8, u8); /* { dg-error {passing 'int' to argument 2 of 'svmla_x', which expects an SVE vector type} } */
- svmla_x (pg, u8, s8, u8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svmla_x', but previous arguments had type 'svuint8_t'} } */
+ svmla_x (pg, 1, u8, u8); /* { dg-error {passing 'int' to argument 2 of 'svmla_x', which expects an SVE type rather than a scalar} } */
+ svmla_x (pg, u8, s8, u8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svmla_x', but argument 2 had type 'svuint8_t'} } */
svmla_x (pg, u8, u8, u8);
- svmla_x (pg, u8, s16, u8); /* { dg-error {passing 'svint16_t' to argument 3 of 'svmla_x', but previous arguments had type 'svuint8_t'} } */
- svmla_x (pg, u8, u16, u8); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svmla_x', but previous arguments had type 'svuint8_t'} } */
- svmla_x (pg, u8, f16, u8); /* { dg-error {passing 'svfloat16_t' to argument 3 of 'svmla_x', but previous arguments had type 'svuint8_t'} } */
- svmla_x (pg, u8, pg, u8); /* { dg-error {passing 'svbool_t' to argument 3 of 'svmla_x', but previous arguments had type 'svuint8_t'} } */
- svmla_x (pg, u8, 0, u8); /* { dg-error {passing 'int' to argument 3 of 'svmla_x', which expects an SVE vector type} } */
- svmla_x (pg, u8, u8, s8); /* { dg-error {passing 'svint8_t' to argument 4 of 'svmla_x', but previous arguments had type 'svuint8_t'} } */
- svmla_x (pg, u8, u8, s16); /* { dg-error {passing 'svint16_t' to argument 4 of 'svmla_x', but previous arguments had type 'svuint8_t'} } */
- svmla_x (pg, u8, u8, u16); /* { dg-error {passing 'svuint16_t' to argument 4 of 'svmla_x', but previous arguments had type 'svuint8_t'} } */
- svmla_x (pg, u8, u8, f16); /* { dg-error {passing 'svfloat16_t' to argument 4 of 'svmla_x', but previous arguments had type 'svuint8_t'} } */
- svmla_x (pg, u8, u8, pg); /* { dg-error {passing 'svbool_t' to argument 4 of 'svmla_x', but previous arguments had type 'svuint8_t'} } */
+ svmla_x (pg, u8, s16, u8); /* { dg-error {passing 'svint16_t' to argument 3 of 'svmla_x', but argument 2 had type 'svuint8_t'} } */
+ svmla_x (pg, u8, u16, u8); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svmla_x', but argument 2 had type 'svuint8_t'} } */
+ svmla_x (pg, u8, f16, u8); /* { dg-error {passing 'svfloat16_t' to argument 3 of 'svmla_x', but argument 2 had type 'svuint8_t'} } */
+ svmla_x (pg, u8, pg, u8); /* { dg-error {passing 'svbool_t' to argument 3 of 'svmla_x', but argument 2 had type 'svuint8_t'} } */
+ svmla_x (pg, u8, 0, u8); /* { dg-error {passing 'int' to argument 3 of 'svmla_x', which expects an SVE type rather than a scalar} } */
+ svmla_x (pg, u8, u8, s8); /* { dg-error {passing 'svint8_t' to argument 4 of 'svmla_x', but argument 2 had type 'svuint8_t'} } */
+ svmla_x (pg, u8, u8, s16); /* { dg-error {passing 'svint16_t' to argument 4 of 'svmla_x', but argument 2 had type 'svuint8_t'} } */
+ svmla_x (pg, u8, u8, u16); /* { dg-error {passing 'svuint16_t' to argument 4 of 'svmla_x', but argument 2 had type 'svuint8_t'} } */
+ svmla_x (pg, u8, u8, f16); /* { dg-error {passing 'svfloat16_t' to argument 4 of 'svmla_x', but argument 2 had type 'svuint8_t'} } */
+ svmla_x (pg, u8, u8, pg); /* { dg-error {passing 'svbool_t' to argument 4 of 'svmla_x', but argument 2 had type 'svuint8_t'} } */
svmla_x (pg, u8, u8, 0);
- svmla_x (pg, f16, s16, f16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svmla_x', but previous arguments had type 'svfloat16_t'} } */
- svmla_x (pg, f16, u16, f16); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svmla_x', but previous arguments had type 'svfloat16_t'} } */
- svmla_x (pg, f16, f16, s16); /* { dg-error {passing 'svint16_t' to argument 4 of 'svmla_x', but previous arguments had type 'svfloat16_t'} } */
- svmla_x (pg, f16, f16, u16); /* { dg-error {passing 'svuint16_t' to argument 4 of 'svmla_x', but previous arguments had type 'svfloat16_t'} } */
+ svmla_x (pg, f16, s16, f16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svmla_x', but argument 2 had type 'svfloat16_t'} } */
+ svmla_x (pg, f16, u16, f16); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svmla_x', but argument 2 had type 'svfloat16_t'} } */
+ svmla_x (pg, f16, f16, s16); /* { dg-error {passing 'svint16_t' to argument 4 of 'svmla_x', but argument 2 had type 'svfloat16_t'} } */
+ svmla_x (pg, f16, f16, u16); /* { dg-error {passing 'svuint16_t' to argument 4 of 'svmla_x', but argument 2 had type 'svfloat16_t'} } */
svmla_x (pg, f16, f16, f16);
svmla_x (pg, f16, f16, 1);
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_lane_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_lane_1.c
index e81552b..ed38b78 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_lane_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_lane_1.c
@@ -9,33 +9,33 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8, svint16_t s16, svuint16_t u16,
{
svdot_lane (u32, u8, u8); /* { dg-error {too few arguments to function 'svdot_lane'} } */
svdot_lane (u32, u8, u8, 0, 0); /* { dg-error {too many arguments to function 'svdot_lane'} } */
- svdot_lane (0, u8, u8, 0); /* { dg-error {passing 'int' to argument 1 of 'svdot_lane', which expects an SVE vector type} } */
- svdot_lane (pg, u8, u8, 0); /* { dg-error {'svdot_lane' has no form that takes 'svbool_t' arguments} } */
- svdot_lane (u8, u8, u8, 0); /* { dg-error {'svdot_lane' has no form that takes 'svuint8_t' arguments} } */
- svdot_lane (f32, u8, u8, 0); /* { dg-error {'svdot_lane' has no form that takes 'svfloat32_t' arguments} } */
+ svdot_lane (0, u8, u8, 0); /* { dg-error {passing 'int' to argument 1 of 'svdot_lane', which expects an SVE type rather than a scalar} } */
+ svdot_lane (pg, u8, u8, 0); /* { dg-error {'svdot_lane' has no form that takes 'svbool_t' and 'svuint8_t' arguments} } */
+ svdot_lane (u8, u8, u8, 0); /* { dg-error {'svdot_lane' has no form that takes 'svuint8_t' and 'svuint8_t' arguments} } */
+ svdot_lane (f32, u8, u8, 0); /* { dg-error {'svdot_lane' has no form that takes 'svfloat32_t' and 'svuint8_t' arguments} } */
svdot_lane (u32, u8, u8, 0);
- svdot_lane (u32, 0, u8, 0); /* { dg-error {passing 'int' to argument 2 of 'svdot_lane', which expects an SVE vector type} } */
- svdot_lane (u32, u8, 0, 0); /* { dg-error {passing 'int' to argument 3 of 'svdot_lane', which expects an SVE vector type} } */
+ svdot_lane (u32, 0, u8, 0); /* { dg-error {passing 'int' to argument 2 of 'svdot_lane', which expects an SVE type rather than a scalar} } */
+ svdot_lane (u32, u8, 0, 0); /* { dg-error {passing 'int' to argument 3 of 'svdot_lane', which expects an SVE type rather than a scalar} } */
svdot_lane (s32, s8, s8, 0);
- svdot_lane (s32, u8, s8, 0); /* { dg-error {arguments 1 and 2 of 'svdot_lane' must have the same signedness, but the values passed here have type 'svint32_t' and 'svuint8_t' respectively} } */
- svdot_lane (s32, s8, u8, 0); /* { dg-error {arguments 1 and 3 of 'svdot_lane' must have the same signedness, but the values passed here have type 'svint32_t' and 'svuint8_t' respectively} } */
- svdot_lane (s32, s32, s32, 0); /* { dg-error {passing 'svint32_t' instead of the expected 'svint8_t' to argument 2 of 'svdot_lane', after passing 'svint32_t' to argument 1} } */
+ svdot_lane (s32, u8, s8, 0); /* { dg-error {passing 'svint8_t' to argument 3 of 'svdot_lane', but argument 2 had type 'svuint8_t'} } */
+ svdot_lane (s32, s8, u8, 0); /* { dg-error {passing 'svuint8_t' to argument 3 of 'svdot_lane', but argument 2 had type 'svint8_t'} } */
+ svdot_lane (s32, s32, s32, 0); /* { dg-error {'svdot_lane' has no form that takes 'svint32_t' and 'svint32_t' arguments} } */
svdot_lane (u32, u8, u8, 0);
- svdot_lane (u32, s8, u8, 0); /* { dg-error {arguments 1 and 2 of 'svdot_lane' must have the same signedness, but the values passed here have type 'svuint32_t' and 'svint8_t' respectively} } */
- svdot_lane (u32, u8, s8, 0); /* { dg-error {arguments 1 and 3 of 'svdot_lane' must have the same signedness, but the values passed here have type 'svuint32_t' and 'svint8_t' respectively} } */
- svdot_lane (u32, u32, u32, 0); /* { dg-error {passing 'svuint32_t' instead of the expected 'svuint8_t' to argument 2 of 'svdot_lane', after passing 'svuint32_t' to argument 1} } */
+ svdot_lane (u32, s8, u8, 0); /* { dg-error {passing 'svuint8_t' to argument 3 of 'svdot_lane', but argument 2 had type 'svint8_t'} } */
+ svdot_lane (u32, u8, s8, 0); /* { dg-error {passing 'svint8_t' to argument 3 of 'svdot_lane', but argument 2 had type 'svuint8_t'} } */
+ svdot_lane (u32, u32, u32, 0); /* { dg-error {'svdot_lane' has no form that takes 'svuint32_t' and 'svuint32_t' arguments} } */
svdot_lane (s64, s16, s16, 0);
- svdot_lane (s64, u16, s16, 0); /* { dg-error {arguments 1 and 2 of 'svdot_lane' must have the same signedness, but the values passed here have type 'svint64_t' and 'svuint16_t' respectively} } */
- svdot_lane (s64, s16, u16, 0); /* { dg-error {arguments 1 and 3 of 'svdot_lane' must have the same signedness, but the values passed here have type 'svint64_t' and 'svuint16_t' respectively} } */
- svdot_lane (s64, s64, s64, 0); /* { dg-error {passing 'svint64_t' instead of the expected 'svint16_t' to argument 2 of 'svdot_lane', after passing 'svint64_t' to argument 1} } */
+ svdot_lane (s64, u16, s16, 0); /* { dg-error {passing 'svint16_t' to argument 3 of 'svdot_lane', but argument 2 had type 'svuint16_t'} } */
+ svdot_lane (s64, s16, u16, 0); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svdot_lane', but argument 2 had type 'svint16_t'} } */
+ svdot_lane (s64, s64, s64, 0); /* { dg-error {'svdot_lane' has no form that takes 'svint64_t' and 'svint64_t' arguments} } */
svdot_lane (u64, u16, u16, 0);
- svdot_lane (u64, s16, u16, 0); /* { dg-error {arguments 1 and 2 of 'svdot_lane' must have the same signedness, but the values passed here have type 'svuint64_t' and 'svint16_t' respectively} } */
- svdot_lane (u64, u16, s16, 0); /* { dg-error {arguments 1 and 3 of 'svdot_lane' must have the same signedness, but the values passed here have type 'svuint64_t' and 'svint16_t' respectively} } */
- svdot_lane (u64, u64, u64, 0); /* { dg-error {passing 'svuint64_t' instead of the expected 'svuint16_t' to argument 2 of 'svdot_lane', after passing 'svuint64_t' to argument 1} } */
+ svdot_lane (u64, s16, u16, 0); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svdot_lane', but argument 2 had type 'svint16_t'} } */
+ svdot_lane (u64, u16, s16, 0); /* { dg-error {passing 'svint16_t' to argument 3 of 'svdot_lane', but argument 2 had type 'svuint16_t'} } */
+ svdot_lane (u64, u64, u64, 0); /* { dg-error {'svdot_lane' has no form that takes 'svuint64_t' and 'svuint64_t' arguments} } */
svdot_lane (s32, s8, s8, i); /* { dg-error {argument 4 of 'svdot_lane' must be an integer constant expression} } */
svdot_lane (s32, s8, s8, 0);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_lane_rotate_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_lane_rotate_1.c
index a748a86..9e84e7a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_lane_rotate_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_lane_rotate_1.c
@@ -11,13 +11,13 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8, svint16_t s16, svuint16_t u16,
{
svcdot_lane (u32, u8, u8, 0); /* { dg-error {too few arguments to function 'svcdot_lane'} } */
svcdot_lane (u32, u8, u8, 0, 0, 0); /* { dg-error {too many arguments to function 'svcdot_lane'} } */
- svcdot_lane (0, u8, u8, 0, 0); /* { dg-error {passing 'int' to argument 1 of 'svcdot_lane', which expects an SVE vector type} } */
+ svcdot_lane (0, u8, u8, 0, 0); /* { dg-error {passing 'int' to argument 1 of 'svcdot_lane', which expects an SVE type rather than a scalar} } */
svcdot_lane (pg, u8, u8, 0, 0); /* { dg-error {'svcdot_lane' has no form that takes 'svbool_t' arguments} } */
svcdot_lane (s8, s8, s8, 0, 0); /* { dg-error {'svcdot_lane' has no form that takes 'svint8_t' arguments} } */
svcdot_lane (f32, s8, s8, 0, 0); /* { dg-error {'svcdot_lane' has no form that takes 'svfloat32_t' arguments} } */
svcdot_lane (s32, s8, s8, 0, 0);
- svcdot_lane (s32, 0, s8, 0, 0); /* { dg-error {passing 'int' to argument 2 of 'svcdot_lane', which expects an SVE vector type} } */
- svcdot_lane (s32, s8, 0, 0, 0); /* { dg-error {passing 'int' to argument 3 of 'svcdot_lane', which expects an SVE vector type} } */
+ svcdot_lane (s32, 0, s8, 0, 0); /* { dg-error {passing 'int' to argument 2 of 'svcdot_lane', which expects an SVE type rather than a scalar} } */
+ svcdot_lane (s32, s8, 0, 0, 0); /* { dg-error {passing 'int' to argument 3 of 'svcdot_lane', which expects an SVE type rather than a scalar} } */
svcdot_lane (s32, s8, s8, 0, 0);
svcdot_lane (s32, u8, s8, 0, 0); /* { dg-error {arguments 1 and 2 of 'svcdot_lane' must have the same signedness, but the values passed here have type 'svint32_t' and 'svuint8_t' respectively} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_opt_n_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_opt_n_2.c
index fee4096..fc92dce 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_opt_n_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_opt_n_2.c
@@ -8,14 +8,14 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8, svuint32_t u32,
{
svdot (u32, u8); /* { dg-error {too few arguments to function 'svdot'} } */
svdot (u32, u8, u8, u8); /* { dg-error {too many arguments to function 'svdot'} } */
- svdot (0, u8, u8); /* { dg-error {passing 'int' to argument 1 of 'svdot', which expects an SVE vector type} } */
- svdot (pg, u8, u8); /* { dg-error {'svdot' has no form that takes 'svbool_t' arguments} } */
- svdot (u8, u8, u8); /* { dg-error {'svdot' has no form that takes 'svuint8_t' arguments} } */
- svdot (f32, u8, u8); /* { dg-error {'svdot' has no form that takes 'svfloat32_t' arguments} } */
+ svdot (0, u8, u8); /* { dg-error {passing 'int' to argument 1 of 'svdot', which expects an SVE type rather than a scalar} } */
+ svdot (pg, u8, u8); /* { dg-error {'svdot' has no form that takes 'svbool_t' and 'svuint8_t' arguments} }*/
+ svdot (u8, u8, u8); /* { dg-error {'svdot' has no form that takes 'svuint8_t' and 'svuint8_t' arguments} } */
+ svdot (f32, u8, u8); /* { dg-error {'svdot' has no form that takes 'svfloat32_t' and 'svuint8_t' arguments} } */
svdot (u32, u8, u8);
- svdot (u32, 0, u8); /* { dg-error {passing 'int' to argument 2 of 'svdot', which expects an SVE vector type} } */
- svdot (u32, s8, u8); /* { dg-error {arguments 1 and 2 of 'svdot' must have the same signedness, but the values passed here have type 'svuint32_t' and 'svint8_t' respectively} } */
+ svdot (u32, 0, u8); /* { dg-error {passing 'int' to argument 2 of 'svdot', which expects an SVE type rather than a scalar} } */
+ svdot (u32, s8, u8); /* { dg-error {passing 'svuint8_t' to argument 3 of 'svdot', but argument 2 had type 'svint8_t'} } */
svdot (u32, u8, 0);
- svdot (u32, u8, s8); /* { dg-error {arguments 1 and 3 of 'svdot' must have the same signedness, but the values passed here have type 'svuint32_t' and 'svint8_t' respectively} } */
- svdot (u32, u32, u32); /* { dg-error {passing 'svuint32_t' instead of the expected 'svuint8_t' to argument 2 of 'svdot', after passing 'svuint32_t' to argument 1} } */
+ svdot (u32, u8, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svdot', but argument 2 had type 'svuint8_t'} } */
+ svdot (u32, u32, u32); /* { dg-error {'svdot' has no form that takes 'svuint32_t' and 'svuint32_t' arguments} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_or_011_lane_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_or_011_lane_1.c
new file mode 100644
index 0000000..b8968c8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_or_011_lane_1.c
@@ -0,0 +1,33 @@
+/* { dg-do compile } */
+
+#include <arm_sve.h>
+
+#pragma GCC target "+sme2"
+
+void
+f1 (svbool_t pg, svint8_t s8, svuint8_t u8, svint16_t s16, svuint16_t u16,
+ svint32_t s32, svuint32_t u32, svint64_t s64, svuint64_t u64,
+ svfloat16_t f16, svfloat32_t f32, int i) __arm_streaming
+{
+ svdot_lane (u32, u16, u8, 0); /* { dg-error {passing 'svuint8_t' to argument 3 of 'svdot_lane', but argument 2 had type 'svuint16_t'} } */
+ svdot_lane (u32, u8, u16, 0); /* { dg-error {passing 'svuint16_t' to argument 3 of 'svdot_lane', but argument 2 had type 'svuint8_t'} } */
+ svdot_lane (u32, s16, s16, 0); /* { dg-error {'svdot_lane' has no form that takes 'svuint32_t' and 'svint16_t' arguments} } */
+
+ svdot_lane (u32, u16, u16, i); /* { dg-error {argument 4 of 'svdot_lane' must be an integer constant expression} } */
+ svdot_lane (u32, u16, u16, 0);
+ svdot_lane (u32, u16, u16, 3);
+ svdot_lane (u32, u16, u16, 4); /* { dg-error {passing 4 to argument 4 of 'svdot_lane', which expects a value in the range \[0, 3\]} } */
+ svdot_lane (u32, u16, u16, -1); /* { dg-error {passing -1 to argument 4 of 'svdot_lane', which expects a value in the range \[0, 3\]} } */
+
+ svdot_lane (s32, s16, s16, i); /* { dg-error {argument 4 of 'svdot_lane' must be an integer constant expression} } */
+ svdot_lane (s32, s16, s16, 0);
+ svdot_lane (s32, s16, s16, 3);
+ svdot_lane (s32, s16, s16, 4); /* { dg-error {passing 4 to argument 4 of 'svdot_lane', which expects a value in the range \[0, 3\]} } */
+ svdot_lane (s32, s16, s16, -1); /* { dg-error {passing -1 to argument 4 of 'svdot_lane', which expects a value in the range \[0, 3\]} } */
+
+ svdot_lane (f32, f16, f16, i); /* { dg-error {argument 4 of 'svdot_lane' must be an integer constant expression} } */
+ svdot_lane (f32, f16, f16, 0);
+ svdot_lane (f32, f16, f16, 3);
+ svdot_lane (f32, f16, f16, 4); /* { dg-error {passing 4 to argument 4 of 'svdot_lane', which expects a value in the range \[0, 3\]} } */
+ svdot_lane (f32, f16, f16, -1); /* { dg-error {passing -1 to argument 4 of 'svdot_lane', which expects a value in the range \[0, 3\]} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_rotate_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_rotate_1.c
index 65e749b..9dd7eaf 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_rotate_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_qq_rotate_1.c
@@ -11,13 +11,13 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8, svint16_t s16, svuint16_t u16,
{
svcdot (u32, u8, u8); /* { dg-error {too few arguments to function 'svcdot'} } */
svcdot (u32, u8, u8, 0, 0); /* { dg-error {too many arguments to function 'svcdot'} } */
- svcdot (0, u8, u8, 0); /* { dg-error {passing 'int' to argument 1 of 'svcdot', which expects an SVE vector type} } */
+ svcdot (0, u8, u8, 0); /* { dg-error {passing 'int' to argument 1 of 'svcdot', which expects an SVE type rather than a scalar} } */
svcdot (pg, u8, u8, 0); /* { dg-error {'svcdot' has no form that takes 'svbool_t' arguments} } */
svcdot (s8, s8, s8, 0); /* { dg-error {'svcdot' has no form that takes 'svint8_t' arguments} } */
svcdot (f32, s8, s8, 0); /* { dg-error {'svcdot' has no form that takes 'svfloat32_t' arguments} } */
svcdot (s32, s8, s8, 0);
- svcdot (s32, 0, s8, 0); /* { dg-error {passing 'int' to argument 2 of 'svcdot', which expects an SVE vector type} } */
- svcdot (s32, s8, 0, 0); /* { dg-error {passing 'int' to argument 3 of 'svcdot', which expects an SVE vector type} } */
+ svcdot (s32, 0, s8, 0); /* { dg-error {passing 'int' to argument 2 of 'svcdot', which expects an SVE type rather than a scalar} } */
+ svcdot (s32, s8, 0, 0); /* { dg-error {passing 'int' to argument 3 of 'svcdot', which expects an SVE type rather than a scalar} } */
svcdot (s32, s8, s8, 0);
svcdot (s32, u8, s8, 0); /* { dg-error {arguments 1 and 2 of 'svcdot' must have the same signedness, but the values passed here have type 'svint32_t' and 'svuint8_t' respectively} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_rotate_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_rotate_1.c
index f340e3d..68b2cfc 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_rotate_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_rotate_1.c
@@ -10,11 +10,11 @@ f1 (svbool_t pg, svfloat32_t f32, svfloat64_t f64, svint32_t s32, int i)
svcmla_x (f32, f32, f32, f32, 90); /* { dg-error {passing 'svfloat32_t' to argument 1 of 'svcmla_x', which expects 'svbool_t'} } */
svcmla_x (pg, pg, pg, pg, 90); /* { dg-error {'svcmla_x' has no form that takes 'svbool_t' arguments} } */
svcmla_x (pg, s32, s32, s32, 90); /* { dg-error {'svcmla_x' has no form that takes 'svint32_t' arguments} } */
- svcmla_x (pg, 1, f32, f32, 90); /* { dg-error {passing 'int' to argument 2 of 'svcmla_x', which expects an SVE vector type} } */
- svcmla_x (pg, f32, 1, f32, 90); /* { dg-error {passing 'int' to argument 3 of 'svcmla_x', which expects an SVE vector type} } */
- svcmla_x (pg, f32, f32, 1, 90); /* { dg-error {passing 'int' to argument 4 of 'svcmla_x', which expects an SVE vector type} } */
- svcmla_x (pg, f32, f64, f32, 90); /* { dg-error {passing 'svfloat64_t' to argument 3 of 'svcmla_x', but previous arguments had type 'svfloat32_t'} } */
- svcmla_x (pg, f32, f32, f64, 90); /* { dg-error {passing 'svfloat64_t' to argument 4 of 'svcmla_x', but previous arguments had type 'svfloat32_t'} } */
+ svcmla_x (pg, 1, f32, f32, 90); /* { dg-error {passing 'int' to argument 2 of 'svcmla_x', which expects an SVE type rather than a scalar} } */
+ svcmla_x (pg, f32, 1, f32, 90); /* { dg-error {passing 'int' to argument 3 of 'svcmla_x', which expects an SVE type rather than a scalar} } */
+ svcmla_x (pg, f32, f32, 1, 90); /* { dg-error {passing 'int' to argument 4 of 'svcmla_x', which expects an SVE type rather than a scalar} } */
+ svcmla_x (pg, f32, f64, f32, 90); /* { dg-error {passing 'svfloat64_t' to argument 3 of 'svcmla_x', but argument 2 had type 'svfloat32_t'} } */
+ svcmla_x (pg, f32, f32, f64, 90); /* { dg-error {passing 'svfloat64_t' to argument 4 of 'svcmla_x', but argument 2 had type 'svfloat32_t'} } */
svcmla_x (pg, f32, f32, f32, s32); /* { dg-error {argument 5 of 'svcmla_x' must be an integer constant expression} } */
svcmla_x (pg, f32, f32, f32, i); /* { dg-error {argument 5 of 'svcmla_x' must be an integer constant expression} } */
svcmla_x (pg, f32, f32, f32, -90); /* { dg-error {passing -90 to argument 5 of 'svcmla_x', which expects 0, 90, 180 or 270} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_shift_right_imm_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_shift_right_imm_1.c
index 2811137..134cf98 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_shift_right_imm_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_shift_right_imm_1.c
@@ -11,11 +11,11 @@ f1 (svbool_t pg, svuint8_t u8, svint8_t s8, svint16_t s16,
{
const int one = 1;
pg = svsra (pg, pg, 1); /* { dg-error {'svsra' has no form that takes 'svbool_t' arguments} } */
- pg = svsra (pg, s8, 1); /* { dg-error {passing 'svint8_t' to argument 2 of 'svsra', but previous arguments had type 'svbool_t'} } */
- s8 = svsra (1, s8, 1); /* { dg-error {passing 'int' to argument 1 of 'svsra', which expects an SVE vector type} } */
- s8 = svsra (s8, u8, 1); /* { dg-error {passing 'svuint8_t' to argument 2 of 'svsra', but previous arguments had type 'svint8_t'} } */
- s8 = svsra (s8, pg, 1); /* { dg-error {passing 'svbool_t' to argument 2 of 'svsra', but previous arguments had type 'svint8_t'} } */
- s8 = svsra (s8, 1, 1); /* { dg-error {passing 'int' to argument 2 of 'svsra', which expects an SVE vector type} } */
+ pg = svsra (pg, s8, 1); /* { dg-error {passing 'svint8_t' to argument 2 of 'svsra', but argument 1 had type 'svbool_t'} } */
+ s8 = svsra (1, s8, 1); /* { dg-error {passing 'int' to argument 1 of 'svsra', which expects an SVE type rather than a scalar} } */
+ s8 = svsra (s8, u8, 1); /* { dg-error {passing 'svuint8_t' to argument 2 of 'svsra', but argument 1 had type 'svint8_t'} } */
+ s8 = svsra (s8, pg, 1); /* { dg-error {passing 'svbool_t' to argument 2 of 'svsra', but argument 1 had type 'svint8_t'} } */
+ s8 = svsra (s8, 1, 1); /* { dg-error {passing 'int' to argument 2 of 'svsra', which expects an SVE type rather than a scalar} } */
s8 = svsra (s8, s8, x); /* { dg-error {argument 3 of 'svsra' must be an integer constant expression} } */
s8 = svsra (s8, s8, one); /* { dg-error {argument 3 of 'svsra' must be an integer constant expression} } */
s8 = svsra (s8, s8, 0.4); /* { dg-error {passing 0 to argument 3 of 'svsra', which expects a value in the range \[1, 8\]} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_uint_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_uint_1.c
index 711b6a1..a639562 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_uint_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_uint_1.c
@@ -13,30 +13,30 @@ f1 (svbool_t pg, svuint8_t u8, svint8_t s8, svuint16_t u16, svint16_t s16,
svtbx (pg, pg, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svtbx', which expects a vector of unsigned integers} } */
svtbx (pg, pg, u8); /* { dg-error {'svtbx' has no form that takes 'svbool_t' arguments} } */
- svtbx (u8, 0, u8); /* { dg-error {passing 'int' to argument 2 of 'svtbx', which expects an SVE vector type} } */
- svtbx (u8, u8, 0); /* { dg-error {passing 'int' to argument 3 of 'svtbx', which expects an SVE vector type} } */
- svtbx (u8, s8, u8); /* { dg-error {passing 'svint8_t' to argument 2 of 'svtbx', but previous arguments had type 'svuint8_t'} } */
+ svtbx (u8, 0, u8); /* { dg-error {passing 'int' to argument 2 of 'svtbx', which expects an SVE type rather than a scalar} } */
+ svtbx (u8, u8, 0); /* { dg-error {passing 'int' to argument 3 of 'svtbx', which expects an SVE type rather than a scalar} } */
+ svtbx (u8, s8, u8); /* { dg-error {passing 'svint8_t' to argument 2 of 'svtbx', but argument 1 had type 'svuint8_t'} } */
svtbx (u8, u8, u8);
svtbx (u8, u8, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svtbx', which expects a vector of unsigned integers} } */
svtbx (u8, u8, u16); /* { dg-error {arguments 1 and 3 of 'svtbx' must have the same element size, but the values passed here have type 'svuint8_t' and 'svuint16_t' respectively} } */
svtbx (u8, u8, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svtbx', which expects a vector of unsigned integers} } */
svtbx (u8, u8, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svtbx', which expects a vector of unsigned integers} } */
- svtbx (s8, u8, u8); /* { dg-error {passing 'svuint8_t' to argument 2 of 'svtbx', but previous arguments had type 'svint8_t'} } */
+ svtbx (s8, u8, u8); /* { dg-error {passing 'svuint8_t' to argument 2 of 'svtbx', but argument 1 had type 'svint8_t'} } */
svtbx (s8, s8, u8);
svtbx (s8, s8, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svtbx', which expects a vector of unsigned integers} } */
svtbx (s8, s8, u16); /* { dg-error {arguments 1 and 3 of 'svtbx' must have the same element size, but the values passed here have type 'svint8_t' and 'svuint16_t' respectively} } */
svtbx (s8, s8, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svtbx', which expects a vector of unsigned integers} } */
svtbx (s8, s8, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svtbx', which expects a vector of unsigned integers} } */
- svtbx (u16, 0, u16); /* { dg-error {passing 'int' to argument 2 of 'svtbx', which expects an SVE vector type} } */
+ svtbx (u16, 0, u16); /* { dg-error {passing 'int' to argument 2 of 'svtbx', which expects an SVE type rather than a scalar} } */
svtbx (u16, u16, u8); /* { dg-error {arguments 1 and 3 of 'svtbx' must have the same element size, but the values passed here have type 'svuint16_t' and 'svuint8_t' respectively} } */
svtbx (u16, u16, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svtbx', which expects a vector of unsigned integers} } */
svtbx (u16, u16, u16);
svtbx (u16, u16, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svtbx', which expects a vector of unsigned integers} } */
svtbx (u16, u16, f16); /* { dg-error {passing 'svfloat16_t' to argument 3 of 'svtbx', which expects a vector of unsigned integers} } */
- svtbx (s16, u16, u16); /* { dg-error {passing 'svuint16_t' to argument 2 of 'svtbx', but previous arguments had type 'svint16_t'} } */
+ svtbx (s16, u16, u16); /* { dg-error {passing 'svuint16_t' to argument 2 of 'svtbx', but argument 1 had type 'svint16_t'} } */
svtbx (s16, s16, u8); /* { dg-error {arguments 1 and 3 of 'svtbx' must have the same element size, but the values passed here have type 'svint16_t' and 'svuint8_t' respectively} } */
svtbx (s16, s16, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svtbx', which expects a vector of unsigned integers} } */
svtbx (s16, s16, u16);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_uintq_intq_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_uintq_intq_1.c
index f52fb39..d1aad1de1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_uintq_intq_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_uintq_intq_1.c
@@ -23,15 +23,15 @@ f2 (svbool_t pg, svint8_t s8, svuint8_t u8, svuint32_t u32,
{
svusmmla (s32, u8); /* { dg-error {too few arguments to function 'svusmmla'} } */
svusmmla (s32, u8, s8, u8); /* { dg-error {too many arguments to function 'svusmmla'} } */
- svusmmla (0, u8, s8); /* { dg-error {passing 'int' to argument 1 of 'svusmmla', which expects an SVE vector type} } */
+ svusmmla (0, u8, s8); /* { dg-error {passing 'int' to argument 1 of 'svusmmla', which expects an SVE type rather than a scalar} } */
svusmmla (pg, u8, s8); /* { dg-error {'svusmmla' has no form that takes 'svbool_t' arguments} } */
svusmmla (u8, u8, s8); /* { dg-error {'svusmmla' has no form that takes 'svuint8_t' arguments} } */
svusmmla (f32, u8, s8); /* { dg-error {'svusmmla' has no form that takes 'svfloat32_t' arguments} } */
svusmmla (s32, u8, s8);
- svusmmla (s32, 0, s8); /* { dg-error {passing 'int' to argument 2 of 'svusmmla', which expects an SVE vector type} } */
+ svusmmla (s32, 0, s8); /* { dg-error {passing 'int' to argument 2 of 'svusmmla', which expects an SVE type rather than a scalar} } */
svusmmla (s32, u8, u8); /* { dg-error {passing 'svuint8_t' to argument 3 of 'svusmmla', which expects a vector of signed integers} } */
svusmmla (s32, s8, s8); /* { dg-error {passing 'svint8_t' to argument 2 of 'svusmmla', which expects a vector of unsigned integers} } */
- svusmmla (s32, u8, 0); /* { dg-error {passing 'int' to argument 3 of 'svusmmla', which expects an SVE vector type} } */
+ svusmmla (s32, u8, 0); /* { dg-error {passing 'int' to argument 3 of 'svusmmla', which expects an SVE type rather than a scalar} } */
svusmmla (s32, u8, s8);
svusmmla (s32, u32, u32); /* { dg-error {passing 'svuint32_t' instead of the expected 'svuint8_t' to argument 2 of 'svusmmla', after passing 'svint32_t' to argument 1} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_uintq_intq_lane_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_uintq_intq_lane_1.c
index b40cfe9..0cc5c74 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_uintq_intq_lane_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_uintq_intq_lane_1.c
@@ -10,14 +10,14 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8, svint16_t s16, svuint16_t u16,
{
svusdot_lane (s32, u8, s8); /* { dg-error {too few arguments to function 'svusdot_lane'} } */
svusdot_lane (s32, u8, s8, 0, 0); /* { dg-error {too many arguments to function 'svusdot_lane'} } */
- svusdot_lane (0, u8, s8, 0); /* { dg-error {passing 'int' to argument 1 of 'svusdot_lane', which expects an SVE vector type} } */
+ svusdot_lane (0, u8, s8, 0); /* { dg-error {passing 'int' to argument 1 of 'svusdot_lane', which expects an SVE type rather than a scalar} } */
svusdot_lane (pg, u8, s8, 0); /* { dg-error {'svusdot_lane' has no form that takes 'svbool_t' arguments} } */
svusdot_lane (u8, u8, s8, 0); /* { dg-error {'svusdot_lane' has no form that takes 'svuint8_t' arguments} } */
svusdot_lane (f32, u8, s8, 0); /* { dg-error {'svusdot_lane' has no form that takes 'svfloat32_t' arguments} } */
svusdot_lane (u32, u8, s8, 0); /* { dg-error {'svusdot_lane' has no form that takes 'svuint32_t' arguments} } */
svusdot_lane (s32, u8, s8, 0);
- svusdot_lane (s32, 0, s8, 0); /* { dg-error {passing 'int' to argument 2 of 'svusdot_lane', which expects an SVE vector type} } */
- svusdot_lane (s32, u8, 0, 0); /* { dg-error {passing 'int' to argument 3 of 'svusdot_lane', which expects an SVE vector type} } */
+ svusdot_lane (s32, 0, s8, 0); /* { dg-error {passing 'int' to argument 2 of 'svusdot_lane', which expects an SVE type rather than a scalar} } */
+ svusdot_lane (s32, u8, 0, 0); /* { dg-error {passing 'int' to argument 3 of 'svusdot_lane', which expects an SVE type rather than a scalar} } */
svusdot_lane (s32, u8, s8, 0);
svusdot_lane (s32, s8, s8, 0); /* { dg-error {passing 'svint8_t' to argument 2 of 'svusdot_lane', which expects a vector of unsigned integers} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_uintq_intq_opt_n_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_uintq_intq_opt_n_1.c
index 896b803..f6585ae 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_uintq_intq_opt_n_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ternary_uintq_intq_opt_n_1.c
@@ -23,12 +23,12 @@ f2 (svbool_t pg, svint8_t s8, svuint8_t u8, svuint32_t u32,
{
svusdot (s32, u8); /* { dg-error {too few arguments to function 'svusdot'} } */
svusdot (s32, u8, s8, u8); /* { dg-error {too many arguments to function 'svusdot'} } */
- svusdot (0, u8, s8); /* { dg-error {passing 'int' to argument 1 of 'svusdot', which expects an SVE vector type} } */
+ svusdot (0, u8, s8); /* { dg-error {passing 'int' to argument 1 of 'svusdot', which expects an SVE type rather than a scalar} } */
svusdot (pg, u8, s8); /* { dg-error {'svusdot' has no form that takes 'svbool_t' arguments} } */
svusdot (u8, u8, s8); /* { dg-error {'svusdot' has no form that takes 'svuint8_t' arguments} } */
svusdot (f32, u8, s8); /* { dg-error {'svusdot' has no form that takes 'svfloat32_t' arguments} } */
svusdot (s32, u8, s8);
- svusdot (s32, 0, s8); /* { dg-error {passing 'int' to argument 2 of 'svusdot', which expects an SVE vector type} } */
+ svusdot (s32, 0, s8); /* { dg-error {passing 'int' to argument 2 of 'svusdot', which expects an SVE type rather than a scalar} } */
svusdot (s32, u8, u8); /* { dg-error {passing 'svuint8_t' to argument 3 of 'svusdot', which expects a vector of signed integers} } */
svusdot (s32, s8, s8); /* { dg-error {passing 'svint8_t' to argument 2 of 'svusdot', which expects a vector of unsigned integers} } */
svusdot (s32, u8, 0);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/tmad_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/tmad_1.c
index 8b98fc2..992b501 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/tmad_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/tmad_1.c
@@ -9,9 +9,9 @@ f1 (svbool_t pg, svfloat32_t f32, svfloat64_t f64, svint32_t s32, int i)
svtmad (f32, f32, 0, 0); /* { dg-error {too many arguments to function 'svtmad'} } */
svtmad (pg, pg, 0); /* { dg-error {'svtmad' has no form that takes 'svbool_t' arguments} } */
svtmad (s32, s32, 0); /* { dg-error {'svtmad' has no form that takes 'svint32_t' arguments} } */
- svtmad (1, f32, 0); /* { dg-error {passing 'int' to argument 1 of 'svtmad', which expects an SVE vector type} } */
- svtmad (f32, 1, 0); /* { dg-error {passing 'int' to argument 2 of 'svtmad', which expects an SVE vector type} } */
- svtmad (f32, f64, 0); /* { dg-error {passing 'svfloat64_t' to argument 2 of 'svtmad', but previous arguments had type 'svfloat32_t'} } */
+ svtmad (1, f32, 0); /* { dg-error {passing 'int' to argument 1 of 'svtmad', which expects an SVE type rather than a scalar} } */
+ svtmad (f32, 1, 0); /* { dg-error {passing 'int' to argument 2 of 'svtmad', which expects an SVE type rather than a scalar} } */
+ svtmad (f32, f64, 0); /* { dg-error {passing 'svfloat64_t' to argument 2 of 'svtmad', but argument 1 had type 'svfloat32_t'} } */
svtmad (f32, f32, s32); /* { dg-error {argument 3 of 'svtmad' must be an integer constant expression} } */
svtmad (f32, f32, i); /* { dg-error {argument 3 of 'svtmad' must be an integer constant expression} } */
svtmad (f32, f32, -1); /* { dg-error {passing -1 to argument 3 of 'svtmad', which expects a value in the range \[0, 7\]} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_1.c
index eef85a0..9c9c383 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_1.c
@@ -7,15 +7,15 @@ f1 (svbool_t pg, svint32_t s32, svuint32_t u32, svfloat32_t f32)
{
svabs_m (s32, pg); /* { dg-error {too few arguments to function 'svabs_m'} } */
svabs_m (s32, pg, s32, s32); /* { dg-error {too many arguments to function 'svabs_m'} } */
- svabs_m (0, pg, s32); /* { dg-error {passing 'int' to argument 1 of 'svabs_m', which expects an SVE vector type} } */
+ svabs_m (0, pg, s32); /* { dg-error {passing 'int' to argument 1 of 'svabs_m', which expects an SVE type rather than a scalar} } */
svabs_m (s32, s32, s32); /* { dg-error {passing 'svint32_t' to argument 2 of 'svabs_m', which expects 'svbool_t'} } */
svabs_m (s32, 0, s32); /* { dg-error {passing 'int' to argument 2 of 'svabs_m', which expects 'svbool_t'} } */
svabs_m (s32, pg, s32);
svabs_m (u32, pg, u32); /* { dg-error {'svabs_m' has no form that takes 'svuint32_t' arguments} } */
svabs_m (f32, pg, f32);
- svabs_m (s32, pg, u32); /* { dg-error {passing 'svuint32_t' to argument 3 of 'svabs_m', but previous arguments had type 'svint32_t'} } */
- svabs_m (s32, pg, f32); /* { dg-error {passing 'svfloat32_t' to argument 3 of 'svabs_m', but previous arguments had type 'svint32_t'} } */
- svabs_m (s32, pg, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svabs_m', but previous arguments had type 'svint32_t'} } */
- svabs_m (pg, pg, s32); /* { dg-error {passing 'svint32_t' to argument 3 of 'svabs_m', but previous arguments had type 'svbool_t'} } */
+ svabs_m (s32, pg, u32); /* { dg-error {passing 'svuint32_t' to argument 3 of 'svabs_m', but argument 1 had type 'svint32_t'} } */
+ svabs_m (s32, pg, f32); /* { dg-error {passing 'svfloat32_t' to argument 3 of 'svabs_m', but argument 1 had type 'svint32_t'} } */
+ svabs_m (s32, pg, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svabs_m', but argument 1 had type 'svint32_t'} } */
+ svabs_m (pg, pg, s32); /* { dg-error {passing 'svint32_t' to argument 3 of 'svabs_m', but argument 1 had type 'svbool_t'} } */
svabs_m (pg, pg, pg); /* { dg-error {'svabs_m' has no form that takes 'svbool_t' arguments} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_2.c
index e94673a..bf93e21 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_2.c
@@ -9,7 +9,7 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8)
svabs_x (pg, s8, s8); /* { dg-error {too many arguments to function 'svabs_x'} } */
svabs_x (s8, s8); /* { dg-error {passing 'svint8_t' to argument 1 of 'svabs_x', which expects 'svbool_t'} } */
svabs_x (pg, pg); /* { dg-error {'svabs_x' has no form that takes 'svbool_t' arguments} } */
- svabs_x (pg, 1); /* { dg-error {passing 'int' to argument 2 of 'svabs_x', which expects an SVE vector type} } */
+ svabs_x (pg, 1); /* { dg-error {passing 'int' to argument 2 of 'svabs_x', which expects an SVE type rather than a scalar} } */
svabs_x (pg, s8);
svabs_x (pg, u8); /* { dg-error {'svabs_x' has no form that takes 'svuint8_t' arguments} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convert_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convert_1.c
index caa4e62..b7258e4 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convert_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convert_1.c
@@ -4,12 +4,12 @@ void
test (svbool_t pg, svint8_t s8, svuint8_t u8,
svint16_t s16, svuint16_t u16, svint32_t s32, svuint32_t u32,
svint64_t s64, svuint64_t u64, svfloat16_t f16, svfloat32_t f32,
- svfloat64_t f64)
+ svfloat64_t f64, svcount_t pn)
{
svcvt_f64_x (pg); /* { dg-error {too few arguments to function 'svcvt_f64_x'} } */
svcvt_f64_x (pg, s32, 0); /* { dg-error {too many arguments to function 'svcvt_f64_x'} } */
svcvt_f64_x (s32, s32); /* { dg-error {passing 'svint32_t' to argument 1 of 'svcvt_f64_x', which expects 'svbool_t'} } */
- svcvt_f64_x (pg, 0); /* { dg-error {passing 'int' to argument 2 of 'svcvt_f64_x', which expects an SVE vector type} } */
+ svcvt_f64_x (pg, 0); /* { dg-error {passing 'int' to argument 2 of 'svcvt_f64_x', which expects an SVE type rather than a scalar} } */
svcvt_f64_x (pg, s8); /* { dg-error {'svcvt_f64_x' has no form that takes 'svint8_t' arguments} } */
svcvt_f64_x (pg, s16); /* { dg-error {'svcvt_f64_x' has no form that takes 'svint16_t' arguments} } */
@@ -70,4 +70,10 @@ test (svbool_t pg, svint8_t s8, svuint8_t u8,
svcvt_u16_x (pg, f16);
svcvt_u16_x (pg, f32); /* { dg-error {'svcvt_u16_x' has no form that takes 'svfloat32_t' arguments} } */
svcvt_u16_x (pg, f64); /* { dg-error {'svcvt_u16_x' has no form that takes 'svfloat64_t' arguments} } */
+
+ svreinterpret_b (pg); /* { dg-error {'svreinterpret_b' has no form that takes 'svbool_t' arguments} } */
+ svreinterpret_b (pn);
+
+ svreinterpret_c (pg);
+ svreinterpret_c (pn); /* { dg-error {'svreinterpret_c' has no form that takes 'svcount_t' arguments} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convert_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convert_2.c
index ddbd93b..2649fd6 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convert_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convert_2.c
@@ -12,7 +12,7 @@ test (svbool_t pg, svint8_t s8, svuint8_t u8,
svcvt_f64_m (0, pg, s32); /* { dg-error {passing 'int' to argument 1 of 'svcvt_f64_m', which expects 'svfloat64_t'} } */
svcvt_f64_m (pg, pg, s32); /* { dg-error {passing 'svbool_t' to argument 1 of 'svcvt_f64_m', which expects 'svfloat64_t'} } */
svcvt_f64_m (f64, s32, s32); /* { dg-error {passing 'svint32_t' to argument 2 of 'svcvt_f64_m', which expects 'svbool_t'} } */
- svcvt_f64_m (f64, pg, 0); /* { dg-error {passing 'int' to argument 3 of 'svcvt_f64_m', which expects an SVE vector type} } */
+ svcvt_f64_m (f64, pg, 0); /* { dg-error {passing 'int' to argument 3 of 'svcvt_f64_m', which expects an SVE type rather than a scalar} } */
svcvt_f64_m (f64, pg, s8); /* { dg-error {'svcvt_f64_m' has no form that takes 'svint8_t' arguments} } */
svcvt_f64_m (f64, pg, s16); /* { dg-error {'svcvt_f64_m' has no form that takes 'svint16_t' arguments} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convert_narrowt_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convert_narrowt_1.c
index 92c07b8..a5d56de 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convert_narrowt_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convert_narrowt_1.c
@@ -14,7 +14,7 @@ test (svbool_t pg, svint8_t s8, svuint8_t u8,
svcvtnt_f32_m (0, pg, f64); /* { dg-error {passing 'int' to argument 1 of 'svcvtnt_f32_m', which expects 'svfloat32_t'} } */
svcvtnt_f32_m (pg, pg, f64); /* { dg-error {passing 'svbool_t' to argument 1 of 'svcvtnt_f32_m', which expects 'svfloat32_t'} } */
svcvtnt_f32_m (f32, s32, f64); /* { dg-error {passing 'svint32_t' to argument 2 of 'svcvtnt_f32_m', which expects 'svbool_t'} } */
- svcvtnt_f32_m (f32, pg, 0); /* { dg-error {passing 'int' to argument 3 of 'svcvtnt_f32_m', which expects an SVE vector type} } */
+ svcvtnt_f32_m (f32, pg, 0); /* { dg-error {passing 'int' to argument 3 of 'svcvtnt_f32_m', which expects an SVE type rather than a scalar} } */
svcvtnt_f32_m (f32, pg, s8); /* { dg-error {'svcvtnt_f32_m' has no form that takes 'svint8_t' arguments} } */
svcvtnt_f32_m (f32, pg, s16); /* { dg-error {'svcvtnt_f32_m' has no form that takes 'svint16_t' arguments} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convertxn_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convertxn_1.c
new file mode 100644
index 0000000..85f8b45
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_convertxn_1.c
@@ -0,0 +1,28 @@
+#include <arm_sve.h>
+
+#pragma GCC target "+sme2"
+
+void
+test (svbool_t pg, float f, svint8_t s8, svfloat32_t f32,
+ svint32x2_t s32x2, svint32x3_t s32x3, svint32x4_t s32x4,
+ svfloat32x2_t f32x2, svfloat32x3_t f32x3, svfloat32x4_t f32x4)
+ __arm_streaming
+{
+ svcvt_bf16 (); /* { dg-error {too few arguments to function 'svcvt_bf16'} } */
+ svcvt_bf16 (f32x2, f32x2); /* { dg-error {too many arguments to function 'svcvt_bf16'} } */
+ svcvt_bf16 (0); /* { dg-error {passing 'int' to argument 1 of 'svcvt_bf16', which expects an SVE type rather than a scalar} } */
+ svcvt_bf16 (f); /* { dg-error {passing 'float' to argument 1 of 'svcvt_bf16', which expects an SVE type rather than a scalar} } */
+ svcvt_bf16 (pg); /* { dg-error {svcvt_bf16' has no form that takes 'svbool_t' arguments} } */
+ svcvt_bf16 (s8); /* { dg-error {svcvt_bf16' has no form that takes 'svint8_t' arguments} } */
+ svcvt_bf16 (f32); /* { dg-error {svcvt_bf16' has no form that takes 'svfloat32_t' arguments} } */
+ svcvt_bf16 (f32x2);
+ svcvt_bf16 (f32x3); /* { dg-error {svcvt_bf16' has no form that takes 'svfloat32x3_t' arguments} } */
+ svcvt_bf16 (f32x4); /* { dg-error {svcvt_bf16' has no form that takes 'svfloat32x4_t' arguments} } */
+ svcvt_bf16 (s32x2); /* { dg-error {svcvt_bf16' has no form that takes 'svint32x2_t' arguments} } */
+ svcvt_s32 (f32x2);
+ svcvt_s32 (f32x3); /* { dg-error {svcvt_s32' has no form that takes 'svfloat32x3_t' arguments} } */
+ svcvt_s32 (f32x4);
+ svcvt_f32 (s32x2);
+ svcvt_f32 (s32x3); /* { dg-error {svcvt_f32' has no form that takes 'svint32x3_t' arguments} } */
+ svcvt_f32 (s32x4);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_narrowb_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_narrowb_1.c
index c03d644..c2465e3 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_narrowb_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_narrowb_1.c
@@ -23,5 +23,5 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8,
svqxtnb (u64);
svqxtnb (s64);
svqxtnb (f32); /* { dg-error {'svqxtnb' has no form that takes 'svfloat32_t' arguments} } */
- svqxtnb (1); /* { dg-error {passing 'int' to argument 1 of 'svqxtnb', which expects an SVE vector type} } */
+ svqxtnb (1); /* { dg-error {passing 'int' to argument 1 of 'svqxtnb', which expects an SVE type rather than a scalar} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_narrowb_to_uint_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_narrowb_to_uint_1.c
index c3e2103..60051f8 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_narrowb_to_uint_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_narrowb_to_uint_1.c
@@ -23,5 +23,5 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8,
svqxtunb (u64); /* { dg-error {'svqxtunb' has no form that takes 'svuint64_t' arguments} } */
svqxtunb (s64);
svqxtunb (f32); /* { dg-error {'svqxtunb' has no form that takes 'svfloat32_t' arguments} } */
- svqxtunb (1); /* { dg-error {passing 'int' to argument 1 of 'svqxtunb', which expects an SVE vector type} } */
+ svqxtunb (1); /* { dg-error {passing 'int' to argument 1 of 'svqxtunb', which expects an SVE type rather than a scalar} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_narrowt_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_narrowt_1.c
index 4ed179c..a0612dc 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_narrowt_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_narrowt_1.c
@@ -26,6 +26,6 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8,
svqxtnt (u32, u64);
svqxtnt (s32, s64);
svqxtnt (f16, f32); /* { dg-error {'svqxtnt' has no form that takes 'svfloat32_t' arguments} } */
- svqxtnt (1, u16); /* { dg-error {passing 'int' to argument 1 of 'svqxtnt', which expects an SVE vector type} } */
- svqxtnt (u8, 1); /* { dg-error {passing 'int' to argument 2 of 'svqxtnt', which expects an SVE vector type} } */
+ svqxtnt (1, u16); /* { dg-error {passing 'int' to argument 1 of 'svqxtnt', which expects an SVE type rather than a scalar} } */
+ svqxtnt (u8, 1); /* { dg-error {passing 'int' to argument 2 of 'svqxtnt', which expects an SVE type rather than a scalar} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_narrowt_to_uint_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_narrowt_to_uint_1.c
index acaa546..8e5fa5b 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_narrowt_to_uint_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_narrowt_to_uint_1.c
@@ -26,6 +26,6 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8,
svqxtunt (u32, u64); /* { dg-error {'svqxtunt' has no form that takes 'svuint64_t' arguments} } */
svqxtunt (u32, s64);
svqxtunt (u16, f32); /* { dg-error {'svqxtunt' has no form that takes 'svfloat32_t' arguments} } */
- svqxtunt (1, u16); /* { dg-error {passing 'int' to argument 1 of 'svqxtunt', which expects an SVE vector type} } */
- svqxtunt (u8, 1); /* { dg-error {passing 'int' to argument 2 of 'svqxtunt', which expects an SVE vector type} } */
+ svqxtunt (1, u16); /* { dg-error {passing 'int' to argument 1 of 'svqxtunt', which expects an SVE type rather than a scalar} } */
+ svqxtunt (u8, 1); /* { dg-error {passing 'int' to argument 2 of 'svqxtunt', which expects an SVE type rather than a scalar} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_to_int_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_to_int_1.c
index 517d11f..e2e172d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_to_int_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_to_int_1.c
@@ -10,7 +10,7 @@ f1 (svbool_t pg, svint32_t s32, svuint32_t u32, svfloat32_t f32,
{
svlogb_m (s32, pg); /* { dg-error {too few arguments to function 'svlogb_m'} } */
svlogb_m (s32, pg, f32, s32); /* { dg-error {too many arguments to function 'svlogb_m'} } */
- svlogb_m (0, pg, f32); /* { dg-error {passing 'int' to argument 1 of 'svlogb_m', which expects an SVE vector type} } */
+ svlogb_m (0, pg, f32); /* { dg-error {passing 'int' to argument 1 of 'svlogb_m', which expects an SVE type rather than a scalar} } */
svlogb_m (s32, u32, f32); /* { dg-error {passing 'svuint32_t' to argument 2 of 'svlogb_m', which expects 'svbool_t'} } */
svlogb_m (s32, 0, f32); /* { dg-error {passing 'int' to argument 2 of 'svlogb_m', which expects 'svbool_t'} } */
svlogb_m (s32, pg, s32); /* { dg-error {'svlogb_m' has no form that takes 'svint32_t' arguments} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_to_uint_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_to_uint_1.c
index 888b525..b3cf0b9f 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_to_uint_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_to_uint_1.c
@@ -8,7 +8,7 @@ f1 (svbool_t pg, svint32_t s32, svuint32_t u32, svfloat32_t f32,
{
svclz_m (u32, pg); /* { dg-error {too few arguments to function 'svclz_m'} } */
svclz_m (u32, pg, s32, s32); /* { dg-error {too many arguments to function 'svclz_m'} } */
- svclz_m (0, pg, f32); /* { dg-error {passing 'int' to argument 1 of 'svclz_m', which expects an SVE vector type} } */
+ svclz_m (0, pg, f32); /* { dg-error {passing 'int' to argument 1 of 'svclz_m', which expects an SVE type rather than a scalar} } */
svclz_m (u32, u32, f32); /* { dg-error {passing 'svuint32_t' to argument 2 of 'svclz_m', which expects 'svbool_t'} } */
svclz_m (u32, 0, f32); /* { dg-error {passing 'int' to argument 2 of 'svclz_m', which expects 'svbool_t'} } */
svclz_m (u32, pg, s32);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_to_uint_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_to_uint_2.c
index 233e847..da02d12 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_to_uint_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_to_uint_2.c
@@ -9,7 +9,7 @@ f1 (svbool_t pg, svint32_t s32, svuint32_t u32, svfloat32_t f32,
{
svclz_m (u32, pg); /* { dg-error {too few arguments to function 'svclz_m'} } */
svclz_m (u32, pg, s32, s32); /* { dg-error {too many arguments to function 'svclz_m'} } */
- svclz_m (0, pg, f32); /* { dg-error {passing 'int' to argument 1 of 'svclz_m', which expects an SVE vector type} } */
+ svclz_m (0, pg, f32); /* { dg-error {passing 'int' to argument 1 of 'svclz_m', which expects an SVE type rather than a scalar} } */
svclz_m (u32, u32, f32); /* { dg-error {passing 'svuint32_t' to argument 2 of 'svclz_m', which expects 'svbool_t'} } */
svclz_m (u32, 0, f32); /* { dg-error {passing 'int' to argument 2 of 'svclz_m', which expects 'svbool_t'} } */
svclz_m (u32, pg, s32);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_to_uint_3.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_to_uint_3.c
index da57b07..858a2a5 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_to_uint_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_to_uint_3.c
@@ -9,6 +9,6 @@ f1 (svbool_t pg, svuint8_t u8)
svcnt_x (pg, u8, u8); /* { dg-error {too many arguments to function 'svcnt_x'} } */
svcnt_x (u8, u8); /* { dg-error {passing 'svuint8_t' to argument 1 of 'svcnt_x', which expects 'svbool_t'} } */
svcnt_x (pg, pg); /* { dg-error {'svcnt_x' has no form that takes 'svbool_t' arguments} } */
- svcnt_x (pg, 1); /* { dg-error {passing 'int' to argument 2 of 'svcnt_x', which expects an SVE vector type} } */
+ svcnt_x (pg, 1); /* { dg-error {passing 'int' to argument 2 of 'svcnt_x', which expects an SVE type rather than a scalar} } */
svcnt_x (pg, u8);
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_uint_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_uint_1.c
index 9c8acdf..e3275a8 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_uint_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_uint_1.c
@@ -8,7 +8,7 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8,
{
svexpa (); /* { dg-error {too few arguments to function 'svexpa'} } */
svexpa (u16, u16); /* { dg-error {too many arguments to function 'svexpa'} } */
- svexpa (1); /* { dg-error {passing 'int' to argument 1 of 'svexpa', which expects an SVE vector type} } */
+ svexpa (1); /* { dg-error {passing 'int' to argument 1 of 'svexpa', which expects an SVE type rather than a scalar} } */
svexpa (pg); /* { dg-error {passing 'svbool_t' to argument 1 of 'svexpa', which expects a vector of unsigned integers} } */
svexpa (s8); /* { dg-error {passing 'svint8_t' to argument 1 of 'svexpa', which expects a vector of unsigned integers} } */
svexpa (s16); /* { dg-error {passing 'svint16_t' to argument 1 of 'svexpa', which expects a vector of unsigned integers} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_widen_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_widen_1.c
index 95a97a7..a194bd6 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_widen_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_widen_1.c
@@ -8,8 +8,8 @@ test (svbool_t pg, svint8_t s8, svuint8_t u8,
{
svunpklo (); /* { dg-error {too few arguments to function 'svunpklo'} } */
svunpklo (pg, s8); /* { dg-error {too many arguments to function 'svunpklo'} } */
- svunpklo (i); /* { dg-error {passing 'int' to argument 1 of 'svunpklo', which expects an SVE vector type} } */
- svunpklo (f); /* { dg-error {passing 'float' to argument 1 of 'svunpklo', which expects an SVE vector type} } */
+ svunpklo (i); /* { dg-error {passing 'int' to argument 1 of 'svunpklo', which expects an SVE type rather than a scalar} } */
+ svunpklo (f); /* { dg-error {passing 'float' to argument 1 of 'svunpklo', which expects an SVE type rather than a scalar} } */
svunpklo (pg);
svunpklo (s8);
svunpklo (s16);
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_za_m_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_za_m_1.c
new file mode 100644
index 0000000..948ce2c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_za_m_1.c
@@ -0,0 +1,49 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target ("arch=armv9-a+sme")
+
+void
+f1 (svbool_t pg, svuint8_t u8, svint16_t s16, svint32_t s32, svint64_t s64,
+ svfloat32_t f32, uint32_t tile)
+ __arm_streaming __arm_inout("za")
+{
+ svaddha_za32_m (0, pg, pg); /* { dg-error {too few arguments to function 'svaddha_za32_m'} } */
+ svaddha_za32_m (0, pg, pg, s32, s32); /* { dg-error {too many arguments to function 'svaddha_za32_m'} } */
+ svaddha_za32_m (tile, pg, pg, s32); /* { dg-error {argument 1 of 'svaddha_za32_m' must be an integer constant expression} } */
+ svaddha_za32_m (-1, pg, pg, s32); /* { dg-error {passing -1 to argument 1 of 'svaddha_za32_m', which expects a value in the range \[0, 3\]} } */
+ svaddha_za32_m (4, pg, pg, s32); /* { dg-error {passing 4 to argument 1 of 'svaddha_za32_m', which expects a value in the range \[0, 3\]} } */
+ svaddha_za32_m (0, u8, pg, s32); /* { dg-error {passing 'svuint8_t' to argument 2 of 'svaddha_za32_m', which expects 'svbool_t'} } */
+ svaddha_za32_m (0, pg, u8, s32); /* { dg-error {passing 'svuint8_t' to argument 3 of 'svaddha_za32_m', which expects 'svbool_t'} } */
+ svaddha_za32_m (0, pg, pg, tile); /* { dg-error {passing 'uint32_t'.* to argument 4 of 'svaddha_za32_m', which expects an SVE type} } */
+ svaddha_za32_m (0, pg, pg, pg); /* { dg-error {'svaddha_za32_m' has no form that takes 'svbool_t' arguments} } */
+ svaddha_za32_m (0, pg, pg, u8); /* { dg-error {'svaddha_za32_m' has no form that takes 'svuint8_t' arguments} } */
+ svaddha_za32_m (0, pg, pg, s16); /* { dg-error {'svaddha_za32_m' has no form that takes 'svint16_t' arguments} } */
+ svaddha_za32_m (0, pg, pg, f32); /* { dg-error {'svaddha_za32_m' has no form that takes 'svfloat32_t' arguments} } */
+ svaddha_za32_m (0, pg, pg, s64); /* { dg-error {'svaddha_za32_m' has no form that takes 'svint64_t' arguments} } */
+
+ svaddha_za64_m (0, pg, pg, s64); /* { dg-error {ACLE function 'svaddha_za64_s64_m' requires ISA extension 'sme-i16i64'} } */
+}
+
+void
+f2 (svbool_t pg, svint32_t s32) __arm_streaming
+{
+ svaddha_za32_m (0, pg, pg, s32); /* { dg-error {ACLE function 'svaddha_za32_s32_m' can only be called from a function that has 'za' state} } */
+}
+
+void
+f3 (svbool_t pg, svint32_t s32) __arm_inout("za")
+{
+ svaddha_za32_m (0, pg, pg, s32); /* { dg-error {ACLE function 'svaddha_za32_s32_m' can only be called when SME streaming mode is enabled} } */
+}
+
+#pragma GCC target ("arch=armv9-a+sme-i16i64")
+
+void
+f4 (svbool_t pg, svint64_t s64)
+ __arm_streaming __arm_inout("za")
+{
+ svaddha_za64_m (-1, pg, pg, s64); /* { dg-error {passing -1 to argument 1 of 'svaddha_za64_m', which expects a value in the range \[0, 7\]} } */
+ svaddha_za64_m (8, pg, pg, s64); /* { dg-error {passing 8 to argument 1 of 'svaddha_za64_m', which expects a value in the range \[0, 7\]} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_za_slice_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_za_slice_1.c
new file mode 100644
index 0000000..e02fe54
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_za_slice_1.c
@@ -0,0 +1,54 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target ("+sme2")
+
+void
+f1 (svbool_t pg, svint32_t s32, svint16x2_t s16x2, svint32x2_t s32x2,
+ svint32x3_t s32x3, svint32x4_t s32x4, svint64x2_t s64x2, float f, double d)
+ __arm_streaming __arm_inout("za")
+{
+ svadd_za32_vg1x2 (1); /* { dg-error {too few arguments to function 'svadd_za32_vg1x2'} } */
+ svadd_za32_vg1x2 (1, s32x2, s32x2); /* { dg-error {too many arguments to function 'svadd_za32_vg1x2'} } */
+
+ svadd_za32_vg1x2 (s32x2, s32x2); /* { dg-error {passing 'svint32x2_t' to argument 1 of 'svadd_za32_vg1x2', which expects 'uint32_t'} } */
+ svadd_za32_vg1x2 (f, s32x2);
+ svadd_za32_vg1x2 (d, s32x2);
+ svadd_za32_vg1x2 (pg, s32x2); /* { dg-error {passing 'svbool_t' to argument 1 of 'svadd_za32_vg1x2', which expects 'uint32_t'} } */
+
+ svadd_za32_vg1x2 (1, 1); /* { dg-error {passing 'int' to argument 2 of 'svadd_za32_vg1x2', which expects an SVE type rather than a scalar type} } */
+ svadd_za32_vg1x2 (1, pg); /* { dg-error {passing 'svbool_t' to argument 2 of 'svadd_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svadd_za32_vg1x2 (1, s32); /* { dg-error {passing single vector 'svint32_t' to argument 2 of 'svadd_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svadd_za32_vg1x2 (1, s32x2);
+ svadd_za32_vg1x2 (1, s32x3); /* { dg-error {passing 'svint32x3_t' to argument 2 of 'svadd_za32_vg1x2', which expects a tuple of 2 vectors} } */
+ svadd_za32_vg1x2 (1, s32x4); /* { dg-error {passing 'svint32x4_t' to argument 2 of 'svadd_za32_vg1x2', which expects a tuple of 2 vectors} } */
+
+ svadd_za32_vg1x2 (1, s16x2); /* { dg-error {'svadd_za32_vg1x2' has no form that takes 'svint16x2_t' arguments} } */
+ svadd_za32_vg1x2 (1, s64x2); /* { dg-error {'svadd_za32_vg1x2' has no form that takes 'svint64x2_t' arguments} } */
+}
+
+void
+f2 (svint32x2_t s32x2) __arm_streaming
+{
+ svadd_za32_vg1x2 (0, s32x2); /* { dg-error {ACLE function 'svadd_za32_s32_vg1x2' can only be called from a function that has 'za' state} } */
+}
+
+void
+f3 (svint32x2_t s32x2) __arm_inout("za")
+{
+ svadd_za32_vg1x2 (0, s32x2); /* { dg-error {ACLE function 'svadd_za32_s32_vg1x2' can only be called when SME streaming mode is enabled} } */
+}
+
+#pragma GCC target ("+sme-i16i64")
+
+void
+f4 (svint32x2_t s32x2, svuint32x2_t u32x2,
+ svint64x2_t s64x2, svuint64x2_t u64x2)
+ __arm_streaming __arm_inout("za")
+{
+ svadd_za64_vg1x2 (1, s32x2); /* { dg-error {'svadd_za64_vg1x2' has no form that takes 'svint32x2_t' arguments} } */
+ svadd_za64_vg1x2 (1, u32x2); /* { dg-error {'svadd_za64_vg1x2' has no form that takes 'svuint32x2_t' arguments} } */
+ svadd_za64_vg1x2 (1, s64x2);
+ svadd_za64_vg1x2 (1, u64x2);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_za_slice_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_za_slice_2.c
new file mode 100644
index 0000000..b28b03e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_za_slice_2.c
@@ -0,0 +1,27 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target ("arch=armv9-a+sme2")
+
+void
+f1 (svbool_t pg, svint32_t s32, svint16x4_t s16x4, svint32x2_t s32x2,
+ svint32x3_t s32x3, svint32x4_t s32x4, svint64x4_t s64x4, float f, double d)
+ __arm_streaming __arm_inout("za")
+{
+ svadd_za32_vg1x4 (1); /* { dg-error {too few arguments to function 'svadd_za32_vg1x4'} } */
+ svadd_za32_vg1x4 (1, s32x4, s32x4); /* { dg-error {too many arguments to function 'svadd_za32_vg1x4'} } */
+
+ svadd_za32_vg1x4 (s32x2, s32x4); /* { dg-error {passing 'svint32x2_t' to argument 1 of 'svadd_za32_vg1x4', which expects 'uint32_t'} } */
+ svadd_za32_vg1x4 (f, s32x4);
+ svadd_za32_vg1x4 (d, s32x4);
+ svadd_za32_vg1x4 (pg, s32x4); /* { dg-error {passing 'svbool_t' to argument 1 of 'svadd_za32_vg1x4', which expects 'uint32_t'} } */
+
+ svadd_za32_vg1x4 (1, s32); /* { dg-error {passing single vector 'svint32_t' to argument 2 of 'svadd_za32_vg1x4', which expects a tuple of 4 vectors} } */
+ svadd_za32_vg1x4 (1, s32x2); /* { dg-error {passing 'svint32x2_t' to argument 2 of 'svadd_za32_vg1x4', which expects a tuple of 4 vectors} } */
+ svadd_za32_vg1x4 (1, s32x3); /* { dg-error {passing 'svint32x3_t' to argument 2 of 'svadd_za32_vg1x4', which expects a tuple of 4 vectors} } */
+ svadd_za32_vg1x4 (1, s32x4);
+
+ svadd_za32_vg1x4 (1, s16x4); /* { dg-error {'svadd_za32_vg1x4' has no form that takes 'svint16x4_t' arguments} } */
+ svadd_za32_vg1x4 (1, s64x4); /* { dg-error {'svadd_za32_vg1x4' has no form that takes 'svint64x4_t' arguments} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_za_slice_3.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_za_slice_3.c
new file mode 100644
index 0000000..22d91b1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_za_slice_3.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target ("arch=armv9-a+sme2+nosme-i16i64")
+
+void
+f1 (svint32x2_t s32x2, svuint32x2_t u32x2,
+ svint64x2_t s64x2, svuint64x2_t u64x2)
+ __arm_streaming __arm_inout("za")
+{
+ svadd_za64_vg1x2 (1, s32x2); /* { dg-error {'svadd_za64_vg1x2' has no form that takes 'svint32x2_t' arguments} } */
+ svadd_za64_vg1x2 (1, u32x2); /* { dg-error {'svadd_za64_vg1x2' has no form that takes 'svuint32x2_t' arguments} } */
+ svadd_za64_vg1x2 (1, s64x2); /* { dg-error {ACLE function 'svadd_za64_s64_vg1x2' requires ISA extension 'sme-i16i64'} } */
+ svadd_za64_vg1x2 (1, u64x2);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unaryxn_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unaryxn_1.c
new file mode 100644
index 0000000..f478945
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unaryxn_1.c
@@ -0,0 +1,15 @@
+#include <arm_sve.h>
+
+#pragma GCC target "+sme2"
+
+void
+test (svfloat32_t f32, svfloat32x2_t f32x2, svfloat32x3_t f32x3,
+ svfloat32x4_t f32x4) __arm_streaming
+{
+ svuzp (); /* { dg-error {too few arguments to function 'svuzp'} } */
+ svuzp (f32x2, f32x2); /* { dg-error {too many arguments to function 'svuzp'} } */
+ svuzp (f32); /* { dg-error {svuzp' has no form that takes 'svfloat32_t' arguments} } */
+ svuzp (f32x2);
+ svuzp (f32x3); /* { dg-error {svuzp' has no form that takes 'svfloat32x3_t' arguments} } */
+ svuzp (f32x4);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/undeclared_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/undeclared_2.c
index 7e869bd..6ffd3d9 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/undeclared_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/undeclared_2.c
@@ -9,7 +9,7 @@ f (svint8_t s8, svuint16_t u16, svfloat32_t f32,
u16 = svneg_x (pg, u16); /* { dg-error {'svneg_x' has no form that takes 'svuint16_t' arguments} } */
f32 = svclz_x (pg, f32); /* { dg-error {'svclz_x' has no form that takes 'svfloat32_t' arguments} } */
s16x2 = svcreate2 (s8); /* { dg-error {too few arguments to function 'svcreate2'} } */
- u32x3 = svcreate3 (u16, u16, f32); /* { dg-error {passing 'svfloat32_t' to argument 3 of 'svcreate3', but previous arguments had type 'svuint16_t'} } */
+ u32x3 = svcreate3 (u16, u16, f32); /* { dg-error {passing 'svfloat32_t' to argument 3 of 'svcreate3', but argument 1 had type 'svuint16_t'} } */
f64x4 = svcreate4 (f32, f32, f32, f32, f32); /* { dg-error {too many arguments to function 'svcreate4'} } */
pg = svadd_x (pg, pg, pg); /* { dg-error {'svadd_x' has no form that takes 'svbool_t' arguments} } */
}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/write_za_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/write_za_1.c
new file mode 100644
index 0000000..3a45b58
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/write_za_1.c
@@ -0,0 +1,50 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target "+sme2"
+
+void
+f1 (svint8_t s8, svint8x2_t s8x2, svint8x3_t s8x3, svint8x4_t s8x4,
+ svuint8_t u8, svuint16x2_t u16x2, svfloat32x2_t f32x2, svint64x2_t s64x2,
+ uint32_t tile)
+ __arm_streaming __arm_inout("za")
+{
+ svwrite_ver_za8_vg2 (0, 0); /* { dg-error {too few arguments to function 'svwrite_ver_za8_vg2'} } */
+ svwrite_ver_za8_vg2 (0, 0, s8x2, 0); /* { dg-error {too many arguments to function 'svwrite_ver_za8_vg2'} } */
+ svwrite_ver_za8_vg2 (tile, 0, s8x2); /* { dg-error {argument 1 of 'svwrite_ver_za8_vg2' must be an integer constant expression} } */
+ svwrite_ver_za8_vg2 (-1, 0, s8x2); /* { dg-error {passing -1 to argument 1 of 'svwrite_ver_za8_vg2', which expects the value 0} } */
+ svwrite_ver_za8_vg2 (1, 0, s8x2); /* { dg-error {passing 1 to argument 1 of 'svwrite_ver_za8_vg2', which expects the value 0} } */
+ svwrite_ver_za8_vg2 (0, u8, s8x2); /* { dg-error {passing 'svuint8_t' to argument 2 of 'svwrite_ver_za8_vg2', which expects 'uint32_t'} } */
+ svwrite_ver_za8_vg2 (0, 0, tile); /* { dg-error {passing 'uint32_t'.* to argument 3 of 'svwrite_ver_za8_vg2', which expects an SVE type} } */
+ svwrite_ver_za8_vg2 (0, 0, s8); /* { dg-error {passing single vector 'svint8_t' to argument 3 of 'svwrite_ver_za8_vg2', which expects a tuple of 2 vectors} } */
+ svwrite_ver_za8_vg2 (0, 0, s8x2);
+ svwrite_ver_za8_vg2 (0, 0, s8x3); /* { dg-error {passing 'svint8x3_t' to argument 3 of 'svwrite_ver_za8_vg2', which expects a tuple of 2 vectors} } */
+ svwrite_ver_za8_vg2 (0, 0, s8x4); /* { dg-error {passing 'svint8x4_t' to argument 3 of 'svwrite_ver_za8_vg2', which expects a tuple of 2 vectors} } */
+
+ svwrite_ver_za16_vg2 (-1, 0, u16x2); /* { dg-error {passing -1 to argument 1 of 'svwrite_ver_za16_vg2', which expects a value in the range \[0, 1\]} } */
+ svwrite_ver_za16_vg2 (2, 0, u16x2); /* { dg-error {passing 2 to argument 1 of 'svwrite_ver_za16_vg2', which expects a value in the range \[0, 1\]} } */
+
+ svwrite_ver_za32_vg2 (-1, 0, f32x2); /* { dg-error {passing -1 to argument 1 of 'svwrite_ver_za32_vg2', which expects a value in the range \[0, 3\]} } */
+ svwrite_ver_za32_vg2 (4, 0, f32x2); /* { dg-error {passing 4 to argument 1 of 'svwrite_ver_za32_vg2', which expects a value in the range \[0, 3\]} } */
+
+ svwrite_ver_za64_vg2 (-1, 0, s64x2); /* { dg-error {passing -1 to argument 1 of 'svwrite_ver_za64_vg2', which expects a value in the range \[0, 7\]} } */
+ svwrite_ver_za64_vg2 (8, 0, s64x2); /* { dg-error {passing 8 to argument 1 of 'svwrite_ver_za64_vg2', which expects a value in the range \[0, 7\]} } */
+
+ svwrite_ver_za8_vg4 (0, 0, s8); /* { dg-error {passing single vector 'svint8_t' to argument 3 of 'svwrite_ver_za8_vg4', which expects a tuple of 4 vectors} } */
+ svwrite_ver_za8_vg4 (0, 0, s8x2); /* { dg-error {passing 'svint8x2_t' to argument 3 of 'svwrite_ver_za8_vg4', which expects a tuple of 4 vectors} } */
+ svwrite_ver_za8_vg4 (0, 0, s8x3); /* { dg-error {passing 'svint8x3_t' to argument 3 of 'svwrite_ver_za8_vg4', which expects a tuple of 4 vectors} } */
+ svwrite_ver_za8_vg4 (0, 0, s8x4);
+}
+
+void
+f2 (svint8x2_t s8x2) __arm_streaming
+{
+ svwrite_ver_za8_vg2 (0, 0, s8x2); /* { dg-error {ACLE function 'svwrite_ver_za8_s8_vg2' can only be called from a function that has 'za' state} } */
+}
+
+void
+f3 (svint8x2_t s8x2) __arm_inout("za")
+{
+ svwrite_ver_za8_vg2 (0, 0, s8x2); /* { dg-error {ACLE function 'svwrite_ver_za8_s8_vg2' can only be called when SME streaming mode is enabled} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/write_za_m_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/write_za_m_1.c
new file mode 100644
index 0000000..af79c40
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/write_za_m_1.c
@@ -0,0 +1,48 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target ("arch=armv9-a+sme")
+
+void
+f1 (svbool_t pg, svint8_t s8, svint64_t s64, svuint8_t u8, svuint16_t u16,
+ svfloat32_t f32, uint32_t tile)
+ __arm_streaming __arm_inout("za")
+{
+ svwrite_ver_za8_m (0, 0, pg); /* { dg-error {too few arguments to function 'svwrite_ver_za8_m'} } */
+ svwrite_ver_za8_m (0, 0, pg, s8, 0); /* { dg-error {too many arguments to function 'svwrite_ver_za8_m'} } */
+ svwrite_ver_za8_m (tile, 0, pg, s8); /* { dg-error {argument 1 of 'svwrite_ver_za8_m' must be an integer constant expression} } */
+ svwrite_ver_za8_m (-1, 0, pg, s8); /* { dg-error {passing -1 to argument 1 of 'svwrite_ver_za8_m', which expects the value 0} } */
+ svwrite_ver_za8_m (1, 0, pg, s8); /* { dg-error {passing 1 to argument 1 of 'svwrite_ver_za8_m', which expects the value 0} } */
+ svwrite_ver_za8_m (0, u8, pg, s8); /* { dg-error {passing 'svuint8_t' to argument 2 of 'svwrite_ver_za8_m', which expects 'uint32_t'} } */
+ svwrite_ver_za8_m (0, 0, s8, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svwrite_ver_za8_m', which expects 'svbool_t'} } */
+ svwrite_ver_za8_m (0, 0, pg, tile); /* { dg-error {passing 'uint32_t'.* to argument 4 of 'svwrite_ver_za8_m', which expects an SVE type} } */
+ svwrite_ver_za8_m (0, 0, pg, pg); /* { dg-error {'svwrite_ver_za8_m' has no form that takes 'svbool_t' arguments} } */
+ svwrite_ver_za8_m (0, 0, pg, u16); /* { dg-error {'svwrite_ver_za8_m' has no form that takes 'svuint16_t' arguments} } */
+
+ svwrite_ver_za16_m (-1, 0, pg, u16); /* { dg-error {passing -1 to argument 1 of 'svwrite_ver_za16_m', which expects a value in the range \[0, 1\]} } */
+ svwrite_ver_za16_m (2, 0, pg, u16); /* { dg-error {passing 2 to argument 1 of 'svwrite_ver_za16_m', which expects a value in the range \[0, 1\]} } */
+
+ svwrite_ver_za32_m (-1, 0, pg, f32); /* { dg-error {passing -1 to argument 1 of 'svwrite_ver_za32_m', which expects a value in the range \[0, 3\]} } */
+ svwrite_ver_za32_m (4, 0, pg, f32); /* { dg-error {passing 4 to argument 1 of 'svwrite_ver_za32_m', which expects a value in the range \[0, 3\]} } */
+
+ svwrite_ver_za64_m (-1, 0, pg, s64); /* { dg-error {passing -1 to argument 1 of 'svwrite_ver_za64_m', which expects a value in the range \[0, 7\]} } */
+ svwrite_ver_za64_m (8, 0, pg, s64); /* { dg-error {passing 8 to argument 1 of 'svwrite_ver_za64_m', which expects a value in the range \[0, 7\]} } */
+
+ svwrite_ver_za128_m (-1, 0, pg, s8); /* { dg-error {passing -1 to argument 1 of 'svwrite_ver_za128_m', which expects a value in the range \[0, 15\]} } */
+ svwrite_ver_za128_m (16, 0, pg, s8); /* { dg-error {passing 16 to argument 1 of 'svwrite_ver_za128_m', which expects a value in the range \[0, 15\]} } */
+ svwrite_ver_za128_m (-1, 0, pg, f32); /* { dg-error {passing -1 to argument 1 of 'svwrite_ver_za128_m', which expects a value in the range \[0, 15\]} } */
+ svwrite_ver_za128_m (16, 0, pg, f32); /* { dg-error {passing 16 to argument 1 of 'svwrite_ver_za128_m', which expects a value in the range \[0, 15\]} } */
+}
+
+void
+f2 (svbool_t pg, svint8_t s8) __arm_streaming
+{
+ svwrite_ver_za8_m (0, 0, pg, s8); /* { dg-error {ACLE function 'svwrite_ver_za8_s8_m' can only be called from a function that has 'za' state} } */
+}
+
+void
+f3 (svbool_t pg, svint8_t s8) __arm_inout("za")
+{
+ svwrite_ver_za8_m (0, 0, pg, s8); /* { dg-error {ACLE function 'svwrite_ver_za8_s8_m' can only be called when SME streaming mode is enabled} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/write_za_slice_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/write_za_slice_1.c
new file mode 100644
index 0000000..dedd4b1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/write_za_slice_1.c
@@ -0,0 +1,38 @@
+/* { dg-do compile } */
+
+#include <arm_sme.h>
+
+#pragma GCC target "+sme2"
+
+void
+f1 (svint8_t s8, svint8x2_t s8x2, svint8x3_t s8x3, svint8x4_t s8x4,
+ svuint8_t u8, svuint16x2_t u16x2, svfloat32x2_t f32x2, svint64x2_t s64x2,
+ uint32_t tile)
+ __arm_streaming __arm_inout("za")
+{
+ svwrite_za8_vg1x2 (0); /* { dg-error {too few arguments to function 'svwrite_za8_vg1x2'} } */
+ svwrite_za8_vg1x2 (0, s8x2, 0); /* { dg-error {too many arguments to function 'svwrite_za8_vg1x2'} } */
+ svwrite_za8_vg1x2 (u8, s8x2); /* { dg-error {passing 'svuint8_t' to argument 1 of 'svwrite_za8_vg1x2', which expects 'uint32_t'} } */
+ svwrite_za8_vg1x2 (0, tile); /* { dg-error {passing 'uint32_t'.* to argument 2 of 'svwrite_za8_vg1x2', which expects an SVE type} } */
+ svwrite_za8_vg1x2 (0, s8); /* { dg-error {passing single vector 'svint8_t' to argument 2 of 'svwrite_za8_vg1x2', which expects a tuple of 2 vectors} } */
+ svwrite_za8_vg1x2 (0, s8x2);
+ svwrite_za8_vg1x2 (0, s8x3); /* { dg-error {passing 'svint8x3_t' to argument 2 of 'svwrite_za8_vg1x2', which expects a tuple of 2 vectors} } */
+ svwrite_za8_vg1x2 (0, s8x4); /* { dg-error {passing 'svint8x4_t' to argument 2 of 'svwrite_za8_vg1x2', which expects a tuple of 2 vectors} } */
+
+ svwrite_za8_vg1x4 (0, s8); /* { dg-error {passing single vector 'svint8_t' to argument 2 of 'svwrite_za8_vg1x4', which expects a tuple of 4 vectors} } */
+ svwrite_za8_vg1x4 (0, s8x2); /* { dg-error {passing 'svint8x2_t' to argument 2 of 'svwrite_za8_vg1x4', which expects a tuple of 4 vectors} } */
+ svwrite_za8_vg1x4 (0, s8x3); /* { dg-error {passing 'svint8x3_t' to argument 2 of 'svwrite_za8_vg1x4', which expects a tuple of 4 vectors} } */
+ svwrite_za8_vg1x4 (0, s8x4);
+}
+
+void
+f2 (svint8x2_t s8x2) __arm_streaming
+{
+ svwrite_za8_vg1x2 (0, s8x2); /* { dg-error {ACLE function 'svwrite_za8_s8_vg1x2' can only be called from a function that has 'za' state} } */
+}
+
+void
+f3 (svint8x2_t s8x2) __arm_inout("za")
+{
+ svwrite_za8_vg1x2 (0, s8x2); /* { dg-error {ACLE function 'svwrite_za8_s8_vg1x2' can only be called when SME streaming mode is enabled} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general/attributes_7.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general/attributes_7.c
index 95be605..edfadb8 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general/attributes_7.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general/attributes_7.c
@@ -27,6 +27,7 @@ typedef svint32x2_t bad_type_6 __attribute__ ((arm_sve_vector_bits (N))); // { d
typedef svint8_t bad_type_7 __attribute__ ((arm_sve_vector_bits (N))) __attribute__ ((arm_sve_vector_bits (N))); // { dg-error {'arm_sve_vector_bits' applied to type 'svint8_t __attribute__\(\(arm_sve_vector_bits\([0-9]+\)\)\)', which already has a size} }
typedef fixed_bool_t bad_type_8 __attribute__ ((arm_sve_vector_bits (N))) __attribute__ ((arm_sve_vector_bits (N))); // { dg-error {'arm_sve_vector_bits' applied to type 'fixed_bool_t' {aka 'svbool_t __attribute__\(\(arm_sve_vector_bits\([0-9]+\)\)\)'}, which already has a size} }
typedef gnu_int8_t bad_type_9 __attribute__ ((arm_sve_vector_bits (N))) __attribute__ ((arm_sve_vector_bits (N))); // { dg-error {'arm_sve_vector_bits' applied to non-SVE type 'gnu_int8_t'} }
+typedef svcount_t bad_type_10 __attribute__ ((arm_sve_vector_bits (N))); // { dg-error {'arm_sve_vector_bits' applied to non-vector type 'svcount_t'} }
void
f (int c)
@@ -135,41 +136,41 @@ g (int c)
diff = gs8 - gs8;
fs8 = ss8; // { dg-error {invalid conversion} "" { target c++ } }
- // { dg-warning {incompatible pointer type} "c" { target c } .-1 }
+ // { dg-error {incompatible pointer type} "c" { target c } .-1 }
fs8 = fs8;
fs8 = gs8; // { dg-error {invalid conversion} "" { target c++ } }
- // { dg-warning {incompatible pointer type} "c" { target c } .-1 }
+ // { dg-error {incompatible pointer type} "c" { target c } .-1 }
fs8 = su8; // { dg-error {cannot convert} "c++" { target c++ } }
- // { dg-warning {incompatible pointer type} "c" { target c } .-1 }
+ // { dg-error {incompatible pointer type} "c" { target c } .-1 }
fs8 = fu8; // { dg-error {cannot convert} "c++" { target c++ } }
- // { dg-warning {incompatible pointer type} "c" { target c } .-1 }
+ // { dg-error {incompatible pointer type} "c" { target c } .-1 }
fs8 = gu8; // { dg-error {cannot convert} "c++" { target c++ } }
- // { dg-warning {incompatible pointer type} "c" { target c } .-1 }
+ // { dg-error {incompatible pointer type} "c" { target c } .-1 }
fs8 = ss16; // { dg-error {cannot convert} "c++" { target c++ } }
- // { dg-warning {incompatible pointer type} "c" { target c } .-1 }
+ // { dg-error {incompatible pointer type} "c" { target c } .-1 }
fs8 = fs16; // { dg-error {cannot convert} "c++" { target c++ } }
- // { dg-warning {incompatible pointer type} "c" { target c } .-1 }
+ // { dg-error {incompatible pointer type} "c" { target c } .-1 }
fs8 = gs16; // { dg-error {cannot convert} "c++" { target c++ } }
- // { dg-warning {incompatible pointer type} "c" { target c } .-1 }
+ // { dg-error {incompatible pointer type} "c" { target c } .-1 }
select = c ? ss8 : ss8;
select = c ? ss8 : fs8; // { dg-error {distinct pointer types} "" { target c++ } }
- // { dg-warning {pointer type mismatch} "c" { target c } .-1 }
+ // { dg-error {pointer type mismatch} "c" { target c } .-1 }
select = c ? ss8 : gs8; // { dg-error {distinct pointer types} "" { target c++ } }
- // { dg-warning {pointer type mismatch} "c" { target c } .-1 }
+ // { dg-error {pointer type mismatch} "c" { target c } .-1 }
select = c ? fs8 : ss8; // { dg-error {distinct pointer types} "" { target c++ } }
- // { dg-warning {pointer type mismatch} "c" { target c } .-1 }
+ // { dg-error {pointer type mismatch} "c" { target c } .-1 }
select = c ? fs8 : fs8;
select = c ? fs8 : gs8; // { dg-error {distinct pointer types} "" { target c++ } }
- // { dg-warning {pointer type mismatch} "c" { target c } .-1 }
+ // { dg-error {pointer type mismatch} "c" { target c } .-1 }
select = c ? gs8 : ss8; // { dg-error {distinct pointer types} "" { target c++ } }
- // { dg-warning {pointer type mismatch} "c" { target c } .-1 }
+ // { dg-error {pointer type mismatch} "c" { target c } .-1 }
select = c ? gs8 : fs8; // { dg-error {distinct pointer types} "" { target c++ } }
- // { dg-warning {pointer type mismatch} "c" { target c } .-1 }
+ // { dg-error {pointer type mismatch} "c" { target c } .-1 }
select = c ? gs8 : gs8;
diff = sb - sb; // { dg-error {arithmetic on pointer to SVE type 'svbool_t'} }
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general/pr106326_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general/pr106326_1.c
new file mode 100644
index 0000000..34604a8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general/pr106326_1.c
@@ -0,0 +1,378 @@
+/* { dg-options "-O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include <arm_sve.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+** add1:
+** add z0\.s, (z1\.s, z0\.s|z0\.s, z1\.s)
+** ret
+*/
+svint32_t
+add1 (svint32_t x, svint32_t y)
+{
+ return svadd_z (svptrue_b8 (), x, y);
+}
+
+/*
+** add2:
+** add z0\.s, (z1\.s, z0\.s|z0\.s, z1\.s)
+** ret
+*/
+svint32_t
+add2 (svint32_t x, svint32_t y)
+{
+ return svadd_z (svptrue_b16 (), x, y);
+}
+
+/*
+** add3:
+** add z0\.s, (z1\.s, z0\.s|z0\.s, z1\.s)
+** ret
+*/
+svint32_t
+add3 (svint32_t x, svint32_t y)
+{
+ return svadd_z (svptrue_b32 (), x, y);
+}
+
+/*
+** add4:
+** ...
+** movprfx [^\n]+
+** ...
+** ret
+*/
+svint32_t
+add4 (svint32_t x, svint32_t y)
+{
+ return svadd_z (svptrue_b64 (), x, y);
+}
+
+/*
+** add5:
+** add z0\.s, (z1\.s, z0\.s|z0\.s, z1\.s)
+** ret
+*/
+svint32_t
+add5 (svint32_t x, svint32_t y)
+{
+ return svadd_m (svptrue_b8 (), x, y);
+}
+
+/*
+** add6:
+** add z0\.s, (z1\.s, z0\.s|z0\.s, z1\.s)
+** ret
+*/
+svint32_t
+add6 (svint32_t x, svint32_t y)
+{
+ return svadd_m (svptrue_b16 (), x, y);
+}
+
+/*
+** add7:
+** add z0\.s, (z1\.s, z0\.s|z0\.s, z1\.s)
+** ret
+*/
+svint32_t
+add7 (svint32_t x, svint32_t y)
+{
+ return svadd_m (svptrue_b32 (), x, y);
+}
+
+/*
+** add8:
+** ptrue (p[0-7])\.d(?:, all)?
+** add z0\.s, \1/m, z0\.s, z1\.s
+** ret
+*/
+svint32_t
+add8 (svint32_t x, svint32_t y)
+{
+ return svadd_m (svptrue_b64 (), x, y);
+}
+
+/*
+** add9:
+** ptrue (p[0-7])\.s(?:, all)?
+** add z0\.h, \1/m, z0\.h, z1\.h
+** ret
+*/
+svint16_t
+add9 (svint16_t x, svint16_t y)
+{
+ return svadd_m (svptrue_b32 (), x, y);
+}
+
+/*
+** and1:
+** and z0\.s, z0\.s, #(?:0x)?1
+** ret
+*/
+svint32_t
+and1 (svint32_t x)
+{
+ return svand_z (svptrue_b8 (), x, 1);
+}
+
+/*
+** and2:
+** and z0\.s, z0\.s, #(?:0x)?1
+** ret
+*/
+svint32_t
+and2 (svint32_t x)
+{
+ return svand_z (svptrue_b16 (), x, 1);
+}
+
+/*
+** and3:
+** and z0\.s, z0\.s, #(?:0x)?1
+** ret
+*/
+svint32_t
+and3 (svint32_t x)
+{
+ return svand_z (svptrue_b32 (), x, 1);
+}
+
+/*
+** and4:
+** (?!and z0\.s, z0\.s, #).*
+** ret
+*/
+svint32_t
+and4 (svint32_t x)
+{
+ return svand_z (svptrue_b64 (), x, 1);
+}
+
+/*
+** and5:
+** and z0\.s, z0\.s, #(?:0x)?1
+** ret
+*/
+svint32_t
+and5 (svint32_t x)
+{
+ return svand_m (svptrue_b8 (), x, 1);
+}
+
+/*
+** and6:
+** and z0\.s, z0\.s, #(?:0x)?1
+** ret
+*/
+svint32_t
+and6 (svint32_t x)
+{
+ return svand_m (svptrue_b16 (), x, 1);
+}
+
+/*
+** and7:
+** and z0\.s, z0\.s, #(?:0x)?1
+** ret
+*/
+svint32_t
+and7 (svint32_t x)
+{
+ return svand_m (svptrue_b32 (), x, 1);
+}
+
+/*
+** and8:
+** (?!and z0\.s, z0\.s, #).*
+** ret
+*/
+svint32_t
+and8 (svint32_t x)
+{
+ return svand_m (svptrue_b64 (), x, 1);
+}
+
+/*
+** and9:
+** (
+** and p0\.b, p0/z, p1\.b, p1\.b
+** |
+** and p0\.b, p1/z, p0\.b, p0\.b
+** )
+** ret
+*/
+svbool_t
+and9 (svbool_t x, svbool_t y)
+{
+ return svand_z (svptrue_b8 (), x, y);
+}
+
+/*
+** not1:
+** ptrue (p[0-7])\.b(?:, all)?
+** not z0\.s, \1/m, z1\.s
+** ret
+*/
+svint32_t
+not1 (svint32_t x, svint32_t y)
+{
+ return svnot_m (x, svptrue_b8 (), y);
+}
+
+/*
+** cvt1:
+** ptrue (p[0-7])\.b(?:, all)?
+** fcvtzs z0\.s, \1/m, z0\.h
+** ret
+*/
+svint32_t
+cvt1 (svfloat16_t x)
+{
+ return svcvt_s32_z (svptrue_b8 (), x);
+}
+
+/*
+** cvt2:
+** ptrue (p[0-7])\.b(?:, all)?
+** fcvtzs z0\.s, \1/m, z0\.h
+** ret
+*/
+svint32_t
+cvt2 (svfloat16_t x)
+{
+ return svcvt_s32_z (svptrue_b16 (), x);
+}
+
+/*
+** cvt3:
+** ptrue (p[0-7])\.b(?:, all)?
+** fcvtzs z0\.s, \1/m, z0\.h
+** ret
+*/
+svint32_t
+cvt3 (svfloat16_t x)
+{
+ return svcvt_s32_z (svptrue_b32 (), x);
+}
+
+/*
+** cvt4:
+** ...
+** movprfx [^\n]+
+** ...
+** ret
+*/
+svint32_t
+cvt4 (svfloat16_t x)
+{
+ return svcvt_s32_z (svptrue_b64 (), x);
+}
+
+/*
+** cvt5:
+** ptrue (p[0-7])\.b(?:, all)?
+** fcvt z0\.h, \1/m, z0\.s
+** ret
+*/
+svfloat16_t
+cvt5 (svfloat32_t x)
+{
+ return svcvt_f16_z (svptrue_b8 (), x);
+}
+
+/*
+** cvt6:
+** ptrue (p[0-7])\.b(?:, all)?
+** fcvt z0\.h, \1/m, z0\.s
+** ret
+*/
+svfloat16_t
+cvt6 (svfloat32_t x)
+{
+ return svcvt_f16_z (svptrue_b16 (), x);
+}
+
+/*
+** cvt7:
+** ptrue (p[0-7])\.b(?:, all)?
+** fcvt z0\.h, \1/m, z0\.s
+** ret
+*/
+svfloat16_t
+cvt7 (svfloat32_t x)
+{
+ return svcvt_f16_z (svptrue_b32 (), x);
+}
+
+/*
+** cvt8:
+** ...
+** movprfx [^\n]+
+** ...
+** ret
+*/
+svfloat16_t
+cvt8 (svfloat32_t x)
+{
+ return svcvt_f16_z (svptrue_b64 (), x);
+}
+
+/*
+** cvt9:
+** ptrue (p[0-7])\.b(?:, all)?
+** scvtf z0\.h, \1/m, z0\.h
+** ret
+*/
+svfloat16_t
+cvt9 (svint16_t x)
+{
+ return svcvt_f16_z (svptrue_b8 (), x);
+}
+
+/*
+** cvt10:
+** ptrue (p[0-7])\.b(?:, all)?
+** scvtf z0\.h, \1/m, z0\.h
+** ret
+*/
+svfloat16_t
+cvt10 (svint16_t x)
+{
+ return svcvt_f16_z (svptrue_b16 (), x);
+}
+
+/*
+** cvt11:
+** ...
+** movprfx [^\n]+
+** ...
+** ret
+*/
+svfloat16_t
+cvt11 (svint16_t x)
+{
+ return svcvt_f16_z (svptrue_b32 (), x);
+}
+
+/*
+** cvt12:
+** ...
+** movprfx [^\n]+
+** ...
+** ret
+*/
+svfloat16_t
+cvt12 (svint16_t x)
+{
+ return svcvt_f16_z (svptrue_b64 (), x);
+}
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/loop_add_4.c b/gcc/testsuite/gcc.target/aarch64/sve/loop_add_4.c
index 9ead9c2..7f02497 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/loop_add_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/loop_add_4.c
@@ -68,8 +68,7 @@ TEST_ALL (LOOP)
/* { dg-final { scan-assembler-times {\tindex\tz[0-9]+\.s, w[0-9]+, w[0-9]+\n} 3 } } */
/* { dg-final { scan-assembler-times {\tld1w\tz[0-9]+\.s, p[0-7]+/z, \[x[0-9]+, x[0-9]+, lsl 2\]} 8 } } */
/* { dg-final { scan-assembler-times {\tst1w\tz[0-9]+\.s, p[0-7]+, \[x[0-9]+, x[0-9]+, lsl 2\]} 8 } } */
-/* 2 for the calculations of -17 and 17. */
-/* { dg-final { scan-assembler-times {\tincw\tx[0-9]+\n} 10 } } */
+/* { dg-final { scan-assembler-times {\tincw\tx[0-9]+\n} 8 } } */
/* { dg-final { scan-assembler-times {\tdecw\tz[0-9]+\.s, all, mul #16\n} 1 } } */
/* { dg-final { scan-assembler-times {\tdecw\tz[0-9]+\.s, all, mul #15\n} 1 } } */
@@ -86,8 +85,7 @@ TEST_ALL (LOOP)
/* { dg-final { scan-assembler-times {\tindex\tz[0-9]+\.d, x[0-9]+, x[0-9]+\n} 3 } } */
/* { dg-final { scan-assembler-times {\tld1d\tz[0-9]+\.d, p[0-7]+/z, \[x[0-9]+, x[0-9]+, lsl 3\]} 8 } } */
/* { dg-final { scan-assembler-times {\tst1d\tz[0-9]+\.d, p[0-7]+, \[x[0-9]+, x[0-9]+, lsl 3\]} 8 } } */
-/* 2 for the calculations of -17 and 17. */
-/* { dg-final { scan-assembler-times {\tincd\tx[0-9]+\n} 10 } } */
+/* { dg-final { scan-assembler-times {\tincd\tx[0-9]+\n} 8 } } */
/* { dg-final { scan-assembler-times {\tdecd\tz[0-9]+\.d, all, mul #16\n} 1 } } */
/* { dg-final { scan-assembler-times {\tdecd\tz[0-9]+\.d, all, mul #15\n} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/pcs/annotate_1.c b/gcc/testsuite/gcc.target/aarch64/sve/pcs/annotate_1.c
index 12ae767..c3ac692 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/pcs/annotate_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/pcs/annotate_1.c
@@ -4,6 +4,8 @@
svbool_t ret_b (void) { return svptrue_b8 (); }
+svcount_t ret_c (svcount_t *ptr) { return *ptr; }
+
svint8_t ret_s8 (void) { return svdup_s8 (0); }
svint16_t ret_s16 (void) { return svdup_s16 (0); }
svint32_t ret_s32 (void) { return svdup_s32 (0); }
@@ -58,6 +60,8 @@ svfloat64x4_t ret_f64x4 (void) { return svundef4_f64 (); }
/* { dg-final { scan-assembler {\t\.variant_pcs\tret_b\n} } } */
+/* { dg-final { scan-assembler {\t\.variant_pcs\tret_c\n} } } */
+
/* { dg-final { scan-assembler {\t\.variant_pcs\tret_s8\n} } } */
/* { dg-final { scan-assembler {\t\.variant_pcs\tret_s16\n} } } */
/* { dg-final { scan-assembler {\t\.variant_pcs\tret_s32\n} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/pcs/annotate_2.c b/gcc/testsuite/gcc.target/aarch64/sve/pcs/annotate_2.c
index 9f0741e..c350873 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/pcs/annotate_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/pcs/annotate_2.c
@@ -4,6 +4,8 @@
void fn_b (svbool_t x) {}
+void fn_c (svcount_t x) {}
+
void fn_s8 (svint8_t x) {}
void fn_s16 (svint16_t x) {}
void fn_s32 (svint32_t x) {}
@@ -58,6 +60,8 @@ void fn_f64x4 (svfloat64x4_t x) {}
/* { dg-final { scan-assembler {\t\.variant_pcs\tfn_b\n} } } */
+/* { dg-final { scan-assembler {\t\.variant_pcs\tfn_c\n} } } */
+
/* { dg-final { scan-assembler {\t\.variant_pcs\tfn_s8\n} } } */
/* { dg-final { scan-assembler {\t\.variant_pcs\tfn_s16\n} } } */
/* { dg-final { scan-assembler {\t\.variant_pcs\tfn_s32\n} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/pcs/args_12.c b/gcc/testsuite/gcc.target/aarch64/sve/pcs/args_12.c
new file mode 100644
index 0000000..a589484
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/pcs/args_12.c
@@ -0,0 +1,214 @@
+/* { dg-do compile } */
+/* { dg-options "-O -fno-stack-clash-protection -g" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include <arm_sve.h>
+
+/*
+** callee_1:
+** mov p0\.b, p3\.b
+** ret
+*/
+svcount_t __attribute__ ((noipa))
+callee_1 (svcount_t p0, svcount_t p1, svcount_t p2, svcount_t p3)
+{
+ return p3;
+}
+
+/*
+** callee_2:
+** str p0, \[x0\]
+** str p1, \[x1\]
+** str p2, \[x2\]
+** str p3, \[x3\]
+** ret
+*/
+void __attribute__ ((noipa))
+callee_2 (svcount_t p0, svcount_t p1, svcount_t p2, svcount_t p3,
+ svcount_t *ptr0, svcount_t *ptr1, svcount_t *ptr2, svcount_t *ptr3)
+{
+ *ptr0 = p0;
+ *ptr1 = p1;
+ *ptr2 = p2;
+ *ptr3 = p3;
+}
+
+/*
+** callee_3:
+** str p3, \[x0\]
+** ret
+*/
+void __attribute__ ((noipa))
+callee_3 (svbool_t p0, svbool_t p1, svbool_t p2, svcount_t p3, svcount_t *ptr)
+{
+ *ptr = p3;
+}
+
+/*
+** callee_4:
+** str p3, \[x0\]
+** ret
+*/
+void __attribute__ ((noipa))
+callee_4 (svcount_t p0, svcount_t p1, svcount_t p2, svbool_t p3, svbool_t *ptr)
+{
+ *ptr = p3;
+}
+
+/*
+** callee_5:
+** ldr p0, \[x0\]
+** ret
+*/
+svcount_t __attribute__ ((noipa))
+callee_5 (svcount_t p0, svcount_t p1, svcount_t p2, svcount_t p3,
+ svcount_t p4)
+{
+ return p4;
+}
+
+/*
+** callee_6:
+** ldr p0, \[x0\]
+** ret
+*/
+svcount_t __attribute__ ((noipa))
+callee_6 (svcount_t p0, svcount_t p1, svcount_t p2, svcount_t p3,
+ svcount_t p4, int x1, int x2, int x3, int x4, int x5, int x6, int x7,
+ int x8)
+{
+ return p4;
+}
+
+/*
+** callee_7:
+** ldr (x[0-9]+), \[sp\]
+** ldr p0, \[\1\]
+** ret
+*/
+svcount_t __attribute__ ((noipa))
+callee_7 (svcount_t p0, svcount_t p1, svcount_t p2, svcount_t p3,
+ int x0, int x1, int x2, int x3, int x4, int x5, int x6, int x7,
+ svcount_t p4)
+{
+ return p4;
+}
+
+/*
+** caller_1:
+** ...
+** ldr p0, \[x0\]
+** ldr p1, \[x1\]
+** ldr p2, \[x2\]
+** ldr p3, \[x3\]
+** bl callee_1
+** ...
+** str p0, .*
+** ...
+*/
+void __attribute__ ((noipa))
+caller_1 (volatile svcount_t *ptr0, volatile svcount_t *ptr1,
+ volatile svcount_t *ptr2, volatile svcount_t *ptr3,
+ svcount_t *ptr4)
+{
+ svcount_t p0 = *ptr0;
+ svcount_t p1 = *ptr1;
+ svcount_t p2 = *ptr2;
+ svcount_t p3 = *ptr3;
+ *ptr4 = callee_1 (p0, p1, p2, p3);
+}
+
+/*
+** caller_3:
+** ...
+** ldr p0, \[x1\]
+** ldr p1, \[x2\]
+** ldr p2, \[x3\]
+** ldr p3, \[x4\]
+** bl callee_3
+** ...
+*/
+void __attribute__ ((noipa))
+caller_3 (svcount_t *ptr,
+ volatile svbool_t *ptr0, volatile svbool_t *ptr1,
+ volatile svbool_t *ptr2, volatile svcount_t *ptr3)
+{
+ svbool_t p0 = *ptr0;
+ svbool_t p1 = *ptr1;
+ svbool_t p2 = *ptr2;
+ svcount_t p3 = *ptr3;
+ callee_3 (p0, p1, p2, p3, ptr);
+}
+
+/*
+** caller_4:
+** ...
+** ldr p0, \[x1\]
+** ldr p1, \[x2\]
+** ldr p2, \[x3\]
+** ldr p3, \[x4\]
+** bl callee_4
+** ...
+*/
+void __attribute__ ((noipa))
+caller_4 (svbool_t *ptr,
+ volatile svcount_t *ptr0, volatile svcount_t *ptr1,
+ volatile svcount_t *ptr2, volatile svbool_t *ptr3)
+{
+ svcount_t p0 = *ptr0;
+ svcount_t p1 = *ptr1;
+ svcount_t p2 = *ptr2;
+ svbool_t p3 = *ptr3;
+ callee_4 (p0, p1, p2, p3, ptr);
+}
+
+/*
+** caller_5:
+** ...
+** ldr p0, \[x1\]
+** ldr p1, \[x2\]
+** ldr p2, \[x3\]
+** ldr p3, \[x4\]
+** ...
+** mov x0, sp
+** ...
+** str p[0-9]+, \[(?:x0|sp)\]
+** ...
+** bl callee_5
+** ...
+** str p0, .*
+** ...
+*/
+void __attribute__ ((noipa))
+caller_5 (svcount_t *ptr,
+ volatile svcount_t *ptr0, volatile svcount_t *ptr1,
+ volatile svcount_t *ptr2, volatile svcount_t *ptr3,
+ volatile svcount_t *ptr4)
+{
+ svcount_t p0 = *ptr0;
+ svcount_t p1 = *ptr1;
+ svcount_t p2 = *ptr2;
+ svcount_t p3 = *ptr3;
+ svcount_t p4 = *ptr4;
+ *ptr = callee_5 (p0, p1, p2, p3, p4);
+}
+
+/*
+** caller_7:
+** ...
+** ldr (p[0-9]+), \[x2\]
+** ...
+** str \1, \[(x[0-9]+)\]
+** ...
+** str \2, \[sp\]
+** ...
+** bl callee_7
+** ...
+*/
+void __attribute__ ((noipa))
+caller_7 (svcount_t *ptr, volatile svcount_t *ptr0, volatile svcount_t *ptr1)
+{
+ svcount_t p0 = *ptr0;
+ svcount_t p1 = *ptr1;
+ *ptr = callee_7 (p0, p0, p0, p0, 0, 0, 0, 0, 0, 0, 0, 0, p1);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_1.c b/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_1.c
index 110947a..5de34fc 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/pcs/stack_clash_1.c
@@ -6,8 +6,7 @@
/*
** test_1:
-** cntd x12, all, mul #9
-** lsl x12, x12, #?4
+** rdvl x12, #18
** mov x11, sp
** ...
** sub sp, sp, x12
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/pcs/struct_3_128.c b/gcc/testsuite/gcc.target/aarch64/sve/pcs/struct_3_128.c
index f6d7846..b8fe860 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/pcs/struct_3_128.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/pcs/struct_3_128.c
@@ -908,8 +908,8 @@ SEL2 (union, nonpst3)
/*
** test_nonpst3:
** sub sp, sp, #16
-** str w0, \[sp, #?8\]
-** ldr p0, \[sp, #4, mul vl\]
+** str w0, \[sp, #?12\]
+** ldr p0, \[sp, #6, mul vl\]
** add sp, sp, #?16
** ret
*/
@@ -921,7 +921,7 @@ test_nonpst3 (union nonpst3 x)
}
/*
-** ret_nonpst3: { xfail *-*-* }
+** ret_nonpst3:
** mov w0, #?(?:0xffff|65535)
** ret
*/
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/pr112278.c b/gcc/testsuite/gcc.target/aarch64/sve/pr112278.c
new file mode 100644
index 0000000..4f56add
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/pr112278.c
@@ -0,0 +1,15 @@
+#include <arm_neon.h>
+#include <arm_sve.h>
+
+void
+f (void)
+{
+ {
+ register svint8_t v0 asm ("z0");
+ asm volatile ("" : "=w" (v0));
+ }
+ {
+ register int8x8x4_t v0 asm ("v0");
+ asm volatile ("" : "=w" (v0));
+ }
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/slp_3.c b/gcc/testsuite/gcc.target/aarch64/sve/slp_3.c
index 82dd43a..775c1e1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/slp_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/slp_3.c
@@ -33,21 +33,14 @@ TEST_ALL (VEC_PERM)
/* 1 for each 8-bit type. */
/* { dg-final { scan-assembler-times {\tld1rw\tz[0-9]+\.s, } 2 } } */
-/* 1 for each 16-bit type plus 1 for double. */
-/* { dg-final { scan-assembler-times {\tld1rd\tz[0-9]+\.d, } 4 } } */
+/* 1 for each 16-bit type */
+/* { dg-final { scan-assembler-times {\tld1rd\tz[0-9]+\.d, } 3 } } */
/* 1 for each 32-bit type. */
/* { dg-final { scan-assembler-times {\tld1rqw\tz[0-9]+\.s, } 3 } } */
-/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, #41\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, #25\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, #31\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, #62\n} 2 } } */
-/* 3 for double. */
-/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, x[0-9]+\n} 3 } } */
+/* { dg-final { scan-assembler-times {\tld1rqd\tz[0-9]+\.d, } 6 } } */
/* The 64-bit types need:
-
- ZIP1 ZIP1 (2 ZIP2s optimized away)
ZIP1 ZIP2. */
-/* { dg-final { scan-assembler-times {\tzip1\tz[0-9]+\.d, z[0-9]+\.d, z[0-9]+\.d\n} 9 } } */
+/* { dg-final { scan-assembler-times {\tzip1\tz[0-9]+\.d, z[0-9]+\.d, z[0-9]+\.d\n} 3 } } */
/* { dg-final { scan-assembler-times {\tzip2\tz[0-9]+\.d, z[0-9]+\.d, z[0-9]+\.d\n} 3 } } */
/* The loop should be fully-masked. The 64-bit types need two loads
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/slp_4.c b/gcc/testsuite/gcc.target/aarch64/sve/slp_4.c
index b1fa5e3..5a9fc8f 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/slp_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/slp_4.c
@@ -35,31 +35,20 @@ vec_slp_##TYPE (TYPE *restrict a, int n) \
TEST_ALL (VEC_PERM)
-/* 1 for each 8-bit type, 4 for each 32-bit type and 4 for double. */
-/* { dg-final { scan-assembler-times {\tld1rd\tz[0-9]+\.d, } 18 } } */
+/* 1 for each 8-bit type */
+/* { dg-final { scan-assembler-times {\tld1rd\tz[0-9]+\.d, } 2 } } */
/* 1 for each 16-bit type. */
/* { dg-final { scan-assembler-times {\tld1rqh\tz[0-9]+\.h, } 3 } } */
-/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, #99\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, #11\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, #17\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, #80\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, #63\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, #37\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, #24\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, #81\n} 2 } } */
-/* 4 for double. */
-/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, x[0-9]+\n} 4 } } */
+/* { dg-final { scan-assembler-times {\tld1rqd\tz[0-9]+\.d, } 18 } } */
/* The 32-bit types need:
- ZIP1 ZIP1 (2 ZIP2s optimized away)
ZIP1 ZIP2
and the 64-bit types need:
- ZIP1 ZIP1 ZIP1 ZIP1 (4 ZIP2s optimized away)
ZIP1 ZIP2 ZIP1 ZIP2
ZIP1 ZIP2 ZIP1 ZIP2. */
-/* { dg-final { scan-assembler-times {\tzip1\tz[0-9]+\.d, z[0-9]+\.d, z[0-9]+\.d\n} 33 } } */
+/* { dg-final { scan-assembler-times {\tzip1\tz[0-9]+\.d, z[0-9]+\.d, z[0-9]+\.d\n} 15 } } */
/* { dg-final { scan-assembler-times {\tzip2\tz[0-9]+\.d, z[0-9]+\.d, z[0-9]+\.d\n} 15 } } */
/* The loop should be fully-masked. The 32-bit types need two loads
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/aarch64-sve2-acle-asm.exp b/gcc/testsuite/gcc.target/aarch64/sve2/acle/aarch64-sve2-acle-asm.exp
index 0ad6463..f62782e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/aarch64-sve2-acle-asm.exp
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/aarch64-sve2-acle-asm.exp
@@ -52,6 +52,7 @@ if { [info exists gcc_runtest_parallelize_limit_minor] } {
torture-init
set-torture-options {
"-std=c90 -O0 -g"
+ "-std=c90 -O0 -DSTREAMING_COMPATIBLE"
"-std=c90 -O1 -g"
"-std=c99 -O2 -g"
"-std=c11 -O3 -g"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesd_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesd_u8.c
index 384b6ff..65ba094 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesd_u8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesd_u8.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aese_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aese_u8.c
index 6381bce..f902c3c 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aese_u8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aese_u8.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesimc_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesimc_u8.c
index 7625932..dab06b7 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesimc_u8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesimc_u8.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesmc_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesmc_u8.c
index 30e83d3..7e7cc65 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesmc_u8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/aesmc_u8.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bdep_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bdep_u16.c
index 1423085..c1a4e10 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bdep_u16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bdep_u16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bdep_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bdep_u32.c
index 7f08df4..4f14cc4 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bdep_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bdep_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bdep_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bdep_u64.c
index 7f7cbbe..091253e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bdep_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bdep_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bdep_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bdep_u8.c
index b420323..deb1ad2 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bdep_u8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bdep_u8.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bext_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bext_u16.c
index 50a6479..9efa501 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bext_u16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bext_u16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bext_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bext_u32.c
index 9f98b84..18963da 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bext_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bext_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bext_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bext_u64.c
index 9dbaec1..91591f9 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bext_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bext_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bext_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bext_u8.c
index 81ed5a4..1211587 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bext_u8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bext_u8.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bgrp_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bgrp_u16.c
index 70aeae3..72868be 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bgrp_u16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bgrp_u16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bgrp_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bgrp_u32.c
index 6e19e38..c892381 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bgrp_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bgrp_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bgrp_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bgrp_u64.c
index 27fa40f..8698952 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bgrp_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bgrp_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bgrp_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bgrp_u8.c
index b667e03..5cd941a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bgrp_u8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/bgrp_u8.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histcnt_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histcnt_s32.c
index 7bf783a..53d6c5c 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histcnt_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histcnt_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histcnt_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histcnt_s64.c
index 001f5f0..c6d9862 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histcnt_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histcnt_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histcnt_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histcnt_u32.c
index d93091a..cb11a00 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histcnt_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histcnt_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histcnt_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histcnt_u64.c
index 3b88980..0bb06cd 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histcnt_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histcnt_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histseg_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histseg_s8.c
index 380ccdf..ce3458e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histseg_s8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histseg_s8.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histseg_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histseg_u8.c
index f43292f..7b1eff8 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histseg_u8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/histseg_u8.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_f32.c
index 102810e..17e3673 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_f32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_f32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_f64.c
index a0ed712..8ce32e9 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_f64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_f64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_s32.c
index 94c6497..b7e1d7a 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_s64.c
index a0aa670..b0789ad 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_u32.c
index e147968..df09eaa 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_u64.c
index 77cdcfe..5f185ea 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1_gather_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_s32.c
index bb72948..71fece5 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_s64.c
index de5b693..1183e72 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_u32.c
index d01ec18..4d5e6e7 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_u64.c
index b96e943..ed329a2 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sb_gather_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_s32.c
index 1dcfbc0..6dbd6ce 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_s64.c
index 4166ed0..4ea3335 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_u32.c
index 7680344..d554515 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_u64.c
index 2427c83..18c8ca4 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sh_gather_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sw_gather_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sw_gather_s64.c
index 2f538e8..41bff31 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sw_gather_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sw_gather_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sw_gather_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sw_gather_u64.c
index ace1c2f..30b8f69 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sw_gather_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1sw_gather_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_s32.c
index d3b29eb..8750d11 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_s64.c
index 3bc4066..f798199 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_u32.c
index 0af4b40..4d5ee4e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_u64.c
index fe28d78..005c29c 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1ub_gather_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_s32.c
index 9854326..92613b1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_s64.c
index 3c5baee..be2e6d1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_u32.c
index 4d945e9..4d12205 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_u64.c
index 680238a..e3bc104 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uh_gather_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uw_gather_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uw_gather_s64.c
index 787ae9d..9efa4b2 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uw_gather_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uw_gather_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uw_gather_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uw_gather_u64.c
index 4810bc3..4ded445 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uw_gather_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ldnt1uw_gather_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/match_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/match_s16.c
index baebc76..d0ce812 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/match_s16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/match_s16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/match_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/match_s8.c
index f35a753..0347390 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/match_s8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/match_s8.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/match_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/match_u16.c
index 0bdf446..2a8b4d2 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/match_u16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/match_u16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/match_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/match_u8.c
index 6d78692..8409276 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/match_u8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/match_u8.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/nmatch_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/nmatch_s16.c
index 935b19a..044ba1d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/nmatch_s16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/nmatch_s16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/nmatch_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/nmatch_s8.c
index 8a00b30..6c2d890 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/nmatch_s8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/nmatch_s8.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/nmatch_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/nmatch_u16.c
index 868c20a..863e310 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/nmatch_u16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/nmatch_u16.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/nmatch_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/nmatch_u8.c
index af6b581..a62783d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/nmatch_u8.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/nmatch_u8.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmullb_pair_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmullb_pair_u64.c
index 9446092..1fd85e0 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmullb_pair_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmullb_pair_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmullt_pair_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmullt_pair_u64.c
index 90e2e99..300d885 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmullt_pair_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmullt_pair_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/rax1_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/rax1_s64.c
index ea80d40..9dbc718 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/rax1_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/rax1_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/rax1_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/rax1_u64.c
index b237c7e..5caa2a5 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/rax1_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/rax1_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/sm4e_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/sm4e_u32.c
index cf6a2a9..96c20dc 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/sm4e_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/sm4e_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/sm4ekey_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/sm4ekey_u32.c
index 58ad33c..e723841 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/sm4ekey_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/sm4ekey_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_f32.c
index 3f928e2..75539f6 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_f32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_f32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_f64.c
index 8a35c76..c0d47d0 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_f64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_f64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_s32.c
index bd60026..80fb3e8 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_s64.c
index 0bfa261..edd2bc4 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_u32.c
index fbfa008..a6e5059 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_u64.c
index c283135..067e5b1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1_scatter_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_s32.c
index bf6ba59..498fe82 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_s64.c
index a24d0c8..614f5fb 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_u32.c
index 2b05a77..ce2c482 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_u64.c
index a13c5f5..593dc19 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1b_scatter_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_s32.c
index 4e012f6..b9d06c1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_s32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_s64.c
index e934a70..006e0e2 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_u32.c
index db21821..8cd7cb8 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_u32.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_u64.c
index 53f930d..972ee36 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1h_scatter_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1w_scatter_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1w_scatter_s64.c
index ec6c837..368a17c 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1w_scatter_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1w_scatter_s64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1w_scatter_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1w_scatter_u64.c
index 3c5d96d..57d60a3 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1w_scatter_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/stnt1w_scatter_u64.c
@@ -1,3 +1,4 @@
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
#include "test_sve_acle.h"
diff --git a/gcc/testsuite/gcc.target/arc/jli-1.c b/gcc/testsuite/gcc.target/arc/jli-1.c
index f7259b3..2ded750 100644
--- a/gcc/testsuite/gcc.target/arc/jli-1.c
+++ b/gcc/testsuite/gcc.target/arc/jli-1.c
@@ -14,6 +14,6 @@ int main()
return func(100);
}
-/* { dg-final { scan-assembler "jli_s @__jli.func" } } */
-/* { dg-final { scan-assembler ".weak __jli.func" } } */
+/* { dg-final { scan-assembler "jli_s\\\s+@__jli.func" } } */
+/* { dg-final { scan-assembler ".weak\\\s+__jli.func" } } */
/* { dg-final { scan-assembler "b\\\s+@func" } } */
diff --git a/gcc/testsuite/gcc.target/arc/jli-2.c b/gcc/testsuite/gcc.target/arc/jli-2.c
index 96a35a4..347d5b4 100644
--- a/gcc/testsuite/gcc.target/arc/jli-2.c
+++ b/gcc/testsuite/gcc.target/arc/jli-2.c
@@ -14,6 +14,6 @@ int main()
return func(100);
}
-/* { dg-final { scan-assembler "jli_s 2" } } */
+/* { dg-final { scan-assembler "jli_s\\\s+2" } } */
/* { dg-final { scan-assembler-not ".weak __jli.func" } } */
/* { dg-final { scan-assembler-not "b\\\s+@func" } } */
diff --git a/gcc/testsuite/gcc.target/arc/lra-1.c b/gcc/testsuite/gcc.target/arc/lra-1.c
index 27336d1..3c93645 100644
--- a/gcc/testsuite/gcc.target/arc/lra-1.c
+++ b/gcc/testsuite/gcc.target/arc/lra-1.c
@@ -4,12 +4,16 @@
/* ap is replaced with an address like base+offset by lra,
where offset is larger than s9, resulting into an ICE. */
-typedef struct { char a[500] } b;
-c;
+typedef struct { char a[500]; } b;
+int c;
struct d {
short e;
- b f
-} g(int h, int i, int j, int k, char l, int m, int n, char *p) {
+ b f;
+};
+
+int q (struct d);
+
+struct d g(int h, int i, int j, int k, char l, int m, int n, char *p) {
again:;
struct d o;
*p = c = ({ q(o); });
diff --git a/gcc/testsuite/gcc.target/arc/naked-1.c b/gcc/testsuite/gcc.target/arc/naked-1.c
index e45f433f..3705f40 100644
--- a/gcc/testsuite/gcc.target/arc/naked-1.c
+++ b/gcc/testsuite/gcc.target/arc/naked-1.c
@@ -9,10 +9,10 @@ foo (int n, int m)
{
bar (n + m);
}
-/* { dg-final { scan-assembler "\tbl @bar" } } */
+/* { dg-final { scan-assembler "\tbl\\\s+@bar" } } */
/* Look for things that would appear in a non-naked function, but which
should not appear in a naked function. */
-/* { dg-final { scan-assembler-not "\tj.* \\\[blink\\\]" } } */
-/* { dg-final { scan-assembler-not "\tst.* " } } */
-/* { dg-final { scan-assembler-not "\tmov fp,sp" } } */
+/* { dg-final { scan-assembler-not "\tj.*\\\s+\\\[blink\\\]" } } */
+/* { dg-final { scan-assembler-not "\tst.*\\\s+" } } */
+/* { dg-final { scan-assembler-not "\tmov\\\s+fp,sp" } } */
diff --git a/gcc/testsuite/gcc.target/arc/naked-2.c b/gcc/testsuite/gcc.target/arc/naked-2.c
index 7b7262f..271633e 100644
--- a/gcc/testsuite/gcc.target/arc/naked-2.c
+++ b/gcc/testsuite/gcc.target/arc/naked-2.c
@@ -16,11 +16,11 @@ foo (int n, int m)
{
bar (n + m);
}
-/* { dg-final { scan-assembler "\tbl @bar" } } */
+/* { dg-final { scan-assembler "\tbl\\\s+@bar" } } */
/* Look for things that would appear in a non-naked function, but which
should not appear in a naked function. */
/* { dg-final { scan-assembler-not "\trtie" } } */
/* { dg-final { scan-assembler-not "j.*\[ilink1\]" } } */
-/* { dg-final { scan-assembler-not "\tst.* " } } */
-/* { dg-final { scan-assembler-not "\tmov fp,sp" } } */
+/* { dg-final { scan-assembler-not "\tst.*\\\s+" } } */
+/* { dg-final { scan-assembler-not "\tmov\\\s+fp,sp" } } */
diff --git a/gcc/testsuite/gcc.target/arc/pic-1.c b/gcc/testsuite/gcc.target/arc/pic-1.c
index ab24763..ed1e4d3 100644
--- a/gcc/testsuite/gcc.target/arc/pic-1.c
+++ b/gcc/testsuite/gcc.target/arc/pic-1.c
@@ -3,6 +3,9 @@
/* { dg-skip-if "PIC not available for ARC6xx" { arc6xx } } */
/* { dg-options "-mno-sdata -w -Os -fpic" } */
+void e (char);
+
+void
a() {
char *b = "";
char c;
diff --git a/gcc/testsuite/gcc.target/arc/pr9001191897.c b/gcc/testsuite/gcc.target/arc/pr9001191897.c
index fc36426..d51b042 100644
--- a/gcc/testsuite/gcc.target/arc/pr9001191897.c
+++ b/gcc/testsuite/gcc.target/arc/pr9001191897.c
@@ -1,7 +1,8 @@
/* { dg-do compile } */
/* { dg-skip-if "" { ! { clmcpu } } } */
/* { dg-options "-mcpu=archs -Os -fpic -mno-sdata -mno-indexed-loads -w" } */
-a;
+int a;
+void
c() {
static char b[25];
for (; a >= 0; a--)
diff --git a/gcc/testsuite/gcc.target/arc/pr9001195952.c b/gcc/testsuite/gcc.target/arc/pr9001195952.c
index 252438d..f820960 100644
--- a/gcc/testsuite/gcc.target/arc/pr9001195952.c
+++ b/gcc/testsuite/gcc.target/arc/pr9001195952.c
@@ -1,6 +1,6 @@
/* { dg-do compile } */
/* { dg-skip-if "" { ! { clmcpu } } } */
-/* { dg-options "-mcpu=archs -Os -w -fpic" } */
+/* { dg-options "-mcpu=archs -Os -w -fpic -fpermissive" } */
/* tst_movb split pattern is wrong for anything else than NPS
chip. */
diff --git a/gcc/testsuite/gcc.target/arc/tmac-1.c b/gcc/testsuite/gcc.target/arc/tmac-1.c
index 5b302ca..797d028 100644
--- a/gcc/testsuite/gcc.target/arc/tmac-1.c
+++ b/gcc/testsuite/gcc.target/arc/tmac-1.c
@@ -5,7 +5,7 @@
/* Test MAC operation for MPY_OPTION = 8. */
#include "tmac.h"
-/* { dg-final { scan-assembler "macd " } } */
+/* { dg-final { scan-assembler "macd\\\s+" } } */
/* { dg-final { scan-assembler "macdu" } } */
-/* { dg-final { scan-assembler "mpyd\\t" } } */
+/* { dg-final { scan-assembler "mpyd\\\s+" } } */
/* { dg-final { scan-assembler "mpydu" } } */
diff --git a/gcc/testsuite/gcc.target/arc/tmac-2.c b/gcc/testsuite/gcc.target/arc/tmac-2.c
index 2bd051b..2af7fc1 100644
--- a/gcc/testsuite/gcc.target/arc/tmac-2.c
+++ b/gcc/testsuite/gcc.target/arc/tmac-2.c
@@ -5,7 +5,7 @@
/* Test MAC operation for MPY_OPTION = 7. */
#include "tmac.h"
-/* { dg-final { scan-assembler "mac " } } */
+/* { dg-final { scan-assembler "mac\\\s+" } } */
/* { dg-final { scan-assembler "macu" } } */
-/* { dg-final { scan-assembler "mpym\\t" } } */
+/* { dg-final { scan-assembler "mpym\\\s+" } } */
/* { dg-final { scan-assembler "mpymu" } } */
diff --git a/gcc/testsuite/gcc.target/arm/bfloat16_vector_typecheck_1.c b/gcc/testsuite/gcc.target/arm/bfloat16_vector_typecheck_1.c
index b677180..7016323 100644
--- a/gcc/testsuite/gcc.target/arm/bfloat16_vector_typecheck_1.c
+++ b/gcc/testsuite/gcc.target/arm/bfloat16_vector_typecheck_1.c
@@ -119,9 +119,9 @@ bfloat16x4_t footest (bfloat16x4_t vector0)
(bfloat16x4_t) { is_a_short_vec }; /* { dg-error {incompatible types when initializing type '__bf16' using type 'int16x4_t'} } */
(bfloat16x4_t) { glob_bfloat_vec }; /* { dg-error {incompatible types when initializing type '__bf16' using type 'bfloat16x4_t'} } */
- (int32x4_t) { glob_bfloat_vec }; /* { dg-error {incompatible types when initializing type 'int' using type 'bfloat16x4_t'} } */
+ (int32x4_t) { glob_bfloat_vec }; /* { dg-error {incompatible types when initializing type '(?:long )?int' using type 'bfloat16x4_t'} } */
(float32x4_t) { glob_bfloat_vec }; /* { dg-error {incompatible types when initializing type 'float' using type 'bfloat16x4_t'} } */
- (int32x2_t) { glob_bfloat_vec }; /* { dg-error {incompatible types when initializing type 'int' using type 'bfloat16x4_t'} } */
+ (int32x2_t) { glob_bfloat_vec }; /* { dg-error {incompatible types when initializing type '(?:long )?int' using type 'bfloat16x4_t'} } */
(float16x4_t) { glob_bfloat_vec }; /* { dg-error {incompatible types when initializing type '__fp16' using type 'bfloat16x4_t'} } */
(int16x4_t) { glob_bfloat_vec }; /* { dg-error {incompatible types when initializing type 'short int' using type 'bfloat16x4_t'} } */
diff --git a/gcc/testsuite/gcc.target/arm/bfloat16_vector_typecheck_2.c b/gcc/testsuite/gcc.target/arm/bfloat16_vector_typecheck_2.c
index 3c18dc5..df07687 100644
--- a/gcc/testsuite/gcc.target/arm/bfloat16_vector_typecheck_2.c
+++ b/gcc/testsuite/gcc.target/arm/bfloat16_vector_typecheck_2.c
@@ -111,7 +111,7 @@ bfloat16x8_t footest (bfloat16x8_t vector0)
(bfloat16x8_t) { is_a_short_vec }; /* { dg-error {incompatible types when initializing type '__bf16' using type 'int16x8_t'} } */
(bfloat16x8_t) { glob_bfloat_vec }; /* { dg-error {incompatible types when initializing type '__bf16' using type 'bfloat16x8_t'} } */
- (int32x4_t) { glob_bfloat_vec }; /* { dg-error {incompatible types when initializing type 'int' using type 'bfloat16x8_t'} } */
+ (int32x4_t) { glob_bfloat_vec }; /* { dg-error {incompatible types when initializing type '(?:long )?int' using type 'bfloat16x8_t'} } */
(float32x4_t) { glob_bfloat_vec }; /* { dg-error {incompatible types when initializing type 'float' using type 'bfloat16x8_t'} } */
(int64x2_t) { glob_bfloat_vec }; /* { dg-error {incompatible types when initializing type 'long long int' using type 'bfloat16x8_t'} } */
(float16x8_t) { glob_bfloat_vec }; /* { dg-error {incompatible types when initializing type '__fp16' using type 'bfloat16x8_t'} } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/pr112337.c b/gcc/testsuite/gcc.target/arm/mve/pr112337.c
index 8f49199..d1a075e 100644
--- a/gcc/testsuite/gcc.target/arm/mve/pr112337.c
+++ b/gcc/testsuite/gcc.target/arm/mve/pr112337.c
@@ -5,8 +5,8 @@
#include <arm_mve.h>
void g(int32x4_t);
-void f(int, int, int, short, int *p) {
- int *bias = p;
+void f(int, int, int, short, int32_t *p) {
+ int32_t *bias = p;
for (;;) {
int32x4_t d = vldrwq_s32 (p);
bias += 4;
diff --git a/gcc/testsuite/gcc.target/avr/pr112830.c b/gcc/testsuite/gcc.target/avr/pr112830.c
new file mode 100644
index 0000000..c305dae
--- /dev/null
+++ b/gcc/testsuite/gcc.target/avr/pr112830.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "" } */
+
+typedef __SIZE_TYPE__ size_t;
+
+void copy_n (void *vdst, const __memx void *vsrc, size_t n)
+{
+ typedef struct { char a[n]; } T;
+ T *dst = (T*) vdst;
+ const __memx T *src = (const __memx T*) vsrc;
+ *dst = *src;
+}
diff --git a/gcc/testsuite/gcc.target/avr/pr86869.c b/gcc/testsuite/gcc.target/avr/pr86869.c
new file mode 100644
index 0000000..fbfb378
--- /dev/null
+++ b/gcc/testsuite/gcc.target/avr/pr86869.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+
+struct S {
+ char y[2];
+};
+
+void foo(const __memx struct S *s) {
+ const char (*p)[2] = &s->y;
+}
diff --git a/gcc/testsuite/gcc.target/avr/pr89270.c b/gcc/testsuite/gcc.target/avr/pr89270.c
new file mode 100644
index 0000000..2b6e4a8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/avr/pr89270.c
@@ -0,0 +1,7 @@
+/* { dg-do compile } */
+
+void test()
+{
+ extern const unsigned char __memx __data_load_end;
+ __uint24 top=(__uint24)&__data_load_end;
+}
diff --git a/gcc/testsuite/gcc.target/bpf/core-builtin-enumvalue-opt.c b/gcc/testsuite/gcc.target/bpf/core-builtin-enumvalue-opt.c
index c87e1a3..fc3c299 100644
--- a/gcc/testsuite/gcc.target/bpf/core-builtin-enumvalue-opt.c
+++ b/gcc/testsuite/gcc.target/bpf/core-builtin-enumvalue-opt.c
@@ -26,10 +26,10 @@ unsigned long foo(void *data)
return 0;
}
-/* { dg-final { scan-assembler-times "\t.4byte\t0x\[0-9a-f\]+\t; bpfcr_type \\(named_ue64\\)" 2 } } */
-/* { dg-final { scan-assembler-times "\t.4byte\t0x\[0-9a-f\]+\t; bpfcr_type \\(named_se64\\)" 2} } */
-/* { dg-final { scan-assembler-times "\t.4byte\t0xa\t; bpfcr_kind" 2 } } BPF_ENUMVAL_EXISTS */
-/* { dg-final { scan-assembler-times "\t.4byte\t0xb\t; bpfcr_kind" 2 } } BPF_ENUMVAL_VALUE */
+/* { dg-final { scan-assembler-times "bpfcr_type \\(named_ue64\\)" 2 } } */
+/* { dg-final { scan-assembler-times "bpfcr_type \\(named_se64\\)" 2} } */
+/* { dg-final { scan-assembler-times "0xa\[\t \]+\[^\n\]*bpfcr_kind" 2 } } BPF_ENUMVAL_EXISTS */
+/* { dg-final { scan-assembler-times "0xb\[\t \]+\[^\n\]*bpfcr_kind" 2 } } BPF_ENUMVAL_VALUE */
/* { dg-final { scan-assembler-times "bpfcr_astr_off \\(\"0\"\\)" 4 } } */
diff --git a/gcc/testsuite/gcc.target/bpf/core-builtin-enumvalue.c b/gcc/testsuite/gcc.target/bpf/core-builtin-enumvalue.c
index 2f16903..23dfd8a 100644
--- a/gcc/testsuite/gcc.target/bpf/core-builtin-enumvalue.c
+++ b/gcc/testsuite/gcc.target/bpf/core-builtin-enumvalue.c
@@ -40,12 +40,12 @@ int foo(void *data)
return 0;
}
-/* { dg-final { scan-assembler-times "\t.4byte\t0x\[0-9a-f\]+\t; bpfcr_type \\(named_ue64\\)" 5 } } */
-/* { dg-final { scan-assembler-times "\t.4byte\t0x\[0-9a-f\]+\t; bpfcr_type \\(named_se64\\)" 5} } */
-/* { dg-final { scan-assembler-times "\t.4byte\t0x\[0-9a-f\]+\t; bpfcr_type \\(named_ue\\)" 5 } } */
-/* { dg-final { scan-assembler-times "\t.4byte\t0x\[0-9a-f\]+\t; bpfcr_type \\(named_se\\)" 5} } */
-/* { dg-final { scan-assembler-times "\t.4byte\t0xa\t; bpfcr_kind" 12 } } BPF_ENUMVAL_EXISTS */
-/* { dg-final { scan-assembler-times "\t.4byte\t0xb\t; bpfcr_kind" 8 } } BPF_ENUMVAL_VALUE */
+/* { dg-final { scan-assembler-times "bpfcr_type \\(named_ue64\\)" 5 } } */
+/* { dg-final { scan-assembler-times "bpfcr_type \\(named_se64\\)" 5} } */
+/* { dg-final { scan-assembler-times "bpfcr_type \\(named_ue\\)" 5 } } */
+/* { dg-final { scan-assembler-times "bpfcr_type \\(named_se\\)" 5} } */
+/* { dg-final { scan-assembler-times "0xa\[\t \]+\[^\n\]*bpfcr_kind" 12 } } BPF_ENUMVAL_EXISTS */
+/* { dg-final { scan-assembler-times "0xb\[\t \]+\[^\n\]*bpfcr_kind" 8 } } BPF_ENUMVAL_VALUE */
/* { dg-final { scan-assembler-times "bpfcr_astr_off \\(\"0\"\\)" 8 } } */
/* { dg-final { scan-assembler-times "bpfcr_astr_off \\(\"1\"\\)" 8 } } */
diff --git a/gcc/testsuite/gcc.target/bpf/core-builtin-type-based.c b/gcc/testsuite/gcc.target/bpf/core-builtin-type-based.c
index 16b48ae..74a8d5a 100644
--- a/gcc/testsuite/gcc.target/bpf/core-builtin-type-based.c
+++ b/gcc/testsuite/gcc.target/bpf/core-builtin-type-based.c
@@ -52,7 +52,7 @@ int foo(void *data)
return 0;
}
-/* { dg-final { scan-assembler-times "\t.4byte\t0x0\t; bpfcr_type" 0 } } */
-/* { dg-final { scan-assembler-times "\t.4byte\t0x8\t; bpfcr_kind" 13 } } BPF_TYPE_EXISTS */
-/* { dg-final { scan-assembler-times "\t.4byte\t0x9\t; bpfcr_kind" 11 } } BPF_TYPE_SIZE */
-/* { dg-final { scan-assembler-times "\t.4byte\t0xc\t; bpfcr_kind" 13 } } BPF_TYPE_MATCHES */
+/* { dg-final { scan-assembler-times "0x0\[\t \]+\[^\n\]*bpfcr_type" 0 } } */
+/* { dg-final { scan-assembler-times "0x8\[\t \]+\[^\n\]*bpfcr_kind" 13 } } BPF_TYPE_EXISTS */
+/* { dg-final { scan-assembler-times "0x9\[\t \]+\[^\n\]*bpfcr_kind" 11 } } BPF_TYPE_SIZE */
+/* { dg-final { scan-assembler-times "0xc\[\t \]+\[^\n\]*bpfcr_kind" 13 } } BPF_TYPE_MATCHES */
diff --git a/gcc/testsuite/gcc.target/bpf/core-builtin-type-id.c b/gcc/testsuite/gcc.target/bpf/core-builtin-type-id.c
index 615bbc8..4b23288 100644
--- a/gcc/testsuite/gcc.target/bpf/core-builtin-type-id.c
+++ b/gcc/testsuite/gcc.target/bpf/core-builtin-type-id.c
@@ -35,6 +35,6 @@ int foo(void *data)
return 0;
}
-/* { dg-final { scan-assembler-times "\t.4byte\t0\t; bpfcr_type" 0 { xfail *-*-* } } } */
-/* { dg-final { scan-assembler-times "\t.4byte\t0x6\t; bpfcr_kind" 13 } } BPF_TYPE_ID_LOCAL */
-/* { dg-final { scan-assembler-times "\t.4byte\t0x7\t; bpfcr_kind" 7 } } BPF_TYPE_ID_TARGET */
+/* { dg-final { scan-assembler-times "0\[\t \]+\[^\n\]*bpfcr_type" 0 { xfail *-*-* } } } */
+/* { dg-final { scan-assembler-times "0x6\[\t \]+\[^\n\]*bpfcr_kind" 13 } } BPF_TYPE_ID_LOCAL */
+/* { dg-final { scan-assembler-times "0x7\[\t \]+\[^\n\]*bpfcr_kind" 7 } } BPF_TYPE_ID_TARGET */
diff --git a/gcc/testsuite/gcc.target/bpf/divmod-libcall-1.c b/gcc/testsuite/gcc.target/bpf/divmod-libcall-1.c
new file mode 100644
index 0000000..7481076
--- /dev/null
+++ b/gcc/testsuite/gcc.target/bpf/divmod-libcall-1.c
@@ -0,0 +1,19 @@
+/* This test makes sure that no spurious external symbol declarations are
+ emitted for libcalls in tried but eventually not used code sequences. */
+
+/* { dg-do compile } */
+/* { dg-options "-O2 -mcpu=v3" } */
+/* { dg-final { scan-assembler-not "global\t__divdi3" } } */
+/* { dg-final { scan-assembler-not "global\t__moddi3" } } */
+
+int
+foo (unsigned int len)
+{
+ return ((unsigned long)len) * 234 / 5;
+}
+
+int
+bar (unsigned int len)
+{
+ return ((unsigned long)len) * 234 % 5;
+}
diff --git a/gcc/testsuite/gcc.target/bpf/divmod-libcall-2.c b/gcc/testsuite/gcc.target/bpf/divmod-libcall-2.c
new file mode 100644
index 0000000..792d689
--- /dev/null
+++ b/gcc/testsuite/gcc.target/bpf/divmod-libcall-2.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mcpu=v3" } */
+/* { dg-final { scan-assembler "global\t__divdi3" } } */
+/* { dg-final { scan-assembler "global\t__moddi3" } } */
+
+int
+foo (unsigned int len)
+{
+ return ((long)len) * 234 / 5;
+}
+
+int
+bar (unsigned int len)
+{
+ return ((long)len) * 234 % 5;
+}
diff --git a/gcc/testsuite/gcc.target/bpf/section-name-quoting-1.c b/gcc/testsuite/gcc.target/bpf/section-name-quoting-1.c
new file mode 100644
index 0000000..2fa48de
--- /dev/null
+++ b/gcc/testsuite/gcc.target/bpf/section-name-quoting-1.c
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+/* { dg-options "" } */
+
+/* Make sure that section names that contain characters not in the set
+ [0-9a-zA-Z_] get quoted for the assembler. */
+
+__attribute__((section ("uretprobe//proc/self/exe:trigger_func2")))
+void
+foo ()
+{
+}
+
+__attribute__((section ("trigger_func3")))
+void
+bar ()
+{
+}
+
+/* { dg-final { scan-assembler {\.section\t"uretprobe//proc/self/exe:trigger_func2"} } } */
+/* { dg-final { scan-assembler {\.section\ttrigger_func3} } } */
diff --git a/gcc/testsuite/gcc.target/gcn/avgpr-mem-double.c b/gcc/testsuite/gcc.target/gcn/avgpr-mem-double.c
index ce089fb..34317a5 100644
--- a/gcc/testsuite/gcc.target/gcn/avgpr-mem-double.c
+++ b/gcc/testsuite/gcc.target/gcn/avgpr-mem-double.c
@@ -1,6 +1,5 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=gfx90a -O1" } */
-/* { dg-skip-if "incompatible ISA" { *-*-* } { "-march=gfx90[068]" } } */
/* { dg-final { scan-assembler {load[^\n]*a[0-9[]} } } */
/* { dg-final { scan-assembler {store[^\n]*a[0-9[]} } } */
diff --git a/gcc/testsuite/gcc.target/gcn/avgpr-mem-int.c b/gcc/testsuite/gcc.target/gcn/avgpr-mem-int.c
index 03d8148..5ea3755 100644
--- a/gcc/testsuite/gcc.target/gcn/avgpr-mem-int.c
+++ b/gcc/testsuite/gcc.target/gcn/avgpr-mem-int.c
@@ -1,6 +1,5 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=gfx90a -O1" } */
-/* { dg-skip-if "incompatible ISA" { *-*-* } { "-march=gfx90[068]" } } */
/* { dg-final { scan-assembler {load[^\n]*a[0-9[]} } } */
/* { dg-final { scan-assembler {store[^\n]*a[0-9[]} } } */
diff --git a/gcc/testsuite/gcc.target/gcn/avgpr-mem-long.c b/gcc/testsuite/gcc.target/gcn/avgpr-mem-long.c
index dcfb483..b52fc98 100644
--- a/gcc/testsuite/gcc.target/gcn/avgpr-mem-long.c
+++ b/gcc/testsuite/gcc.target/gcn/avgpr-mem-long.c
@@ -1,6 +1,5 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=gfx90a -O1" } */
-/* { dg-skip-if "incompatible ISA" { *-*-* } { "-march=gfx90[068]" } } */
/* { dg-final { scan-assembler {load[^\n]*a[0-9[]} } } */
/* { dg-final { scan-assembler {store[^\n]*a[0-9[]} } } */
diff --git a/gcc/testsuite/gcc.target/gcn/avgpr-mem-short.c b/gcc/testsuite/gcc.target/gcn/avgpr-mem-short.c
index 91cc14e..a3e4a8b 100644
--- a/gcc/testsuite/gcc.target/gcn/avgpr-mem-short.c
+++ b/gcc/testsuite/gcc.target/gcn/avgpr-mem-short.c
@@ -1,6 +1,5 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=gfx90a -O1" } */
-/* { dg-skip-if "incompatible ISA" { *-*-* } { "-march=gfx90[068]" } } */
/* { dg-final { scan-assembler {load[^\n]*a[0-9[]} } } */
/* { dg-final { scan-assembler {store[^\n]*a[0-9[]} } } */
diff --git a/gcc/testsuite/gcc.target/gcn/avgpr-spill-double.c b/gcc/testsuite/gcc.target/gcn/avgpr-spill-double.c
index 3e9996d..53853a4 100644
--- a/gcc/testsuite/gcc.target/gcn/avgpr-spill-double.c
+++ b/gcc/testsuite/gcc.target/gcn/avgpr-spill-double.c
@@ -1,6 +1,5 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=gfx908 -O1" } */
-/* { dg-skip-if "incompatible ISA" { *-*-* } { "-march=gfx90[06]" } } */
/* { dg-final { scan-assembler "accvgpr" } } */
#define TYPE double
diff --git a/gcc/testsuite/gcc.target/gcn/avgpr-spill-int.c b/gcc/testsuite/gcc.target/gcn/avgpr-spill-int.c
index 0b64c8e..650f158 100644
--- a/gcc/testsuite/gcc.target/gcn/avgpr-spill-int.c
+++ b/gcc/testsuite/gcc.target/gcn/avgpr-spill-int.c
@@ -1,6 +1,5 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=gfx908 -O1" } */
-/* { dg-skip-if "incompatible ISA" { *-*-* } { "-march=gfx90[06]" } } */
/* { dg-final { scan-assembler "accvgpr" } } */
#ifndef TYPE
diff --git a/gcc/testsuite/gcc.target/gcn/avgpr-spill-long.c b/gcc/testsuite/gcc.target/gcn/avgpr-spill-long.c
index 516890d..51f887c 100644
--- a/gcc/testsuite/gcc.target/gcn/avgpr-spill-long.c
+++ b/gcc/testsuite/gcc.target/gcn/avgpr-spill-long.c
@@ -1,6 +1,5 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=gfx908 -O1" } */
-/* { dg-skip-if "incompatible ISA" { *-*-* } { "-march=gfx90[06]" } } */
/* { dg-final { scan-assembler "accvgpr" } } */
#define TYPE long
diff --git a/gcc/testsuite/gcc.target/gcn/avgpr-spill-short.c b/gcc/testsuite/gcc.target/gcn/avgpr-spill-short.c
index 1e55684..983d201 100644
--- a/gcc/testsuite/gcc.target/gcn/avgpr-spill-short.c
+++ b/gcc/testsuite/gcc.target/gcn/avgpr-spill-short.c
@@ -1,6 +1,5 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=gfx908 -O1" } */
-/* { dg-skip-if "incompatible ISA" { *-*-* } { "-march=gfx90[06]" } } */
/* { dg-final { scan-assembler "accvgpr" } } */
#define TYPE short
diff --git a/gcc/testsuite/gcc.target/h8300/pr17306-2.c b/gcc/testsuite/gcc.target/h8300/pr17306-2.c
index a407c74..8c79f31 100644
--- a/gcc/testsuite/gcc.target/h8300/pr17306-2.c
+++ b/gcc/testsuite/gcc.target/h8300/pr17306-2.c
@@ -8,6 +8,8 @@ struct x {
char y;
};
+void oof (void);
+
struct x __attribute__ ((eightbit_data)) foo;
int bar ()
diff --git a/gcc/testsuite/gcc.target/h8300/pr58400.c b/gcc/testsuite/gcc.target/h8300/pr58400.c
index 496626f..9d1ad7a 100644
--- a/gcc/testsuite/gcc.target/h8300/pr58400.c
+++ b/gcc/testsuite/gcc.target/h8300/pr58400.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-Os -mh -mint32 -w" } */
+/* { dg-options "-Os -mh -mint32 -w -fpermissive" } */
typedef unsigned short __u16;
typedef __signed__ int __s32;
diff --git a/gcc/testsuite/gcc.target/i386/apx-interrupt-1.c b/gcc/testsuite/gcc.target/i386/apx-interrupt-1.c
index ffcb8fc..fefe2e6 100644
--- a/gcc/testsuite/gcc.target/i386/apx-interrupt-1.c
+++ b/gcc/testsuite/gcc.target/i386/apx-interrupt-1.c
@@ -1,6 +1,5 @@
-/* { dg-do compile { target { ! ia32 } } } */
-/* { dg-options "-mapx-features=egpr -m64 -O2 -mgeneral-regs-only -mno-cld -mno-push-args -maccumulate-outgoing-args" } */
-/* { dg-skip-if "does not emit .cfi_xxx" "*-*-darwin*" } */
+/* { dg-do compile { target { { ! ia32 } && cfi } } } */
+/* { dg-options "-mapx-features=egpr -m64 -O2 -mgeneral-regs-only -mno-cld -mno-push-args -maccumulate-outgoing-args -fomit-frame-pointer" } */
extern void foo (void *) __attribute__ ((interrupt));
extern int bar (int);
diff --git a/gcc/testsuite/gcc.target/i386/apx-push2pop2-1.c b/gcc/testsuite/gcc.target/i386/apx-push2pop2-1.c
index d78c96d..06a7c0d 100644
--- a/gcc/testsuite/gcc.target/i386/apx-push2pop2-1.c
+++ b/gcc/testsuite/gcc.target/i386/apx-push2pop2-1.c
@@ -1,6 +1,5 @@
-/* { dg-do compile { target { ! ia32 } } } */
-/* { dg-options "-O2 -mapx-features=push2pop2" } */
-/* { dg-skip-if "does not emit .cfi_xxx" "*-*-darwin*" } */
+/* { dg-do compile { target { { ! ia32 } && cfi } } } */
+/* { dg-options "-O2 -mapx-features=push2pop2 -fomit-frame-pointer" } */
extern int bar (int);
diff --git a/gcc/testsuite/gcc.target/i386/apx-push2pop2_force_drap-1.c b/gcc/testsuite/gcc.target/i386/apx-push2pop2_force_drap-1.c
index 3cac7b1..7e86e15 100644
--- a/gcc/testsuite/gcc.target/i386/apx-push2pop2_force_drap-1.c
+++ b/gcc/testsuite/gcc.target/i386/apx-push2pop2_force_drap-1.c
@@ -1,6 +1,5 @@
-/* { dg-do compile { target { ! ia32 } } } */
-/* { dg-options "-O2 -mapx-features=push2pop2 -mforce-drap" } */
-/* { dg-skip-if "does not emit .cfi_xxx" "*-*-darwin*" } */
+/* { dg-do compile { target { { ! ia32 } && cfi } } } */
+/* { dg-options "-O2 -mapx-features=push2pop2 -fomit-frame-pointer -mforce-drap" } */
#include "apx-push2pop2-1.c"
diff --git a/gcc/testsuite/gcc.target/i386/libcall-1.c b/gcc/testsuite/gcc.target/i386/libcall-1.c
new file mode 100644
index 0000000..a40ff56
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/libcall-1.c
@@ -0,0 +1,9 @@
+/* Make sure that external refences for libcalls are generated even for
+ indirect calls. */
+
+/* { dg-do compile { target int128 } } */
+/* { dg-options "-O2 -mcmodel=large" } */
+/* { dg-final { scan-assembler "globl\t__divti3" } } */
+/* { dg-xfail-if "PR90698" { *-*-darwin* } } */
+
+__int128 a, b; void foo () { a = a / b; }
diff --git a/gcc/testsuite/gcc.target/i386/pr112445.c b/gcc/testsuite/gcc.target/i386/pr112445.c
new file mode 100644
index 0000000..91ed421
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr112445.c
@@ -0,0 +1,22 @@
+/* { dg-do compile { target int128 } } */
+/* { dg-options "-O -march=cascadelake -fwrapv" } */
+
+typedef _Decimal64 d64;
+int foo0_f128_0, foo0_ret, foo0_s64_0;
+_Complex float foo0_cf128_0;
+
+void
+foo (char u8_0, char s8_0, _Complex unsigned cu8_0, int cs32_0,
+ _Complex _Float16 cf16_0, _Complex int cf32_0, int d32_0,
+ _Decimal64 d64_0)
+{
+ cu8_0 *= (__int128) foo0_s64_0;
+ int cf32_1 = __builtin_ccosf (cu8_0);
+ __int128 u128_r =
+ foo0_f128_0 + (__int128) foo0_cf128_0 + (__int128) __imag__ foo0_cf128_0;
+ int u64_r = u128_r + foo0_s64_0 + d64_0;
+ int u32_r = u64_r + cs32_0 + cf32_0 + __imag__ cf32_0 + cf32_1 + d32_0;
+ short u16_r = u32_r + cf16_0 + __imag__ cf16_0;
+ char u8_r = u16_r + u8_0 + s8_0 + cu8_0 + __imag__ cu8_0;
+ foo0_ret = u8_r;
+}
diff --git a/gcc/testsuite/gcc.target/i386/pr112816.c b/gcc/testsuite/gcc.target/i386/pr112816.c
new file mode 100644
index 0000000..4748156
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr112816.c
@@ -0,0 +1,27 @@
+/* PR target/112816 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -mno-avx512f -masm=att" } */
+/* { dg-final { scan-assembler-times "psrad\t\\\$31," 2 } } */
+/* { dg-final { scan-assembler-not "pcmpeqd\t" } } */
+
+#define N 4
+struct S { float x[N]; };
+struct T { int x[N]; };
+
+__attribute__((target ("no-sse3,sse2"))) struct T
+foo (struct S x)
+{
+ struct T res;
+ for (int i = 0; i < N; ++i)
+ res.x[i] = __builtin_signbit (x.x[i]) ? -1 : 0;
+ return res;
+}
+
+__attribute__((target ("avx2"))) struct T
+bar (struct S x)
+{
+ struct T res;
+ for (int i = 0; i < N; ++i)
+ res.x[i] = __builtin_signbit (x.x[i]) ? -1 : 0;
+ return res;
+}
diff --git a/gcc/testsuite/gcc.target/i386/pr112830.c b/gcc/testsuite/gcc.target/i386/pr112830.c
new file mode 100644
index 0000000..2ba6104
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr112830.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "" } */
+
+void
+foo (int n, __seg_fs void *p, __seg_gs void *q)
+{
+ typedef struct { char t[n]; } T;
+ *(__seg_fs T *)p = *(__seg_gs T *)q;
+}
diff --git a/gcc/testsuite/gcc.target/i386/sdotprodint8_emulate.c b/gcc/testsuite/gcc.target/i386/sdotprodint8_emulate.c
new file mode 100644
index 0000000..ed58460
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/sdotprodint8_emulate.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-mavxvnni -O2 -fdump-tree-optimized" } */
+/* { dg-final { scan-tree-dump-times "DOT_PROD_EXPR" 1 "optimized" } } */
+/* { dg-final { scan-assembler-times "vpdpwssd" 2 } } */
+
+int
+foo (char* a, char* b)
+{
+ int sum = 0;
+ for (int i = 0; i != 16; i++)
+ {
+ sum += a[i] * b[i];
+ }
+ return sum;
+}
diff --git a/gcc/testsuite/gcc.target/i386/sse2-bfloat16-scalar-typecheck.c b/gcc/testsuite/gcc.target/i386/sse2-bfloat16-scalar-typecheck.c
index d1a76db..599b026 100644
--- a/gcc/testsuite/gcc.target/i386/sse2-bfloat16-scalar-typecheck.c
+++ b/gcc/testsuite/gcc.target/i386/sse2-bfloat16-scalar-typecheck.c
@@ -181,8 +181,8 @@ __bf16 footest (__bf16 scalar0)
0 ? 0.1 : scalar0;
0 ? scalar0 : 0.1;
0 ? bfloat_ptr : bfloat_ptr2;
- 0 ? bfloat_ptr : float_ptr; /* { dg-warning {pointer type mismatch in conditional expression} } */
- 0 ? float_ptr : bfloat_ptr; /* { dg-warning {pointer type mismatch in conditional expression} } */
+ 0 ? bfloat_ptr : float_ptr; /* { dg-error {pointer type mismatch in conditional expression} } */
+ 0 ? float_ptr : bfloat_ptr; /* { dg-error {pointer type mismatch in conditional expression} } */
scalar0 ? scalar0 : scalar0;
scalar0 ? is_a_float : scalar0;
diff --git a/gcc/testsuite/gcc.target/i386/sse2-pr112816.c b/gcc/testsuite/gcc.target/i386/sse2-pr112816.c
new file mode 100644
index 0000000..0701f3c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/sse2-pr112816.c
@@ -0,0 +1,16 @@
+/* PR target/112816 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -msse2" } */
+
+#define N 4
+struct S { float x[N]; };
+struct T { int x[N]; };
+
+struct T
+foo (struct S x)
+{
+ struct T res;
+ for (int i = 0; i < N; ++i)
+ res.x[i] = __builtin_signbit (x.x[i]) ? -1 : 0;
+ return res;
+}
diff --git a/gcc/testsuite/gcc.target/i386/udotprodint8_emulate.c b/gcc/testsuite/gcc.target/i386/udotprodint8_emulate.c
new file mode 100644
index 0000000..1e8f2cf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/udotprodint8_emulate.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-mavxvnni -O2 -fdump-tree-optimized" } */
+/* { dg-final { scan-tree-dump-times "DOT_PROD_EXPR" 1 "optimized" } } */
+/* { dg-final { scan-assembler-times "vpdpwssd" 2 } } */
+
+int
+foo (unsigned char* a, unsigned char* b)
+{
+ int sum = 0;
+ for (int i = 0; i != 16; i++)
+ {
+ sum += a[i] * b[i];
+ }
+ return sum;
+}
diff --git a/gcc/testsuite/gcc.target/i386/user_msr-1.c b/gcc/testsuite/gcc.target/i386/user_msr-1.c
index 4478523..f315016 100644
--- a/gcc/testsuite/gcc.target/i386/user_msr-1.c
+++ b/gcc/testsuite/gcc.target/i386/user_msr-1.c
@@ -1,9 +1,9 @@
/* { dg-do compile { target { ! ia32 } } } */
/* { dg-options "-musermsr -O2" } */
/* { dg-final { scan-assembler-times "urdmsr\[ \\t\]\\%r\[a-z\]x, \\%r\[a-z\]x" 1 } } */
-/* { dg-final { scan-assembler-times "urdmsr\[ \\t\]\\\$121" 1 } } */
+/* { dg-final { scan-assembler-times "urdmsr\[ \\t\]\\\$6912" 1 } } */
/* { dg-final { scan-assembler-times "uwrmsr\[ \\t\]\\%r\[a-z\]x, \\%r\[a-z\]x" 1 } } */
-/* { dg-final { scan-assembler-times "uwrmsr\[ \\t\]\\%r\[a-z\]x, \\\$121" 1 } } */
+/* { dg-final { scan-assembler-times "uwrmsr\[ \\t\]\\%r\[a-z\]x, \\\$6912" 1 } } */
#include <x86gprintrin.h>
@@ -13,8 +13,9 @@ volatile unsigned long long y;
void extern
user_msr_test (void)
{
+ y = 6913;
x = _urdmsr(y);
- x = _urdmsr(121);
+ x = _urdmsr(6912);
_uwrmsr(y, x);
- _uwrmsr(121, x);
+ _uwrmsr(6912, x);
}
diff --git a/gcc/testsuite/gcc.target/i386/vect-bfloat16-typecheck_1.c b/gcc/testsuite/gcc.target/i386/vect-bfloat16-typecheck_1.c
index 5d19dcb..fdcead3 100644
--- a/gcc/testsuite/gcc.target/i386/vect-bfloat16-typecheck_1.c
+++ b/gcc/testsuite/gcc.target/i386/vect-bfloat16-typecheck_1.c
@@ -224,8 +224,8 @@ __m128bf16 footest (__m128bf16 vector0)
0 ? 0.1 : vector0; /* { dg-error {type mismatch in conditional expression} } */
0 ? vector0 : 0.1; /* { dg-error {type mismatch in conditional expression} } */
0 ? bfloat_ptr : bfloat_ptr2;
- 0 ? bfloat_ptr : float_ptr; /* { dg-warning {pointer type mismatch in conditional expression} } */
- 0 ? float_ptr : bfloat_ptr; /* { dg-warning {pointer type mismatch in conditional expression} } */
+ 0 ? bfloat_ptr : float_ptr; /* { dg-error {pointer type mismatch in conditional expression} } */
+ 0 ? float_ptr : bfloat_ptr; /* { dg-error {pointer type mismatch in conditional expression} } */
vector0 ? vector0 : vector0; /* { dg-error {used vector type where scalar is required} } */
vector0 ? is_a_float16_vec : vector0; /* { dg-error {used vector type where scalar is required} } */
diff --git a/gcc/testsuite/gcc.target/i386/vect-bfloat16-typecheck_2.c b/gcc/testsuite/gcc.target/i386/vect-bfloat16-typecheck_2.c
index d4e6fc8..17ca3aa 100644
--- a/gcc/testsuite/gcc.target/i386/vect-bfloat16-typecheck_2.c
+++ b/gcc/testsuite/gcc.target/i386/vect-bfloat16-typecheck_2.c
@@ -214,8 +214,8 @@ __m256bf16 footest (__m256bf16 vector0)
0 ? 0.1 : vector0; /* { dg-error {type mismatch in conditional expression} } */
0 ? vector0 : 0.1; /* { dg-error {type mismatch in conditional expression} } */
0 ? bfloat_ptr : bfloat_ptr2;
- 0 ? bfloat_ptr : float_ptr; /* { dg-warning {pointer type mismatch in conditional expression} } */
- 0 ? float_ptr : bfloat_ptr; /* { dg-warning {pointer type mismatch in conditional expression} } */
+ 0 ? bfloat_ptr : float_ptr; /* { dg-error {pointer type mismatch in conditional expression} } */
+ 0 ? float_ptr : bfloat_ptr; /* { dg-error {pointer type mismatch in conditional expression} } */
vector0 ? vector0 : vector0; /* { dg-error {used vector type where scalar is required} } */
vector0 ? is_a_float16_vec : vector0; /* { dg-error {used vector type where scalar is required} } */
diff --git a/gcc/testsuite/gcc.target/loongarch/lasx-extract-even_odd-opt.c b/gcc/testsuite/gcc.target/loongarch/lasx-extract-even_odd-opt.c
new file mode 100644
index 0000000..515f0c8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/loongarch/lasx-extract-even_odd-opt.c
@@ -0,0 +1,54 @@
+/* { dg-do compile } */
+/* { dg-options "-O3 -mlasx" } */
+/* { dg-final { scan-assembler "xvilvl.d" } } */
+/* { dg-final { scan-assembler "xvilvh.d" } } */
+
+#define CMUL(a, b, c) \
+ { \
+ (c).ai = (a).ai * (b).ai - (a).bi * (b).bi; \
+ (c).bi = (a).ai * (b).bi + (a).bi * (b).ai; \
+ (c).ci = (a).ci * (b).ci - (a).di * (b).di; \
+ (c).di = (a).ci * (b).di + (a).di * (b).ci; \
+ }
+#define CSUM(a, b) \
+ { \
+ (a).ai += (b).ai; \
+ (a).bi += (b).bi; \
+ (a).ci += (b).ci; \
+ (a).di += (b).di; \
+ }
+
+typedef struct
+{
+ double ai;
+ double bi;
+ double ci;
+ double di;
+} complex;
+
+typedef struct
+{
+ complex e[6][6];
+} matrix;
+
+typedef struct
+{
+ complex c[6];
+} vector;
+
+void
+mult_adj_mat_vec (matrix *a, vector *b, vector *c)
+{
+ register int i, j;
+ register complex x, y;
+ for (i = 0; i < 6; i++)
+ {
+ x.ai = x.bi = x.ci = x.di = 0.0;
+ for (j = 0; j < 6; j++)
+ {
+ CMUL (a->e[j][i], b->c[j], y);
+ CSUM (x, y);
+ }
+ c->c[i] = x;
+ }
+}
diff --git a/gcc/testsuite/gcc.target/loongarch/popcnt.c b/gcc/testsuite/gcc.target/loongarch/popcnt.c
new file mode 100644
index 0000000..a10fca4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/loongarch/popcnt.c
@@ -0,0 +1,41 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mlsx" } */
+/* { dg-final { scan-assembler-not {popcount} } } */
+/* { dg-final { scan-assembler-times "vpcnt.d" 2 { target { loongarch64*-*-* } } } } */
+/* { dg-final { scan-assembler-times "vpcnt.w" 4 { target { loongarch64*-*-* } } } } */
+
+int
+foo (int x)
+{
+ return __builtin_popcount (x);
+}
+
+long
+foo1 (long x)
+{
+ return __builtin_popcountl (x);
+}
+
+long long
+foo2 (long long x)
+{
+ return __builtin_popcountll (x);
+}
+
+int
+foo3 (int *p)
+{
+ return __builtin_popcount (*p);
+}
+
+unsigned
+foo4 (int x)
+{
+ return __builtin_popcount (x);
+}
+
+unsigned long
+foo5 (int x)
+{
+ return __builtin_popcount (x);
+}
diff --git a/gcc/testsuite/gcc.target/loongarch/popcount.c b/gcc/testsuite/gcc.target/loongarch/popcount.c
new file mode 100644
index 0000000..390ff06
--- /dev/null
+++ b/gcc/testsuite/gcc.target/loongarch/popcount.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mlsx -fdump-tree-optimized" } */
+/* { dg-final { scan-tree-dump-times "__builtin_popcount|\\.POPCOUNT" 1 "optimized" } } */
+
+int
+PopCount (long b)
+{
+ int c = 0;
+
+ while (b)
+ {
+ b &= b - 1;
+ c++;
+ }
+
+ return c;
+}
diff --git a/gcc/testsuite/gcc.target/loongarch/vect-frint-no-inexact.c b/gcc/testsuite/gcc.target/loongarch/vect-frint-no-inexact.c
new file mode 100644
index 0000000..7bbaf1f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/loongarch/vect-frint-no-inexact.c
@@ -0,0 +1,48 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mabi=lp64d -mdouble-float -fno-math-errno -fno-fp-int-builtin-inexact -mlasx" } */
+
+#include "vect-frint.c"
+
+/* ceil */
+/* { dg-final { scan-assembler "bl\t%plt\\(ceil\\)" } } */
+/* { dg-final { scan-assembler "bl\t%plt\\(ceilf\\)" } } */
+/* { dg-final { scan-assembler-not "\tvfrintrp\.s" } } */
+/* { dg-final { scan-assembler-not "\tvfrintrp\.d" } } */
+/* { dg-final { scan-assembler-not "\txvfrintrp\.s" } } */
+/* { dg-final { scan-assembler-not "\txvfrintrp\.d" } } */
+
+/* floor */
+/* { dg-final { scan-assembler "bl\t%plt\\(floor\\)" } } */
+/* { dg-final { scan-assembler "bl\t%plt\\(floorf\\)" } } */
+/* { dg-final { scan-assembler-not "\tvfrintrm\.s" } } */
+/* { dg-final { scan-assembler-not "\tvfrintrm\.d" } } */
+/* { dg-final { scan-assembler-not "\txvfrintrm\.s" } } */
+/* { dg-final { scan-assembler-not "\txvfrintrm\.d" } } */
+
+/* nearbyint + rint: Only rint is allowed */
+/* { dg-final { scan-assembler "bl\t%plt\\(nearbyint\\)" } } */
+/* { dg-final { scan-assembler "bl\t%plt\\(nearbyintf\\)" } } */
+/* { dg-final { scan-assembler-times "\tvfrint\.s" 1 } } */
+/* { dg-final { scan-assembler-times "\tvfrint\.d" 1 } } */
+/* { dg-final { scan-assembler-times "\txvfrint\.s" 1 } } */
+/* { dg-final { scan-assembler-times "\txvfrint\.d" 1 } } */
+
+/* round: we don't have a corresponding instruction */
+/* { dg-final { scan-assembler "bl\t%plt\\(round\\)" } } */
+/* { dg-final { scan-assembler "bl\t%plt\\(roundf\\)" } } */
+
+/* roundeven */
+/* { dg-final { scan-assembler "bl\t%plt\\(roundeven\\)" } } */
+/* { dg-final { scan-assembler "bl\t%plt\\(roundevenf\\)" } } */
+/* { dg-final { scan-assembler-not "\tvfrintrne\.s" } } */
+/* { dg-final { scan-assembler-not "\tvfrintrne\.d" } } */
+/* { dg-final { scan-assembler-not "\txvfrintrne\.s" } } */
+/* { dg-final { scan-assembler-not "\txvfrintrne\.d" } } */
+
+/* trunc */
+/* { dg-final { scan-assembler "bl\t%plt\\(trunc\\)" } } */
+/* { dg-final { scan-assembler "bl\t%plt\\(truncf\\)" } } */
+/* { dg-final { scan-assembler-not "\tvfrintrz\.s" } } */
+/* { dg-final { scan-assembler-not "\tvfrintrz\.d" } } */
+/* { dg-final { scan-assembler-not "\txvfrintrz\.s" } } */
+/* { dg-final { scan-assembler-not "\txvfrintrz\.d" } } */
diff --git a/gcc/testsuite/gcc.target/loongarch/vect-frint-scalar-no-inexact.c b/gcc/testsuite/gcc.target/loongarch/vect-frint-scalar-no-inexact.c
new file mode 100644
index 0000000..002e3b9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/loongarch/vect-frint-scalar-no-inexact.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mlsx -fno-fp-int-builtin-inexact" } */
+
+#include "vect-frint-scalar.c"
+
+/* cannot use LSX for these with -fno-fp-int-builtin-inexact,
+ call library function. */
+/* { dg-final { scan-assembler "\tb\t%plt\\(ceil\\)" } } */
+/* { dg-final { scan-assembler "\tb\t%plt\\(ceilf\\)" } } */
+/* { dg-final { scan-assembler "\tb\t%plt\\(floor\\)" } } */
+/* { dg-final { scan-assembler "\tb\t%plt\\(floorf\\)" } } */
+/* { dg-final { scan-assembler "\tb\t%plt\\(trunc\\)" } } */
+/* { dg-final { scan-assembler "\tb\t%plt\\(truncf\\)" } } */
+/* { dg-final { scan-assembler "\tb\t%plt\\(roundeven\\)" } } */
+/* { dg-final { scan-assembler "\tb\t%plt\\(roundevenf\\)" } } */
+
+/* nearbyint is not allowed to rasie FE_INEXACT for decades */
+/* { dg-final { scan-assembler "\tb\t%plt\\(nearbyint\\)" } } */
+/* { dg-final { scan-assembler "\tb\t%plt\\(nearbyintf\\)" } } */
+
+/* rint should just use basic FP operation */
+/* { dg-final { scan-assembler "\tfrint\.s" } } */
+/* { dg-final { scan-assembler "\tfrint\.d" } } */
diff --git a/gcc/testsuite/gcc.target/loongarch/vect-frint-scalar.c b/gcc/testsuite/gcc.target/loongarch/vect-frint-scalar.c
new file mode 100644
index 0000000..c7cb40b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/loongarch/vect-frint-scalar.c
@@ -0,0 +1,43 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mlsx" } */
+
+#define test(func, suffix) \
+__typeof__ (1.##suffix) \
+_##func##suffix (__typeof__ (1.##suffix) x) \
+{ \
+ return __builtin_##func##suffix (x); \
+}
+
+test (ceil, f)
+test (ceil, )
+test (floor, f)
+test (floor, )
+test (trunc, f)
+test (trunc, )
+test (roundeven, f)
+test (roundeven, )
+test (nearbyint, f)
+test (nearbyint, )
+test (rint, f)
+test (rint, )
+
+/* { dg-final { scan-assembler "\tvfrintrp\.s" } } */
+/* { dg-final { scan-assembler "\tvfrintrm\.s" } } */
+/* { dg-final { scan-assembler "\tvfrintrz\.s" } } */
+/* { dg-final { scan-assembler "\tvfrintrne\.s" } } */
+/* { dg-final { scan-assembler "\tvfrintrp\.d" } } */
+/* { dg-final { scan-assembler "\tvfrintrm\.d" } } */
+/* { dg-final { scan-assembler "\tvfrintrz\.d" } } */
+/* { dg-final { scan-assembler "\tvfrintrne\.d" } } */
+
+/* must do vreplvei first */
+/* { dg-final { scan-assembler-times "\tvreplvei\.w\t\\\$vr0,\\\$vr0,0" 4 } } */
+/* { dg-final { scan-assembler-times "\tvreplvei\.d\t\\\$vr0,\\\$vr0,0" 4 } } */
+
+/* nearbyint is not allowed to rasie FE_INEXACT for decades */
+/* { dg-final { scan-assembler "\tb\t%plt\\(nearbyint\\)" } } */
+/* { dg-final { scan-assembler "\tb\t%plt\\(nearbyintf\\)" } } */
+
+/* rint should just use basic FP operation */
+/* { dg-final { scan-assembler "\tfrint\.s" } } */
+/* { dg-final { scan-assembler "\tfrint\.d" } } */
diff --git a/gcc/testsuite/gcc.target/loongarch/vect-frint.c b/gcc/testsuite/gcc.target/loongarch/vect-frint.c
new file mode 100644
index 0000000..6bf211e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/loongarch/vect-frint.c
@@ -0,0 +1,85 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mabi=lp64d -mdouble-float -fno-math-errno -ffp-int-builtin-inexact -mlasx" } */
+
+float out_x[8];
+double out_y[4];
+
+float x[8];
+double y[4];
+
+#define TEST(op, N, func) \
+void \
+test_##op##_##N##_##func () \
+{ \
+ for (int i = 0; i < N; i++) \
+ out_##op[i] = __builtin_##func (op[i]); \
+}
+
+TEST(x, 4, ceilf);
+TEST(x, 4, floorf);
+TEST(x, 4, nearbyintf);
+TEST(x, 4, rintf);
+TEST(x, 4, roundf);
+TEST(x, 4, roundevenf);
+TEST(x, 4, truncf);
+
+TEST(x, 8, ceilf);
+TEST(x, 8, floorf);
+TEST(x, 8, nearbyintf);
+TEST(x, 8, rintf);
+TEST(x, 8, roundf);
+TEST(x, 8, roundevenf);
+TEST(x, 8, truncf);
+
+TEST(y, 2, ceil);
+TEST(y, 2, floor);
+TEST(y, 2, nearbyint);
+TEST(y, 2, rint);
+TEST(y, 2, round);
+TEST(y, 2, roundeven);
+TEST(y, 2, trunc);
+
+TEST(y, 4, ceil);
+TEST(y, 4, floor);
+TEST(y, 4, nearbyint);
+TEST(y, 4, rint);
+TEST(y, 4, round);
+TEST(y, 4, roundeven);
+TEST(y, 4, trunc);
+
+/* ceil */
+/* { dg-final { scan-assembler "\tvfrintrp\.s" } } */
+/* { dg-final { scan-assembler "\tvfrintrp\.d" } } */
+/* { dg-final { scan-assembler "\txvfrintrp\.s" } } */
+/* { dg-final { scan-assembler "\txvfrintrp\.d" } } */
+
+/* floor */
+/* { dg-final { scan-assembler "\tvfrintrm\.s" } } */
+/* { dg-final { scan-assembler "\tvfrintrm\.d" } } */
+/* { dg-final { scan-assembler "\txvfrintrm\.s" } } */
+/* { dg-final { scan-assembler "\txvfrintrm\.d" } } */
+
+/* rint and nearbyint
+ nearbyint has been disallowed to raise FE_INEXACT for decades. */
+/* { dg-final { scan-assembler-times "\tvfrint\.s" 1 } } */
+/* { dg-final { scan-assembler-times "\tvfrint\.d" 1 } } */
+/* { dg-final { scan-assembler-times "\txvfrint\.s" 1 } } */
+/* { dg-final { scan-assembler-times "\txvfrint\.d" 1 } } */
+/* { dg-final { scan-assembler "bl\t%plt\\(nearbyint\\)" } } */
+/* { dg-final { scan-assembler "bl\t%plt\\(nearbyintf\\)" } } */
+
+/* round: we don't have a corresponding instruction */
+/* { dg-final { scan-assembler "bl\t%plt\\(round\\)" } } */
+/* { dg-final { scan-assembler "bl\t%plt\\(roundf\\)" } } */
+
+/* roundeven */
+/* { dg-final { scan-assembler "\tvfrintrne\.s" } } */
+/* { dg-final { scan-assembler "\tvfrintrne\.d" } } */
+/* { dg-final { scan-assembler "\txvfrintrne\.s" } } */
+/* { dg-final { scan-assembler "\txvfrintrne\.d" } } */
+
+/* trunc */
+/* { dg-final { scan-assembler "\tvfrintrz\.s" } } */
+/* { dg-final { scan-assembler "\tvfrintrz\.d" } } */
+/* { dg-final { scan-assembler "\txvfrintrz\.s" } } */
+/* { dg-final { scan-assembler "\txvfrintrz\.d" } } */
diff --git a/gcc/testsuite/gcc.target/loongarch/vect-ftint-no-inexact.c b/gcc/testsuite/gcc.target/loongarch/vect-ftint-no-inexact.c
new file mode 100644
index 0000000..83d2680
--- /dev/null
+++ b/gcc/testsuite/gcc.target/loongarch/vect-ftint-no-inexact.c
@@ -0,0 +1,44 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mabi=lp64d -mdouble-float -fno-math-errno -fno-fp-int-builtin-inexact -mlasx" } */
+
+#include "vect-ftint.c"
+
+/* ceil */
+/* { dg-final { scan-assembler "bl\t%plt\\(ceil\\)" } } */
+/* { dg-final { scan-assembler "bl\t%plt\\(ceilf\\)" } } */
+/* { dg-final { scan-assembler-not "\tvftintrp\.w\.s" } } */
+/* { dg-final { scan-assembler-not "\tvftintrp\.l\.d" } } */
+/* { dg-final { scan-assembler-not "\txvftintrp\.w\.s" } } */
+/* { dg-final { scan-assembler-not "\txvftintrp\.l\.d" } } */
+
+/* floor */
+/* { dg-final { scan-assembler "bl\t%plt\\(floor\\)" } } */
+/* { dg-final { scan-assembler "bl\t%plt\\(floorf\\)" } } */
+/* { dg-final { scan-assembler-not "\tvftintrm\.w\.s" } } */
+/* { dg-final { scan-assembler-not "\tvftintrm\.l\.d" } } */
+/* { dg-final { scan-assembler-not "\txvftintrm\.w\.s" } } */
+/* { dg-final { scan-assembler-not "\txvftintrm\.l\.d" } } */
+
+/* nearbyint + rint */
+/* { dg-final { scan-assembler "bl\t%plt\\(floor\\)" } } */
+/* { dg-final { scan-assembler "bl\t%plt\\(floorf\\)" } } */
+/* { dg-final { scan-assembler-times "\tvftint\.w\.s" 1 } } */
+/* { dg-final { scan-assembler-times "\tvftint\.l\.d" 1 } } */
+/* { dg-final { scan-assembler-times "\txvftint\.w\.s" 1 } } */
+/* { dg-final { scan-assembler-times "\txvftint\.l\.d" 1 } } */
+
+/* round: we don't have a corresponding instruction */
+/* { dg-final { scan-assembler "bl\t%plt\\(lround\\)" } } */
+/* { dg-final { scan-assembler "bl\t%plt\\(roundf\\)" } } */
+
+/* roundeven */
+/* { dg-final { scan-assembler "bl\t%plt\\(roundeven\\)" } } */
+/* { dg-final { scan-assembler "bl\t%plt\\(roundevenf\\)" } } */
+/* { dg-final { scan-assembler-not "\tvftintrne\.w\.s" } } */
+/* { dg-final { scan-assembler-not "\tvftintrne\.l\.d" } } */
+/* { dg-final { scan-assembler-not "\txvftintrne\.w\.s" } } */
+/* { dg-final { scan-assembler-not "\txvftintrne\.l\.d" } } */
+
+/* trunc: XFAIL due to PR 107723 */
+/* { dg-final { scan-assembler "bl\t%plt\\(trunc\\)" { xfail *-*-* } } } */
+/* { dg-final { scan-assembler "bl\t%plt\\(truncf\\)" } } */
diff --git a/gcc/testsuite/gcc.target/loongarch/vect-ftint.c b/gcc/testsuite/gcc.target/loongarch/vect-ftint.c
new file mode 100644
index 0000000..c4962ed
--- /dev/null
+++ b/gcc/testsuite/gcc.target/loongarch/vect-ftint.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mabi=lp64d -mdouble-float -fno-math-errno -ffp-int-builtin-inexact -mlasx" } */
+
+int out_x[8];
+long out_y[4];
+
+float x[8];
+double y[4];
+
+#define TEST(op, N, func) \
+void \
+test_##op##_##N##_##func () \
+{ \
+ for (int i = 0; i < N; i++) \
+ out_##op[i] = __builtin_##func (op[i]); \
+}
+
+TEST(x, 4, ceilf);
+TEST(x, 4, floorf);
+TEST(x, 4, nearbyintf);
+TEST(x, 4, rintf);
+TEST(x, 4, roundf);
+TEST(x, 4, roundevenf);
+TEST(x, 4, truncf);
+
+TEST(x, 8, ceilf);
+TEST(x, 8, floorf);
+TEST(x, 8, nearbyintf);
+TEST(x, 8, rintf);
+TEST(x, 8, roundf);
+TEST(x, 8, roundevenf);
+TEST(x, 8, truncf);
+
+TEST(y, 2, ceil);
+TEST(y, 2, floor);
+TEST(y, 2, nearbyint);
+TEST(y, 2, rint);
+TEST(y, 2, round);
+TEST(y, 2, roundeven);
+TEST(y, 2, trunc);
+
+TEST(y, 4, ceil);
+TEST(y, 4, floor);
+TEST(y, 4, nearbyint);
+TEST(y, 4, rint);
+TEST(y, 4, round);
+TEST(y, 4, roundeven);
+TEST(y, 4, trunc);
+
+/* ceil */
+/* { dg-final { scan-assembler "\tvftintrp\.w\.s" } } */
+/* { dg-final { scan-assembler "\tvftintrp\.l\.d" } } */
+/* { dg-final { scan-assembler "\txvftintrp\.w\.s" } } */
+/* { dg-final { scan-assembler "\txvftintrp\.l\.d" } } */
+
+/* floor */
+/* { dg-final { scan-assembler "\tvftintrm\.w\.s" } } */
+/* { dg-final { scan-assembler "\tvftintrm\.l\.d" } } */
+/* { dg-final { scan-assembler "\txvftintrm\.w\.s" } } */
+/* { dg-final { scan-assembler "\txvftintrm\.l\.d" } } */
+
+/* rint and nearbyint
+ nearbyint has been disallowed to raise FE_INEXACT for decades. */
+/* { dg-final { scan-assembler-times "\tvftint\.w\.s" 1 } } */
+/* { dg-final { scan-assembler-times "\tvftint\.l\.d" 1 } } */
+/* { dg-final { scan-assembler-times "\txvftint\.w\.s" 1 } } */
+/* { dg-final { scan-assembler-times "\txvftint\.l\.d" 1 } } */
+/* { dg-final { scan-assembler "bl\t%plt\\(nearbyint\\)" } } */
+/* { dg-final { scan-assembler "bl\t%plt\\(nearbyintf\\)" } } */
+
+/* round: we don't have a corresponding instruction */
+/* { dg-final { scan-assembler "bl\t%plt\\(lround\\)" } } */
+/* { dg-final { scan-assembler "bl\t%plt\\(roundf\\)" } } */
+
+/* roundeven */
+/* { dg-final { scan-assembler "\tvftintrne\.w\.s" } } */
+/* { dg-final { scan-assembler "\tvftintrne\.l\.d" } } */
+/* { dg-final { scan-assembler "\txvftintrne\.w\.s" } } */
+/* { dg-final { scan-assembler "\txvftintrne\.l\.d" } } */
+
+/* trunc */
+/* { dg-final { scan-assembler-not "bl\t%plt\\(trunc\\)" } } */
+/* { dg-final { scan-assembler-not "bl\t%plt\\(truncf\\)" } } */
diff --git a/gcc/testsuite/gcc.target/loongarch/vect-muh.c b/gcc/testsuite/gcc.target/loongarch/vect-muh.c
new file mode 100644
index 0000000..a788840
--- /dev/null
+++ b/gcc/testsuite/gcc.target/loongarch/vect-muh.c
@@ -0,0 +1,36 @@
+/* { dg-do compile } */
+/* { dg-options "-mlasx -O3" } */
+/* { dg-final { scan-assembler "\tvmuh\.w\t" } } */
+/* { dg-final { scan-assembler "\tvmuh\.wu\t" } } */
+/* { dg-final { scan-assembler "\txvmuh\.w\t" } } */
+/* { dg-final { scan-assembler "\txvmuh\.wu\t" } } */
+
+int a[8], b[8], c[8];
+
+void
+test1 (void)
+{
+ for (int i = 0; i < 4; i++)
+ c[i] = ((long)a[i] * (long)b[i]) >> 32;
+}
+
+void
+test2 (void)
+{
+ for (int i = 0; i < 4; i++)
+ c[i] = ((long)(unsigned)a[i] * (long)(unsigned)b[i]) >> 32;
+}
+
+void
+test3 (void)
+{
+ for (int i = 0; i < 8; i++)
+ c[i] = ((long)a[i] * (long)b[i]) >> 32;
+}
+
+void
+test4 (void)
+{
+ for (int i = 0; i < 8; i++)
+ c[i] = ((long)(unsigned)a[i] * (long)(unsigned)b[i]) >> 32;
+}
diff --git a/gcc/testsuite/gcc.target/loongarch/vect-rotr.c b/gcc/testsuite/gcc.target/loongarch/vect-rotr.c
new file mode 100644
index 0000000..733c363
--- /dev/null
+++ b/gcc/testsuite/gcc.target/loongarch/vect-rotr.c
@@ -0,0 +1,36 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mlasx" } */
+/* { dg-final { scan-assembler "\tvrotr\.w\t" } } */
+/* { dg-final { scan-assembler "\txvrotr\.w\t" } } */
+/* { dg-final { scan-assembler "\tvrotri\.w\t\[^\n\]*7\n" } } */
+/* { dg-final { scan-assembler "\txvrotri\.w\t\[^\n\]*7\n" } } */
+
+unsigned int a[8], b[8];
+
+void
+test1 (void)
+{
+ for (int i = 0; i < 4; i++)
+ a[i] = a[i] >> b[i] | a[i] << (32 - b[i]);
+}
+
+void
+test2 (void)
+{
+ for (int i = 0; i < 8; i++)
+ a[i] = a[i] >> b[i] | a[i] << (32 - b[i]);
+}
+
+void
+test3 (void)
+{
+ for (int i = 0; i < 4; i++)
+ a[i] = a[i] >> 7 | a[i] << 25;
+}
+
+void
+test4 (void)
+{
+ for (int i = 0; i < 8; i++)
+ a[i] = a[i] >> 7 | a[i] << 25;
+}
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c
index 41fae32..5e15a12 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c
index bd7a906..fa0f9f6 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadd.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadd.c
index 2932957..82da734 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadd.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadd.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadda.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadda.c
index d6b57d1..2c2701d 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadda.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadda.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddi.c
index 054bf6e..064b26f 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddi.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddi.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c
index 70f3bf7..1600739 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c
index 22528a1..c45840e 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c
index 38a0a53..567bc1f 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c
index a4dc565..775b905 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c
index a2fbe9e..34721ad 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c
index 8c98fc4..30d52b0 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvand.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvand.c
index e485786..96ad473 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvand.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvand.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandi.c
index 26cddc5..59d6a14 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandi.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandi.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandn.c
index bc3590c..b2809d3 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandn.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandn.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c
index 5ce31eb..18d1862 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c
index d04e427..4a79277 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c
index 37b78aa..7e6a244 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c
index 3944a6a..f020cbe 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c
index def7b58..70c9288 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c
index 713eb19..7eee98f 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c
index 2b0e7f8..a4f104e 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c
index 2b8327d..967a01f 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c
index c9847a6..4140805 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c
index 1edb4fc..b2532f5 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitset.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitset.c
index c195cd9..ff9d030 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitset.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitset.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c
index 47f37e4..9081443 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c
index 3c1a8b8..7110423 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c
index 340f769..236b5b2 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclo.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclo.c
index dbc52f9..927fa16 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclo.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclo.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclz.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclz.c
index 89191c4..3e39c21 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclz.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclz.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c
index 0d7c677..e3cfe28 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c
index fd8b6d3..7154329 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c
index 94f3101..2e9e4b0 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c
index d93201b..f6a098d 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c
index 9fb4e3f..c64e6ca 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c
index fe6ff15..33ede4d 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c
index c0d3e8e..7f59c76 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c
index 8c7ab4e..d9eee59 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextrins.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextrins.c
index 8e61f1c..e4dc8bf 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextrins.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextrins.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c
index 657a19e..7cd7ad8 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c
index 4002c40..62ca8c9 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c
index 5d5b4c4..5a27330 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
/* { dg-timeout 500 } */
#include "../simd_correctness_check.h"
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c
index 888e85b..cae82f6 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
/* { dg-timeout 500 } */
#include "../simd_correctness_check.h"
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c
index fa33723..1fe7c8b 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c
index 6d6649f..d4c4aa1 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c
index a64dd75..1ca2fbd 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c
index 733cc00..0dffd68 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c
index 1907410..77ba5fc 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c
index 8dd58f2..954c757 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c
index 3230c10..98eb385 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c
index 23cbc4b..1427165 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c
index 6641d2c..e61e0e6 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c
index d25fc25..24f4f20 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c
index 8210f74..f468d93 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c
index 9d015a5..29c128e 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c
index a616810..29c080c 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c
index 41f2749..eee5616 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c
index 116399a..8b6225d 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c
index 001ce1c..7933ec5 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c
index dd04fd7..e0240cb 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c
index 3e2b155..c6f4aea 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffinth.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffinth.c
index e310ff5..4d8e71b 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffinth.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffinth.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c
index bba1a06..57a4cd2 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c
index b641c73..798c752 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c
index c85c94b..f5c49f9 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c
index bde41dd..d25bbe6 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c
index 207ba16..eefa1e5 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c
index 9b77032..a9271e6 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c
index 96bbb94..63605b8 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c
index c73a8a7..4b59e34 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c
index d161c85..0f6c5e4c 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c
index c5e9576..3f45404 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c
index 4babf16..e65ded1 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
/* { dg-timeout 500 } */
#include "../simd_correctness_check.h"
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c
index 9f2fa67..fbfe300 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
/* { dg-timeout 500 } */
#include "../simd_correctness_check.h"
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c
index 557f9f8..72b3fe0 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c
index cdb7b11..cbb23e0 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c
index 18d5c51..21f6172 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c
index 27df4a2..0a28716 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c
index c75468d..24b21ef 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c
index ad72f75..5a72994 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c
index 19db4e1..c02e00b 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftintl.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftintl.c
index b0fdf7e..f20ec5b 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftintl.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftintl.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c
index 1cf0ec6..03a8856 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c
index 14ec081..9ee92aa 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c
index fa4d5fd..e5101a8 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c
index 87c3e25..685b76e 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvh.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvh.c
index 5a047a5..cbadbd3 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvh.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvh.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvl.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvl.c
index 4393045..c78eb7f 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvl.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvl.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c
index ce28c48..9e3cd70 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c
index 644d2ce..b356dd1 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvld.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvld.c
index c1eda6c..f39a94a 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvld.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvld.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldi.c
index 84b3c65..51e4661 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldi.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldi.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmadd.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmadd.c
index f9634b1..6a04e72 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmadd.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmadd.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c
index 6238685..5e5b35d 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c
index 5fa0803..bfa095d 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c
index 4054944..6a47045 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c
index 6838769..d456cbf 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c
index f9f88b6..7f1c40c 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c
index 5210e4c..abe92a6 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c
index 96c6671..4b8932a 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c
index 38f2c0a..561d964 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c
index e804a0a..cc52343 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c
index b6b3406..2373c96 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c
index 7dbf335c1..9df0af7 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c
index 9eaa0e9..0eb03ac 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c
index 01aabad..6579978 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c
index 8eb7d93..7402ff6 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c
index 6f34f6f..fd052cd 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c
index d0a9e9d..cb39dbb 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c
index 15e66ae..952725a 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c
index 53b21f9..22aa6ab 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c
index 81865fd..6b48f8a 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsub.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsub.c
index 8c8d499..4e13f34 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsub.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsub.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c
index 58ad8bf..2e42c1d 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c
index 85d24fe..2d420c2 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmul.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmul.c
index be3c8e7..f14aa47 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmul.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmul.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c
index 01ff716..e09174d 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c
index 32088f4..2a4c09c 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c
index 19157f6..7afa6ad 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c
index 80fdcda..ad69c1e 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c
index 1a4b221..27a7fdd 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c
index 9fcd3ce..c55d20d 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvneg.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvneg.c
index 3cd1626..fe17ef1 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvneg.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvneg.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnor.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnor.c
index 3a491eca..2b8e622 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnor.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnor.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnori.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnori.c
index 995a34c..8a8062a 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnori.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnori.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvor.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvor.c
index 27eef71..1164389 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvor.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvor.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvori.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvori.c
index ee91af9..0341bde 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvori.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvori.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvorn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvorn.c
index fa6cdff..de7a208 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvorn.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvorn.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackev.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackev.c
index 33b96d6..e839570 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackev.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackev.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackod.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackod.c
index cdd20e8..ee33577 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackod.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackod.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c
index d2e742e..7d6be36 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickev.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickev.c
index 66faa74..831247b 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickev.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickev.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickod.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickod.c
index a977880..65188ad 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickod.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickod.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve.c
index a2edbb8..d234066 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c
index 8bd3a82..2e18db1 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvprem.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvprem.c
index 9346f9b..e9fc1d7 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvprem.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvprem.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c
index 9346f9b..e9fc1d7 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c
index 81456bc..1685747 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve.c
index 7aa76c2..beeee76 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c
index a2bc2da..5643b91 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c
index 9346f9b..e9fc1d7 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotr.c
index 21446e5..4943986 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotr.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotr.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotri.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotri.c
index c1b8e17..24d508f 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotri.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotri.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c
index 2a4f29b..cecac61 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c
index a3afc98..6cd4e05 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c
index b4ac502..29a4f5a 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c
index e5ee89d..571145b 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseq.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseq.c
index 2a42386..41b9470 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseq.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseq.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseqi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseqi.c
index 5478d19..6c9b964 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseqi.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseqi.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c
index c8a00ca..6001681 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c
index 641ea23..d8a29db 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c
index 2a6eee0..5137f5d 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c
index ed752df..13f8c8c 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c
index bc98b41..ef1784f 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c
index 0671780..21f6813 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c
index 093d564..0adadaa 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsll.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsll.c
index 7179e71..4a29276 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsll.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsll.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslli.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslli.c
index 003e29b..50e9a9f 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslli.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslli.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c
index ef3a47d..22a7a31 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c
index 76651af..4b68aeb 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c
index ca1f5e9..f44f083 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c
index 6864f5e..60278e2 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c
index 7dd2778..87d069d 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c
index d93e431..9eefa78 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsra.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsra.c
index 2bf9ae9..b4bda4d 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsra.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsra.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrai.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrai.c
index a51be89..871d024 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrai.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrai.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsran.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsran.c
index e08934b..eba7c11 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsran.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsran.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrani.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrani.c
index 44c20a9..9638248 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrani.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrani.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrar.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrar.c
index fb47385..542b6fd 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrar.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrar.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrari.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrari.c
index 63ba92e..cfd61ba 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrari.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrari.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c
index c145f7f..c847e28 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c
index b5c0fca..c0ce0dd 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrl.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrl.c
index 1d591c3..8ac09a0 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrl.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrl.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrli.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrli.c
index e869670..dd0a09c 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrli.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrli.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrln.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrln.c
index d549910..42a6958 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrln.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrln.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c
index 0fb6483..a7acf35 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c
index 22e62a3..c4e1e14 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c
index 71f770a..4a2e147 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c
index cbc1de3..b17c7c4 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c
index 8fc7a00..bfca007 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssran.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssran.c
index fdb0c25..4648f75 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssran.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssran.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrani.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrani.c
index dd3c2c6..25482ae 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrani.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrani.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c
index 7848ddd..c284254 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c
index b1c16ba..c39002e 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrln.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrln.c
index 356eb21..09313d0 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrln.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrln.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c
index 116bebb..6d53719 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c
index 9770610..c812a1b 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c
index b55e388..2683355 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c
index ada72a1..dc187aa 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c
index f425238..b13ea88 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvst.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvst.c
index 3c5e775..68a2cac 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvst.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvst.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsub.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsub.c
index c1de1e8..e940491 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsub.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsub.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubi.c
index a3c0de6..8a12726 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubi.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubi.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c
index caa72ca..ba535d1 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c
index 57d883c..0a1d027 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c
index 1687729..660c20d 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c
index 8d6ed92..9710d12 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxor.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxor.c
index 18b36c8..506e983 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxor.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxor.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxori.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxori.c
index 8fd6298..da7203a 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxori.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxori.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lasxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/loongarch-vector.exp b/gcc/testsuite/gcc.target/loongarch/vector/loongarch-vector.exp
index 2c37aa9..d53bee5 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/loongarch-vector.exp
+++ b/gcc/testsuite/gcc.target/loongarch/vector/loongarch-vector.exp
@@ -33,9 +33,32 @@ if ![info exists DEFAULT_CFLAGS] then {
#Initialize `dg'.
dg-init
+# If the target hardware supports LSX, the default action is "run", otherwise
+# just "compile".
+global dg-do-what-default
+if {[check_effective_target_loongarch_sx_hw]} then {
+ set dg-do-what-default run
+} else {
+ set dg-do-what-default compile
+}
+
#Main loop.
dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/lsx/*.\[cS\]]] \
" -mlsx" $DEFAULT_CFLAGS
+
+dg-finish
+
+dg-init
+# If the target hardware supports LASX, the default action is "run", otherwise
+# just "compile".
+
+global dg-do-what-default
+if {[check_effective_target_loongarch_asx_hw]} then {
+ set dg-do-what-default run
+} else {
+ set dg-do-what-default compile
+}
+
dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/lasx/*.\[cS\]]] \
" -mlasx" $DEFAULT_CFLAGS
# All done.
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c
index e336581..8790470 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c
index c1af80e..77e027b 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadd.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadd.c
index 7cfb989..e2c4f3a 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadd.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadd.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadda.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadda.c
index 4bb699e..c7ce0a7 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadda.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadda.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddi.c
index 77afabe..23f28bc 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddi.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddi.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c
index b7b16a32..54503e2 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c
index a407cad..0b1e909 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c
index 4d5c609..eefd0be 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c
index 0ebe8c8..1016afe 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c
index 379517f..befbf70 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c
index 30dc835..9365d24 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vand.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vand.c
index 1597749..374b8b0 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vand.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vand.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandi.c
index 906da69..ad4b5d3 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandi.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandi.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandn.c
index 3ae2d76..e645b94 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandn.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandn.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-1.c
index 2177ca3..0d7463e 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-2.c
index 1b0d879..bc16057 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c
index 4b72625..e494870 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c
index 22908b1..ff9907d 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclr.c
index 411dcaa..d663653 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclr.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclr.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclri.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclri.c
index 5d7d66e..9017d15 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclri.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclri.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrev.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrev.c
index ba4f4b6..5d6d1ef 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrev.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrev.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c
index 9739182..1f730a6 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitsel.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitsel.c
index 52ac993..2239b37 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitsel.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitsel.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseli.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseli.c
index f2d6fb0..d581887 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseli.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseli.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitset.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitset.c
index e05af67..a1737c5 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitset.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitset.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseti.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseti.c
index 540a724..577fbeb 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseti.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseti.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsll.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsll.c
index 34246c5..d60d843 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsll.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsll.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsrl.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsrl.c
index 986b7d5..a8d0e0f 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsrl.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsrl.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclo.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclo.c
index 2c1099a..c386ed7 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclo.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclo.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclz.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclz.c
index 12df2c6..aa3e54a 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclz.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclz.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c
index cb4be04..36ee4b8 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c
index f2bc7df..7cf31e2 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-1.c
index f639080..32db7a9 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-2.c
index 6ab217e..78afaa8 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-1.c
index 99854db..9985961 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-2.c
index 73bb530..31a3b5e 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextrins.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextrins.c
index 8d4158b..e9187db 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextrins.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextrins.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c
index 7ffbd38..b4d65d6 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c
index 3884302..83b013b 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c
index 9706d7a..d570dcd 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c
index 7166f95..a3a5f44 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c
index b448c20..d38b6ab 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c
index 98941b4..74ff46f 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c
index 409bce0..a40019e 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c
index 39c9cf7..934169c 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c
index c3da43b..c351daa 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c
index 5228dbe..8ca078c 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c
index a2beff5..b57cf604 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c
index bfa4914..6d35a4a 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c
index bc57393..0710110 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c
index 87cb8da7..dd41811 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c
index 3845e8e..5b2e8d6 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c
index 964eff7..98a798c 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c
index ea47baf..413a81c 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c
index 68cb5a5..78c8f19 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c
index d4a86e2..4d71b07 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c
index e8f4f12..476782c 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-1.c
index 85db957..4a54fe1 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-2.c
index f8839cf..bb4ac9d 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-3.c
index 9150e27..e12e953 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-3.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-3.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c
index cc36bf1..de5c461 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c
index 6245896..3556daa 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c
index c5de1ac..fa6ee6f 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c
index 6b85e87..22a8f6b 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c
index 442473f..bd942da 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c
index 8765888..a5e513c 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c
index c2766d5..ab8265b 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c
index 5fcdedd..8a09f61 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c
index 96b14aa..0d0475a 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c
index bf8414b..58470ae 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c
index c60ff2b..0b10740 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
/* { dg-timeout 500 } */
#include "../simd_correctness_check.h"
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c
index 12cb023..61f2832 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
/* { dg-timeout 500 } */
#include "../simd_correctness_check.h"
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstp.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstp.c
index ac0ade8..30d6ed5 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstp.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstp.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c
index a2b110f..e74dfb0 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c
index 8a35dfe..5bae5a6 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c
index ffd8054..4a76ee6 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-1.c
index 8d0d566..5bf7536 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-2.c
index 5dba807..ffbdb00 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-3.c
index 7f6d2f4..d13f7d0 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-3.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-3.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-4.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-4.c
index 9c5bb91..2d6b923 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-4.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-4.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c
index af75f8e..ab3abf2 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c
index 37c769a..078d229 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c
index 0b51cb8..1999543 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c
index 26b51ee..3d9b1a81 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvh.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvh.c
index aa802b2..aefcdb9 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvh.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvh.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvl.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvl.c
index 88c66f2..4226f86 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvl.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvl.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c
index 2b9dcc0..c45d726 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vld.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vld.c
index 7cd9abb..815ca0c 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vld.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vld.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vldi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vldi.c
index 089500e..6ba93f7 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vldi.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vldi.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmadd.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmadd.c
index 3fade51..3336930 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmadd.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmadd.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c
index d3fd83d..2f55309 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c
index 8392856..0a48f65 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c
index bab2c6c..091343e 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c
index 5875aa5..42d873b 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c
index 4be7fce..9f6aa3d 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c
index 8a4c395..6b06e20 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-1.c
index b0e22f9..c964629 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-2.c
index 51a9a92..96db676 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c
index 7cff1d8..64c61f0 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c
index b79af22..27c50bd 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-1.c
index b2a7a35..d076ae8 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-2.c
index c90cae7..c6e183f 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-1.c
index 772d040..e1e10cb 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-2.c
index 6eaae21..c0e9a1a 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-1.c
index 5470d40..cade92d 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-2.c
index 8deb044..4ecfff1 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskgez.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskgez.c
index 64a950f..7173052 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskgez.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskgez.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskltz.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskltz.c
index 8f743ec..cfccbb7 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskltz.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskltz.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsknz.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsknz.c
index d547af0..1cd2e7c 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsknz.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsknz.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsub.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsub.c
index 47cf33c..b4f171d 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsub.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsub.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c
index ab650a0..8f63037 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c
index 60b6e35..78b745a 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmul.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmul.c
index 8ba6662..5f3c049 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmul.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmul.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c
index 8357f4e..9a949ef 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c
index e4afc82..a16b518 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c
index 346f031..5fbb48e 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c
index 6eea49a..570bd1d 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c
index f3e4e03..522f079 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c
index 9f5702e..62d1e34 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vneg.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vneg.c
index 9441ba5..e077ce7 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vneg.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vneg.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnor.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnor.c
index a7a3acc..80b2da4 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnor.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnor.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnori.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnori.c
index a07a02a..fb43da2 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnori.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnori.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vor.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vor.c
index 537a1bb..7686bcb 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vor.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vor.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vori.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vori.c
index 8a6e035..d40b093 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vori.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vori.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vorn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vorn.c
index bb59bc3..6eb69cb 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vorn.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vorn.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackev.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackev.c
index 030e87f..17a43bb 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackev.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackev.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackod.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackod.c
index 783eeda..85ae43e 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackod.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackod.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpcnt.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpcnt.c
index 66982d8..0b0200e 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpcnt.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpcnt.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickev.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickev.c
index 58591f1..5fd4af8 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickev.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickev.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickod.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickod.c
index 74269e31..e41c2f8 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickod.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickod.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c
index acca2be..5ec0a4d 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpremi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpremi.c
index ef0ad67..36c9bf3 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpremi.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpremi.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c
index a5f02b1..2f16a34 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplve.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplve.c
index 463adb4..6634b3a 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplve.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplve.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplvei.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplvei.c
index a81be76..157132c 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplvei.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplvei.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotr.c
index c42440c..286fe93 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotr.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotr.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotri.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotri.c
index 4ae4dbf..81b1654 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotri.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotri.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c
index 1bc27c9..3eda1f1 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c
index 67d1899..d08f844 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-1.c
index cd8eefb..0cf4c66 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-2.c
index 31e3919..d709dbd 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseq.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseq.c
index 4362941..a031aae 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseq.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseq.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseqi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseqi.c
index c16a291..f33c4a8 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseqi.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseqi.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c
index 4e7fcc0..8153964 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c
index cd441b8..ee4a7e5 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsigncov.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsigncov.c
index 0fb1bc1..933cb3b 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsigncov.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsigncov.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-1.c
index a26eb0a..febb634 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-2.c
index 15c6ced..80b2db3 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-1.c
index 0e72a33..e78a8b0 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-2.c
index 685a1bb..361d41a 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsll.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsll.c
index 7b8ad7d..169627d 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsll.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsll.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslli.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslli.c
index 7a77e80..6a39783 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslli.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslli.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c
index 796e88c..985e32a 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c
index 5f46293..b20f92e 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-1.c
index 15c96cc..8ce161e 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-2.c
index e8d69f0..6f8ddd2 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-1.c
index 5bf3ce6..442abf6 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-2.c
index 768df52..8dbba94 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsra.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsra.c
index fd7c22a..1285aa8 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsra.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsra.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrai.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrai.c
index 2ca4f0b..efccd18 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrai.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrai.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsran.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsran.c
index 4e7c7ab..ad6dd09 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsran.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsran.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrani.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrani.c
index 9298803..6cfec397 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrani.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrani.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrar.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrar.c
index 6a842d9..b4ff772 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrar.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrar.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrari.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrari.c
index 2a353d6..8ddc615 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrari.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrari.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarn.c
index 60d4742..a0ecbc7 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarn.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarn.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarni.c
index 3aa23bd..6abc66b 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarni.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarni.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrl.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrl.c
index f9c7898..9f59d5e 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrl.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrl.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrli.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrli.c
index 7b5e9a7..29e51a3 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrli.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrli.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrln.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrln.c
index 5a8f4f7..28e8a3f 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrln.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrln.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlni.c
index ca462c8..94b58e6 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlni.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlni.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlr.c
index 211339b..ae9d885 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlr.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlr.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlri.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlri.c
index 2c3a534..d18448e 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlri.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlri.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c
index c630b42..639361d 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c
index 468a17c..11f19c2 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssran.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssran.c
index e45ca36..5ab683f 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssran.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssran.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrani.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrani.c
index 7ffcecd..526fb15 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrani.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrani.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarn.c
index a23ad7c..b3c0c37 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarn.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarn.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarni.c
index 76fac97..7785e9f 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarni.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarni.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrln.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrln.c
index ed600c7..a07d5c5 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrln.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrln.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlni.c
index 6136681..2189b81 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlni.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlni.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c
index ec688bb..e1a6330 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c
index 02f7ca0..7035d25 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-1.c
index fc4cbb4..d7a5d7f 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-2.c
index 0d59875..028664b 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vst.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vst.c
index 8afdffa..ad0eef8 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vst.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vst.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsub.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsub.c
index f5c82bc..01907dc 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsub.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsub.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubi.c
index 37e0ccf..35cd761 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubi.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubi.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c
index f0d391a..358775e 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c
index 3b18bc1..986ead0 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c
index 39ebff1..9c86884 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c
index 62837f1..c762b88 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxor.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxor.c
index 72fa971..0b9ba47 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxor.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxor.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxori.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxori.c
index cc823d4..08ceab6 100644
--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxori.c
+++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxori.c
@@ -1,4 +1,3 @@
-/* { dg-do run } */
/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
#include "../simd_correctness_check.h"
#include <lsxintrin.h>
diff --git a/gcc/testsuite/gcc.target/nios2/cdx-ldstwm-1.c b/gcc/testsuite/gcc.target/nios2/cdx-ldstwm-1.c
index 7beeea1..6b7a7d0 100644
--- a/gcc/testsuite/gcc.target/nios2/cdx-ldstwm-1.c
+++ b/gcc/testsuite/gcc.target/nios2/cdx-ldstwm-1.c
@@ -1,5 +1,5 @@
/* { dg-do assemble } */
-/* { dg-options "-O3 -fomit-frame-pointer -funroll-all-loops -finline-functions -march=r2 -mcdx -w" } */
+/* { dg-options "-O3 -fomit-frame-pointer -funroll-all-loops -finline-functions -march=r2 -mcdx -w -fpermissive" } */
/* Based on gcc.c-torture/compile/920501-23.c.
This test used to result in assembler errors with R2 CDX because of
diff --git a/gcc/testsuite/gcc.target/nios2/cdx-ldstwm-2.c b/gcc/testsuite/gcc.target/nios2/cdx-ldstwm-2.c
index 0e69534..eb273bb 100644
--- a/gcc/testsuite/gcc.target/nios2/cdx-ldstwm-2.c
+++ b/gcc/testsuite/gcc.target/nios2/cdx-ldstwm-2.c
@@ -1,6 +1,9 @@
/* { dg-do assemble } */
/* { dg-options "-O3 -fomit-frame-pointer -funroll-loops -march=r2 -mcdx -w" } */
+extern void abort (void);
+extern int exit (int);
+
/* Based on gcc.c-torture/execute/20021120-1.c.
This test used to result in assembler errors with R2 CDX because of
a bug in regrename; it wasn't re-validating insns after renaming, so
diff --git a/gcc/testsuite/gcc.target/powerpc/conditional-return.c b/gcc/testsuite/gcc.target/powerpc/conditional-return.c
index 6b3ef5f..c6491216 100644
--- a/gcc/testsuite/gcc.target/powerpc/conditional-return.c
+++ b/gcc/testsuite/gcc.target/powerpc/conditional-return.c
@@ -1,7 +1,7 @@
/* Check that a conditional return is used. */
/* { dg-do compile } */
-/* { dg-options "-O2 -w" } */
+/* { dg-options "-O2 -fpermissive -w" } */
/* { dg-final { scan-assembler {\mbeqlr\M} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/arch-29.c b/gcc/testsuite/gcc.target/riscv/arch-29.c
new file mode 100644
index 0000000..f828127
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/arch-29.c
@@ -0,0 +1,7 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64id_zcd_zcmt -mabi=lp64d" } */
+int foo()
+{
+}
+
+/* { dg-error "zcd conflicts with zcmt" "" { target *-*-* } 0 } */
diff --git a/gcc/testsuite/gcc.target/riscv/arch-30.c b/gcc/testsuite/gcc.target/riscv/arch-30.c
new file mode 100644
index 0000000..3e67ea0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/arch-30.c
@@ -0,0 +1,7 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64id_zcd_zcmp -mabi=lp64d" } */
+int foo()
+{
+}
+
+/* { dg-error "zcd conflicts with zcmp" "" { target *-*-* } 0 } */
diff --git a/gcc/testsuite/gcc.target/riscv/mcpu-sifive-x280.c b/gcc/testsuite/gcc.target/riscv/mcpu-sifive-x280.c
new file mode 100644
index 0000000..be6e13f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/mcpu-sifive-x280.c
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+/* { dg-skip-if "-march given" { *-*-* } { "-march=*" } } */
+/* { dg-options "-mcpu=sifive-x280 -mabi=lp64" } */
+/* SiFive x280 => rv64imafdcv_zfh_zba_zbb_zvfh_zvl512b */
+
+#if !((__riscv_xlen == 64) \
+ && !defined(__riscv_32e) \
+ && (__riscv_flen == 64) \
+ && defined(__riscv_c) \
+ && defined(__riscv_zfh) \
+ && defined(__riscv_zvfh) \
+ && defined(__riscv_zvl512b) \
+ && defined(__riscv_v))
+#error "unexpected arch"
+#endif
+
+int main()
+{
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/predef-13.c b/gcc/testsuite/gcc.target/riscv/predef-13.c
index 3836255..93ebb33 100644
--- a/gcc/testsuite/gcc.target/riscv/predef-13.c
+++ b/gcc/testsuite/gcc.target/riscv/predef-13.c
@@ -19,7 +19,7 @@ int main () {
#error "__riscv_c"
#endif
-#if !defined(__riscv_e) || (__riscv_e != (1 * 1000 * 1000 + 9 * 1000))
+#if !defined(__riscv_e) || (__riscv_e != (2 * 1000 * 1000 + 0 * 1000))
#error "__riscv_e"
#endif
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/copysign-zvfh-run.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/copysign-zvfh-run.c
index 3bf64ab..e71b658 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/copysign-zvfh-run.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/copysign-zvfh-run.c
@@ -1,4 +1,4 @@
-/* { dg-do run { target { riscv_v && riscv_zvfh_hw } } } */
+/* { dg-do run { target { riscv_v && riscv_zvfh } } } */
/* { dg-additional-options "-std=c99 -fno-vect-cost-model --param=riscv-autovec-preference=fixed-vlmax -ffast-math" } */
#include "copysign-template.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vadd-zvfh-run.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vadd-zvfh-run.c
index 2a8618a..6c2d096 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vadd-zvfh-run.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vadd-zvfh-run.c
@@ -1,4 +1,4 @@
-/* { dg-do run { target { riscv_v && riscv_zvfh_hw } } } */
+/* { dg-do run { target { riscv_v && riscv_zvfh } } } */
/* { dg-additional-options "-std=c99 -fno-vect-cost-model --param=riscv-autovec-preference=fixed-vlmax -ffast-math" } */
#include "vadd-template.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-zvfh-run.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-zvfh-run.c
index 1b8e692..c9f9d83 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-zvfh-run.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-zvfh-run.c
@@ -1,4 +1,4 @@
-/* { dg-do run { target { riscv_v && riscv_zvfh_hw } } } */
+/* { dg-do run { target { riscv_v && riscv_zvfh } } } */
/* { dg-additional-options "-std=c99 -fno-vect-cost-model --param=riscv-autovec-preference=fixed-vlmax -ffast-math" } */
#include "vdiv-template.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vmax-zvfh-run.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vmax-zvfh-run.c
index ea9455a..85e19c1 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vmax-zvfh-run.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vmax-zvfh-run.c
@@ -1,4 +1,4 @@
-/* { dg-do run { target { riscv_v && riscv_zvfh_hw } } } */
+/* { dg-do run { target { riscv_v && riscv_zvfh } } } */
/* { dg-additional-options "-std=c99 -fno-vect-cost-model --param=riscv-autovec-preference=fixed-vlmax -ffast-math" } */
#include "vmax-template.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vmin-zvfh-run.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vmin-zvfh-run.c
index 7be92f5..b24d4f3 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vmin-zvfh-run.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vmin-zvfh-run.c
@@ -1,4 +1,4 @@
-/* { dg-do run { target { riscv_v && riscv_zvfh_hw } } } */
+/* { dg-do run { target { riscv_v && riscv_zvfh } } } */
/* { dg-additional-options "-std=c99 -fno-vect-cost-model --param=riscv-autovec-preference=fixed-vlmax -ffast-math" } */
#include "vmin-template.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vmul-zvfh-run.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vmul-zvfh-run.c
index 1082695..63bcf70 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vmul-zvfh-run.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vmul-zvfh-run.c
@@ -1,4 +1,4 @@
-/* { dg-do run { target { riscv_zvfh } } } */
+/* { dg-do run { target { riscv_v && riscv_zvfh } } } */
/* { dg-additional-options "-std=c99 -fno-vect-cost-model --param=riscv-autovec-preference=fixed-vlmax -ffast-math" } */
#include "vmul-template.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_copysign-zvfh-run.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_copysign-zvfh-run.c
index bdf6eed..79a5130 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_copysign-zvfh-run.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/cond/cond_copysign-zvfh-run.c
@@ -1,4 +1,4 @@
-/* { dg-do run { target { riscv_v && riscv_zvfh_hw } } } */
+/* { dg-do run { target { riscv_v && riscv_zvfh } } } */
/* { dg-additional-options "-std=c99 -fno-vect-cost-model --param=riscv-autovec-preference=fixed-vlmax -ffast-math" } */
#include "cond_copysign-template.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112552.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112552.c
index 32d221c..4ef76cd 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112552.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112552.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O3 -march=rv64gcv -mabi=lp64d --param=riscv-autovec-preference=fixed-vlmax -w" } */
+/* { dg-options "-O3 -march=rv64gcv -mabi=lp64d --param=riscv-autovec-preference=fixed-vlmax -w -Wno-incompatible-pointer-types" } */
int a, c, d;
void (*b)();
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112694-2.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112694-2.c
new file mode 100644
index 0000000..b99cd45
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112694-2.c
@@ -0,0 +1,35 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zve64d_zvfh_zfh -mabi=lp64d -O3 -fno-vect-cost-model" } */
+
+long a[100], b[100], c[100];
+
+void g1 ()
+{
+ for (int i = 0; i < 100; i += 2)
+ {
+ c[i] += a[b[i]] + 1;
+ c[i + 1] += a[b[i + 1]] + 2;
+ }
+}
+
+long g2 ()
+{
+ long res = 0;
+ for (int i = 0; i < 100; i += 2)
+ {
+ res += a[b[i + 1]];
+ res += a[b[i]];
+ }
+ return res;
+}
+
+long g3 ()
+{
+ long res = 0;
+ for (int i = 0; i < 100; i += 2)
+ {
+ res += a[b[i]];
+ res += a[b[i + 1]];
+ }
+ return res;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112694-3.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112694-3.c
new file mode 100644
index 0000000..d65488b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112694-3.c
@@ -0,0 +1,37 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zve64d_zvfh_zfh -mabi=lp64d -fdiagnostics-plain-output -flto -ffat-lto-objects -ftree-vectorize -fno-tree-loop-distribute-patterns -fno-vect-cost-model -fno-common -O3" } */
+
+#define VECTOR_BITS 512
+#define N (VECTOR_BITS * 11 / 64 + 4)
+
+#define add(A, B) ((A) + (B))
+
+#define DEF(OP) \
+ void __attribute__ ((noipa)) \
+ f_##OP (double *restrict a, double *restrict b, double x) \
+ { \
+ for (int i = 0; i < N; i += 2) \
+ { \
+ a[i] = b[i] < 100 ? OP (b[i], x) : b[i]; \
+ a[i + 1] = b[i + 1] < 70 ? OP (b[i + 1], x) : b[i + 1]; \
+ } \
+ }
+
+#define TEST(OP) \
+ { \
+ f_##OP (a, b, 10); \
+ _Pragma("GCC novector") \
+ for (int i = 0; i < N; ++i) \
+ { \
+ int bval = (i % 17) * 10; \
+ int truev = OP (bval, 10); \
+ if (a[i] != (bval < (i & 1 ? 70 : 100) ? truev : bval)) \
+ __builtin_abort (); \
+ asm volatile ("" ::: "memory"); \
+ } \
+ }
+
+#define FOR_EACH_OP(T) \
+ T (add) \
+
+FOR_EACH_OP (DEF)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112801.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112801.c
new file mode 100644
index 0000000..eaf5c1c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112801.c
@@ -0,0 +1,36 @@
+/* { dg-do run } */
+/* { dg-options "-O3" } */
+/* { dg-require-effective-target rv64 } */
+/* { dg-require-effective-target riscv_v } */
+
+#include <assert.h>
+int a;
+void c(int b) { a = b; }
+char d;
+char *const e = &d;
+long f = 66483309998;
+unsigned long g[2];
+short h;
+int k;
+void __attribute__ ((noinline)) l() {
+ int i = 0;
+ for (; i < 2; i++) {
+ {
+ unsigned long *m = &g[0];
+ *m &= 2;
+ if (f && *e)
+ for (;;)
+ ;
+ }
+ k = f;
+ g[1] = k;
+ for (; h;)
+ ;
+ }
+}
+int main() {
+ l();
+ assert (g[1] == 2058800558);
+ c(g[1] >> 32);
+ assert (a == 0);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112851.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112851.c
new file mode 100644
index 0000000..ff2e4fa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112851.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv_zvl1024b -mabi=ilp32d -O3 -fomit-frame-pointer -funroll-loops -fpeel-loops -ftracer -finline-functions" } */
+
+int safe_lshift_func_int32_t_s_s_left, safe_lshift_func_int32_t_s_s_right,
+ safe_sub_func_uint64_t_u_u_ui2, safe_mul_func_uint64_t_u_u_ui2, g_79_2,
+ g_97_l_439;
+void g_97(int * __restrict l_437)
+{
+ for (; g_97_l_439; g_97_l_439 += 1)
+ for (char l_502 = 0; l_502 < 4; l_502++)
+ {
+ int __trans_tmp_14 = ((safe_lshift_func_int32_t_s_s_right >= 2
+ || safe_lshift_func_int32_t_s_s_left)
+ ? 1 : safe_lshift_func_int32_t_s_s_right);
+ long __trans_tmp_15 = __trans_tmp_14 * safe_mul_func_uint64_t_u_u_ui2;
+ unsigned short __trans_tmp_16 = -__trans_tmp_15;
+ int __trans_tmp_7
+ = (__trans_tmp_16 ^ 65535UL) - safe_sub_func_uint64_t_u_u_ui2;
+ *l_437 ^= (short)(__trans_tmp_7 ^ g_79_2);
+ }
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112852.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112852.c
new file mode 100644
index 0000000..2d6e27e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112852.c
@@ -0,0 +1,87 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvl512b -mabi=lp64d -O3 -funroll-loops -ftracer" } */
+
+struct platform_device;
+typedef unsigned long __kernel_size_t;
+typedef unsigned short __u16;
+typedef unsigned int __u32;
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef __kernel_size_t size_t;
+typedef __u32 uint32_t;
+static inline __attribute__ ((always_inline))
+uint32_t __attribute__ ((pure)) bfin_dspid (void)
+{
+ return ( {
+ uint32_t __v; __v;}
+ );
+}
+struct list_head {
+ struct list_head *next, *prev;
+};
+struct page {
+ union {
+ };
+ struct list_head lru;
+};
+struct device_driver {
+ const char *name;
+ struct module *owner;
+};
+struct fb_info {
+ struct device *dev;
+};
+struct platform_driver {
+ int (*probe) (struct platform_device *);
+ int (*remove) (struct platform_device *);
+ struct device_driver driver;
+};
+struct firmware {
+ size_t size;
+ const u8 *data;
+};
+struct metronomefb_par {
+ struct fb_info *info;
+};
+struct waveform_hdr {
+ u8 trc;
+};
+static u8 calc_cksum (int start, int end, u8 * mem)
+{
+ u8 tmp = 0;
+ int i;
+ for (i = start; i < end; i++)
+ tmp += mem[i];
+ return tmp;
+}
+extern struct waveform_hdr *wfm_hdr;
+extern int wmta;
+
+static int
+load_waveform (u8 * mem, size_t size, int m, int t, struct metronomefb_par *par)
+{
+ int tta;
+ int trn = 0;
+ int i;
+ u8 cksum;
+ int cksum_idx;
+ struct device *dev = par->info->dev;
+ for (i = 0; i <= sizeof (*wfm_hdr) + wfm_hdr->trc; i++) {
+ if (mem[i] > t) {
+ trn = i - sizeof (*wfm_hdr) - 1;
+ }
+ }
+ tta = * (mem + wmta + m * 4) & 0x00FFFFFF;
+ cksum_idx = tta + trn * 4 + 3;
+ cksum = calc_cksum (cksum_idx - 3, cksum_idx, mem);
+ if (cksum != mem[cksum_idx]) {
+ __builtin_abort();
+ }
+}
+extern struct firmware *fw_entry;
+extern struct metronomefb_par *par;
+
+int metronomefb_probe (struct platform_device *dev)
+{
+ return load_waveform ((u8 *) fw_entry->data, fw_entry->size, 3, 31, par);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112854.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112854.c
new file mode 100644
index 0000000..8f7f13f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112854.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv_zvl1024b -mabi=ilp32d --param=riscv-autovec-preference=fixed-vlmax" } */
+
+short a, b;
+void c(int d) {
+ for (; a; a--) {
+ b = 0;
+ for (; b <= 8; b++)
+ if (d)
+ break;
+ }
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112855.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112855.c
new file mode 100644
index 0000000..f1fa669
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112855.c
@@ -0,0 +1,26 @@
+/* { dg-do run } */
+/* { dg-options "-O3" } */
+/* { dg-require-effective-target rv64 } */
+/* { dg-require-effective-target riscv_v } */
+
+#include <assert.h>
+int a;
+int b = 100;
+int c[25];
+int d;
+int main() {
+ int e;
+ d = 0;
+ for (; d < 5; d++) {
+ e = 0;
+ for (; e < 5; e++)
+ c[d * 5 + e] = 0;
+ }
+ if (b)
+ if (a)
+ for (;;)
+ ;
+ b++;
+ int volatile f = *c;
+ assert(b == 101);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112872.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112872.c
new file mode 100644
index 0000000..5c1d218
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112872.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvl1024b -mabi=lp64d --param=riscv-autovec-preference=fixed-vlmax -O3" } */
+
+int a, c;
+char b;
+short d;
+void e() {
+ for (; d; d++) {
+ for (; c;)
+ ;
+ b = 3;
+ for (; b; b = 0)
+ if (a)
+ break;
+ }
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/struct/struct_vect_run-10.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/struct/struct_vect_run-10.c
index 7903704..049280b 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/struct/struct_vect_run-10.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/struct/struct_vect_run-10.c
@@ -1,6 +1,9 @@
-/* { dg-do run { target { riscv_v && riscv_zvfh_hw } } } */
-/* { dg-additional-options "-std=c99 --param=riscv-autovec-preference=scalable -fno-vect-cost-model" } */
+/* { dg-do run { target { riscv_v && riscv_zvfh } } } */
+/* { dg-additional-options "-std=gnu99 --param=riscv-autovec-preference=scalable -fno-vect-cost-model" } */
#define TYPE _Float16
#define ITYPE int16_t
+
+/* Use a lower iteration count so we do not run into precision problems. */
+#define N 46
#include "struct_vect_run-6.c"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/struct/struct_vect_run-6.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/struct/struct_vect_run-6.c
index c096888..c836bcd 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/struct/struct_vect_run-6.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/struct/struct_vect_run-6.c
@@ -3,7 +3,9 @@
#include "struct_vect-6.c"
+#ifndef N
#define N 93
+#endif
TYPE a[N], b[N], c[N], d[N], a2[N], b2[N], c2[N], d2[N], e[N * 8];
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/abs-zvfh-run.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/abs-zvfh-run.c
index 65087d5..f0c00de 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/abs-zvfh-run.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/abs-zvfh-run.c
@@ -1,4 +1,4 @@
-/* { dg-do run { target { riscv_zvfh_hw } } } */
+/* { dg-do run { target { riscv_v && riscv_zvfh } } } */
/* { dg-additional-options "-std=c99 -fno-vect-cost-model --param=riscv-autovec-preference=fixed-vlmax -ffast-math" } */
#include "abs-template.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/vneg-zvfh-run.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/vneg-zvfh-run.c
index 64c965f..38c8c7a 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/vneg-zvfh-run.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/unop/vneg-zvfh-run.c
@@ -1,4 +1,4 @@
-/* { dg-do run { target { riscv_zvfh_hw } } } */
+/* { dg-do run { target { riscv_v && riscv_zvfh } } } */
/* { dg-additional-options "-std=c99 -fno-vect-cost-model --param=riscv-autovec-preference=fixed-vlmax -ffast-math" } */
#include "vneg-template.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/consecutive-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/consecutive-1.c
index c010c88..b9bc15f 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/consecutive-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/consecutive-1.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fdump-tree-optimized" } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d --param=riscv-autovec-lmul=m8 -O3 -fdump-tree-optimized" } */
#include "def.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/consecutive-2.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/consecutive-2.c
index ccbbb24..8c0bc20 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/consecutive-2.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/consecutive-2.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fdump-tree-optimized" } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d --param=riscv-autovec-lmul=m8 -O3 -fdump-tree-optimized" } */
#include "def.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mod-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mod-1.c
index c8caf35..57bbf8f 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mod-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mod-1.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2 --param=riscv-autovec-lmul=m8" } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 --param=riscv-autovec-lmul=m8 -fno-schedule-insns -fno-schedule-insns2 --param=riscv-autovec-lmul=m8" } */
#include "def.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-1.c
index 24bb724..18dad34 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-1.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d --param=riscv-autovec-lmul=m8 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
/* { dg-final { check-function-bodies "**" "" } } */
#include "def.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-10.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-10.c
index cae96b3..c199c33 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-10.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-10.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d --param=riscv-autovec-lmul=m8 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
/* { dg-final { check-function-bodies "**" "" } } */
#include "def.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-11.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-11.c
index e2ca21e..4737008 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-11.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-11.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d --param=riscv-autovec-lmul=m8 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
/* { dg-final { check-function-bodies "**" "" } } */
#include "def.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-12.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-12.c
index fc38e79..f61c372 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-12.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-12.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d --param=riscv-autovec-lmul=m8 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
/* { dg-final { check-function-bodies "**" "" } } */
#include "def.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-13.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-13.c
index d51922e..56a7cf0 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-13.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-13.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d --param=riscv-autovec-lmul=m8 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
/* { dg-final { check-function-bodies "**" "" } } */
#include "def.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-14.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-14.c
index 8ed8f6b..de49ed8 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-14.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-14.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d --param=riscv-autovec-lmul=m8 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
/* { dg-final { check-function-bodies "**" "" } } */
#include "def.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-15.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-15.c
index f4ce5b3..bed6a47 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-15.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-15.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d --param=riscv-autovec-lmul=m8 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
/* { dg-final { check-function-bodies "**" "" } } */
#include "def.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-16.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-16.c
index ff36d78..06ab31b 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-16.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-16.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d --param=riscv-autovec-lmul=m8 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
/* { dg-final { check-function-bodies "**" "" } } */
#include "def.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-17.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-17.c
index 754771a..c2f0e3c 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-17.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-17.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d --param=riscv-autovec-lmul=m8 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
/* { dg-final { check-function-bodies "**" "" } } */
#include "def.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-3.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-3.c
index 86ce228..77d3fed 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-3.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-3.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d --param=riscv-autovec-lmul=m8 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
/* { dg-final { check-function-bodies "**" "" } } */
#include "def.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-5.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-5.c
index 0447520..5fae343 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-5.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-5.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d --param=riscv-autovec-lmul=m8 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
/* { dg-final { check-function-bodies "**" "" } } */
#include "def.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-7.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-7.c
index d0674a4..c515f02 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-7.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-7.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d --param=riscv-autovec-lmul=m8 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
/* { dg-final { check-function-bodies "**" "" } } */
#include "def.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-8.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-8.c
index b905c74..1164ab5 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-8.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-8.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d --param=riscv-autovec-lmul=m8 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
/* { dg-final { check-function-bodies "**" "" } } */
#include "def.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-9.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-9.c
index 5f9bc05..404ef5d 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-9.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/mov-9.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d --param=riscv-autovec-lmul=m8 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
/* { dg-final { check-function-bodies "**" "" } } */
#include "def.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-1.c
index 753fa25..842bb63 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-1.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d --param=riscv-autovec-lmul=m8 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
#include "def.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-2.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-2.c
index e8fa54c..8f6ee81 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-2.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-2.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d --param=riscv-autovec-lmul=m8 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
#include "def.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-3.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-3.c
index 86a404c..0f317d6 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-3.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-3.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d --param=riscv-autovec-lmul=m8 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
#include "def.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-5.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-5.c
index 7bff6e3..b366a46 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-5.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-5.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d --param=riscv-autovec-lmul=m8 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
#include "def.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-6.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-6.c
index 1e4eca3..d35e2a4 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-6.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/spill-6.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d --param=riscv-autovec-lmul=m8 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
#include "def.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-1.c
index 5661252..41c5734 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-1.c
@@ -1,4 +1,4 @@
-/* { dg-do run { target { riscv_v && riscv_zvfh_hw } } } */
+/* { dg-do run { target { riscv_v && riscv_zvfh } } } */
/* { dg-additional-options "--param=riscv-autovec-preference=scalable -ffast-math" } */
#include <assert.h>
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-10.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-10.c
index 1fcd836..99ceef0 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-10.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-10.c
@@ -1,4 +1,4 @@
-/* { dg-do run { target { riscv_v && riscv_zvfh_hw } } } */
+/* { dg-do run { target { riscv_v && riscv_zvfh } } } */
/* { dg-additional-options "--param=riscv-autovec-preference=scalable -ffast-math" } */
#include <assert.h>
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-11.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-11.c
index 8e73095..cec71f9 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-11.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-11.c
@@ -1,4 +1,4 @@
-/* { dg-do run { target { riscv_v && riscv_zvfh_hw } } } */
+/* { dg-do run { target { riscv_v && riscv_zvfh } } } */
/* { dg-additional-options "--param=riscv-autovec-preference=scalable -ffast-math" } */
#include <assert.h>
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-12.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-12.c
index 6f04595..4afdcba 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-12.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-12.c
@@ -1,4 +1,4 @@
-/* { dg-do run { target { riscv_v && riscv_zvfh_hw } } } */
+/* { dg-do run { target { riscv_v && riscv_zvfh } } } */
/* { dg-additional-options "--param=riscv-autovec-preference=scalable -ffast-math" } */
#include <assert.h>
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-2.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-2.c
index a3ddeb0..ffb8d7f 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-2.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-2.c
@@ -1,4 +1,4 @@
-/* { dg-do run { target { riscv_v && riscv_zvfh_hw } } } */
+/* { dg-do run { target { riscv_v && riscv_zvfh } } } */
/* { dg-additional-options "--param=riscv-autovec-preference=scalable -ffast-math" } */
#include <assert.h>
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-3.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-3.c
index 47a1803..5c23112 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-3.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-3.c
@@ -1,4 +1,4 @@
-/* { dg-do run { target { riscv_v } } } */
+/* { dg-do run { target { riscv_v && riscv_zvfh } } } */
/* { dg-additional-options "--param=riscv-autovec-preference=scalable -ffast-math" } */
#include <assert.h>
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-5.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-5.c
index a5eb476..a91a516 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-5.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-5.c
@@ -1,4 +1,4 @@
-/* { dg-do run { target { riscv_v && riscv_zvfh_hw } } } */
+/* { dg-do run { target { riscv_v && riscv_zvfh } } } */
/* { dg-additional-options "--param=riscv-autovec-preference=scalable -ffast-math" } */
#include <assert.h>
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-6.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-6.c
index 046d471..5b7f000 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-6.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-6.c
@@ -1,4 +1,4 @@
-/* { dg-do run { target { riscv_v && riscv_zvfh_hw } } } */
+/* { dg-do run { target { riscv_v && riscv_zvfh } } } */
/* { dg-additional-options "--param=riscv-autovec-preference=scalable -ffast-math" } */
#include <assert.h>
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-7.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-7.c
index d10017c..f01efa3 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-7.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-7.c
@@ -1,4 +1,4 @@
-/* { dg-do run { target { riscv_v && riscv_zvfh_hw } } } */
+/* { dg-do run { target { riscv_v && riscv_zvfh } } } */
/* { dg-additional-options "--param=riscv-autovec-preference=scalable -ffast-math" } */
#include <assert.h>
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-8.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-8.c
index 2b945f9..ed79ac8 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-8.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/widen/widen_run_zvfh-8.c
@@ -1,4 +1,4 @@
-/* { dg-do run { target { riscv_v && riscv_zvfh_hw } } } */
+/* { dg-do run { target { riscv_v && riscv_zvfh } } } */
/* { dg-additional-options "--param=riscv-autovec-preference=scalable -ffast-math" } */
#include <assert.h>
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/zve32f-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/zve32f-1.c
index e0a4a1f..ab57e89 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/zve32f-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/zve32f-1.c
@@ -3,4 +3,4 @@
#include "template-1.h"
-/* { dg-final { scan-tree-dump-times "vectorized 1 loops in function" 4 "vect" } } */
+/* { dg-final { scan-tree-dump-times "vectorized 1 loops in function" 2 "vect" } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-1.c
index ae49706..adad5ab 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-1.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv32gcv -mabi=ilp32d -mmemcpy-strategy=libcall" } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -mstringop-strategy=libcall" } */
#include "cpymem-strategy.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-2.c
index 73ffc57..7a7c97d 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-2.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-2.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -march=rv32gcv -mabi=ilp32d -mmemcpy-strategy=scalar" } */
+/* { dg-options "-O2 -march=rv32gcv -mabi=ilp32d -mstringop-strategy=scalar" } */
#include "cpymem-strategy.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-3.c
index 44f5f78..83e5a83 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-3.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-3.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv32gcv -mabi=ilp32d -mmemcpy-strategy=vector" } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -mstringop-strategy=vector" } */
#include "cpymem-strategy.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-4.c b/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-4.c
index 8056895..800549c 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-4.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-4.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv32gcv -mabi=ilp32d -mmemcpy-strategy=auto" } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -mstringop-strategy=auto" } */
#include "cpymem-strategy.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-5.c b/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-5.c
index 82ecab0..134fd2e 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-5.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/cpymem-strategy-5.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv32gc -mabi=ilp32d -mmemcpy-strategy=vector" } */
+/* { dg-options "-march=rv32gc -mabi=ilp32d -mstringop-strategy=vector" } */
#include "cpymem-strategy.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-1.c
new file mode 100644
index 0000000..6b9a7c4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-1.c
@@ -0,0 +1,104 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7, size_t sum8, size_t sum9,
+ size_t sum10, size_t sum11, size_t sum12, size_t sum13, size_t sum14,
+ size_t sum15)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+ + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint8m1_t v0 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v1 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v2 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v3 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v4 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v5 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v6 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v7 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v8 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v9 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v10 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v11 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v12 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v13 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v14 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v15 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m2_t vw0 = __riscv_vwcvt_x_x_v_i16m2 (v0, vl);
+ vint16m2_t vw1 = __riscv_vwcvt_x_x_v_i16m2 (v1, vl);
+ vint16m2_t vw2 = __riscv_vwcvt_x_x_v_i16m2 (v2, vl);
+ vint16m2_t vw3 = __riscv_vwcvt_x_x_v_i16m2 (v3, vl);
+ vint16m2_t vw4 = __riscv_vwcvt_x_x_v_i16m2 (v4, vl);
+ vint16m2_t vw5 = __riscv_vwcvt_x_x_v_i16m2 (v5, vl);
+ vint16m2_t vw6 = __riscv_vwcvt_x_x_v_i16m2 (v6, vl);
+ vint16m2_t vw7 = __riscv_vwcvt_x_x_v_i16m2 (v7, vl);
+ vint16m2_t vw8 = __riscv_vwcvt_x_x_v_i16m2 (v8, vl);
+ vint16m2_t vw9 = __riscv_vwcvt_x_x_v_i16m2 (v9, vl);
+ vint16m2_t vw10 = __riscv_vwcvt_x_x_v_i16m2 (v10, vl);
+ vint16m2_t vw11 = __riscv_vwcvt_x_x_v_i16m2 (v11, vl);
+ vint16m2_t vw12 = __riscv_vwcvt_x_x_v_i16m2 (v12, vl);
+ vint16m2_t vw13 = __riscv_vwcvt_x_x_v_i16m2 (v13, vl);
+ vint16m2_t vw14 = __riscv_vwcvt_x_x_v_i16m2 (v14, vl);
+ vint16m2_t vw15 = __riscv_vwcvt_x_x_v_i16m2 (v15, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m2_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m2_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m2_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m2_i16 (vw3);
+ size_t sum4 = __riscv_vmv_x_s_i16m2_i16 (vw4);
+ size_t sum5 = __riscv_vmv_x_s_i16m2_i16 (vw5);
+ size_t sum6 = __riscv_vmv_x_s_i16m2_i16 (vw6);
+ size_t sum7 = __riscv_vmv_x_s_i16m2_i16 (vw7);
+ size_t sum8 = __riscv_vmv_x_s_i16m2_i16 (vw8);
+ size_t sum9 = __riscv_vmv_x_s_i16m2_i16 (vw9);
+ size_t sum10 = __riscv_vmv_x_s_i16m2_i16 (vw10);
+ size_t sum11 = __riscv_vmv_x_s_i16m2_i16 (vw11);
+ size_t sum12 = __riscv_vmv_x_s_i16m2_i16 (vw12);
+ size_t sum13 = __riscv_vmv_x_s_i16m2_i16 (vw13);
+ size_t sum14 = __riscv_vmv_x_s_i16m2_i16 (vw14);
+ size_t sum15 = __riscv_vmv_x_s_i16m2_i16 (vw15);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+ sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-10.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-10.c
new file mode 100644
index 0000000..5f161b3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-10.c
@@ -0,0 +1,104 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+ double sum5, double sum6, double sum7, double sum8, double sum9,
+ double sum10, double sum11, double sum12, double sum13, double sum14,
+ double sum15)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+ + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint32m1_t v0 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v1 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v2 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v3 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v4 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v5 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v6 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v7 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v8 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v9 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v10 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v11 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v12 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v13 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v14 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+ vint32m1_t v15 = __riscv_vle32_v_i32m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vfloat64m2_t vw0 = __riscv_vfwcvt_f_x_v_f64m2 (v0, vl);
+ vfloat64m2_t vw1 = __riscv_vfwcvt_f_x_v_f64m2 (v1, vl);
+ vfloat64m2_t vw2 = __riscv_vfwcvt_f_x_v_f64m2 (v2, vl);
+ vfloat64m2_t vw3 = __riscv_vfwcvt_f_x_v_f64m2 (v3, vl);
+ vfloat64m2_t vw4 = __riscv_vfwcvt_f_x_v_f64m2 (v4, vl);
+ vfloat64m2_t vw5 = __riscv_vfwcvt_f_x_v_f64m2 (v5, vl);
+ vfloat64m2_t vw6 = __riscv_vfwcvt_f_x_v_f64m2 (v6, vl);
+ vfloat64m2_t vw7 = __riscv_vfwcvt_f_x_v_f64m2 (v7, vl);
+ vfloat64m2_t vw8 = __riscv_vfwcvt_f_x_v_f64m2 (v8, vl);
+ vfloat64m2_t vw9 = __riscv_vfwcvt_f_x_v_f64m2 (v9, vl);
+ vfloat64m2_t vw10 = __riscv_vfwcvt_f_x_v_f64m2 (v10, vl);
+ vfloat64m2_t vw11 = __riscv_vfwcvt_f_x_v_f64m2 (v11, vl);
+ vfloat64m2_t vw12 = __riscv_vfwcvt_f_x_v_f64m2 (v12, vl);
+ vfloat64m2_t vw13 = __riscv_vfwcvt_f_x_v_f64m2 (v13, vl);
+ vfloat64m2_t vw14 = __riscv_vfwcvt_f_x_v_f64m2 (v14, vl);
+ vfloat64m2_t vw15 = __riscv_vfwcvt_f_x_v_f64m2 (v15, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vfmv_f_s_f64m2_f64 (vw0);
+ double sum1 = __riscv_vfmv_f_s_f64m2_f64 (vw1);
+ double sum2 = __riscv_vfmv_f_s_f64m2_f64 (vw2);
+ double sum3 = __riscv_vfmv_f_s_f64m2_f64 (vw3);
+ double sum4 = __riscv_vfmv_f_s_f64m2_f64 (vw4);
+ double sum5 = __riscv_vfmv_f_s_f64m2_f64 (vw5);
+ double sum6 = __riscv_vfmv_f_s_f64m2_f64 (vw6);
+ double sum7 = __riscv_vfmv_f_s_f64m2_f64 (vw7);
+ double sum8 = __riscv_vfmv_f_s_f64m2_f64 (vw8);
+ double sum9 = __riscv_vfmv_f_s_f64m2_f64 (vw9);
+ double sum10 = __riscv_vfmv_f_s_f64m2_f64 (vw10);
+ double sum11 = __riscv_vfmv_f_s_f64m2_f64 (vw11);
+ double sum12 = __riscv_vfmv_f_s_f64m2_f64 (vw12);
+ double sum13 = __riscv_vfmv_f_s_f64m2_f64 (vw13);
+ double sum14 = __riscv_vfmv_f_s_f64m2_f64 (vw14);
+ double sum15 = __riscv_vfmv_f_s_f64m2_f64 (vw15);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+ sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-11.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-11.c
new file mode 100644
index 0000000..82827d1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-11.c
@@ -0,0 +1,68 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+ double sum5, double sum6, double sum7)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint32m2_t v0 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+ it += vl;
+ vint32m2_t v1 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+ it += vl;
+ vint32m2_t v2 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+ it += vl;
+ vint32m2_t v3 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+ it += vl;
+ vint32m2_t v4 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+ it += vl;
+ vint32m2_t v5 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+ it += vl;
+ vint32m2_t v6 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+ it += vl;
+ vint32m2_t v7 = __riscv_vle32_v_i32m2 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vfloat64m4_t vw0 = __riscv_vfwcvt_f_x_v_f64m4 (v0, vl);
+ vfloat64m4_t vw1 = __riscv_vfwcvt_f_x_v_f64m4 (v1, vl);
+ vfloat64m4_t vw2 = __riscv_vfwcvt_f_x_v_f64m4 (v2, vl);
+ vfloat64m4_t vw3 = __riscv_vfwcvt_f_x_v_f64m4 (v3, vl);
+ vfloat64m4_t vw4 = __riscv_vfwcvt_f_x_v_f64m4 (v4, vl);
+ vfloat64m4_t vw5 = __riscv_vfwcvt_f_x_v_f64m4 (v5, vl);
+ vfloat64m4_t vw6 = __riscv_vfwcvt_f_x_v_f64m4 (v6, vl);
+ vfloat64m4_t vw7 = __riscv_vfwcvt_f_x_v_f64m4 (v7, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vfmv_f_s_f64m4_f64 (vw0);
+ double sum1 = __riscv_vfmv_f_s_f64m4_f64 (vw1);
+ double sum2 = __riscv_vfmv_f_s_f64m4_f64 (vw2);
+ double sum3 = __riscv_vfmv_f_s_f64m4_f64 (vw3);
+ double sum4 = __riscv_vfmv_f_s_f64m4_f64 (vw4);
+ double sum5 = __riscv_vfmv_f_s_f64m4_f64 (vw5);
+ double sum6 = __riscv_vfmv_f_s_f64m4_f64 (vw6);
+ double sum7 = __riscv_vfmv_f_s_f64m4_f64 (vw7);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-12.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-12.c
new file mode 100644
index 0000000..c4ae607
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-12.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3)
+{
+ return sum0 + sum1 + sum2 + sum3;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint32m4_t v0 = __riscv_vle32_v_i32m4 ((void *) it, vl);
+ it += vl;
+ vint32m4_t v1 = __riscv_vle32_v_i32m4 ((void *) it, vl);
+ it += vl;
+ vint32m4_t v2 = __riscv_vle32_v_i32m4 ((void *) it, vl);
+ it += vl;
+ vint32m4_t v3 = __riscv_vle32_v_i32m4 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vfloat64m8_t vw0 = __riscv_vfwcvt_f_x_v_f64m8 (v0, vl);
+ vfloat64m8_t vw1 = __riscv_vfwcvt_f_x_v_f64m8 (v1, vl);
+ vfloat64m8_t vw2 = __riscv_vfwcvt_f_x_v_f64m8 (v2, vl);
+ vfloat64m8_t vw3 = __riscv_vfwcvt_f_x_v_f64m8 (v3, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vfmv_f_s_f64m8_f64 (vw0);
+ double sum1 = __riscv_vfmv_f_s_f64m8_f64 (vw1);
+ double sum2 = __riscv_vfmv_f_s_f64m8_f64 (vw2);
+ double sum3 = __riscv_vfmv_f_s_f64m8_f64 (vw3);
+
+ sum += sumation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-13.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-13.c
new file mode 100644
index 0000000..fde7076
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-13.c
@@ -0,0 +1,188 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+ double sum5, double sum6, double sum7, double sum8, double sum9,
+ double sum10, double sum11, double sum12, double sum13, double sum14,
+ double sum15)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+ + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v8 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v9 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v10 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v11 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v12 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v13 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v14 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v15 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint64m2_t vw0 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v0, vl);
+ vint64m2_t vw1 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v1, vl);
+ vint64m2_t vw2 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v2, vl);
+ vint64m2_t vw3 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v3, vl);
+ vint64m2_t vw4 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v4, vl);
+ vint64m2_t vw5 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v5, vl);
+ vint64m2_t vw6 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v6, vl);
+ vint64m2_t vw7 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v7, vl);
+ vint64m2_t vw8 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v8, vl);
+ vint64m2_t vw9 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v9, vl);
+ vint64m2_t vw10 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v10, vl);
+ vint64m2_t vw11 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v11, vl);
+ vint64m2_t vw12 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v12, vl);
+ vint64m2_t vw13 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v13, vl);
+ vint64m2_t vw14 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v14, vl);
+ vint64m2_t vw15 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v15, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+ double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+ double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+ double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+ double sum4 = __riscv_vmv_x_s_i64m2_i64 (vw4);
+ double sum5 = __riscv_vmv_x_s_i64m2_i64 (vw5);
+ double sum6 = __riscv_vmv_x_s_i64m2_i64 (vw6);
+ double sum7 = __riscv_vmv_x_s_i64m2_i64 (vw7);
+ double sum8 = __riscv_vmv_x_s_i64m2_i64 (vw8);
+ double sum9 = __riscv_vmv_x_s_i64m2_i64 (vw9);
+ double sum10 = __riscv_vmv_x_s_i64m2_i64 (vw10);
+ double sum11 = __riscv_vmv_x_s_i64m2_i64 (vw11);
+ double sum12 = __riscv_vmv_x_s_i64m2_i64 (vw12);
+ double sum13 = __riscv_vmv_x_s_i64m2_i64 (vw13);
+ double sum14 = __riscv_vmv_x_s_i64m2_i64 (vw14);
+ double sum15 = __riscv_vmv_x_s_i64m2_i64 (vw15);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+ sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+ }
+ return sum;
+}
+
+double
+foo2 (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v8 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v9 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v10 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v11 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v12 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v13 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v14 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v15 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint64m2_t vw0 = __riscv_vfwcvt_x_f_v_i64m2 (v0, vl);
+ vint64m2_t vw1 = __riscv_vfwcvt_x_f_v_i64m2 (v1, vl);
+ vint64m2_t vw2 = __riscv_vfwcvt_x_f_v_i64m2 (v2, vl);
+ vint64m2_t vw3 = __riscv_vfwcvt_x_f_v_i64m2 (v3, vl);
+ vint64m2_t vw4 = __riscv_vfwcvt_x_f_v_i64m2 (v4, vl);
+ vint64m2_t vw5 = __riscv_vfwcvt_x_f_v_i64m2 (v5, vl);
+ vint64m2_t vw6 = __riscv_vfwcvt_x_f_v_i64m2 (v6, vl);
+ vint64m2_t vw7 = __riscv_vfwcvt_x_f_v_i64m2 (v7, vl);
+ vint64m2_t vw8 = __riscv_vfwcvt_x_f_v_i64m2 (v8, vl);
+ vint64m2_t vw9 = __riscv_vfwcvt_x_f_v_i64m2 (v9, vl);
+ vint64m2_t vw10 = __riscv_vfwcvt_x_f_v_i64m2 (v10, vl);
+ vint64m2_t vw11 = __riscv_vfwcvt_x_f_v_i64m2 (v11, vl);
+ vint64m2_t vw12 = __riscv_vfwcvt_x_f_v_i64m2 (v12, vl);
+ vint64m2_t vw13 = __riscv_vfwcvt_x_f_v_i64m2 (v13, vl);
+ vint64m2_t vw14 = __riscv_vfwcvt_x_f_v_i64m2 (v14, vl);
+ vint64m2_t vw15 = __riscv_vfwcvt_x_f_v_i64m2 (v15, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+ double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+ double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+ double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+ double sum4 = __riscv_vmv_x_s_i64m2_i64 (vw4);
+ double sum5 = __riscv_vmv_x_s_i64m2_i64 (vw5);
+ double sum6 = __riscv_vmv_x_s_i64m2_i64 (vw6);
+ double sum7 = __riscv_vmv_x_s_i64m2_i64 (vw7);
+ double sum8 = __riscv_vmv_x_s_i64m2_i64 (vw8);
+ double sum9 = __riscv_vmv_x_s_i64m2_i64 (vw9);
+ double sum10 = __riscv_vmv_x_s_i64m2_i64 (vw10);
+ double sum11 = __riscv_vmv_x_s_i64m2_i64 (vw11);
+ double sum12 = __riscv_vmv_x_s_i64m2_i64 (vw12);
+ double sum13 = __riscv_vmv_x_s_i64m2_i64 (vw13);
+ double sum14 = __riscv_vmv_x_s_i64m2_i64 (vw14);
+ double sum15 = __riscv_vmv_x_s_i64m2_i64 (vw15);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+ sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-14.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-14.c
new file mode 100644
index 0000000..535ea7c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-14.c
@@ -0,0 +1,119 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+ double sum5, double sum6, double sum7)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint64m2_t vw0 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v0, vl);
+ vint64m2_t vw1 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v1, vl);
+ vint64m2_t vw2 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v2, vl);
+ vint64m2_t vw3 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v3, vl);
+ vint64m2_t vw4 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v4, vl);
+ vint64m2_t vw5 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v5, vl);
+ vint64m2_t vw6 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v6, vl);
+ vint64m2_t vw7 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v7, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+ double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+ double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+ double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+ double sum4 = __riscv_vmv_x_s_i64m2_i64 (vw4);
+ double sum5 = __riscv_vmv_x_s_i64m2_i64 (vw5);
+ double sum6 = __riscv_vmv_x_s_i64m2_i64 (vw6);
+ double sum7 = __riscv_vmv_x_s_i64m2_i64 (vw7);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+ }
+ return sum;
+}
+
+double
+foo2 (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint64m2_t vw0 = __riscv_vfwcvt_x_f_v_i64m2 (v0, vl);
+ vint64m2_t vw1 = __riscv_vfwcvt_x_f_v_i64m2 (v1, vl);
+ vint64m2_t vw2 = __riscv_vfwcvt_x_f_v_i64m2 (v2, vl);
+ vint64m2_t vw3 = __riscv_vfwcvt_x_f_v_i64m2 (v3, vl);
+ vint64m2_t vw4 = __riscv_vfwcvt_x_f_v_i64m2 (v4, vl);
+ vint64m2_t vw5 = __riscv_vfwcvt_x_f_v_i64m2 (v5, vl);
+ vint64m2_t vw6 = __riscv_vfwcvt_x_f_v_i64m2 (v6, vl);
+ vint64m2_t vw7 = __riscv_vfwcvt_x_f_v_i64m2 (v7, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+ double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+ double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+ double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+ double sum4 = __riscv_vmv_x_s_i64m2_i64 (vw4);
+ double sum5 = __riscv_vmv_x_s_i64m2_i64 (vw5);
+ double sum6 = __riscv_vmv_x_s_i64m2_i64 (vw6);
+ double sum7 = __riscv_vmv_x_s_i64m2_i64 (vw7);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-15.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-15.c
new file mode 100644
index 0000000..3d46e4a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-15.c
@@ -0,0 +1,86 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3)
+{
+ return sum0 + sum1 + sum2 + sum3;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint64m2_t vw0 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v0, vl);
+ vint64m2_t vw1 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v1, vl);
+ vint64m2_t vw2 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v2, vl);
+ vint64m2_t vw3 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v3, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+ double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+ double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+ double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+
+ sum += sumation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+double
+foo2 (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint64m2_t vw0 = __riscv_vfwcvt_x_f_v_i64m2 (v0, vl);
+ vint64m2_t vw1 = __riscv_vfwcvt_x_f_v_i64m2 (v1, vl);
+ vint64m2_t vw2 = __riscv_vfwcvt_x_f_v_i64m2 (v2, vl);
+ vint64m2_t vw3 = __riscv_vfwcvt_x_f_v_i64m2 (v3, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0);
+ double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1);
+ double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2);
+ double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3);
+
+ sum += sumation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-16.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-16.c
new file mode 100644
index 0000000..98f4245
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-16.c
@@ -0,0 +1,68 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint8m1_t v0 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v1 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v2 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v3 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v4 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v5 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v6 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v7 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint32m4_t vw0 = __riscv_vsext_vf4_i32m4 (v0, vl);
+ vint32m4_t vw1 = __riscv_vsext_vf4_i32m4 (v1, vl);
+ vint32m4_t vw2 = __riscv_vsext_vf4_i32m4 (v2, vl);
+ vint32m4_t vw3 = __riscv_vsext_vf4_i32m4 (v3, vl);
+ vint32m4_t vw4 = __riscv_vsext_vf4_i32m4 (v4, vl);
+ vint32m4_t vw5 = __riscv_vsext_vf4_i32m4 (v5, vl);
+ vint32m4_t vw6 = __riscv_vsext_vf4_i32m4 (v6, vl);
+ vint32m4_t vw7 = __riscv_vsext_vf4_i32m4 (v7, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i32m4_i32 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i32m4_i32 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i32m4_i32 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i32m4_i32 (vw3);
+ size_t sum4 = __riscv_vmv_x_s_i32m4_i32 (vw4);
+ size_t sum5 = __riscv_vmv_x_s_i32m4_i32 (vw5);
+ size_t sum6 = __riscv_vmv_x_s_i32m4_i32 (vw6);
+ size_t sum7 = __riscv_vmv_x_s_i32m4_i32 (vw7);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-17.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-17.c
new file mode 100644
index 0000000..9b60005
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-17.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3)
+{
+ return sum0 + sum1 + sum2 + sum3;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint8m2_t v0 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v1 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v2 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v3 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint32m8_t vw0 = __riscv_vsext_vf4_i32m8 (v0, vl);
+ vint32m8_t vw1 = __riscv_vsext_vf4_i32m8 (v1, vl);
+ vint32m8_t vw2 = __riscv_vsext_vf4_i32m8 (v2, vl);
+ vint32m8_t vw3 = __riscv_vsext_vf4_i32m8 (v3, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i32m8_i32 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i32m8_i32 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i32m8_i32 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i32m8_i32 (vw3);
+
+ sum += sumation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-18.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-18.c
new file mode 100644
index 0000000..dd65b2f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-18.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3)
+{
+ return sum0 + sum1 + sum2 + sum3;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint8m1_t v0 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v1 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v2 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v3 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint64m8_t vw0 = __riscv_vsext_vf8_i64m8 (v0, vl);
+ vint64m8_t vw1 = __riscv_vsext_vf8_i64m8 (v1, vl);
+ vint64m8_t vw2 = __riscv_vsext_vf8_i64m8 (v2, vl);
+ vint64m8_t vw3 = __riscv_vsext_vf8_i64m8 (v3, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i64m8_i64 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i64m8_i64 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i64m8_i64 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i64m8_i64 (vw3);
+
+ sum += sumation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-19.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-19.c
new file mode 100644
index 0000000..affe1aa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-19.c
@@ -0,0 +1,103 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7, size_t sum8, size_t sum9,
+ size_t sum10, size_t sum11, size_t sum12, size_t sum13, size_t sum14,
+ size_t sum15)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+ + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+size_t __attribute__ ((noinline))
+foo (short const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = 4;
+ const short *it = buf;
+ for (int i = 0; i < len; i++)
+ {
+ vint16m2_t v0 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v1 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v2 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v3 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v4 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v5 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v6 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v7 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v8 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v9 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v10 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v11 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v12 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v13 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v14 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v15 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m2_t vw0 = __riscv_vwadd_wx_i16m2 (v0, 55, vl);
+ vint16m2_t vw1 = __riscv_vwadd_wx_i16m2 (v1, 55, vl);
+ vint16m2_t vw2 = __riscv_vwadd_wx_i16m2 (v2, 55, vl);
+ vint16m2_t vw3 = __riscv_vwadd_wx_i16m2 (v3, 55, vl);
+ vint16m2_t vw4 = __riscv_vwadd_wx_i16m2 (v4, 55, vl);
+ vint16m2_t vw5 = __riscv_vwadd_wx_i16m2 (v5, 55, vl);
+ vint16m2_t vw6 = __riscv_vwadd_wx_i16m2 (v6, 55, vl);
+ vint16m2_t vw7 = __riscv_vwadd_wx_i16m2 (v7, 55, vl);
+ vint16m2_t vw8 = __riscv_vwadd_wx_i16m2 (v8, 55, vl);
+ vint16m2_t vw9 = __riscv_vwadd_wx_i16m2 (v9, 55, vl);
+ vint16m2_t vw10 = __riscv_vwadd_wx_i16m2 (v10, 55, vl);
+ vint16m2_t vw11 = __riscv_vwadd_wx_i16m2 (v11, 55, vl);
+ vint16m2_t vw12 = __riscv_vwadd_wx_i16m2 (v12, 55, vl);
+ vint16m2_t vw13 = __riscv_vwadd_wx_i16m2 (v13, 55, vl);
+ vint16m2_t vw14 = __riscv_vwadd_wx_i16m2 (v14, 55, vl);
+ vint16m2_t vw15 = __riscv_vwadd_wx_i16m2 (v15, 55, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m2_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m2_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m2_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m2_i16 (vw3);
+ size_t sum4 = __riscv_vmv_x_s_i16m2_i16 (vw4);
+ size_t sum5 = __riscv_vmv_x_s_i16m2_i16 (vw5);
+ size_t sum6 = __riscv_vmv_x_s_i16m2_i16 (vw6);
+ size_t sum7 = __riscv_vmv_x_s_i16m2_i16 (vw7);
+ size_t sum8 = __riscv_vmv_x_s_i16m2_i16 (vw8);
+ size_t sum9 = __riscv_vmv_x_s_i16m2_i16 (vw9);
+ size_t sum10 = __riscv_vmv_x_s_i16m2_i16 (vw10);
+ size_t sum11 = __riscv_vmv_x_s_i16m2_i16 (vw11);
+ size_t sum12 = __riscv_vmv_x_s_i16m2_i16 (vw12);
+ size_t sum13 = __riscv_vmv_x_s_i16m2_i16 (vw13);
+ size_t sum14 = __riscv_vmv_x_s_i16m2_i16 (vw14);
+ size_t sum15 = __riscv_vmv_x_s_i16m2_i16 (vw15);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+ sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-2.c
new file mode 100644
index 0000000..da92d59
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-2.c
@@ -0,0 +1,68 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint8m2_t v0 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v1 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v2 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v3 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v4 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v5 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v6 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v7 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m4_t vw0 = __riscv_vwcvt_x_x_v_i16m4 (v0, vl);
+ vint16m4_t vw1 = __riscv_vwcvt_x_x_v_i16m4 (v1, vl);
+ vint16m4_t vw2 = __riscv_vwcvt_x_x_v_i16m4 (v2, vl);
+ vint16m4_t vw3 = __riscv_vwcvt_x_x_v_i16m4 (v3, vl);
+ vint16m4_t vw4 = __riscv_vwcvt_x_x_v_i16m4 (v4, vl);
+ vint16m4_t vw5 = __riscv_vwcvt_x_x_v_i16m4 (v5, vl);
+ vint16m4_t vw6 = __riscv_vwcvt_x_x_v_i16m4 (v6, vl);
+ vint16m4_t vw7 = __riscv_vwcvt_x_x_v_i16m4 (v7, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m4_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m4_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m4_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m4_i16 (vw3);
+ size_t sum4 = __riscv_vmv_x_s_i16m4_i16 (vw4);
+ size_t sum5 = __riscv_vmv_x_s_i16m4_i16 (vw5);
+ size_t sum6 = __riscv_vmv_x_s_i16m4_i16 (vw6);
+ size_t sum7 = __riscv_vmv_x_s_i16m4_i16 (vw7);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-20.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-20.c
new file mode 100644
index 0000000..72f3644
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-20.c
@@ -0,0 +1,103 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvfh_zfh -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7, size_t sum8, size_t sum9,
+ size_t sum10, size_t sum11, size_t sum12, size_t sum13, size_t sum14,
+ size_t sum15)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+ + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+size_t __attribute__ ((noinline))
+foo (float const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = 4;
+ const float *it = buf;
+ for (int i = 0; i < len; i++)
+ {
+ vfloat32m2_t v0 = __riscv_vle32_v_f32m2 (it, vl);
+ it += vl;
+ vfloat32m2_t v1 = __riscv_vle32_v_f32m2 (it, vl);
+ it += vl;
+ vfloat32m2_t v2 = __riscv_vle32_v_f32m2 (it, vl);
+ it += vl;
+ vfloat32m2_t v3 = __riscv_vle32_v_f32m2 (it, vl);
+ it += vl;
+ vfloat32m2_t v4 = __riscv_vle32_v_f32m2 (it, vl);
+ it += vl;
+ vfloat32m2_t v5 = __riscv_vle32_v_f32m2 (it, vl);
+ it += vl;
+ vfloat32m2_t v6 = __riscv_vle32_v_f32m2 (it, vl);
+ it += vl;
+ vfloat32m2_t v7 = __riscv_vle32_v_f32m2 (it, vl);
+ it += vl;
+ vfloat32m2_t v8 = __riscv_vle32_v_f32m2 (it, vl);
+ it += vl;
+ vfloat32m2_t v9 = __riscv_vle32_v_f32m2 (it, vl);
+ it += vl;
+ vfloat32m2_t v10 = __riscv_vle32_v_f32m2 (it, vl);
+ it += vl;
+ vfloat32m2_t v11 = __riscv_vle32_v_f32m2 (it, vl);
+ it += vl;
+ vfloat32m2_t v12 = __riscv_vle32_v_f32m2 (it, vl);
+ it += vl;
+ vfloat32m2_t v13 = __riscv_vle32_v_f32m2 (it, vl);
+ it += vl;
+ vfloat32m2_t v14 = __riscv_vle32_v_f32m2 (it, vl);
+ it += vl;
+ vfloat32m2_t v15 = __riscv_vle32_v_f32m2 (it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vfloat32m2_t vw0 = __riscv_vfwadd_wf_f32m2 (v0, 55, vl);
+ vfloat32m2_t vw1 = __riscv_vfwadd_wf_f32m2 (v1, 55, vl);
+ vfloat32m2_t vw2 = __riscv_vfwadd_wf_f32m2 (v2, 55, vl);
+ vfloat32m2_t vw3 = __riscv_vfwadd_wf_f32m2 (v3, 55, vl);
+ vfloat32m2_t vw4 = __riscv_vfwadd_wf_f32m2 (v4, 55, vl);
+ vfloat32m2_t vw5 = __riscv_vfwadd_wf_f32m2 (v5, 55, vl);
+ vfloat32m2_t vw6 = __riscv_vfwadd_wf_f32m2 (v6, 55, vl);
+ vfloat32m2_t vw7 = __riscv_vfwadd_wf_f32m2 (v7, 55, vl);
+ vfloat32m2_t vw8 = __riscv_vfwadd_wf_f32m2 (v8, 55, vl);
+ vfloat32m2_t vw9 = __riscv_vfwadd_wf_f32m2 (v9, 55, vl);
+ vfloat32m2_t vw10 = __riscv_vfwadd_wf_f32m2 (v10, 55, vl);
+ vfloat32m2_t vw11 = __riscv_vfwadd_wf_f32m2 (v11, 55, vl);
+ vfloat32m2_t vw12 = __riscv_vfwadd_wf_f32m2 (v12, 55, vl);
+ vfloat32m2_t vw13 = __riscv_vfwadd_wf_f32m2 (v13, 55, vl);
+ vfloat32m2_t vw14 = __riscv_vfwadd_wf_f32m2 (v14, 55, vl);
+ vfloat32m2_t vw15 = __riscv_vfwadd_wf_f32m2 (v15, 55, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vfmv_f_s_f32m2_f32 (vw0);
+ size_t sum1 = __riscv_vfmv_f_s_f32m2_f32 (vw1);
+ size_t sum2 = __riscv_vfmv_f_s_f32m2_f32 (vw2);
+ size_t sum3 = __riscv_vfmv_f_s_f32m2_f32 (vw3);
+ size_t sum4 = __riscv_vfmv_f_s_f32m2_f32 (vw4);
+ size_t sum5 = __riscv_vfmv_f_s_f32m2_f32 (vw5);
+ size_t sum6 = __riscv_vfmv_f_s_f32m2_f32 (vw6);
+ size_t sum7 = __riscv_vfmv_f_s_f32m2_f32 (vw7);
+ size_t sum8 = __riscv_vfmv_f_s_f32m2_f32 (vw8);
+ size_t sum9 = __riscv_vfmv_f_s_f32m2_f32 (vw9);
+ size_t sum10 = __riscv_vfmv_f_s_f32m2_f32 (vw10);
+ size_t sum11 = __riscv_vfmv_f_s_f32m2_f32 (vw11);
+ size_t sum12 = __riscv_vfmv_f_s_f32m2_f32 (vw12);
+ size_t sum13 = __riscv_vfmv_f_s_f32m2_f32 (vw13);
+ size_t sum14 = __riscv_vfmv_f_s_f32m2_f32 (vw14);
+ size_t sum15 = __riscv_vfmv_f_s_f32m2_f32 (vw15);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+ sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-21.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-21.c
new file mode 100644
index 0000000..3e43c94
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-21.c
@@ -0,0 +1,106 @@
+/* { dg-do run { target { riscv_v } } } */
+/* { dg-additional-options "-O3 -ansi -pedantic-errors -std=gnu99" } */
+
+#include <riscv_vector.h>
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7, size_t sum8, size_t sum9,
+ size_t sum10, size_t sum11, size_t sum12, size_t sum13, size_t sum14,
+ size_t sum15)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+ + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+size_t __attribute__ ((noinline))
+foo (short const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = 4;
+ const short *it = buf;
+ for (int i = 0; i < len; i++)
+ {
+ vint16m2_t v0 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v1 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v2 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v3 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v4 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v5 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v6 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v7 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v8 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v9 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v10 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v11 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v12 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v13 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v14 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+ vint16m2_t v15 = __riscv_vle16_v_i16m2 (it, vl);
+ it += vl;
+
+ asm volatile ("" ::: "memory");
+ vint16m2_t vw0 = __riscv_vwadd_wx_i16m2 (v0, 55, vl);
+ vint16m2_t vw1 = __riscv_vwadd_wx_i16m2 (v1, 55, vl);
+ vint16m2_t vw2 = __riscv_vwadd_wx_i16m2 (v2, 55, vl);
+ vint16m2_t vw3 = __riscv_vwadd_wx_i16m2 (v3, 55, vl);
+ vint16m2_t vw4 = __riscv_vwadd_wx_i16m2 (v4, 55, vl);
+ vint16m2_t vw5 = __riscv_vwadd_wx_i16m2 (v5, 55, vl);
+ vint16m2_t vw6 = __riscv_vwadd_wx_i16m2 (v6, 55, vl);
+ vint16m2_t vw7 = __riscv_vwadd_wx_i16m2 (v7, 55, vl);
+ vint16m2_t vw8 = __riscv_vwadd_wx_i16m2 (v8, 55, vl);
+ vint16m2_t vw9 = __riscv_vwadd_wx_i16m2 (v9, 55, vl);
+ vint16m2_t vw10 = __riscv_vwadd_wx_i16m2 (v10, 55, vl);
+ vint16m2_t vw11 = __riscv_vwadd_wx_i16m2 (v11, 55, vl);
+ vint16m2_t vw12 = __riscv_vwadd_wx_i16m2 (v12, 55, vl);
+ vint16m2_t vw13 = __riscv_vwadd_wx_i16m2 (v13, 55, vl);
+ vint16m2_t vw14 = __riscv_vwadd_wx_i16m2 (v14, 55, vl);
+ vint16m2_t vw15 = __riscv_vwadd_wx_i16m2 (v15, 55, vl);
+
+ asm volatile ("" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m2_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m2_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m2_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m2_i16 (vw3);
+ size_t sum4 = __riscv_vmv_x_s_i16m2_i16 (vw4);
+ size_t sum5 = __riscv_vmv_x_s_i16m2_i16 (vw5);
+ size_t sum6 = __riscv_vmv_x_s_i16m2_i16 (vw6);
+ size_t sum7 = __riscv_vmv_x_s_i16m2_i16 (vw7);
+ size_t sum8 = __riscv_vmv_x_s_i16m2_i16 (vw8);
+ size_t sum9 = __riscv_vmv_x_s_i16m2_i16 (vw9);
+ size_t sum10 = __riscv_vmv_x_s_i16m2_i16 (vw10);
+ size_t sum11 = __riscv_vmv_x_s_i16m2_i16 (vw11);
+ size_t sum12 = __riscv_vmv_x_s_i16m2_i16 (vw12);
+ size_t sum13 = __riscv_vmv_x_s_i16m2_i16 (vw13);
+ size_t sum14 = __riscv_vmv_x_s_i16m2_i16 (vw14);
+ size_t sum15 = __riscv_vmv_x_s_i16m2_i16 (vw15);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+ sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+ }
+ return sum;
+}
+
+int
+main (int in, char **out)
+{
+ short const buf[1000];
+ int i = foo (buf, 4);
+ **out = i;
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-22.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-22.c
new file mode 100644
index 0000000..90db182
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-22.c
@@ -0,0 +1,188 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7, size_t sum8, size_t sum9,
+ size_t sum10, size_t sum11, size_t sum12, size_t sum13, size_t sum14,
+ size_t sum15)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+ + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint8m1_t v0 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v1 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v2 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v3 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v4 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v5 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v6 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v7 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v8 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v9 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v10 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v11 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v12 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v13 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v14 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v15 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m2_t vw0 = __riscv_vwadd_vx_i16m2 (v0, 33, vl);
+ vint16m2_t vw1 = __riscv_vwadd_vx_i16m2 (v1, 33, vl);
+ vint16m2_t vw2 = __riscv_vwadd_vx_i16m2 (v2, 33, vl);
+ vint16m2_t vw3 = __riscv_vwadd_vx_i16m2 (v3, 33, vl);
+ vint16m2_t vw4 = __riscv_vwadd_vx_i16m2 (v4, 33, vl);
+ vint16m2_t vw5 = __riscv_vwadd_vx_i16m2 (v5, 33, vl);
+ vint16m2_t vw6 = __riscv_vwadd_vx_i16m2 (v6, 33, vl);
+ vint16m2_t vw7 = __riscv_vwadd_vx_i16m2 (v7, 33, vl);
+ vint16m2_t vw8 = __riscv_vwadd_vx_i16m2 (v8, 33, vl);
+ vint16m2_t vw9 = __riscv_vwadd_vx_i16m2 (v9, 33, vl);
+ vint16m2_t vw10 = __riscv_vwadd_vx_i16m2 (v10, 33, vl);
+ vint16m2_t vw11 = __riscv_vwadd_vx_i16m2 (v11, 33, vl);
+ vint16m2_t vw12 = __riscv_vwadd_vx_i16m2 (v12, 33, vl);
+ vint16m2_t vw13 = __riscv_vwadd_vx_i16m2 (v13, 33, vl);
+ vint16m2_t vw14 = __riscv_vwadd_vx_i16m2 (v14, 33, vl);
+ vint16m2_t vw15 = __riscv_vwadd_vx_i16m2 (v15, 33, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m2_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m2_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m2_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m2_i16 (vw3);
+ size_t sum4 = __riscv_vmv_x_s_i16m2_i16 (vw4);
+ size_t sum5 = __riscv_vmv_x_s_i16m2_i16 (vw5);
+ size_t sum6 = __riscv_vmv_x_s_i16m2_i16 (vw6);
+ size_t sum7 = __riscv_vmv_x_s_i16m2_i16 (vw7);
+ size_t sum8 = __riscv_vmv_x_s_i16m2_i16 (vw8);
+ size_t sum9 = __riscv_vmv_x_s_i16m2_i16 (vw9);
+ size_t sum10 = __riscv_vmv_x_s_i16m2_i16 (vw10);
+ size_t sum11 = __riscv_vmv_x_s_i16m2_i16 (vw11);
+ size_t sum12 = __riscv_vmv_x_s_i16m2_i16 (vw12);
+ size_t sum13 = __riscv_vmv_x_s_i16m2_i16 (vw13);
+ size_t sum14 = __riscv_vmv_x_s_i16m2_i16 (vw14);
+ size_t sum15 = __riscv_vmv_x_s_i16m2_i16 (vw15);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+ sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+ }
+ return sum;
+}
+
+size_t
+foo2 (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint8m1_t v0 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v1 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v2 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v3 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v4 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v5 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v6 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v7 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v8 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v9 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v10 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v11 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v12 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v13 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v14 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v15 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m2_t vw0 = __riscv_vwmulsu_vx_i16m2 (v0, 33, vl);
+ vint16m2_t vw1 = __riscv_vwmulsu_vx_i16m2 (v1, 33, vl);
+ vint16m2_t vw2 = __riscv_vwmulsu_vx_i16m2 (v2, 33, vl);
+ vint16m2_t vw3 = __riscv_vwmulsu_vx_i16m2 (v3, 33, vl);
+ vint16m2_t vw4 = __riscv_vwmulsu_vx_i16m2 (v4, 33, vl);
+ vint16m2_t vw5 = __riscv_vwmulsu_vx_i16m2 (v5, 33, vl);
+ vint16m2_t vw6 = __riscv_vwmulsu_vx_i16m2 (v6, 33, vl);
+ vint16m2_t vw7 = __riscv_vwmulsu_vx_i16m2 (v7, 33, vl);
+ vint16m2_t vw8 = __riscv_vwmulsu_vx_i16m2 (v8, 33, vl);
+ vint16m2_t vw9 = __riscv_vwmulsu_vx_i16m2 (v9, 33, vl);
+ vint16m2_t vw10 = __riscv_vwmulsu_vx_i16m2 (v10, 33, vl);
+ vint16m2_t vw11 = __riscv_vwmulsu_vx_i16m2 (v11, 33, vl);
+ vint16m2_t vw12 = __riscv_vwmulsu_vx_i16m2 (v12, 33, vl);
+ vint16m2_t vw13 = __riscv_vwmulsu_vx_i16m2 (v13, 33, vl);
+ vint16m2_t vw14 = __riscv_vwmulsu_vx_i16m2 (v14, 33, vl);
+ vint16m2_t vw15 = __riscv_vwmulsu_vx_i16m2 (v15, 33, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m2_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m2_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m2_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m2_i16 (vw3);
+ size_t sum4 = __riscv_vmv_x_s_i16m2_i16 (vw4);
+ size_t sum5 = __riscv_vmv_x_s_i16m2_i16 (vw5);
+ size_t sum6 = __riscv_vmv_x_s_i16m2_i16 (vw6);
+ size_t sum7 = __riscv_vmv_x_s_i16m2_i16 (vw7);
+ size_t sum8 = __riscv_vmv_x_s_i16m2_i16 (vw8);
+ size_t sum9 = __riscv_vmv_x_s_i16m2_i16 (vw9);
+ size_t sum10 = __riscv_vmv_x_s_i16m2_i16 (vw10);
+ size_t sum11 = __riscv_vmv_x_s_i16m2_i16 (vw11);
+ size_t sum12 = __riscv_vmv_x_s_i16m2_i16 (vw12);
+ size_t sum13 = __riscv_vmv_x_s_i16m2_i16 (vw13);
+ size_t sum14 = __riscv_vmv_x_s_i16m2_i16 (vw14);
+ size_t sum15 = __riscv_vmv_x_s_i16m2_i16 (vw15);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+ sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-23.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-23.c
new file mode 100644
index 0000000..ee0b928
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-23.c
@@ -0,0 +1,119 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint8m2_t v0 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v1 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v2 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v3 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v4 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v5 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v6 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v7 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m4_t vw0 = __riscv_vwadd_vx_i16m4 (v0, 55, vl);
+ vint16m4_t vw1 = __riscv_vwadd_vx_i16m4 (v1, 55, vl);
+ vint16m4_t vw2 = __riscv_vwadd_vx_i16m4 (v2, 55, vl);
+ vint16m4_t vw3 = __riscv_vwadd_vx_i16m4 (v3, 55, vl);
+ vint16m4_t vw4 = __riscv_vwadd_vx_i16m4 (v4, 55, vl);
+ vint16m4_t vw5 = __riscv_vwadd_vx_i16m4 (v5, 55, vl);
+ vint16m4_t vw6 = __riscv_vwadd_vx_i16m4 (v6, 55, vl);
+ vint16m4_t vw7 = __riscv_vwadd_vx_i16m4 (v7, 55, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m4_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m4_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m4_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m4_i16 (vw3);
+ size_t sum4 = __riscv_vmv_x_s_i16m4_i16 (vw4);
+ size_t sum5 = __riscv_vmv_x_s_i16m4_i16 (vw5);
+ size_t sum6 = __riscv_vmv_x_s_i16m4_i16 (vw6);
+ size_t sum7 = __riscv_vmv_x_s_i16m4_i16 (vw7);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+ }
+ return sum;
+}
+
+size_t
+foo2 (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint8m2_t v0 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v1 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v2 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v3 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v4 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v5 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v6 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v7 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m4_t vw0 = __riscv_vwmulsu_vx_i16m4 (v0, 55, vl);
+ vint16m4_t vw1 = __riscv_vwmulsu_vx_i16m4 (v1, 55, vl);
+ vint16m4_t vw2 = __riscv_vwmulsu_vx_i16m4 (v2, 55, vl);
+ vint16m4_t vw3 = __riscv_vwmulsu_vx_i16m4 (v3, 55, vl);
+ vint16m4_t vw4 = __riscv_vwmulsu_vx_i16m4 (v4, 55, vl);
+ vint16m4_t vw5 = __riscv_vwmulsu_vx_i16m4 (v5, 55, vl);
+ vint16m4_t vw6 = __riscv_vwmulsu_vx_i16m4 (v6, 55, vl);
+ vint16m4_t vw7 = __riscv_vwmulsu_vx_i16m4 (v7, 55, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m4_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m4_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m4_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m4_i16 (vw3);
+ size_t sum4 = __riscv_vmv_x_s_i16m4_i16 (vw4);
+ size_t sum5 = __riscv_vmv_x_s_i16m4_i16 (vw5);
+ size_t sum6 = __riscv_vmv_x_s_i16m4_i16 (vw6);
+ size_t sum7 = __riscv_vmv_x_s_i16m4_i16 (vw7);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-24.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-24.c
new file mode 100644
index 0000000..603e294
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-24.c
@@ -0,0 +1,86 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3)
+{
+ return sum0 + sum1 + sum2 + sum3;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint8m4_t v0 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+ vint8m4_t v1 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+ vint8m4_t v2 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+ vint8m4_t v3 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m8_t vw0 = __riscv_vwadd_vx_i16m8 (v0, 66, vl);
+ vint16m8_t vw1 = __riscv_vwadd_vx_i16m8 (v1, 66, vl);
+ vint16m8_t vw2 = __riscv_vwadd_vx_i16m8 (v2, 66, vl);
+ vint16m8_t vw3 = __riscv_vwadd_vx_i16m8 (v3, 66, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m8_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m8_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m8_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m8_i16 (vw3);
+
+ sum += sumation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+size_t
+foo2 (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint8m4_t v0 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+ vint8m4_t v1 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+ vint8m4_t v2 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+ vint8m4_t v3 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m8_t vw0 = __riscv_vwmulsu_vx_i16m8 (v0, 66, vl);
+ vint16m8_t vw1 = __riscv_vwmulsu_vx_i16m8 (v1, 66, vl);
+ vint16m8_t vw2 = __riscv_vwmulsu_vx_i16m8 (v2, 66, vl);
+ vint16m8_t vw3 = __riscv_vwmulsu_vx_i16m8 (v3, 66, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m8_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m8_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m8_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m8_i16 (vw3);
+
+ sum += sumation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-25.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-25.c
new file mode 100644
index 0000000..0b52b9f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-25.c
@@ -0,0 +1,104 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7, size_t sum8, size_t sum9,
+ size_t sum10, size_t sum11, size_t sum12, size_t sum13, size_t sum14,
+ size_t sum15)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+ + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v8 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v9 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v10 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v11 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v12 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v13 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v14 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v15 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vfloat64m2_t vw0 = __riscv_vfwadd_vf_f64m2 (v0, 33, vl);
+ vfloat64m2_t vw1 = __riscv_vfwadd_vf_f64m2 (v1, 33, vl);
+ vfloat64m2_t vw2 = __riscv_vfwadd_vf_f64m2 (v2, 33, vl);
+ vfloat64m2_t vw3 = __riscv_vfwadd_vf_f64m2 (v3, 33, vl);
+ vfloat64m2_t vw4 = __riscv_vfwadd_vf_f64m2 (v4, 33, vl);
+ vfloat64m2_t vw5 = __riscv_vfwadd_vf_f64m2 (v5, 33, vl);
+ vfloat64m2_t vw6 = __riscv_vfwadd_vf_f64m2 (v6, 33, vl);
+ vfloat64m2_t vw7 = __riscv_vfwadd_vf_f64m2 (v7, 33, vl);
+ vfloat64m2_t vw8 = __riscv_vfwadd_vf_f64m2 (v8, 33, vl);
+ vfloat64m2_t vw9 = __riscv_vfwadd_vf_f64m2 (v9, 33, vl);
+ vfloat64m2_t vw10 = __riscv_vfwadd_vf_f64m2 (v10, 33, vl);
+ vfloat64m2_t vw11 = __riscv_vfwadd_vf_f64m2 (v11, 33, vl);
+ vfloat64m2_t vw12 = __riscv_vfwadd_vf_f64m2 (v12, 33, vl);
+ vfloat64m2_t vw13 = __riscv_vfwadd_vf_f64m2 (v13, 33, vl);
+ vfloat64m2_t vw14 = __riscv_vfwadd_vf_f64m2 (v14, 33, vl);
+ vfloat64m2_t vw15 = __riscv_vfwadd_vf_f64m2 (v15, 33, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vfmv_f_s_f64m2_f64 (vw0);
+ size_t sum1 = __riscv_vfmv_f_s_f64m2_f64 (vw1);
+ size_t sum2 = __riscv_vfmv_f_s_f64m2_f64 (vw2);
+ size_t sum3 = __riscv_vfmv_f_s_f64m2_f64 (vw3);
+ size_t sum4 = __riscv_vfmv_f_s_f64m2_f64 (vw4);
+ size_t sum5 = __riscv_vfmv_f_s_f64m2_f64 (vw5);
+ size_t sum6 = __riscv_vfmv_f_s_f64m2_f64 (vw6);
+ size_t sum7 = __riscv_vfmv_f_s_f64m2_f64 (vw7);
+ size_t sum8 = __riscv_vfmv_f_s_f64m2_f64 (vw8);
+ size_t sum9 = __riscv_vfmv_f_s_f64m2_f64 (vw9);
+ size_t sum10 = __riscv_vfmv_f_s_f64m2_f64 (vw10);
+ size_t sum11 = __riscv_vfmv_f_s_f64m2_f64 (vw11);
+ size_t sum12 = __riscv_vfmv_f_s_f64m2_f64 (vw12);
+ size_t sum13 = __riscv_vfmv_f_s_f64m2_f64 (vw13);
+ size_t sum14 = __riscv_vfmv_f_s_f64m2_f64 (vw14);
+ size_t sum15 = __riscv_vfmv_f_s_f64m2_f64 (vw15);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+ sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-26.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-26.c
new file mode 100644
index 0000000..d21a737
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-26.c
@@ -0,0 +1,68 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m2_t v0 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v1 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v2 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v3 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v4 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v5 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v6 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v7 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vfloat64m4_t vw0 = __riscv_vfwadd_vf_f64m4 (v0, 33, vl);
+ vfloat64m4_t vw1 = __riscv_vfwadd_vf_f64m4 (v1, 33, vl);
+ vfloat64m4_t vw2 = __riscv_vfwadd_vf_f64m4 (v2, 33, vl);
+ vfloat64m4_t vw3 = __riscv_vfwadd_vf_f64m4 (v3, 33, vl);
+ vfloat64m4_t vw4 = __riscv_vfwadd_vf_f64m4 (v4, 33, vl);
+ vfloat64m4_t vw5 = __riscv_vfwadd_vf_f64m4 (v5, 33, vl);
+ vfloat64m4_t vw6 = __riscv_vfwadd_vf_f64m4 (v6, 33, vl);
+ vfloat64m4_t vw7 = __riscv_vfwadd_vf_f64m4 (v7, 33, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vfmv_f_s_f64m4_f64 (vw0);
+ size_t sum1 = __riscv_vfmv_f_s_f64m4_f64 (vw1);
+ size_t sum2 = __riscv_vfmv_f_s_f64m4_f64 (vw2);
+ size_t sum3 = __riscv_vfmv_f_s_f64m4_f64 (vw3);
+ size_t sum4 = __riscv_vfmv_f_s_f64m4_f64 (vw4);
+ size_t sum5 = __riscv_vfmv_f_s_f64m4_f64 (vw5);
+ size_t sum6 = __riscv_vfmv_f_s_f64m4_f64 (vw6);
+ size_t sum7 = __riscv_vfmv_f_s_f64m4_f64 (vw7);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-27.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-27.c
new file mode 100644
index 0000000..2423f7b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-27.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3)
+{
+ return sum0 + sum1 + sum2 + sum3;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m4_t v0 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+ it += vl;
+ vfloat32m4_t v1 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+ it += vl;
+ vfloat32m4_t v2 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+ it += vl;
+ vfloat32m4_t v3 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vfloat64m8_t vw0 = __riscv_vfwadd_vf_f64m8 (v0, 33, vl);
+ vfloat64m8_t vw1 = __riscv_vfwadd_vf_f64m8 (v1, 33, vl);
+ vfloat64m8_t vw2 = __riscv_vfwadd_vf_f64m8 (v2, 33, vl);
+ vfloat64m8_t vw3 = __riscv_vfwadd_vf_f64m8 (v3, 33, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vfmv_f_s_f64m8_f64 (vw0);
+ size_t sum1 = __riscv_vfmv_f_s_f64m8_f64 (vw1);
+ size_t sum2 = __riscv_vfmv_f_s_f64m8_f64 (vw2);
+ size_t sum3 = __riscv_vfmv_f_s_f64m8_f64 (vw3);
+
+ sum += sumation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-28.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-28.c
new file mode 100644
index 0000000..d81afd2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-28.c
@@ -0,0 +1,104 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7, size_t sum8, size_t sum9,
+ size_t sum10, size_t sum11, size_t sum12, size_t sum13, size_t sum14,
+ size_t sum15)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+ + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vuint8m1_t v0 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v1 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v2 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v3 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v4 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v5 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v6 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v7 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v8 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v9 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v10 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v11 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v12 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v13 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v14 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v15 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m2_t vw0 = __riscv_vluxei8_v_i16m2 ((void *) it, v0, vl);
+ vint16m2_t vw1 = __riscv_vluxei8_v_i16m2 ((void *) it, v1, vl);
+ vint16m2_t vw2 = __riscv_vluxei8_v_i16m2 ((void *) it, v2, vl);
+ vint16m2_t vw3 = __riscv_vluxei8_v_i16m2 ((void *) it, v3, vl);
+ vint16m2_t vw4 = __riscv_vluxei8_v_i16m2 ((void *) it, v4, vl);
+ vint16m2_t vw5 = __riscv_vluxei8_v_i16m2 ((void *) it, v5, vl);
+ vint16m2_t vw6 = __riscv_vluxei8_v_i16m2 ((void *) it, v6, vl);
+ vint16m2_t vw7 = __riscv_vluxei8_v_i16m2 ((void *) it, v7, vl);
+ vint16m2_t vw8 = __riscv_vluxei8_v_i16m2 ((void *) it, v8, vl);
+ vint16m2_t vw9 = __riscv_vluxei8_v_i16m2 ((void *) it, v9, vl);
+ vint16m2_t vw10 = __riscv_vluxei8_v_i16m2 ((void *) it, v10, vl);
+ vint16m2_t vw11 = __riscv_vluxei8_v_i16m2 ((void *) it, v11, vl);
+ vint16m2_t vw12 = __riscv_vluxei8_v_i16m2 ((void *) it, v12, vl);
+ vint16m2_t vw13 = __riscv_vluxei8_v_i16m2 ((void *) it, v13, vl);
+ vint16m2_t vw14 = __riscv_vluxei8_v_i16m2 ((void *) it, v14, vl);
+ vint16m2_t vw15 = __riscv_vluxei8_v_i16m2 ((void *) it, v15, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m2_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m2_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m2_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m2_i16 (vw3);
+ size_t sum4 = __riscv_vmv_x_s_i16m2_i16 (vw4);
+ size_t sum5 = __riscv_vmv_x_s_i16m2_i16 (vw5);
+ size_t sum6 = __riscv_vmv_x_s_i16m2_i16 (vw6);
+ size_t sum7 = __riscv_vmv_x_s_i16m2_i16 (vw7);
+ size_t sum8 = __riscv_vmv_x_s_i16m2_i16 (vw8);
+ size_t sum9 = __riscv_vmv_x_s_i16m2_i16 (vw9);
+ size_t sum10 = __riscv_vmv_x_s_i16m2_i16 (vw10);
+ size_t sum11 = __riscv_vmv_x_s_i16m2_i16 (vw11);
+ size_t sum12 = __riscv_vmv_x_s_i16m2_i16 (vw12);
+ size_t sum13 = __riscv_vmv_x_s_i16m2_i16 (vw13);
+ size_t sum14 = __riscv_vmv_x_s_i16m2_i16 (vw14);
+ size_t sum15 = __riscv_vmv_x_s_i16m2_i16 (vw15);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+ sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-29.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-29.c
new file mode 100644
index 0000000..2f8adb8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-29.c
@@ -0,0 +1,68 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vuint8m2_t v0 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+ vuint8m2_t v1 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+ vuint8m2_t v2 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+ vuint8m2_t v3 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+ vuint8m2_t v4 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+ vuint8m2_t v5 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+ vuint8m2_t v6 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+ vuint8m2_t v7 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m4_t vw0 = __riscv_vluxei8_v_i16m4 ((void *) it, v0, vl);
+ vint16m4_t vw1 = __riscv_vluxei8_v_i16m4 ((void *) it, v1, vl);
+ vint16m4_t vw2 = __riscv_vluxei8_v_i16m4 ((void *) it, v2, vl);
+ vint16m4_t vw3 = __riscv_vluxei8_v_i16m4 ((void *) it, v3, vl);
+ vint16m4_t vw4 = __riscv_vluxei8_v_i16m4 ((void *) it, v4, vl);
+ vint16m4_t vw5 = __riscv_vluxei8_v_i16m4 ((void *) it, v5, vl);
+ vint16m4_t vw6 = __riscv_vluxei8_v_i16m4 ((void *) it, v6, vl);
+ vint16m4_t vw7 = __riscv_vluxei8_v_i16m4 ((void *) it, v7, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m4_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m4_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m4_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m4_i16 (vw3);
+ size_t sum4 = __riscv_vmv_x_s_i16m4_i16 (vw4);
+ size_t sum5 = __riscv_vmv_x_s_i16m4_i16 (vw5);
+ size_t sum6 = __riscv_vmv_x_s_i16m4_i16 (vw6);
+ size_t sum7 = __riscv_vmv_x_s_i16m4_i16 (vw7);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-3.c
new file mode 100644
index 0000000..46f93a9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-3.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3)
+{
+ return sum0 + sum1 + sum2 + sum3;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint8m4_t v0 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+ vint8m4_t v1 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+ vint8m4_t v2 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+ vint8m4_t v3 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m8_t vw0 = __riscv_vwcvt_x_x_v_i16m8 (v0, vl);
+ vint16m8_t vw1 = __riscv_vwcvt_x_x_v_i16m8 (v1, vl);
+ vint16m8_t vw2 = __riscv_vwcvt_x_x_v_i16m8 (v2, vl);
+ vint16m8_t vw3 = __riscv_vwcvt_x_x_v_i16m8 (v3, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m8_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m8_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m8_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m8_i16 (vw3);
+
+ sum += sumation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-30.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-30.c
new file mode 100644
index 0000000..d3ce988
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-30.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3)
+{
+ return sum0 + sum1 + sum2 + sum3;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vuint8m4_t v0 = __riscv_vle8_v_u8m4 ((void *) it, vl);
+ it += vl;
+ vuint8m4_t v1 = __riscv_vle8_v_u8m4 ((void *) it, vl);
+ it += vl;
+ vuint8m4_t v2 = __riscv_vle8_v_u8m4 ((void *) it, vl);
+ it += vl;
+ vuint8m4_t v3 = __riscv_vle8_v_u8m4 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m8_t vw0 = __riscv_vluxei8_v_i16m8 ((void *) it, v0, vl);
+ vint16m8_t vw1 = __riscv_vluxei8_v_i16m8 ((void *) it, v1, vl);
+ vint16m8_t vw2 = __riscv_vluxei8_v_i16m8 ((void *) it, v2, vl);
+ vint16m8_t vw3 = __riscv_vluxei8_v_i16m8 ((void *) it, v3, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m8_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m8_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m8_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m8_i16 (vw3);
+
+ sum += sumation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-31.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-31.c
new file mode 100644
index 0000000..72b928a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-31.c
@@ -0,0 +1,68 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vuint8m1_t v0 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v1 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v2 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v3 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v4 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v5 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v6 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v7 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint32m4_t vw0 = __riscv_vluxei8_v_i32m4 ((void *) it, v0, vl);
+ vint32m4_t vw1 = __riscv_vluxei8_v_i32m4 ((void *) it, v1, vl);
+ vint32m4_t vw2 = __riscv_vluxei8_v_i32m4 ((void *) it, v2, vl);
+ vint32m4_t vw3 = __riscv_vluxei8_v_i32m4 ((void *) it, v3, vl);
+ vint32m4_t vw4 = __riscv_vluxei8_v_i32m4 ((void *) it, v4, vl);
+ vint32m4_t vw5 = __riscv_vluxei8_v_i32m4 ((void *) it, v5, vl);
+ vint32m4_t vw6 = __riscv_vluxei8_v_i32m4 ((void *) it, v6, vl);
+ vint32m4_t vw7 = __riscv_vluxei8_v_i32m4 ((void *) it, v7, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i32m4_i32 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i32m4_i32 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i32m4_i32 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i32m4_i32 (vw3);
+ size_t sum4 = __riscv_vmv_x_s_i32m4_i32 (vw4);
+ size_t sum5 = __riscv_vmv_x_s_i32m4_i32 (vw5);
+ size_t sum6 = __riscv_vmv_x_s_i32m4_i32 (vw6);
+ size_t sum7 = __riscv_vmv_x_s_i32m4_i32 (vw7);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-32.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-32.c
new file mode 100644
index 0000000..273c5fc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-32.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3)
+{
+ return sum0 + sum1 + sum2 + sum3;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vuint8m2_t v0 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+ vuint8m2_t v1 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+ vuint8m2_t v2 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+ vuint8m2_t v3 = __riscv_vle8_v_u8m2 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint32m8_t vw0 = __riscv_vluxei8_v_i32m8 ((void *) it, v0, vl);
+ vint32m8_t vw1 = __riscv_vluxei8_v_i32m8 ((void *) it, v1, vl);
+ vint32m8_t vw2 = __riscv_vluxei8_v_i32m8 ((void *) it, v2, vl);
+ vint32m8_t vw3 = __riscv_vluxei8_v_i32m8 ((void *) it, v3, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i32m8_i32 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i32m8_i32 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i32m8_i32 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i32m8_i32 (vw3);
+
+ sum += sumation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-33.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-33.c
new file mode 100644
index 0000000..a5c2ad1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-33.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3)
+{
+ return sum0 + sum1 + sum2 + sum3;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vuint8m1_t v0 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v1 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v2 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v3 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint64m8_t vw0 = __riscv_vluxei8_v_i64m8 ((void *) it, v0, vl);
+ vint64m8_t vw1 = __riscv_vluxei8_v_i64m8 ((void *) it, v1, vl);
+ vint64m8_t vw2 = __riscv_vluxei8_v_i64m8 ((void *) it, v2, vl);
+ vint64m8_t vw3 = __riscv_vluxei8_v_i64m8 ((void *) it, v3, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i64m8_i64 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i64m8_i64 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i64m8_i64 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i64m8_i64 (vw3);
+
+ sum += sumation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-34.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-34.c
new file mode 100644
index 0000000..80ea65b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-34.c
@@ -0,0 +1,101 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7, size_t sum8, size_t sum9,
+ size_t sum10, size_t sum11, size_t sum12, size_t sum13, size_t sum14,
+ size_t sum15)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+ + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vuint8m1_t v0 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v1 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v2 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v3 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v4 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v5 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v6 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v7 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v8 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v9 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v10 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v11 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v12 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v13 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v14 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+ vuint8m1_t v15 = __riscv_vle8_v_u8m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m2_t vw0 = __riscv_vluxei8_v_i16m2 ((void *) it, v0, vl);
+ vint16m2_t vw1 = __riscv_vluxei8_v_i16m2 ((void *) it, v1, vl);
+ vint16m2_t vw2 = __riscv_vluxei8_v_i16m2 ((void *) it, v2, vl);
+ vint16m2_t vw3 = __riscv_vluxei8_v_i16m2 ((void *) it, v3, vl);
+ vint16m2_t vw4 = __riscv_vluxei8_v_i16m2 ((void *) it, v4, vl);
+ vint16m2_t vw5 = __riscv_vluxei8_v_i16m2 ((void *) it, v5, vl);
+ vint16m2_t vw6 = __riscv_vluxei8_v_i16m2 ((void *) it, v6, vl);
+ vint16m2_t vw7 = __riscv_vluxei8_v_i16m2 ((void *) it, v7, vl);
+ vint16m2_t vw8 = __riscv_vluxei8_v_i16m2 ((void *) it, v8, vl);
+ vint16m2_t vw9 = __riscv_vluxei8_v_i16m2 ((void *) it, v9, vl);
+ vint16m2_t vw10 = __riscv_vluxei8_v_i16m2 ((void *) it, v10, vl);
+ vint16m2_t vw11 = __riscv_vluxei8_v_i16m2 ((void *) it, v11, vl);
+ vint16m2_t vw12 = __riscv_vluxei8_v_i16m2 ((void *) it, v12, vl);
+ vint16m2_t vw13 = __riscv_vluxei8_v_i16m2 ((void *) it, v13, vl);
+ vint16m2_t vw14 = __riscv_vluxei8_v_i16m2 ((void *) it, v14, vl);
+ vbool8_t mask = *(vbool8_t*)it;
+ vint16m2_t vw15 = __riscv_vluxei8_v_i16m2_m (mask, (void *) it, v15, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m2_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m2_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m2_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m2_i16 (vw3);
+ size_t sum4 = __riscv_vmv_x_s_i16m2_i16 (vw4);
+ size_t sum5 = __riscv_vmv_x_s_i16m2_i16 (vw5);
+ size_t sum6 = __riscv_vmv_x_s_i16m2_i16 (vw6);
+ size_t sum7 = __riscv_vmv_x_s_i16m2_i16 (vw7);
+ size_t sum8 = __riscv_vmv_x_s_i16m2_i16 (vw8);
+ size_t sum9 = __riscv_vmv_x_s_i16m2_i16 (vw9);
+ size_t sum10 = __riscv_vmv_x_s_i16m2_i16 (vw10);
+ size_t sum11 = __riscv_vmv_x_s_i16m2_i16 (vw11);
+ size_t sum12 = __riscv_vmv_x_s_i16m2_i16 (vw12);
+ size_t sum13 = __riscv_vmv_x_s_i16m2_i16 (vw13);
+ size_t sum14 = __riscv_vmv_x_s_i16m2_i16 (vw14);
+ size_t sum15 = __riscv_vmv_x_s_i16m2_i16 (vw15);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+ sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vluxei8\.v\tv0,\s*\([a-x0-9]+\),\s*v[0-9]+,\s*v0.t} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-35.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-35.c
new file mode 100644
index 0000000..6f72e93
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-35.c
@@ -0,0 +1,107 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7,
+ size_t sum0_2, size_t sum1_2, size_t sum2_2, size_t sum3_2, size_t sum4_2,
+ size_t sum5_2, size_t sum6_2, size_t sum7_2)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7
+ + sum0_2 + sum1_2 + sum2_2 + sum3_2 + sum4_2 + sum5_2 + sum6_2 + sum7_2;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint8m2_t v0 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v1 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v2 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v3 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v4 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v5 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v6 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v7 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+
+ vint16m1_t vw0 = __riscv_vle16_v_i16m1 ((void *) it, vl);
+ it += vl;
+ vint16m1_t vw1 = __riscv_vle16_v_i16m1 ((void *) it, vl);
+ it += vl;
+ vint16m1_t vw2 = __riscv_vle16_v_i16m1 ((void *) it, vl);
+ it += vl;
+ vint16m1_t vw3 = __riscv_vle16_v_i16m1 ((void *) it, vl);
+ it += vl;
+ vint16m1_t vw4 = __riscv_vle16_v_i16m1 ((void *) it, vl);
+ it += vl;
+ vint16m1_t vw5 = __riscv_vle16_v_i16m1 ((void *) it, vl);
+ it += vl;
+ vint16m1_t vw6 = __riscv_vle16_v_i16m1 ((void *) it, vl);
+ it += vl;
+ vint16m1_t vw7 = __riscv_vle16_v_i16m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m1_t vw0_2 = __riscv_vwredsum_vs_i8m2_i16m1 (v0, vw0, vl);
+ vint16m1_t vw1_2 = __riscv_vwredsum_vs_i8m2_i16m1 (v1, vw1, vl);
+ vint16m1_t vw2_2 = __riscv_vwredsum_vs_i8m2_i16m1 (v2, vw2, vl);
+ vint16m1_t vw3_2 = __riscv_vwredsum_vs_i8m2_i16m1 (v3, vw3, vl);
+ vint16m1_t vw4_2 = __riscv_vwredsum_vs_i8m2_i16m1 (v4, vw4, vl);
+ vint16m1_t vw5_2 = __riscv_vwredsum_vs_i8m2_i16m1 (v5, vw5, vl);
+ vint16m1_t vw6_2 = __riscv_vwredsum_vs_i8m2_i16m1 (v6, vw6, vl);
+ vint16m1_t vw7_2 = __riscv_vwredsum_vs_i8m2_i16m1 (v7, vw7, vl);
+
+ vw0 = __riscv_vwredsum_vs_i8m2_i16m1 (v0, vw0_2, vl);
+ vw1 = __riscv_vwredsum_vs_i8m2_i16m1 (v1, vw1_2, vl);
+ vw2 = __riscv_vwredsum_vs_i8m2_i16m1 (v2, vw2_2, vl);
+ vw3 = __riscv_vwredsum_vs_i8m2_i16m1 (v3, vw3_2, vl);
+ vw4 = __riscv_vwredsum_vs_i8m2_i16m1 (v4, vw4_2, vl);
+ vw5 = __riscv_vwredsum_vs_i8m2_i16m1 (v5, vw5_2, vl);
+ vw6 = __riscv_vwredsum_vs_i8m2_i16m1 (v6, vw6_2, vl);
+ vw7 = __riscv_vwredsum_vs_i8m2_i16m1 (v7, vw7_2, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m1_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m1_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m1_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m1_i16 (vw3);
+ size_t sum4 = __riscv_vmv_x_s_i16m1_i16 (vw4);
+ size_t sum5 = __riscv_vmv_x_s_i16m1_i16 (vw5);
+ size_t sum6 = __riscv_vmv_x_s_i16m1_i16 (vw6);
+ size_t sum7 = __riscv_vmv_x_s_i16m1_i16 (vw7);
+
+ size_t sum0_2 = __riscv_vmv_x_s_i16m1_i16 (vw0_2);
+ size_t sum1_2 = __riscv_vmv_x_s_i16m1_i16 (vw1_2);
+ size_t sum2_2 = __riscv_vmv_x_s_i16m1_i16 (vw2_2);
+ size_t sum3_2 = __riscv_vmv_x_s_i16m1_i16 (vw3_2);
+ size_t sum4_2 = __riscv_vmv_x_s_i16m1_i16 (vw4_2);
+ size_t sum5_2 = __riscv_vmv_x_s_i16m1_i16 (vw5_2);
+ size_t sum6_2 = __riscv_vmv_x_s_i16m1_i16 (vw6_2);
+ size_t sum7_2 = __riscv_vmv_x_s_i16m1_i16 (vw7_2);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7,
+ sum0_2, sum1_2, sum2_2, sum3_2, sum4_2, sum5_2, sum6_2, sum7_2);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-36.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-36.c
new file mode 100644
index 0000000..7756bdb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-36.c
@@ -0,0 +1,107 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7,
+ size_t sum0_2, size_t sum1_2, size_t sum2_2, size_t sum3_2, size_t sum4_2,
+ size_t sum5_2, size_t sum6_2, size_t sum7_2)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7
+ + sum0_2 + sum1_2 + sum2_2 + sum3_2 + sum4_2 + sum5_2 + sum6_2 + sum7_2;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m2_t v0 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v1 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v2 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v3 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v4 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v5 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v6 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v7 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+
+ vfloat64m1_t vw0 = __riscv_vle64_v_f64m1 ((void *) it, vl);
+ it += vl;
+ vfloat64m1_t vw1 = __riscv_vle64_v_f64m1 ((void *) it, vl);
+ it += vl;
+ vfloat64m1_t vw2 = __riscv_vle64_v_f64m1 ((void *) it, vl);
+ it += vl;
+ vfloat64m1_t vw3 = __riscv_vle64_v_f64m1 ((void *) it, vl);
+ it += vl;
+ vfloat64m1_t vw4 = __riscv_vle64_v_f64m1 ((void *) it, vl);
+ it += vl;
+ vfloat64m1_t vw5 = __riscv_vle64_v_f64m1 ((void *) it, vl);
+ it += vl;
+ vfloat64m1_t vw6 = __riscv_vle64_v_f64m1 ((void *) it, vl);
+ it += vl;
+ vfloat64m1_t vw7 = __riscv_vle64_v_f64m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vfloat64m1_t vw0_2 = __riscv_vfwredusum_vs_f32m2_f64m1 (v0, vw0, vl);
+ vfloat64m1_t vw1_2 = __riscv_vfwredusum_vs_f32m2_f64m1 (v1, vw1, vl);
+ vfloat64m1_t vw2_2 = __riscv_vfwredusum_vs_f32m2_f64m1 (v2, vw2, vl);
+ vfloat64m1_t vw3_2 = __riscv_vfwredusum_vs_f32m2_f64m1 (v3, vw3, vl);
+ vfloat64m1_t vw4_2 = __riscv_vfwredusum_vs_f32m2_f64m1 (v4, vw4, vl);
+ vfloat64m1_t vw5_2 = __riscv_vfwredusum_vs_f32m2_f64m1 (v5, vw5, vl);
+ vfloat64m1_t vw6_2 = __riscv_vfwredusum_vs_f32m2_f64m1 (v6, vw6, vl);
+ vfloat64m1_t vw7_2 = __riscv_vfwredusum_vs_f32m2_f64m1 (v7, vw7, vl);
+
+ vw0 = __riscv_vfwredusum_vs_f32m2_f64m1 (v0, vw0_2, vl);
+ vw1 = __riscv_vfwredusum_vs_f32m2_f64m1 (v1, vw1_2, vl);
+ vw2 = __riscv_vfwredusum_vs_f32m2_f64m1 (v2, vw2_2, vl);
+ vw3 = __riscv_vfwredusum_vs_f32m2_f64m1 (v3, vw3_2, vl);
+ vw4 = __riscv_vfwredusum_vs_f32m2_f64m1 (v4, vw4_2, vl);
+ vw5 = __riscv_vfwredusum_vs_f32m2_f64m1 (v5, vw5_2, vl);
+ vw6 = __riscv_vfwredusum_vs_f32m2_f64m1 (v6, vw6_2, vl);
+ vw7 = __riscv_vfwredusum_vs_f32m2_f64m1 (v7, vw7_2, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vfmv_f_s_f64m1_f64 (vw0);
+ size_t sum1 = __riscv_vfmv_f_s_f64m1_f64 (vw1);
+ size_t sum2 = __riscv_vfmv_f_s_f64m1_f64 (vw2);
+ size_t sum3 = __riscv_vfmv_f_s_f64m1_f64 (vw3);
+ size_t sum4 = __riscv_vfmv_f_s_f64m1_f64 (vw4);
+ size_t sum5 = __riscv_vfmv_f_s_f64m1_f64 (vw5);
+ size_t sum6 = __riscv_vfmv_f_s_f64m1_f64 (vw6);
+ size_t sum7 = __riscv_vfmv_f_s_f64m1_f64 (vw7);
+
+ size_t sum0_2 = __riscv_vfmv_f_s_f64m1_f64 (vw0_2);
+ size_t sum1_2 = __riscv_vfmv_f_s_f64m1_f64 (vw1_2);
+ size_t sum2_2 = __riscv_vfmv_f_s_f64m1_f64 (vw2_2);
+ size_t sum3_2 = __riscv_vfmv_f_s_f64m1_f64 (vw3_2);
+ size_t sum4_2 = __riscv_vfmv_f_s_f64m1_f64 (vw4_2);
+ size_t sum5_2 = __riscv_vfmv_f_s_f64m1_f64 (vw5_2);
+ size_t sum6_2 = __riscv_vfmv_f_s_f64m1_f64 (vw6_2);
+ size_t sum7_2 = __riscv_vfmv_f_s_f64m1_f64 (vw7_2);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7,
+ sum0_2, sum1_2, sum2_2, sum3_2, sum4_2, sum5_2, sum6_2, sum7_2);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-37.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-37.c
new file mode 100644
index 0000000..6337ff8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-37.c
@@ -0,0 +1,103 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+void
+foo (void *in, void *out)
+{
+ vint16m2_t accum = __riscv_vle16_v_i16m2 (in, 4);
+ vint16m1_t high_eew16 = __riscv_vget_v_i16m2_i16m1 (accum, 1);
+ vint8m1_t high_eew8 = __riscv_vreinterpret_v_i16m1_i8m1 (high_eew16);
+ vint16m2_t result = __riscv_vwmacc_vx_i16m2 (accum, 16, high_eew8, 4);
+ __riscv_vse16_v_i16m2 (out, result, 4);
+}
+
+void
+foo2 (void *in, void *out)
+{
+ vint16m4_t accum = __riscv_vle16_v_i16m4 (in, 4);
+ vint16m2_t high_eew16 = __riscv_vget_v_i16m4_i16m2 (accum, 1);
+ vint8m2_t high_eew8 = __riscv_vreinterpret_v_i16m2_i8m2 (high_eew16);
+ vint16m4_t result = __riscv_vwmacc_vx_i16m4 (accum, 16, high_eew8, 4);
+ __riscv_vse16_v_i16m4 (out, result, 4);
+}
+
+void
+foo3 (void *in, void *out)
+{
+ vint16m8_t accum = __riscv_vle16_v_i16m8 (in, 4);
+ vint16m4_t high_eew16 = __riscv_vget_v_i16m8_i16m4 (accum, 1);
+ vint8m4_t high_eew8 = __riscv_vreinterpret_v_i16m4_i8m4 (high_eew16);
+ vint16m8_t result = __riscv_vwmacc_vx_i16m8 (accum, 16, high_eew8, 4);
+ __riscv_vse16_v_i16m8 (out, result, 4);
+}
+
+void
+foo4 (void *in, void *out)
+{
+ vint16m2_t accum = __riscv_vle16_v_i16m2 (in, 4);
+ vint16m1_t high_eew16 = __riscv_vget_v_i16m2_i16m1 (accum, 1);
+ vint8m1_t high_eew8 = __riscv_vreinterpret_v_i16m1_i8m1 (high_eew16);
+ vint16m2_t result = __riscv_vwmaccus_vx_i16m2 (accum, 16, high_eew8, 4);
+ __riscv_vse16_v_i16m2 (out, result, 4);
+}
+
+void
+foo5 (void *in, void *out)
+{
+ vint16m4_t accum = __riscv_vle16_v_i16m4 (in, 4);
+ vint16m2_t high_eew16 = __riscv_vget_v_i16m4_i16m2 (accum, 1);
+ vint8m2_t high_eew8 = __riscv_vreinterpret_v_i16m2_i8m2 (high_eew16);
+ vint16m4_t result = __riscv_vwmaccus_vx_i16m4 (accum, 16, high_eew8, 4);
+ __riscv_vse16_v_i16m4 (out, result, 4);
+}
+
+void
+foo6 (void *in, void *out)
+{
+ vint16m8_t accum = __riscv_vle16_v_i16m8 (in, 4);
+ vint16m4_t high_eew16 = __riscv_vget_v_i16m8_i16m4 (accum, 1);
+ vint8m4_t high_eew8 = __riscv_vreinterpret_v_i16m4_i8m4 (high_eew16);
+ vint16m8_t result = __riscv_vwmaccus_vx_i16m8 (accum, 16, high_eew8, 4);
+ __riscv_vse16_v_i16m8 (out, result, 4);
+}
+
+void
+foo7 (void *in, void *out)
+{
+ vint16m2_t accum = __riscv_vle16_v_i16m2 (in, 4);
+ vint16m1_t high_eew16 = __riscv_vget_v_i16m2_i16m1 (accum, 1);
+ vint8m1_t high_eew8 = __riscv_vreinterpret_v_i16m1_i8m1 (high_eew16);
+ vuint8m1_t high_ueew8 = __riscv_vreinterpret_v_i8m1_u8m1 (high_eew8);
+ vint16m2_t result = __riscv_vwmaccsu_vx_i16m2 (accum, 16, high_ueew8, 4);
+ __riscv_vse16_v_i16m2 (out, result, 4);
+}
+
+void
+foo8 (void *in, void *out)
+{
+ vint16m4_t accum = __riscv_vle16_v_i16m4 (in, 4);
+ vint16m2_t high_eew16 = __riscv_vget_v_i16m4_i16m2 (accum, 1);
+ vint8m2_t high_eew8 = __riscv_vreinterpret_v_i16m2_i8m2 (high_eew16);
+ vuint8m2_t high_ueew8 = __riscv_vreinterpret_v_i8m2_u8m2 (high_eew8);
+ vint16m4_t result = __riscv_vwmaccsu_vx_i16m4 (accum, 16, high_ueew8, 4);
+ __riscv_vse16_v_i16m4 (out, result, 4);
+}
+
+void
+foo9 (void *in, void *out)
+{
+ vint16m8_t accum = __riscv_vle16_v_i16m8 (in, 4);
+ vint16m4_t high_eew16 = __riscv_vget_v_i16m8_i16m4 (accum, 1);
+ vint8m4_t high_eew8 = __riscv_vreinterpret_v_i16m4_i8m4 (high_eew16);
+ vuint8m4_t high_ueew8 = __riscv_vreinterpret_v_i8m4_u8m4 (high_eew8);
+ vint16m8_t result = __riscv_vwmaccsu_vx_i16m8 (accum, 16, high_ueew8, 4);
+ __riscv_vse16_v_i16m8 (out, result, 4);
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-38.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-38.c
new file mode 100644
index 0000000..7b7d6cc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-38.c
@@ -0,0 +1,82 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+void
+foo (void *in, void *out)
+{
+ vfloat64m2_t accum = __riscv_vle64_v_f64m2 (in, 4);
+ vfloat64m1_t high_eew64 = __riscv_vget_v_f64m2_f64m1 (accum, 1);
+ vint64m1_t high_eew64_i = __riscv_vreinterpret_v_f64m1_i64m1 (high_eew64);
+ vint32m1_t high_eew32_i = __riscv_vreinterpret_v_i64m1_i32m1 (high_eew64_i);
+ vfloat32m1_t high_eew32 = __riscv_vreinterpret_v_i32m1_f32m1 (high_eew32_i);
+ vfloat64m2_t result = __riscv_vfwmacc_vf_f64m2 (accum, 64, high_eew32, 4);
+ __riscv_vse64_v_f64m2 (out, result, 4);
+}
+
+void
+foo2 (void *in, void *out)
+{
+ vfloat64m4_t accum = __riscv_vle64_v_f64m4 (in, 4);
+ vfloat64m2_t high_eew64 = __riscv_vget_v_f64m4_f64m2 (accum, 1);
+ vint64m2_t high_eew64_i = __riscv_vreinterpret_v_f64m2_i64m2 (high_eew64);
+ vint32m2_t high_eew32_i = __riscv_vreinterpret_v_i64m2_i32m2 (high_eew64_i);
+ vfloat32m2_t high_eew32 = __riscv_vreinterpret_v_i32m2_f32m2 (high_eew32_i);
+ vfloat64m4_t result = __riscv_vfwmacc_vf_f64m4 (accum, 64, high_eew32, 4);
+ __riscv_vse64_v_f64m4 (out, result, 4);
+}
+
+void
+foo3 (void *in, void *out)
+{
+ vfloat64m8_t accum = __riscv_vle64_v_f64m8 (in, 4);
+ vfloat64m4_t high_eew64 = __riscv_vget_v_f64m8_f64m4 (accum, 1);
+ vint64m4_t high_eew64_i = __riscv_vreinterpret_v_f64m4_i64m4 (high_eew64);
+ vint32m4_t high_eew32_i = __riscv_vreinterpret_v_i64m4_i32m4 (high_eew64_i);
+ vfloat32m4_t high_eew32 = __riscv_vreinterpret_v_i32m4_f32m4 (high_eew32_i);
+ vfloat64m8_t result = __riscv_vfwmacc_vf_f64m8 (accum, 64, high_eew32, 4);
+ __riscv_vse64_v_f64m8 (out, result, 4);
+}
+
+void
+foo4 (void *in, void *out)
+{
+ vfloat64m2_t accum = __riscv_vle64_v_f64m2 (in, 4);
+ vfloat64m1_t high_eew64 = __riscv_vget_v_f64m2_f64m1 (accum, 1);
+ vint64m1_t high_eew64_i = __riscv_vreinterpret_v_f64m1_i64m1 (high_eew64);
+ vint32m1_t high_eew32_i = __riscv_vreinterpret_v_i64m1_i32m1 (high_eew64_i);
+ vfloat32m1_t high_eew32 = __riscv_vreinterpret_v_i32m1_f32m1 (high_eew32_i);
+ vfloat64m2_t result = __riscv_vfwnmsac_vf_f64m2 (accum, 64, high_eew32, 4);
+ __riscv_vse64_v_f64m2 (out, result, 4);
+}
+
+void
+foo5 (void *in, void *out)
+{
+ vfloat64m4_t accum = __riscv_vle64_v_f64m4 (in, 4);
+ vfloat64m2_t high_eew64 = __riscv_vget_v_f64m4_f64m2 (accum, 1);
+ vint64m2_t high_eew64_i = __riscv_vreinterpret_v_f64m2_i64m2 (high_eew64);
+ vint32m2_t high_eew32_i = __riscv_vreinterpret_v_i64m2_i32m2 (high_eew64_i);
+ vfloat32m2_t high_eew32 = __riscv_vreinterpret_v_i32m2_f32m2 (high_eew32_i);
+ vfloat64m4_t result = __riscv_vfwnmsac_vf_f64m4 (accum, 64, high_eew32, 4);
+ __riscv_vse64_v_f64m4 (out, result, 4);
+}
+
+void
+foo6 (void *in, void *out)
+{
+ vfloat64m8_t accum = __riscv_vle64_v_f64m8 (in, 4);
+ vfloat64m4_t high_eew64 = __riscv_vget_v_f64m8_f64m4 (accum, 1);
+ vint64m4_t high_eew64_i = __riscv_vreinterpret_v_f64m4_i64m4 (high_eew64);
+ vint32m4_t high_eew32_i = __riscv_vreinterpret_v_i64m4_i32m4 (high_eew64_i);
+ vfloat32m4_t high_eew32 = __riscv_vreinterpret_v_i32m4_f32m4 (high_eew32_i);
+ vfloat64m8_t result = __riscv_vfwnmsac_vf_f64m8 (accum, 64, high_eew32, 4);
+ __riscv_vse64_v_f64m8 (out, result, 4);
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-4.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-4.c
new file mode 100644
index 0000000..4cc6aa6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-4.c
@@ -0,0 +1,104 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7, size_t sum8, size_t sum9,
+ size_t sum10, size_t sum11, size_t sum12, size_t sum13, size_t sum14,
+ size_t sum15)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+ + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint8m1_t v0 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v1 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v2 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v3 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v4 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v5 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v6 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v7 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v8 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v9 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v10 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v11 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v12 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v13 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v14 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+ vint8m1_t v15 = __riscv_vle8_v_i8m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m2_t vw0 = __riscv_vsext_vf2_i16m2 (v0, vl);
+ vint16m2_t vw1 = __riscv_vsext_vf2_i16m2 (v1, vl);
+ vint16m2_t vw2 = __riscv_vsext_vf2_i16m2 (v2, vl);
+ vint16m2_t vw3 = __riscv_vsext_vf2_i16m2 (v3, vl);
+ vint16m2_t vw4 = __riscv_vsext_vf2_i16m2 (v4, vl);
+ vint16m2_t vw5 = __riscv_vsext_vf2_i16m2 (v5, vl);
+ vint16m2_t vw6 = __riscv_vsext_vf2_i16m2 (v6, vl);
+ vint16m2_t vw7 = __riscv_vsext_vf2_i16m2 (v7, vl);
+ vint16m2_t vw8 = __riscv_vsext_vf2_i16m2 (v8, vl);
+ vint16m2_t vw9 = __riscv_vsext_vf2_i16m2 (v9, vl);
+ vint16m2_t vw10 = __riscv_vsext_vf2_i16m2 (v10, vl);
+ vint16m2_t vw11 = __riscv_vsext_vf2_i16m2 (v11, vl);
+ vint16m2_t vw12 = __riscv_vsext_vf2_i16m2 (v12, vl);
+ vint16m2_t vw13 = __riscv_vsext_vf2_i16m2 (v13, vl);
+ vint16m2_t vw14 = __riscv_vsext_vf2_i16m2 (v14, vl);
+ vint16m2_t vw15 = __riscv_vsext_vf2_i16m2 (v15, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m2_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m2_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m2_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m2_i16 (vw3);
+ size_t sum4 = __riscv_vmv_x_s_i16m2_i16 (vw4);
+ size_t sum5 = __riscv_vmv_x_s_i16m2_i16 (vw5);
+ size_t sum6 = __riscv_vmv_x_s_i16m2_i16 (vw6);
+ size_t sum7 = __riscv_vmv_x_s_i16m2_i16 (vw7);
+ size_t sum8 = __riscv_vmv_x_s_i16m2_i16 (vw8);
+ size_t sum9 = __riscv_vmv_x_s_i16m2_i16 (vw9);
+ size_t sum10 = __riscv_vmv_x_s_i16m2_i16 (vw10);
+ size_t sum11 = __riscv_vmv_x_s_i16m2_i16 (vw11);
+ size_t sum12 = __riscv_vmv_x_s_i16m2_i16 (vw12);
+ size_t sum13 = __riscv_vmv_x_s_i16m2_i16 (vw13);
+ size_t sum14 = __riscv_vmv_x_s_i16m2_i16 (vw14);
+ size_t sum15 = __riscv_vmv_x_s_i16m2_i16 (vw15);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+ sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-5.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-5.c
new file mode 100644
index 0000000..f7d668c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-5.c
@@ -0,0 +1,68 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+ size_t sum5, size_t sum6, size_t sum7)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint8m2_t v0 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v1 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v2 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v3 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v4 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v5 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v6 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+ vint8m2_t v7 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m4_t vw0 = __riscv_vsext_vf2_i16m4 (v0, vl);
+ vint16m4_t vw1 = __riscv_vsext_vf2_i16m4 (v1, vl);
+ vint16m4_t vw2 = __riscv_vsext_vf2_i16m4 (v2, vl);
+ vint16m4_t vw3 = __riscv_vsext_vf2_i16m4 (v3, vl);
+ vint16m4_t vw4 = __riscv_vsext_vf2_i16m4 (v4, vl);
+ vint16m4_t vw5 = __riscv_vsext_vf2_i16m4 (v5, vl);
+ vint16m4_t vw6 = __riscv_vsext_vf2_i16m4 (v6, vl);
+ vint16m4_t vw7 = __riscv_vsext_vf2_i16m4 (v7, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m4_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m4_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m4_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m4_i16 (vw3);
+ size_t sum4 = __riscv_vmv_x_s_i16m4_i16 (vw4);
+ size_t sum5 = __riscv_vmv_x_s_i16m4_i16 (vw5);
+ size_t sum6 = __riscv_vmv_x_s_i16m4_i16 (vw6);
+ size_t sum7 = __riscv_vmv_x_s_i16m4_i16 (vw7);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-6.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-6.c
new file mode 100644
index 0000000..3eed119
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-6.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3)
+{
+ return sum0 + sum1 + sum2 + sum3;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vint8m4_t v0 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+ vint8m4_t v1 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+ vint8m4_t v2 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+ vint8m4_t v3 = __riscv_vle8_v_i8m4 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vint16m8_t vw0 = __riscv_vsext_vf2_i16m8 (v0, vl);
+ vint16m8_t vw1 = __riscv_vsext_vf2_i16m8 (v1, vl);
+ vint16m8_t vw2 = __riscv_vsext_vf2_i16m8 (v2, vl);
+ vint16m8_t vw3 = __riscv_vsext_vf2_i16m8 (v3, vl);
+
+ asm volatile("nop" ::: "memory");
+ size_t sum0 = __riscv_vmv_x_s_i16m8_i16 (vw0);
+ size_t sum1 = __riscv_vmv_x_s_i16m8_i16 (vw1);
+ size_t sum2 = __riscv_vmv_x_s_i16m8_i16 (vw2);
+ size_t sum3 = __riscv_vmv_x_s_i16m8_i16 (vw3);
+
+ sum += sumation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-7.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-7.c
new file mode 100644
index 0000000..7064471
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-7.c
@@ -0,0 +1,106 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+ double sum5, double sum6, double sum7, double sum8, double sum9,
+ double sum10, double sum11, double sum12, double sum13, double sum14,
+ double sum15)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9
+ + sum10 + sum11 + sum12 + sum13 + sum14 + sum15;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v8 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v9 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v10 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v11 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v12 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v13 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v14 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+ vfloat32m1_t v15 = __riscv_vle32_v_f32m1 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vfloat64m2_t vw0 = __riscv_vfwcvt_f_f_v_f64m2 (v0, vl);
+ vfloat64m2_t vw1 = __riscv_vfwcvt_f_f_v_f64m2 (v1, vl);
+ vfloat64m2_t vw2 = __riscv_vfwcvt_f_f_v_f64m2 (v2, vl);
+ vfloat64m2_t vw3 = __riscv_vfwcvt_f_f_v_f64m2 (v3, vl);
+ vfloat64m2_t vw4 = __riscv_vfwcvt_f_f_v_f64m2 (v4, vl);
+ vfloat64m2_t vw5 = __riscv_vfwcvt_f_f_v_f64m2 (v5, vl);
+ vfloat64m2_t vw6 = __riscv_vfwcvt_f_f_v_f64m2 (v6, vl);
+ vfloat64m2_t vw7 = __riscv_vfwcvt_f_f_v_f64m2 (v7, vl);
+ vfloat64m2_t vw8 = __riscv_vfwcvt_f_f_v_f64m2 (v8, vl);
+ vfloat64m2_t vw9 = __riscv_vfwcvt_f_f_v_f64m2 (v9, vl);
+ vfloat64m2_t vw10 = __riscv_vfwcvt_f_f_v_f64m2 (v10, vl);
+ vfloat64m2_t vw11 = __riscv_vfwcvt_f_f_v_f64m2 (v11, vl);
+ vfloat64m2_t vw12 = __riscv_vfwcvt_f_f_v_f64m2 (v12, vl);
+ vfloat64m2_t vw13 = __riscv_vfwcvt_f_f_v_f64m2 (v13, vl);
+ vfloat64m2_t vw14 = __riscv_vfwcvt_f_f_v_f64m2 (v14, vl);
+ vfloat64m2_t vw15 = __riscv_vfwcvt_f_f_v_f64m2 (v15, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vfmv_f_s_f64m2_f64 (vw0);
+ double sum1 = __riscv_vfmv_f_s_f64m2_f64 (vw1);
+ double sum2 = __riscv_vfmv_f_s_f64m2_f64 (vw2);
+ double sum3 = __riscv_vfmv_f_s_f64m2_f64 (vw3);
+ double sum4 = __riscv_vfmv_f_s_f64m2_f64 (vw4);
+ double sum5 = __riscv_vfmv_f_s_f64m2_f64 (vw5);
+ double sum6 = __riscv_vfmv_f_s_f64m2_f64 (vw6);
+ double sum7 = __riscv_vfmv_f_s_f64m2_f64 (vw7);
+ double sum8 = __riscv_vfmv_f_s_f64m2_f64 (vw8);
+ double sum9 = __riscv_vfmv_f_s_f64m2_f64 (vw9);
+ double sum10 = __riscv_vfmv_f_s_f64m2_f64 (vw10);
+ double sum11 = __riscv_vfmv_f_s_f64m2_f64 (vw11);
+ double sum12 = __riscv_vfmv_f_s_f64m2_f64 (vw12);
+ double sum13 = __riscv_vfmv_f_s_f64m2_f64 (vw13);
+ double sum14 = __riscv_vfmv_f_s_f64m2_f64 (vw14);
+ double sum15 = __riscv_vfmv_f_s_f64m2_f64 (vw15);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8,
+ sum9, sum10, sum11, sum12, sum13, sum14, sum15);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
+
+
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-8.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-8.c
new file mode 100644
index 0000000..ab56d0d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-8.c
@@ -0,0 +1,68 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3, double sum4,
+ double sum5, double sum6, double sum7)
+{
+ return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m2_t v0 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v1 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v2 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v3 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v4 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v5 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v6 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+ vfloat32m2_t v7 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vfloat64m4_t vw0 = __riscv_vfwcvt_f_f_v_f64m4 (v0, vl);
+ vfloat64m4_t vw1 = __riscv_vfwcvt_f_f_v_f64m4 (v1, vl);
+ vfloat64m4_t vw2 = __riscv_vfwcvt_f_f_v_f64m4 (v2, vl);
+ vfloat64m4_t vw3 = __riscv_vfwcvt_f_f_v_f64m4 (v3, vl);
+ vfloat64m4_t vw4 = __riscv_vfwcvt_f_f_v_f64m4 (v4, vl);
+ vfloat64m4_t vw5 = __riscv_vfwcvt_f_f_v_f64m4 (v5, vl);
+ vfloat64m4_t vw6 = __riscv_vfwcvt_f_f_v_f64m4 (v6, vl);
+ vfloat64m4_t vw7 = __riscv_vfwcvt_f_f_v_f64m4 (v7, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vfmv_f_s_f64m4_f64 (vw0);
+ double sum1 = __riscv_vfmv_f_s_f64m4_f64 (vw1);
+ double sum2 = __riscv_vfmv_f_s_f64m4_f64 (vw2);
+ double sum3 = __riscv_vfmv_f_s_f64m4_f64 (vw3);
+ double sum4 = __riscv_vfmv_f_s_f64m4_f64 (vw4);
+ double sum5 = __riscv_vfmv_f_s_f64m4_f64 (vw5);
+ double sum6 = __riscv_vfmv_f_s_f64m4_f64 (vw6);
+ double sum7 = __riscv_vfmv_f_s_f64m4_f64 (vw7);
+
+ sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-9.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-9.c
new file mode 100644
index 0000000..82f369c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-9.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+double __attribute__ ((noinline))
+sumation (double sum0, double sum1, double sum2, double sum3)
+{
+ return sum0 + sum1 + sum2 + sum3;
+}
+
+double
+foo (char const *buf, size_t len)
+{
+ double sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8 ();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for (; it + step <= end;)
+ {
+ vfloat32m4_t v0 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+ it += vl;
+ vfloat32m4_t v1 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+ it += vl;
+ vfloat32m4_t v2 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+ it += vl;
+ vfloat32m4_t v3 = __riscv_vle32_v_f32m4 ((void *) it, vl);
+ it += vl;
+
+ asm volatile("nop" ::: "memory");
+ vfloat64m8_t vw0 = __riscv_vfwcvt_f_f_v_f64m8 (v0, vl);
+ vfloat64m8_t vw1 = __riscv_vfwcvt_f_f_v_f64m8 (v1, vl);
+ vfloat64m8_t vw2 = __riscv_vfwcvt_f_f_v_f64m8 (v2, vl);
+ vfloat64m8_t vw3 = __riscv_vfwcvt_f_f_v_f64m8 (v3, vl);
+
+ asm volatile("nop" ::: "memory");
+ double sum0 = __riscv_vfmv_f_s_f64m8_f64 (vw0);
+ double sum1 = __riscv_vfmv_f_s_f64m8_f64 (vw1);
+ double sum2 = __riscv_vfmv_f_s_f64m8_f64 (vw2);
+ double sum3 = __riscv_vfmv_f_s_f64m8_f64 (vw3);
+
+ sum += sumation (sum0, sum1, sum2, sum3);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112743-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112743-1.c
new file mode 100644
index 0000000..0f6d005
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112743-1.c
@@ -0,0 +1,16 @@
+/* Test that we do not ICE when compiling.  */
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zve32f_zvfh_zfh -mabi=lp64d -O2" } */
+
+typedef struct test_a {
+ void *x;
+ char a[10];
+ short b[2];
+ int c[1];
+} test_type_t;
+
+void
+test_copy_memory (test_type_t *out, test_type_t *in)
+{
+ *out = *in;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112743-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112743-2.c
new file mode 100644
index 0000000..27eefc6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112743-2.c
@@ -0,0 +1,52 @@
+/* Test that we do not ICE when compiling.  */
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zve32f_zvfh_zfh -mabi=lp64d -O2" } */
+
+#include <sys/types.h>
+
+union double_union
+{
+ double d;
+ __uint32_t i[2];
+};
+
+#define word0(x) (x.i[1])
+#define word1(x) (x.i[0])
+
+#define P 53
+#define Exp_shift 20
+#define Exp_msk1 ((__uint32_t)0x100000L)
+#define Exp_mask ((__uint32_t)0x7ff00000L)
+
+double ulp (double _x)
+{
+ union double_union x, a;
+ register int L;
+
+ x.d = _x;
+ L = (word0 (x) & Exp_mask) - (P - 1) * Exp_msk1;
+
+ if (L > 0)
+ {
+ L |= Exp_msk1 >> 4;
+ word0 (a) = L;
+ word1 (a) = 0;
+ }
+ else
+ {
+ L = -L >> Exp_shift;
+ if (L < Exp_shift)
+ {
+ word0 (a) = 0x80000 >> L;
+ word1 (a) = 0;
+ }
+ else
+ {
+ word0 (a) = 0;
+ L -= Exp_shift;
+ word1 (a) = L >= 31 ? 1 : 1 << (31 - L);
+ }
+ }
+
+ return a.d;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/unop_v_constraint-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/unop_v_constraint-2.c
index 3344d42..f7a9e58 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/unop_v_constraint-2.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/unop_v_constraint-2.c
@@ -30,7 +30,7 @@ void f1 (void * in, void *out)
** vle16\.v\tv[0-9]+,0\([a-x0-9]+\)
** vsext\.vf2\tv[0-9]+,\s*v[0-9]+
** ...
-** vsext\.vf2\tv[1-9][0-9]?,\s*v[0-9]+,\s*v0.t
+** vsext\.vf2\tv[0-9]+,\s*v[0-9]+,\s*v0.t
** vse64\.v\tv[0-9]+,0\([a-x0-9]+\)
** ret
*/
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-84.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-84.c
index a584dd9..5cd0f28 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-84.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-84.c
@@ -17,6 +17,6 @@ double f0 (int8_t * restrict in, int8_t * restrict out, int n, int m, unsigned c
}
/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e8,\s*m2,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-Os" no-opts "-Oz" no-opts "-O1" no-opts "-g" no-opts "-funroll-loops" } } } } */
-/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*zero,\s*e64,\s*m1,\s*t[au],\s*m[au]} 2 { target { no-opts "-O0" no-opts "-Os" no-opts "-Oz" no-opts "-O1" no-opts "-g" no-opts "-funroll-loops" } } } } */
-/* { dg-final { scan-assembler-times {vsetvli} 3 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-g" no-opts "-funroll-loops" } } } } */
-
+/* { dg-final { scan-assembler-not {vsetvli\s+zero,\s*zero} { target { no-opts "-O0" no-opts "-Os" no-opts "-Oz" no-opts "-O1" no-opts "-g" no-opts "-funroll-loops" } } } } */
+/* { dg-final { scan-assembler-times {vsetvli} 1 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-g" no-opts "-funroll-loops" } } } } */
+/* { dg-final { scan-assembler-times {vsetivli} 2 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-g" no-opts "-funroll-loops" } } } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr111037-3.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr111037-3.c
index 0f40642..13344ec 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr111037-3.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr111037-3.c
@@ -13,4 +13,4 @@ void foo(_Float16 y, int16_t z, int64_t *i64p)
}
/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*1,\s*e64,\s*m1,\s*t[au],\s*m[au]} 1 } } */
-/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*zero,\s*e16,\s*m1,\s*t[au],\s*m[au]} 1 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*1,\s*e16,\s*m1,\s*t[au],\s*m[au]} 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr112713-1.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr112713-1.c
new file mode 100644
index 0000000..76402ab
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr112713-1.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t
+foo (char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for(; it + step <= end; ) {
+ it += vl;
+ vint8m8_t v3 = __riscv_vle8_v_i8m8((void*)it, vl); it += vl;
+ vbool1_t m3 = __riscv_vmsgt_vx_i8m8_b1(v3, -65, vl);
+ sum += __riscv_vcpop_m_b1(m3, vl);
+ }
+ return sum;
+}
+
+/* { dg-final { scan-assembler-times {vsetvli} 1 } } */
+/* { dg-final { scan-assembler-not {vsetivli} } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e8,\s*m8,\s*t[au],\s*m[au]} 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr112713-2.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr112713-2.c
new file mode 100644
index 0000000..04539d9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr112713-2.c
@@ -0,0 +1,47 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+static size_t
+utf8_count_rvv(char const *buf, size_t len)
+{
+ size_t sum = 0;
+ for (size_t vl; len > 0; len -= vl, buf += vl) {
+ vl = __riscv_vsetvl_e8m8(len);
+ vint8m8_t v = __riscv_vle8_v_i8m8((void*)buf, vl);
+ vbool1_t mask = __riscv_vmsgt_vx_i8m8_b1(v, -65, vl);
+ sum += __riscv_vcpop_m_b1(mask, vl);
+ }
+ return sum;
+}
+
+size_t
+utf8_count_rvv_4x_tail(char const *buf, size_t len)
+{
+ size_t sum = 0;
+ size_t vl = __riscv_vsetvlmax_e8m8();
+ size_t step = vl * 4;
+ const char *it = buf, *end = buf + len;
+ for(; it + step <= end; ) {
+ vint8m8_t v0 = __riscv_vle8_v_i8m8((void*)it, vl); it += vl;
+ vint8m8_t v1 = __riscv_vle8_v_i8m8((void*)it, vl); it += vl;
+ vint8m8_t v2 = __riscv_vle8_v_i8m8((void*)it, vl); it += vl;
+ vint8m8_t v3 = __riscv_vle8_v_i8m8((void*)it, vl); it += vl;
+ vbool1_t m0 = __riscv_vmsgt_vx_i8m8_b1(v0, -65, vl);
+ vbool1_t m1 = __riscv_vmsgt_vx_i8m8_b1(v1, -65, vl);
+ vbool1_t m2 = __riscv_vmsgt_vx_i8m8_b1(v2, -65, vl);
+ vbool1_t m3 = __riscv_vmsgt_vx_i8m8_b1(v3, -65, vl);
+ sum += __riscv_vcpop_m_b1(m0, vl);
+ sum += __riscv_vcpop_m_b1(m1, vl);
+ sum += __riscv_vcpop_m_b1(m2, vl);
+ sum += __riscv_vcpop_m_b1(m3, vl);
+ }
+ return sum + utf8_count_rvv(it, end - it);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli} 2 } } */
+/* { dg-final { scan-assembler-not {vsetivli} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e8,\s*m8,\s*t[au],\s*m[au]} 1 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]} 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr112776.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr112776.c
new file mode 100644
index 0000000..8536901
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr112776.c
@@ -0,0 +1,36 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+void
+foo (float *r, const float *x)
+{
+ int i, k;
+
+ vfloat32m4_t x_vec;
+ vfloat32m4_t x_forward_vec;
+ vfloat32m4_t temp_vec;
+ vfloat32m1_t dst_vec;
+ vfloat32m1_t src_vec;
+
+ float result = 0.0f;
+ float shift_prev = 0.0f;
+
+ size_t n = 64;
+ for (size_t vl; n > 0; n -= vl)
+ {
+ vl = __riscv_vsetvl_e32m4 (n);
+ x_vec = __riscv_vle32_v_f32m4 (&x[0], vl);
+ x_forward_vec = __riscv_vle32_v_f32m4 (&x[0], vl);
+ temp_vec = __riscv_vfmul_vv_f32m4 (x_vec, x_forward_vec, vl);
+ src_vec = __riscv_vfmv_s_tu (src_vec, 0.0f, vl);
+ dst_vec = __riscv_vfmv_s_tu (dst_vec, 0.0f, vl);
+ dst_vec = __riscv_vfredosum_tu (dst_vec, temp_vec, src_vec, vl);
+ r[0] = __riscv_vfmv_f_s_f32m1_f32 (dst_vec);
+ }
+}
+
+/* { dg-final { scan-assembler-times {vsetvli} 1 } } */
+/* { dg-final { scan-assembler-not {vsetivli} } } */
+/* { dg-final { scan-assembler-times {vsetvli\t[a-x0-9]+,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]} 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr112813-1.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr112813-1.c
new file mode 100644
index 0000000..5aab9c2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr112813-1.c
@@ -0,0 +1,32 @@
+/* Test that we do not ICE when compiling.  */
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv_zvl256b -mabi=ilp32d -O3" } */
+
+int a, c, d, f, j;
+int b[7];
+long e;
+char *g;
+int *h;
+long long *i;
+
+void k() {
+ int l[][1] = {{}, {1}, {1}};
+ int *m = &d, *n = &l[0][0];
+
+ for (; e;)
+ {
+ f = 3;
+
+ for (; f >= 0; f--)
+ {
+ *m &= b[f] >= 0;
+ j = a >= 2 ? 0 : 1 >> a;
+ *i |= j;
+ }
+
+ for (; c;)
+ *g = 0;
+ }
+
+ h = n;
+}
diff --git a/gcc/testsuite/gcc.target/riscv/zicond-sfb-primitiveSemantics.c b/gcc/testsuite/gcc.target/riscv/zicond-sfb-primitiveSemantics.c
new file mode 100644
index 0000000..2c60656
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zicond-sfb-primitiveSemantics.c
@@ -0,0 +1,50 @@
+/* { dg-do compile } */
+/* { dg-options "-mtune=sifive-7-series -march=rv64gc_zicond -mabi=lp64d" { target { rv64 } } } */
+/* { dg-options "-mtune=sifive-7-series -march=rv32gc_zicond -mabi=ilp32f" { target { rv32 } } } */
+/* { dg-skip-if "" { *-*-* } {"-O0" "-Og"} } */
+
+long primitiveSemantics_00(long a, long b) { return a == 0 ? 0 : b; }
+
+long primitiveSemantics_01(long a, long b) { return a != 0 ? 0 : b; }
+
+long primitiveSemantics_02(long a, long b) { return a == 0 ? b : 0; }
+
+long primitiveSemantics_03(long a, long b) { return a != 0 ? b : 0; }
+
+long primitiveSemantics_04(long a, long b) {
+ if (a)
+ b = 0;
+ return b;
+}
+
+long primitiveSemantics_05(long a, long b) {
+ if (!a)
+ b = 0;
+ return b;
+}
+
+int primitiveSemantics_06(int a, int b) { return a == 0 ? 0 : b; }
+
+int primitiveSemantics_07(int a, int b) { return a != 0 ? 0 : b; }
+
+int primitiveSemantics_08(int a, int b) { return a == 0 ? b : 0; }
+
+int primitiveSemantics_09(int a, int b) { return a != 0 ? b : 0; }
+
+int primitiveSemantics_10(int a, int b) {
+ if (a)
+ b = 0;
+ return b;
+}
+
+int primitiveSemantics_11(int a, int b) {
+ if (!a)
+ b = 0;
+ return b;
+}
+
+/* { dg-final { scan-assembler-times {\mczero\.eqz\M} 6 } } */
+/* { dg-final { scan-assembler-times {\mczero\.nez\M} 6 } } */
+/* { dg-final { scan-assembler-not {\mbeq\M} } } */
+/* { dg-final { scan-assembler-not {\mbne\M} } } */
+/* { dg-final { scan-assembler-not {\mmovcc\M} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvkn-1.c b/gcc/testsuite/gcc.target/riscv/zvkn-1.c
index 23b255b..069a8f6 100644
--- a/gcc/testsuite/gcc.target/riscv/zvkn-1.c
+++ b/gcc/testsuite/gcc.target/riscv/zvkn-1.c
@@ -1,6 +1,6 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gc_zvkned_zvknhb_zvbb_zvkt" { target { rv64 } } } */
-/* { dg-options "-march=rv32gc_zvkned_zvknhb_zvbb_zvkt" { target { rv32 } } } */
+/* { dg-options "-march=rv64gc_zvkned_zvknhb_zvkb_zvkt" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc_zvkned_zvknhb_zvkb_zvkt" { target { rv32 } } } */
#ifndef __riscv_zvkn
#error "Feature macro for `Zvkn' not defined"
@@ -14,8 +14,8 @@
#error "Feature macro for `Zvknhb' not defined"
#endif
-#ifndef __riscv_zvbb
-#error "Feature macro for `Zvbb' not defined"
+#ifndef __riscv_zvkb
+#error "Feature macro for `Zvkb' not defined"
#endif
#ifndef __riscv_zvkt
diff --git a/gcc/testsuite/gcc.target/riscv/zvkn.c b/gcc/testsuite/gcc.target/riscv/zvkn.c
index 0047ebd..bcecbcc 100644
--- a/gcc/testsuite/gcc.target/riscv/zvkn.c
+++ b/gcc/testsuite/gcc.target/riscv/zvkn.c
@@ -14,8 +14,8 @@
#error "Feature macro for `Zvknhb' not defined"
#endif
-#ifndef __riscv_zvbb
-#error "Feature macro for `Zvbb' not defined"
+#ifndef __riscv_zvkb
+#error "Feature macro for `Zvkb' not defined"
#endif
#ifndef __riscv_zvkt
diff --git a/gcc/testsuite/gcc.target/riscv/zvknc-1.c b/gcc/testsuite/gcc.target/riscv/zvknc-1.c
index d8a84c0..64cae2b 100644
--- a/gcc/testsuite/gcc.target/riscv/zvknc-1.c
+++ b/gcc/testsuite/gcc.target/riscv/zvknc-1.c
@@ -1,6 +1,6 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gc_zvkned_zvknhb_zvbb_zvkt_zvbc" { target { rv64 } } } */
-/* { dg-options "-march=rv32gc_zvkned_zvknhb_zvbb_zvkt_zvbc" { target { rv32 } } } */
+/* { dg-options "-march=rv64gc_zvkned_zvknhb_zvkb_zvkt_zvbc" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc_zvkned_zvknhb_zvkb_zvkt_zvbc" { target { rv32 } } } */
#ifndef __riscv_zvknc
#error "Feature macro for `Zvknc' not defined"
@@ -18,8 +18,8 @@
#error "Feature macro for `Zvknhb' not defined"
#endif
-#ifndef __riscv_zvbb
-#error "Feature macro for `Zvbb' not defined"
+#ifndef __riscv_zvkb
+#error "Feature macro for `Zvkb' not defined"
#endif
#ifndef __riscv_zvkt
diff --git a/gcc/testsuite/gcc.target/riscv/zvknc-2.c b/gcc/testsuite/gcc.target/riscv/zvknc-2.c
index 36cc633..1f8b510 100644
--- a/gcc/testsuite/gcc.target/riscv/zvknc-2.c
+++ b/gcc/testsuite/gcc.target/riscv/zvknc-2.c
@@ -18,8 +18,8 @@
#error "Feature macro for `Zvknhb' not defined"
#endif
-#ifndef __riscv_zvbb
-#error "Feature macro for `Zvbb' not defined"
+#ifndef __riscv_zvkb
+#error "Feature macro for `Zvkb' not defined"
#endif
#ifndef __riscv_zvkt
diff --git a/gcc/testsuite/gcc.target/riscv/zvknc.c b/gcc/testsuite/gcc.target/riscv/zvknc.c
index a177f17..64dda5f 100644
--- a/gcc/testsuite/gcc.target/riscv/zvknc.c
+++ b/gcc/testsuite/gcc.target/riscv/zvknc.c
@@ -18,8 +18,8 @@
#error "Feature macro for `Zvknhb' not defined"
#endif
-#ifndef __riscv_zvbb
-#error "Feature macro for `Zvbb' not defined"
+#ifndef __riscv_zvkb
+#error "Feature macro for `Zvkb' not defined"
#endif
#ifndef __riscv_zvkt
diff --git a/gcc/testsuite/gcc.target/riscv/zvkng-1.c b/gcc/testsuite/gcc.target/riscv/zvkng-1.c
index d996b42..5419585 100644
--- a/gcc/testsuite/gcc.target/riscv/zvkng-1.c
+++ b/gcc/testsuite/gcc.target/riscv/zvkng-1.c
@@ -1,6 +1,6 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gc_zvkned_zvknhb_zvbb_zvkt_zvkg" { target { rv64 } } } */
-/* { dg-options "-march=rv32gc_zvkned_zvknhb_zvbb_zvkt_zvkg" { target { rv32 } } } */
+/* { dg-options "-march=rv64gc_zvkned_zvknhb_zvkb_zvkt_zvkg" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc_zvkned_zvknhb_zvkb_zvkt_zvkg" { target { rv32 } } } */
#ifndef __riscv_zvkng
#error "Feature macro for `Zvkng' not defined"
@@ -18,8 +18,8 @@
#error "Feature macro for `Zvknhb' not defined"
#endif
-#ifndef __riscv_zvbb
-#error "Feature macro for `Zvbb' not defined"
+#ifndef __riscv_zvkb
+#error "Feature macro for `Zvkb' not defined"
#endif
#ifndef __riscv_zvkt
diff --git a/gcc/testsuite/gcc.target/riscv/zvkng-2.c b/gcc/testsuite/gcc.target/riscv/zvkng-2.c
index 0019999..6c7b239 100644
--- a/gcc/testsuite/gcc.target/riscv/zvkng-2.c
+++ b/gcc/testsuite/gcc.target/riscv/zvkng-2.c
@@ -18,8 +18,8 @@
#error "Feature macro for `Zvknhb' not defined"
#endif
-#ifndef __riscv_zvbb
-#error "Feature macro for `Zvbb' not defined"
+#ifndef __riscv_zvkb
+#error "Feature macro for `Zvkb' not defined"
#endif
#ifndef __riscv_zvkt
diff --git a/gcc/testsuite/gcc.target/riscv/zvkng.c b/gcc/testsuite/gcc.target/riscv/zvkng.c
index 4605faf..6c516a4 100644
--- a/gcc/testsuite/gcc.target/riscv/zvkng.c
+++ b/gcc/testsuite/gcc.target/riscv/zvkng.c
@@ -18,8 +18,8 @@
#error "Feature macro for `Zvknhb' not defined"
#endif
-#ifndef __riscv_zvbb
-#error "Feature macro for `Zvbb' not defined"
+#ifndef __riscv_zvkb
+#error "Feature macro for `Zvkb' not defined"
#endif
#ifndef __riscv_zvkt
diff --git a/gcc/testsuite/gcc.target/riscv/zvks-1.c b/gcc/testsuite/gcc.target/riscv/zvks-1.c
index a576cdb..180b3f3 100644
--- a/gcc/testsuite/gcc.target/riscv/zvks-1.c
+++ b/gcc/testsuite/gcc.target/riscv/zvks-1.c
@@ -1,6 +1,6 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gc_zvksed_zvksh_zvbb_zvkt" { target { rv64 } } } */
-/* { dg-options "-march=rv32gc_zvksed_zvksh_zvbb_zvkt" { target { rv32 } } } */
+/* { dg-options "-march=rv64gc_zvksed_zvksh_zvkb_zvkt" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc_zvksed_zvksh_zvkb_zvkt" { target { rv32 } } } */
#ifndef __riscv_zvks
#error "Feature macro for `Zvks' not defined"
@@ -14,8 +14,8 @@
#error "Feature macro for `Zvksh' not defined"
#endif
-#ifndef __riscv_zvbb
-#error "Feature macro for `Zvbb' not defined"
+#ifndef __riscv_zvkb
+#error "Feature macro for `Zvkb' not defined"
#endif
#ifndef __riscv_zvkt
diff --git a/gcc/testsuite/gcc.target/riscv/zvks.c b/gcc/testsuite/gcc.target/riscv/zvks.c
index d31b261..2022d16 100644
--- a/gcc/testsuite/gcc.target/riscv/zvks.c
+++ b/gcc/testsuite/gcc.target/riscv/zvks.c
@@ -14,8 +14,8 @@
#error "Feature macro for `Zvksh' not defined"
#endif
-#ifndef __riscv_zvbb
-#error "Feature macro for `Zvbb' not defined"
+#ifndef __riscv_zvkb
+#error "Feature macro for `Zvkb' not defined"
#endif
#ifndef __riscv_zvkt
diff --git a/gcc/testsuite/gcc.target/riscv/zvksc-1.c b/gcc/testsuite/gcc.target/riscv/zvksc-1.c
index 3b76e6c..0c81b4e 100644
--- a/gcc/testsuite/gcc.target/riscv/zvksc-1.c
+++ b/gcc/testsuite/gcc.target/riscv/zvksc-1.c
@@ -1,6 +1,6 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gc_zvksed_zvksh_zvbb_zvkt_zvbc" { target { rv64 } } } */
-/* { dg-options "-march=rv32gc_zvksed_zvksh_zvbb_zvkt_zvbc" { target { rv32 } } } */
+/* { dg-options "-march=rv64gc_zvksed_zvksh_zvkb_zvkt_zvbc" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc_zvksed_zvksh_zvkb_zvkt_zvbc" { target { rv32 } } } */
#ifndef __riscv_zvksc
#error "Feature macro for `Zvksc' not defined"
@@ -18,8 +18,8 @@
#error "Feature macro for `Zvksh' not defined"
#endif
-#ifndef __riscv_zvbb
-#error "Feature macro for `Zvbb' not defined"
+#ifndef __riscv_zvkb
+#error "Feature macro for `Zvkb' not defined"
#endif
#ifndef __riscv_zvkt
diff --git a/gcc/testsuite/gcc.target/riscv/zvksc-2.c b/gcc/testsuite/gcc.target/riscv/zvksc-2.c
index b95e34e..0cee358 100644
--- a/gcc/testsuite/gcc.target/riscv/zvksc-2.c
+++ b/gcc/testsuite/gcc.target/riscv/zvksc-2.c
@@ -18,8 +18,8 @@
#error "Feature macro for `Zvksh' not defined"
#endif
-#ifndef __riscv_zvbb
-#error "Feature macro for `Zvbb' not defined"
+#ifndef __riscv_zvkb
+#error "Feature macro for `Zvkb' not defined"
#endif
#ifndef __riscv_zvkt
diff --git a/gcc/testsuite/gcc.target/riscv/zvksc.c b/gcc/testsuite/gcc.target/riscv/zvksc.c
index 983f1aa..6c33a86 100644
--- a/gcc/testsuite/gcc.target/riscv/zvksc.c
+++ b/gcc/testsuite/gcc.target/riscv/zvksc.c
@@ -18,8 +18,8 @@
#error "Feature macro for `Zvksh' not defined"
#endif
-#ifndef __riscv_zvbb
-#error "Feature macro for `Zvbb' not defined"
+#ifndef __riscv_zvkb
+#error "Feature macro for `Zvkb' not defined"
#endif
#ifndef __riscv_zvkt
diff --git a/gcc/testsuite/gcc.target/riscv/zvksg-1.c b/gcc/testsuite/gcc.target/riscv/zvksg-1.c
index e6b3055..a81c8fa 100644
--- a/gcc/testsuite/gcc.target/riscv/zvksg-1.c
+++ b/gcc/testsuite/gcc.target/riscv/zvksg-1.c
@@ -1,6 +1,6 @@
/* { dg-do compile } */
-/* { dg-options "-march=rv64gc_zvksed_zvksh_zvbb_zvkt_zvkg" { target { rv64 } } } */
-/* { dg-options "-march=rv32gc_zvksed_zvksh_zvbb_zvkt_zvkg" { target { rv32 } } } */
+/* { dg-options "-march=rv64gc_zvksed_zvksh_zvkb_zvkt_zvkg" { target { rv64 } } } */
+/* { dg-options "-march=rv32gc_zvksed_zvksh_zvkb_zvkt_zvkg" { target { rv32 } } } */
#ifndef __riscv_zvksg
#error "Feature macro for `Zvksg' not defined"
@@ -18,8 +18,8 @@
#error "Feature macro for `Zvksh' not defined"
#endif
-#ifndef __riscv_zvbb
-#error "Feature macro for `Zvbb' not defined"
+#ifndef __riscv_zvkb
+#error "Feature macro for `Zvkb' not defined"
#endif
#ifndef __riscv_zvkt
diff --git a/gcc/testsuite/gcc.target/riscv/zvksg-2.c b/gcc/testsuite/gcc.target/riscv/zvksg-2.c
index 2475e8b..f6aa06b 100644
--- a/gcc/testsuite/gcc.target/riscv/zvksg-2.c
+++ b/gcc/testsuite/gcc.target/riscv/zvksg-2.c
@@ -18,8 +18,8 @@
#error "Feature macro for `Zvksh' not defined"
#endif
-#ifndef __riscv_zvbb
-#error "Feature macro for `Zvbb' not defined"
+#ifndef __riscv_zvkb
+#error "Feature macro for `Zvkb' not defined"
#endif
#ifndef __riscv_zvkt
diff --git a/gcc/testsuite/gcc.target/riscv/zvksg.c b/gcc/testsuite/gcc.target/riscv/zvksg.c
index 4db9b5d..af2cd6e 100644
--- a/gcc/testsuite/gcc.target/riscv/zvksg.c
+++ b/gcc/testsuite/gcc.target/riscv/zvksg.c
@@ -18,8 +18,8 @@
#error "Feature macro for `Zvksh' not defined"
#endif
-#ifndef __riscv_zvbb
-#error "Feature macro for `Zvbb' not defined"
+#ifndef __riscv_zvkb
+#error "Feature macro for `Zvkb' not defined"
#endif
#ifndef __riscv_zvkt
diff --git a/gcc/testsuite/gcc.target/s390/pr112753.c b/gcc/testsuite/gcc.target/s390/pr112753.c
new file mode 100644
index 0000000..7183b3f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/s390/pr112753.c
@@ -0,0 +1,8 @@
+/* This caused an ICE on s390x due to a bug in s390_md_asm_adjust when no
+ vector extension is available. */
+
+/* { dg-do compile } */
+/* { dg-options "-O2 -march=zEC12" } */
+
+long double ____strtold_l_internal___x;
+void ____strtold_l_internal() { __asm__("" : : "fm"(____strtold_l_internal___x)); }
diff --git a/gcc/testsuite/gcc.target/x86_64/abi/avx512fp16/m256h/test_passing_m256.c b/gcc/testsuite/gcc.target/x86_64/abi/avx512fp16/m256h/test_passing_m256.c
index bfa80d6..6138eee 100644
--- a/gcc/testsuite/gcc.target/x86_64/abi/avx512fp16/m256h/test_passing_m256.c
+++ b/gcc/testsuite/gcc.target/x86_64/abi/avx512fp16/m256h/test_passing_m256.c
@@ -25,6 +25,7 @@ int failed = 0;
assert (memcmp (&X1, &X2, sizeof (T)) == 0); \
} while (0)
+void
fun_check_passing_m256_8_values (__m256 i0 ATTRIBUTE_UNUSED,
__m256 i1 ATTRIBUTE_UNUSED,
__m256 i2 ATTRIBUTE_UNUSED,
@@ -45,6 +46,7 @@ fun_check_passing_m256_8_values (__m256 i0 ATTRIBUTE_UNUSED,
compare (values.i7, i7, __m256);
}
+void
fun_check_passing_m256h_8_values (__m256h i0 ATTRIBUTE_UNUSED,
__m256h i1 ATTRIBUTE_UNUSED,
__m256h i2 ATTRIBUTE_UNUSED,
diff --git a/gcc/testsuite/gcc.target/x86_64/abi/avx512fp16/m512h/test_passing_m512.c b/gcc/testsuite/gcc.target/x86_64/abi/avx512fp16/m512h/test_passing_m512.c
index ad5ba2e..7c5f03b 100644
--- a/gcc/testsuite/gcc.target/x86_64/abi/avx512fp16/m512h/test_passing_m512.c
+++ b/gcc/testsuite/gcc.target/x86_64/abi/avx512fp16/m512h/test_passing_m512.c
@@ -25,6 +25,7 @@ int failed = 0;
assert (memcmp (&X1, &X2, sizeof (T)) == 0); \
} while (0)
+void
fun_check_passing_m512_8_values (__m512 i0 ATTRIBUTE_UNUSED,
__m512 i1 ATTRIBUTE_UNUSED,
__m512 i2 ATTRIBUTE_UNUSED,
@@ -45,6 +46,7 @@ fun_check_passing_m512_8_values (__m512 i0 ATTRIBUTE_UNUSED,
compare (values.i7, i7, __m512);
}
+void
fun_check_passing_m512h_8_values (__m512h i0 ATTRIBUTE_UNUSED,
__m512h i1 ATTRIBUTE_UNUSED,
__m512h i2 ATTRIBUTE_UNUSED,
diff --git a/gcc/testsuite/gfortran.dg/asan/pr110415-2.f90 b/gcc/testsuite/gfortran.dg/asan/pr110415-2.f90
new file mode 100755
index 0000000..f4ff182
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/asan/pr110415-2.f90
@@ -0,0 +1,45 @@
+! { dg-do run }
+!
+! Contributed by Brad Richardson <everythingfunctional@protonmail.com>
+!
+implicit none
+ type, abstract :: p
+ integer :: a = 4
+ end type p
+
+ type, extends(p) :: c
+ integer :: b = 7
+ character(len=:), allocatable :: str, str2(:)
+ end type c
+
+ type, extends(p) :: d
+ integer :: ef = 7
+ end type d
+
+ class(p), allocatable :: a
+
+ a = func()
+
+ a = func2()
+
+ a = func()
+
+ deallocate(a)
+
+contains
+ function func2() result(a)
+ class(p), allocatable :: a
+ a = d()
+ end function func2
+
+ function func() result(a)
+ class(p), allocatable :: a
+
+ a = c()
+ select type(a)
+ type is (c)
+ a%str = 'abcd'
+ a%str2 = ['abcd','efgh']
+ end select
+ end function func
+end program
diff --git a/gcc/testsuite/gfortran.dg/asan/pr110415-3.f90 b/gcc/testsuite/gfortran.dg/asan/pr110415-3.f90
new file mode 100755
index 0000000..65c018d
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/asan/pr110415-3.f90
@@ -0,0 +1,49 @@
+! { dg-do run }
+!
+! Contributed by Brad Richardson <everythingfunctional@protonmail.com>
+!
+implicit none
+ type, abstract :: p
+ integer :: a = 4
+ end type p
+
+ type, extends(p) :: c
+ integer :: b = 7
+ character(len=:), allocatable :: str, str2(:)
+ end type c
+
+ type, extends(p) :: d
+ integer :: ef = 7
+ end type d
+
+ class(p), allocatable :: a(:)
+
+ a = func()
+
+ a = func2()
+
+ a = func()
+
+ deallocate(a)
+
+contains
+ function func2() result(a)
+ class(p), allocatable :: a(:)
+ a = [d(),d()]
+ end function func2
+
+ function func() result(a)
+ class(p), allocatable :: a(:)
+
+ a = [c(),c(),c()]
+ select type(a)
+ type is (c)
+ a(1)%str = 'abcd'
+ a(2)%str = 'abc'
+ a(3)%str = 'abcd4'
+ a(1)%str2 = ['abcd','efgh']
+ a(2)%str2 = ['bcd','fgh']
+ a(3)%str2 = ['abcd6','efgh7']
+ end select
+ end function func
+end program
diff --git a/gcc/testsuite/gfortran.dg/associate_62.f90 b/gcc/testsuite/gfortran.dg/associate_62.f90
new file mode 100644
index 0000000..ce5bf28
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/associate_62.f90
@@ -0,0 +1,25 @@
+! { dg-do compile }
+! PR fortran/112764
+! Contributed by martin <mscfd@gmx.net>
+
+program assoc_target
+ implicit none
+ integer, dimension(:,:), pointer :: x
+ integer, pointer :: j
+ integer, allocatable, target :: z(:)
+ allocate (x(1:100,1:2), source=1)
+ associate (i1 => x(:,1))
+ j => i1(1)
+ print *, j
+ if (j /= 1) stop 1
+ end associate
+ deallocate (x)
+ allocate (z(3))
+ z(:) = [1,2,3]
+ associate (i2 => z(2:3))
+ j => i2(1)
+ print *, j
+ if (j /= 2) stop 2
+ end associate
+ deallocate (z)
+end program assoc_target
diff --git a/gcc/testsuite/gfortran.dg/coarray_poly_6.f90 b/gcc/testsuite/gfortran.dg/coarray_poly_6.f90
index 53b80e4..344e12b 100644
--- a/gcc/testsuite/gfortran.dg/coarray_poly_6.f90
+++ b/gcc/testsuite/gfortran.dg/coarray_poly_6.f90
@@ -16,6 +16,6 @@ contains
end subroutine foo
end
! { dg-final { scan-tree-dump-times "foo \\(struct __class_MAIN___T_0_1t & restrict x, void \\* restrict caf_token.., integer\\(kind=\[48\]\\) caf_offset..\\)" 1 "original" } }
-! { dg-final { scan-tree-dump-times "bar \\(struct __class_MAIN___T_0_1t \\* x, void \\* restrict caf_token.., integer\\(kind=\[48\]\\) caf_offset..\\)" 1 "original" } }
+! { dg-final { scan-tree-dump-times "bar \\(struct __class_MAIN___T_0_1t \\* restrict x, void \\* restrict caf_token.., integer\\(kind=\[48\]\\) caf_offset..\\)" 1 "original" } }
! { dg-final { scan-tree-dump-times "bar \\(0B, 0B, 0\\);" 1 "original" } }
! { dg-final { scan-tree-dump-times "foo \\(&class.., y._data.token, \\(integer\\(kind=\[48\]\\)\\) class..._data.data - \\(integer\\(kind=\[48\]\\)\\) y._data.data\\);" 1 "original" } }
diff --git a/gcc/testsuite/gfortran.dg/coarray_poly_7.f90 b/gcc/testsuite/gfortran.dg/coarray_poly_7.f90
index 44f98e1..d8d83ae 100644
--- a/gcc/testsuite/gfortran.dg/coarray_poly_7.f90
+++ b/gcc/testsuite/gfortran.dg/coarray_poly_7.f90
@@ -16,6 +16,6 @@ contains
end subroutine foo
end
! { dg-final { scan-tree-dump-times "foo \\(struct __class_MAIN___T_1_1t & restrict x, void \\* restrict caf_token.., integer\\(kind=\[48\]\\) caf_offset..\\)" 1 "original" } }
-! { dg-final { scan-tree-dump-times "bar \\(struct __class_MAIN___T_1_1t \\* x, void \\* restrict caf_token.., integer\\(kind=\[48\]\\) caf_offset..\\)" 1 "original" } }
+! { dg-final { scan-tree-dump-times "bar \\(struct __class_MAIN___T_1_1t \\* restrict x, void \\* restrict caf_token.., integer\\(kind=\[48\]\\) caf_offset..\\)" 1 "original" } }
! { dg-final { scan-tree-dump-times "bar \\(0B, 0B, 0\\);" 1 "original" } }
! { dg-final { scan-tree-dump-times "foo \\(&class.., y._data.token, \\(integer\\(kind=\[48\]\\)\\) class..._data.data - \\(integer\\(kind=\[48\]\\)\\) y._data.data\\);" 1 "original" } }
diff --git a/gcc/testsuite/gfortran.dg/coarray_poly_8.f90 b/gcc/testsuite/gfortran.dg/coarray_poly_8.f90
index cac305f..abdfc0c 100644
--- a/gcc/testsuite/gfortran.dg/coarray_poly_8.f90
+++ b/gcc/testsuite/gfortran.dg/coarray_poly_8.f90
@@ -16,6 +16,6 @@ contains
end subroutine foo
end
! { dg-final { scan-tree-dump-times "foo \\(struct __class_MAIN___T_1_1t & restrict x, void \\* restrict caf_token.., integer\\(kind=\[48\]\\) caf_offset..\\)" 1 "original" } }
-! { dg-final { scan-tree-dump-times "bar \\(struct __class_MAIN___T_1_1t \\* x, void \\* restrict caf_token.., integer\\(kind=\[48\]\\) caf_offset..\\)" 1 "original" } }
+! { dg-final { scan-tree-dump-times "bar \\(struct __class_MAIN___T_1_1t \\* restrict x, void \\* restrict caf_token.., integer\\(kind=\[48\]\\) caf_offset..\\)" 1 "original" } }
! { dg-final { scan-tree-dump-times "bar \\(0B, 0B, 0\\);" 1 "original" } }
! { dg-final { scan-tree-dump-times "foo \\(&class.., y._data.token, \\(integer\\(kind=\[48\]\\)\\) class..._data.data - \\(integer\\(kind=\[48\]\\)\\) y._data.data\\);" 1 "original" } }
diff --git a/gcc/testsuite/gfortran.dg/missing_optional_dummy_6a.f90 b/gcc/testsuite/gfortran.dg/missing_optional_dummy_6a.f90
index c08c97a..c6a7905 100644
--- a/gcc/testsuite/gfortran.dg/missing_optional_dummy_6a.f90
+++ b/gcc/testsuite/gfortran.dg/missing_optional_dummy_6a.f90
@@ -47,7 +47,7 @@ contains
end program test
-! { dg-final { scan-tree-dump-times "scalar2 \\(slr1" 1 "original" } }
+! { dg-final { scan-tree-dump-times "scalar2 \\(.* slr1" 1 "original" } }
! { dg-final { scan-tree-dump-times "= es1 != 0B" 1 "original" } }
! { dg-final { scan-tree-dump-times "assumed_shape2 \\(es1" 0 "original" } }
diff --git a/gcc/testsuite/gfortran.dg/missing_optional_dummy_7.f90 b/gcc/testsuite/gfortran.dg/missing_optional_dummy_7.f90
new file mode 100644
index 0000000..ad9ecd8
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/missing_optional_dummy_7.f90
@@ -0,0 +1,64 @@
+! { dg-do run }
+! PR fortran/112772 - test absent OPTIONAL, ALLOCATABLE/POINTER class dummies
+
+program main
+ implicit none
+ type t
+ end type t
+ call test_c_a ()
+ call test_u_a ()
+ call test_c_p ()
+ call test_u_p ()
+contains
+ ! class, allocatable
+ subroutine test_c_a (msg1)
+ class(t), optional, allocatable :: msg1(:)
+ if (present (msg1)) stop 1
+ call assert_c_a ()
+ call assert_c_a (msg1)
+ end
+
+ subroutine assert_c_a (msg2)
+ class(t), optional, allocatable :: msg2(:)
+ if (present (msg2)) stop 2
+ end
+
+ ! unlimited polymorphic, allocatable
+ subroutine test_u_a (msg1)
+ class(*), optional, allocatable :: msg1(:)
+ if (present (msg1)) stop 3
+ call assert_u_a ()
+ call assert_u_a (msg1)
+ end
+
+ subroutine assert_u_a (msg2)
+ class(*), optional, allocatable :: msg2(:)
+ if (present (msg2)) stop 4
+ end
+
+ ! class, pointer
+ subroutine test_c_p (msg1)
+ class(t), optional, pointer :: msg1(:)
+ if (present (msg1)) stop 5
+ call assert_c_p ()
+ call assert_c_p (msg1)
+ end
+
+ subroutine assert_c_p (msg2)
+ class(t), optional, pointer :: msg2(:)
+ if (present (msg2)) stop 6
+ end
+
+ ! unlimited polymorphic, pointer
+ subroutine test_u_p (msg1)
+ class(*), optional, pointer :: msg1(:)
+ if (present (msg1)) stop 7
+ call assert_u_p ()
+ call assert_u_p (msg1)
+ end
+
+ subroutine assert_u_p (msg2)
+ class(*), optional, pointer :: msg2(:)
+ if (present (msg2)) stop 8
+ end
+end
diff --git a/gcc/testsuite/gfortran.dg/optional_deferred_char_1.f90 b/gcc/testsuite/gfortran.dg/optional_deferred_char_1.f90
new file mode 100644
index 0000000..d399dd1
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/optional_deferred_char_1.f90
@@ -0,0 +1,100 @@
+! { dg-do run }
+! PR fortran/93762
+! PR fortran/100651 - deferred-length character as optional dummy argument
+
+program main
+ implicit none
+ character(:), allocatable :: err_msg, msg3(:)
+ character(:), pointer :: err_msg2 => NULL()
+
+ ! Subroutines with optional arguments
+ call to_int ()
+ call to_int_p ()
+ call test_rank1 ()
+ call assert_code ()
+ call assert_p ()
+ call assert_rank1 ()
+
+ ! Test passing of optional arguments
+ call to_int (err_msg)
+ if (.not. allocated (err_msg)) stop 1
+ if (len (err_msg) /= 7) stop 2
+ if (err_msg(1:7) /= "foo bar") stop 3
+
+ call to_int2 (err_msg)
+ if (.not. allocated (err_msg)) stop 4
+ if (len (err_msg) /= 7) stop 5
+ if (err_msg(1:7) /= "foo bar") stop 6
+ deallocate (err_msg)
+
+ call to_int_p (err_msg2)
+ if (.not. associated (err_msg2)) stop 11
+ if (len (err_msg2) /= 8) stop 12
+ if (err_msg2(1:8) /= "poo bla ") stop 13
+ deallocate (err_msg2)
+
+ call to_int2_p (err_msg2)
+ if (.not. associated (err_msg2)) stop 14
+ if (len (err_msg2) /= 8) stop 15
+ if (err_msg2(1:8) /= "poo bla ") stop 16
+ deallocate (err_msg2)
+
+ call test_rank1 (msg3)
+ if (.not. allocated (msg3)) stop 21
+ if (len (msg3) /= 2) stop 22
+ if (size (msg3) /= 42) stop 23
+ if (any (msg3 /= "ok")) stop 24
+ deallocate (msg3)
+
+contains
+
+ ! Deferred-length character, allocatable:
+ subroutine assert_code (err_msg0)
+ character(:), optional, allocatable :: err_msg0
+ if (present (err_msg0)) err_msg0 = 'foo bar'
+ end
+ ! Test: optional argument
+ subroutine to_int (err_msg1)
+ character(:), optional, allocatable :: err_msg1
+ call assert_code (err_msg1)
+ end
+ ! Control: non-optional argument
+ subroutine to_int2 (err_msg2)
+ character(:), allocatable :: err_msg2
+ call assert_code (err_msg2)
+ end
+
+ ! Rank-1:
+ subroutine assert_rank1 (msg)
+ character(:), optional, allocatable, intent(out) :: msg(:)
+ if (present (msg)) then
+ allocate (character(2) :: msg(42))
+ msg(:) = "ok"
+ end if
+ end
+
+ subroutine test_rank1 (msg1)
+ character(:), optional, allocatable, intent(out) :: msg1(:)
+ call assert_rank1 (msg1)
+ end
+
+ ! Deferred-length character, pointer:
+ subroutine assert_p (err_msg0)
+ character(:), optional, pointer :: err_msg0
+ if (present (err_msg0)) then
+ if (associated (err_msg0)) deallocate (err_msg0)
+ allocate (character(8) :: err_msg0)
+ err_msg0 = 'poo bla'
+ end if
+ end
+
+ subroutine to_int_p (err_msg1)
+ character(:), optional, pointer :: err_msg1
+ call assert_p (err_msg1)
+ end
+
+ subroutine to_int2_p (err_msg2)
+ character(:), pointer :: err_msg2
+ call assert_p (err_msg2)
+ end
+end
diff --git a/gcc/testsuite/gfortran.dg/pr100988.f90 b/gcc/testsuite/gfortran.dg/pr100988.f90
new file mode 100644
index 0000000..b7e1ae4
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pr100988.f90
@@ -0,0 +1,61 @@
+! { dg-do compile }
+! { dg-options "-fdump-tree-original" }
+! PR fortran/100988 - RESTRICT was missing for optional arguments
+
+ ! There should be restrict qualifiers for a AND b: (4 cases)
+ subroutine plain (a, b)
+ integer :: a, b
+ optional :: b
+ end subroutine
+
+ subroutine alloc (a, b)
+ integer :: a, b
+ allocatable :: a, b
+ optional :: b
+ end subroutine
+
+ subroutine upoly (a, b)
+ class(*) :: a, b
+ optional :: b
+ end subroutine
+
+ subroutine upoly_a (a, b)
+ class(*) :: a, b
+ allocatable :: a, b
+ optional :: b
+ end subroutine
+
+! { dg-final { scan-tree-dump "plain .* restrict a, .* restrict b\\)" "original" } }
+! { dg-final { scan-tree-dump "alloc .* restrict a, .* restrict b\\)" "original" } }
+! { dg-final { scan-tree-dump "upoly .* restrict a, .* restrict b\\)" "original" } }
+! { dg-final { scan-tree-dump "upoly_a .* restrict a, .* restrict b\\)" "original" } }
+
+ ! There should be no restrict qualifiers for the below 4 cases:
+ subroutine ptr (a, b)
+ integer :: a, b
+ pointer :: a, b
+ optional :: b
+ end subroutine
+
+ subroutine tgt (a, b)
+ integer :: a, b
+ target :: a, b
+ optional :: b
+ end subroutine
+
+ subroutine upoly_p (a, b)
+ class(*) :: a, b
+ pointer :: a, b
+ optional :: b
+ end subroutine
+
+ subroutine upoly_t (a, b)
+ class(*) :: a, b
+ target :: a, b
+ optional :: b
+ end subroutine
+
+! { dg-final { scan-tree-dump-not "ptr .* restrict " "original" } }
+! { dg-final { scan-tree-dump-not "tgt .* restrict " "original" } }
+! { dg-final { scan-tree-dump-not "upoly_p .* restrict " "original" } }
+! { dg-final { scan-tree-dump-not "upoly_t .* restrict " "original" } }
diff --git a/gcc/testsuite/gfortran.dg/pr110415.f90 b/gcc/testsuite/gfortran.dg/pr110415.f90
new file mode 100644
index 0000000..f647cc4
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pr110415.f90
@@ -0,0 +1,20 @@
+! { dg-do run }
+!
+! Contributed by Brad Richardson <everythingfunctional@protonmail.com>
+!
+ type, abstract :: p
+ end type p
+
+ type, extends(p) :: c
+ end type c
+
+ class(p), allocatable :: a
+
+ a = func()
+contains
+ function func() result(a)
+ class(p), allocatable :: a
+
+ a = c()
+ end function func
+end program
diff --git a/gcc/testsuite/gm2/link/externalscaffold/pass/scaffold.c b/gcc/testsuite/gm2/link/externalscaffold/pass/scaffold.c
index 2bd3587..2df0368 100644
--- a/gcc/testsuite/gm2/link/externalscaffold/pass/scaffold.c
+++ b/gcc/testsuite/gm2/link/externalscaffold/pass/scaffold.c
@@ -6,6 +6,7 @@ extern void m2pim_M2_M2RTS_init (int argc, char *argv[]);
extern void m2pim_M2_M2RTS_fini (void);
extern void m2pim_M2_RTExceptions_init (int argc, char *argv[]);
extern void m2pim_M2_RTExceptions_fini (void);
+extern void m2pim_M2RTS_Terminate (void);
extern void _M2_hello_init (int argc, char *argv[]);
extern void _M2_hello_fini (void);
diff --git a/gcc/testsuite/gnat.dg/strub_access.adb b/gcc/testsuite/gnat.dg/strub_access.adb
new file mode 100644
index 0000000..29e6996
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/strub_access.adb
@@ -0,0 +1,21 @@
+-- { dg-do compile }
+-- { dg-options "-fstrub=relaxed -fdump-ipa-strubm" }
+
+-- The main subprogram doesn't read from the automatic variable, but
+-- being an automatic variable, its presence should be enough for the
+-- procedure to get strub enabled.
+
+procedure Strub_Access is
+ type Strub_Int is new Integer;
+ pragma Machine_Attribute (Strub_Int, "strub");
+
+ X : aliased Strub_Int := 0;
+
+ function F (P : access Strub_Int) return Strub_Int is (P.all);
+
+begin
+ X := F (X'Access);
+end Strub_Access;
+
+-- { dg-final { scan-ipa-dump-times "\[(\]strub \[(\]internal\[)\]\[)\]" 1 "strubm" } }
+-- { dg-final { scan-ipa-dump-times "\[(\]strub \[(\]at-calls-opt\[)\]\[)\]" 1 "strubm" } }
diff --git a/gcc/testsuite/gnat.dg/strub_access1.adb b/gcc/testsuite/gnat.dg/strub_access1.adb
new file mode 100644
index 0000000..dae4706
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/strub_access1.adb
@@ -0,0 +1,16 @@
+-- { dg-do compile }
+-- { dg-options "-fstrub=relaxed" }
+
+-- Check that we reject 'Access of a strub variable whose type does
+-- not carry a strub modifier.
+
+procedure Strub_Access1 is
+ X : aliased Integer := 0;
+ pragma Machine_Attribute (X, "strub");
+
+ function F (P : access Integer) return Integer is (P.all);
+
+begin
+ X := F (X'Unchecked_access); -- OK.
+ X := F (X'Access); -- { dg-error "target access type drops .strub. mode" }
+end Strub_Access1;
diff --git a/gcc/testsuite/gnat.dg/strub_attr.adb b/gcc/testsuite/gnat.dg/strub_attr.adb
new file mode 100644
index 0000000..10445d7
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/strub_attr.adb
@@ -0,0 +1,37 @@
+-- { dg-do compile }
+-- { dg-options "-fstrub=strict -fdump-ipa-strubm -fdump-ipa-strub" }
+
+package body Strub_Attr is
+ E : exception;
+
+ procedure P (X : Integer) is
+ begin
+ raise E;
+ end;
+
+ function F (X : Integer) return Integer is
+ begin
+ return X * X;
+ end;
+
+ function G return Integer is (F (X));
+ -- function G return Integer is (FP (X));
+ -- Calling G would likely raise an exception, because although FP
+ -- carries the strub at-calls attribute needed to call F, the
+ -- attribute is dropped from the type used for the call proper.
+end Strub_Attr;
+
+-- { dg-final { scan-ipa-dump-times "\[(\]strub \[(\]internal\[)\]\[)\]" 2 "strubm" } }
+-- { dg-final { scan-ipa-dump-times "\[(\]strub \[(\]at-calls\[)\]\[)\]" 0 "strubm" } }
+-- { dg-final { scan-ipa-dump-times "\[(\]strub\[)\]" 1 "strubm" } }
+
+-- { dg-final { scan-ipa-dump-times "strub.watermark_ptr" 6 "strub" } }
+-- We have 1 at-calls subprogram (F) and 2 wrapped (P and G).
+-- For each of them, there's one match for the wrapped signature,
+-- and one for the update call.
+
+-- { dg-final { scan-ipa-dump-times "strub.watermark" 27 "strub" } }
+-- The 6 matches above, plus:
+-- 5*2: wm var decl, enter, call, leave and clobber for each wrapper;
+-- 2*1: an extra leave and clobber for the exception paths in the wrappers.
+-- 7*1: for the F call in G, including EH path.
diff --git a/gcc/testsuite/gnat.dg/strub_attr.ads b/gcc/testsuite/gnat.dg/strub_attr.ads
new file mode 100644
index 0000000..a94c23b
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/strub_attr.ads
@@ -0,0 +1,12 @@
+package Strub_Attr is
+ procedure P (X : Integer);
+ pragma Machine_Attribute (P, "strub", "internal");
+
+ function F (X : Integer) return Integer;
+ pragma Machine_Attribute (F, "strub");
+
+ X : Integer := 0;
+ pragma Machine_Attribute (X, "strub");
+
+ function G return Integer;
+end Strub_Attr;
diff --git a/gcc/testsuite/gnat.dg/strub_disp.adb b/gcc/testsuite/gnat.dg/strub_disp.adb
new file mode 100644
index 0000000..3dbcc4a
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/strub_disp.adb
@@ -0,0 +1,64 @@
+-- { dg-do compile }
+
+procedure Strub_Disp is
+ package Foo is
+ type A is tagged null record;
+
+ procedure P (I : Integer; X : A);
+ pragma Machine_Attribute (P, "strub", "at-calls");
+
+ function F (X : access A) return Integer;
+
+ type B is new A with null record;
+
+ overriding
+ procedure P (I : Integer; X : B); -- { dg-error "requires the same .strub. mode" }
+
+ overriding
+ function F (X : access B) return Integer;
+ pragma Machine_Attribute (F, "strub", "at-calls"); -- { dg-error "requires the same .strub. mode" }
+
+ end Foo;
+
+ package body Foo is
+ procedure P (I : Integer; X : A) is
+ begin
+ null;
+ end;
+
+ function F (X : access A) return Integer is (0);
+
+ overriding
+ procedure P (I : Integer; X : B) is
+ begin
+ P (I, A (X));
+ end;
+
+ overriding
+ function F (X : access B) return Integer is (1);
+ end Foo;
+
+ use Foo;
+
+ procedure Q (X : A'Class) is
+ begin
+ P (-1, X);
+ end;
+
+ XA : aliased A;
+ XB : aliased B;
+ I : Integer := 0;
+ XC : access A'Class;
+begin
+ Q (XA);
+ Q (XB);
+
+ I := I + F (XA'Access);
+ I := I + F (XB'Access);
+
+ XC := XA'Access;
+ I := I + F (XC);
+
+ XC := XB'Access;
+ I := I + F (XC);
+end Strub_Disp;
diff --git a/gcc/testsuite/gnat.dg/strub_disp1.adb b/gcc/testsuite/gnat.dg/strub_disp1.adb
new file mode 100644
index 0000000..09756a7
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/strub_disp1.adb
@@ -0,0 +1,79 @@
+-- { dg-do compile }
+-- { dg-options "-fdump-ipa-strub" }
+
+-- Check that at-calls dispatching calls are transformed.
+
+procedure Strub_Disp1 is
+ package Foo is
+ type A is tagged null record;
+
+ procedure P (I : Integer; X : A);
+ pragma Machine_Attribute (P, "strub", "at-calls");
+
+ function F (X : access A) return Integer;
+ pragma Machine_Attribute (F, "strub", "at-calls");
+
+ type B is new A with null record;
+
+ overriding
+ procedure P (I : Integer; X : B);
+ pragma Machine_Attribute (P, "strub", "at-calls");
+
+ overriding
+ function F (X : access B) return Integer;
+ pragma Machine_Attribute (F, "strub", "at-calls");
+
+ end Foo;
+
+ package body Foo is
+ procedure P (I : Integer; X : A) is
+ begin
+ null;
+ end;
+
+ function F (X : access A) return Integer is (0);
+
+ overriding
+ procedure P (I : Integer; X : B) is
+ begin
+ P (I, A (X)); -- strub-at-calls non-dispatching call
+ end;
+
+ overriding
+ function F (X : access B) return Integer is (1);
+ end Foo;
+
+ use Foo;
+
+ procedure Q (X : A'Class) is
+ begin
+ P (-1, X); -- strub-at-calls dispatching call.
+ end;
+
+ XA : aliased A;
+ XB : aliased B;
+ I : Integer := 0;
+ XC : access A'Class;
+begin
+ Q (XA);
+ Q (XB);
+
+ I := I + F (XA'Access); -- strub-at-calls non-dispatching call
+ I := I + F (XB'Access); -- strub-at-calls non-dispatching call
+
+ XC := XA'Access;
+ I := I + F (XC); -- strub-at-calls dispatching call.
+
+ XC := XB'Access;
+ I := I + F (XC); -- strub-at-calls dispatching call.
+end Strub_Disp1;
+
+-- { dg-final { scan-ipa-dump-times "\[(\]strub \[(\]at-calls\[)\]\[)\]" 4 "strub" } }
+
+-- Count the strub-at-calls non-dispatching calls
+-- (+ 2 each, for the matching prototypes)
+-- { dg-final { scan-ipa-dump-times "foo\.p \[(\]\[^\n\]*watermark" 3 "strub" } }
+-- { dg-final { scan-ipa-dump-times "foo\.f \[(\]\[^\n\]*watermark" 4 "strub" } }
+
+-- Count the strub-at-calls dispatching calls.
+-- { dg-final { scan-ipa-dump-times "_\[0-9\]* \[(\]\[^\n\]*watermark" 3 "strub" } }
diff --git a/gcc/testsuite/gnat.dg/strub_ind.adb b/gcc/testsuite/gnat.dg/strub_ind.adb
new file mode 100644
index 0000000..da56aca
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/strub_ind.adb
@@ -0,0 +1,33 @@
+-- { dg-do compile }
+-- { dg-options "-fstrub=strict" }
+
+-- This is essentially the same test as strub_attr.adb,
+-- but applying attributes to access types as well.
+-- That doesn't quite work yet, so we get an error we shouldn't get.
+
+package body Strub_Ind is
+ E : exception;
+
+ function G return Integer;
+
+ procedure P (X : Integer) is
+ begin
+ raise E;
+ end;
+
+ function F (X : Integer) return Integer is
+ begin
+ return X * X;
+ end;
+
+ function G return Integer is (FP (X));
+
+ type GT is access function return Integer;
+
+ type GT_SAC is access function return Integer;
+ pragma Machine_Attribute (GT_SAC, "strub", "at-calls");
+
+ GP : GT_SAC := GT_SAC (GT'(G'Access)); -- { dg-error "incompatible" }
+ -- pragma Machine_Attribute (GP, "strub", "at-calls");
+
+end Strub_Ind;
diff --git a/gcc/testsuite/gnat.dg/strub_ind.ads b/gcc/testsuite/gnat.dg/strub_ind.ads
new file mode 100644
index 0000000..99a65fc
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/strub_ind.ads
@@ -0,0 +1,17 @@
+package Strub_Ind is
+ procedure P (X : Integer);
+ pragma Machine_Attribute (P, "strub", "internal");
+
+ function F (X : Integer) return Integer;
+ pragma Machine_Attribute (F, "strub");
+
+ X : Integer := 0;
+ pragma Machine_Attribute (X, "strub");
+
+ type FT is access function (X : Integer) return Integer;
+ pragma Machine_Attribute (FT, "strub", "at-calls");
+
+ FP : FT := F'Access;
+ -- pragma Machine_Attribute (FP, "strub", "at-calls"); -- not needed
+
+end Strub_Ind;
diff --git a/gcc/testsuite/gnat.dg/strub_ind1.adb b/gcc/testsuite/gnat.dg/strub_ind1.adb
new file mode 100644
index 0000000..825e395
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/strub_ind1.adb
@@ -0,0 +1,41 @@
+-- { dg-do compile }
+-- { dg-options "-fstrub=strict -fdump-ipa-strubm" }
+
+-- This is essentially the same test as strub_attr.adb,
+-- but with an explicit conversion.
+
+package body Strub_Ind1 is
+ E : exception;
+
+ type Strub_Int is New Integer;
+ pragma Machine_Attribute (Strub_Int, "strub");
+
+ function G return Integer;
+ pragma Machine_Attribute (G, "strub", "disabled");
+
+ procedure P (X : Integer) is
+ begin
+ raise E;
+ end;
+
+ function G return Integer is (FP (X));
+
+ type GT is access function return Integer;
+ pragma Machine_Attribute (GT, "strub", "disabled");
+
+ type GT_SC is access function return Integer;
+ pragma Machine_Attribute (GT_SC, "strub", "callable");
+
+ GP : GT_SC := GT_SC (GT'(G'Access));
+ -- pragma Machine_Attribute (GP, "strub", "callable"); -- not needed.
+
+ function F (X : Integer) return Integer is
+ begin
+ return X * GP.all;
+ end;
+
+end Strub_Ind1;
+
+-- { dg-final { scan-ipa-dump-times "\[(\]strub \[(\]disabled\[)\]\[)\]" 1 "strubm" } }
+-- { dg-final { scan-ipa-dump-times "\[(\]strub \[(\]internal\[)\]\[)\]" 1 "strubm" } }
+-- { dg-final { scan-ipa-dump-times "\[(\]strub\[)\]" 1 "strubm" } }
diff --git a/gcc/testsuite/gnat.dg/strub_ind1.ads b/gcc/testsuite/gnat.dg/strub_ind1.ads
new file mode 100644
index 0000000..d3f1273
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/strub_ind1.ads
@@ -0,0 +1,17 @@
+package Strub_Ind1 is
+ procedure P (X : Integer);
+ pragma Machine_Attribute (P, "strub", "internal");
+
+ function F (X : Integer) return Integer;
+ pragma Machine_Attribute (F, "strub");
+
+ X : aliased Integer := 0;
+ pragma Machine_Attribute (X, "strub");
+
+ type FT is access function (X : Integer) return Integer;
+ pragma Machine_Attribute (FT, "strub", "at-calls");
+
+ FP : FT := F'Access;
+ pragma Machine_Attribute (FP, "strub", "at-calls");
+
+end Strub_Ind1;
diff --git a/gcc/testsuite/gnat.dg/strub_ind2.adb b/gcc/testsuite/gnat.dg/strub_ind2.adb
new file mode 100644
index 0000000..e918b39
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/strub_ind2.adb
@@ -0,0 +1,34 @@
+-- { dg-do compile }
+-- { dg-options "-fstrub=strict" }
+
+-- This is essentially the same test as strub_attr.adb,
+-- but with an explicit conversion.
+
+package body Strub_Ind2 is
+ E : exception;
+
+ function G return Integer;
+ pragma Machine_Attribute (G, "strub", "callable");
+
+ procedure P (X : Integer) is
+ begin
+ raise E;
+ end;
+
+ function G return Integer is (FP (X));
+
+ type GT is access function return Integer;
+ pragma Machine_Attribute (GT, "strub", "callable");
+
+ type GT_SD is access function return Integer;
+ pragma Machine_Attribute (GT_SD, "strub", "disabled");
+
+ GP : GT_SD := GT_SD (GT'(G'Access));
+ -- pragma Machine_Attribute (GP, "strub", "disabled"); -- not needed.
+
+ function F (X : Integer) return Integer is
+ begin
+ return X * GP.all; -- { dg-error "using non-.strub. type" }
+ end;
+
+end Strub_Ind2;
diff --git a/gcc/testsuite/gnat.dg/strub_ind2.ads b/gcc/testsuite/gnat.dg/strub_ind2.ads
new file mode 100644
index 0000000..e13865e
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/strub_ind2.ads
@@ -0,0 +1,17 @@
+package Strub_Ind2 is
+ procedure P (X : Integer);
+ pragma Machine_Attribute (P, "strub", "internal");
+
+ function F (X : Integer) return Integer;
+ pragma Machine_Attribute (F, "strub");
+
+ X : Integer := 0;
+ pragma Machine_Attribute (X, "strub");
+
+ type FT is access function (X : Integer) return Integer;
+ pragma Machine_Attribute (FT, "strub", "at-calls");
+
+ FP : FT := F'Access;
+ pragma Machine_Attribute (FP, "strub", "at-calls");
+
+end Strub_Ind2;
diff --git a/gcc/testsuite/gnat.dg/strub_intf.adb b/gcc/testsuite/gnat.dg/strub_intf.adb
new file mode 100644
index 0000000..8f0212a
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/strub_intf.adb
@@ -0,0 +1,93 @@
+-- { dg-do compile }
+
+-- Check that strub mode mismatches between overrider and overridden
+-- subprograms are reported.
+
+procedure Strub_Intf is
+ package Foo is
+ type TP is interface;
+ procedure P (I : Integer; X : TP) is abstract;
+ pragma Machine_Attribute (P, "strub", "at-calls"); -- { dg-error "requires the same .strub. mode" }
+
+ type TF is interface;
+ function F (X : access TF) return Integer is abstract;
+
+ type TX is interface;
+ procedure P (I : Integer; X : TX) is abstract;
+
+ type TI is interface and TP and TF and TX;
+ -- When we freeze TI, we detect the mismatch between the
+ -- inherited P and another parent's P. Because TP appears
+ -- before TX, we inherit P from TP, and report the mismatch at
+ -- the pragma inherited from TP against TX's P. In contrast,
+ -- when we freeze TII below, since TX appears before TP, we
+ -- report the error at the line in which the inherited
+ -- subprogram is synthesized, namely the line below, against
+ -- the line of the pragma.
+
+ type TII is interface and TX and TP and TF; -- { dg-error "requires the same .strub. mode" }
+
+ function F (X : access TI) return Integer is abstract;
+ pragma Machine_Attribute (F, "strub", "at-calls"); -- { dg-error "requires the same .strub. mode" }
+
+ type A is new TI with null record;
+
+ procedure P (I : Integer; X : A);
+ pragma Machine_Attribute (P, "strub", "at-calls"); -- { dg-error "requires the same .strub. mode" }
+
+ function F (X : access A) return Integer; -- { dg-error "requires the same .strub. mode" }
+
+ type B is new TI with null record;
+
+ overriding
+ procedure P (I : Integer; X : B); -- { dg-error "requires the same .strub. mode" }
+
+ overriding
+ function F (X : access B) return Integer;
+ pragma Machine_Attribute (F, "strub", "at-calls"); -- { dg-error "requires the same .strub. mode" }
+
+ end Foo;
+
+ package body Foo is
+ procedure P (I : Integer; X : A) is
+ begin
+ null;
+ end;
+
+ function F (X : access A) return Integer is (0);
+
+ overriding
+ procedure P (I : Integer; X : B) is
+ begin
+ null;
+ end;
+
+ overriding
+ function F (X : access B) return Integer is (1);
+
+ end Foo;
+
+ use Foo;
+
+ procedure Q (X : TX'Class) is
+ begin
+ P (-1, X);
+ end;
+
+ XA : aliased A;
+ XB : aliased B;
+ I : Integer := 0;
+ XC : access TI'Class;
+begin
+ Q (XA);
+ Q (XB);
+
+ I := I + F (XA'Access);
+ I := I + F (XB'Access);
+
+ XC := XA'Access;
+ I := I + F (XC);
+
+ XC := XB'Access;
+ I := I + F (XC);
+end Strub_Intf;
diff --git a/gcc/testsuite/gnat.dg/strub_intf1.adb b/gcc/testsuite/gnat.dg/strub_intf1.adb
new file mode 100644
index 0000000..bf77321
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/strub_intf1.adb
@@ -0,0 +1,86 @@
+-- { dg-do compile }
+-- { dg-options "-fdump-ipa-strub" }
+
+-- Check that at-calls dispatching calls to interfaces are transformed.
+
+procedure Strub_Intf1 is
+ package Foo is
+ type TX is Interface;
+ procedure P (I : Integer; X : TX) is abstract;
+ pragma Machine_Attribute (P, "strub", "at-calls");
+ function F (X : access TX) return Integer is abstract;
+ pragma Machine_Attribute (F, "strub", "at-calls");
+
+ type A is new TX with null record;
+
+ procedure P (I : Integer; X : A);
+ pragma Machine_Attribute (P, "strub", "at-calls");
+
+ function F (X : access A) return Integer;
+ pragma Machine_Attribute (F, "strub", "at-calls");
+
+ type B is new TX with null record;
+
+ overriding
+ procedure P (I : Integer; X : B);
+ pragma Machine_Attribute (P, "strub", "at-calls");
+
+ overriding
+ function F (X : access B) return Integer;
+ pragma Machine_Attribute (F, "strub", "at-calls");
+
+ end Foo;
+
+ package body Foo is
+ procedure P (I : Integer; X : A) is
+ begin
+ null;
+ end;
+
+ function F (X : access A) return Integer is (0);
+
+ overriding
+ procedure P (I : Integer; X : B) is
+ begin
+ null;
+ end;
+
+ overriding
+ function F (X : access B) return Integer is (1);
+
+ end Foo;
+
+ use Foo;
+
+ procedure Q (X : TX'Class) is
+ begin
+ P (-1, X);
+ end;
+
+ XA : aliased A;
+ XB : aliased B;
+ I : Integer := 0;
+ XC : access TX'Class;
+begin
+ Q (XA);
+ Q (XB);
+
+ I := I + F (XA'Access);
+ I := I + F (XB'Access);
+
+ XC := XA'Access;
+ I := I + F (XC);
+
+ XC := XB'Access;
+ I := I + F (XC);
+end Strub_Intf1;
+
+-- { dg-final { scan-ipa-dump-times "\[(\]strub \[(\]at-calls\[)\]\[)\]" 4 "strub" } }
+
+-- Count the strub-at-calls non-dispatching calls
+-- (+ 2 each, for the matching prototypes)
+-- { dg-final { scan-ipa-dump-times "foo\.p \[(\]\[^\n\]*watermark" 2 "strub" } }
+-- { dg-final { scan-ipa-dump-times "foo\.f \[(\]\[^\n\]*watermark" 4 "strub" } }
+
+-- Count the strub-at-calls dispatching calls.
+-- { dg-final { scan-ipa-dump-times "_\[0-9\]* \[(\]\[^\n\]*watermark" 3 "strub" } }
diff --git a/gcc/testsuite/gnat.dg/strub_intf2.adb b/gcc/testsuite/gnat.dg/strub_intf2.adb
new file mode 100644
index 0000000..e8880db
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/strub_intf2.adb
@@ -0,0 +1,55 @@
+-- { dg-do compile }
+
+-- Check that strub mode mismatches between overrider and overridden
+-- subprograms are reported even when the overriders for an
+-- interface's subprograms are inherited from a type that is not a
+-- descendent of the interface.
+
+procedure Strub_Intf2 is
+ package Foo is
+ type A is tagged null record;
+
+ procedure P (I : Integer; X : A);
+ pragma Machine_Attribute (P, "strub", "at-calls"); -- { dg-error "requires the same .strub. mode" }
+
+ function F (X : access A) return Integer;
+
+ type TX is Interface;
+
+ procedure P (I : Integer; X : TX) is abstract;
+
+ function F (X : access TX) return Integer is abstract;
+ pragma Machine_Attribute (F, "strub", "at-calls");
+
+ type B is new A and TX with null record; -- { dg-error "requires the same .strub. mode" }
+
+ end Foo;
+
+ package body Foo is
+ procedure P (I : Integer; X : A) is
+ begin
+ null;
+ end;
+
+ function F (X : access A) return Integer is (0);
+
+ end Foo;
+
+ use Foo;
+
+ procedure Q (X : TX'Class) is
+ begin
+ P (-1, X);
+ end;
+
+ XB : aliased B;
+ I : Integer := 0;
+ XC : access TX'Class;
+begin
+ Q (XB);
+
+ I := I + F (XB'Access);
+
+ XC := XB'Access;
+ I := I + F (XC);
+end Strub_Intf2;
diff --git a/gcc/testsuite/gnat.dg/strub_renm.adb b/gcc/testsuite/gnat.dg/strub_renm.adb
new file mode 100644
index 0000000..217367e
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/strub_renm.adb
@@ -0,0 +1,21 @@
+-- { dg-do compile }
+
+procedure Strub_Renm is
+ procedure P (X : Integer);
+ pragma Machine_Attribute (P, "strub", "at-calls");
+
+ function F return Integer;
+ pragma Machine_Attribute (F, "strub", "internal");
+
+ procedure Q (X : Integer) renames P; -- { dg-error "requires the same .strub. mode" }
+
+ function G return Integer renames F;
+ pragma Machine_Attribute (G, "strub", "callable"); -- { dg-error "requires the same .strub. mode" }
+
+ procedure P (X : Integer) is null;
+ function F return Integer is (0);
+
+begin
+ P (F);
+ Q (G);
+end Strub_Renm;
diff --git a/gcc/testsuite/gnat.dg/strub_renm1.adb b/gcc/testsuite/gnat.dg/strub_renm1.adb
new file mode 100644
index 0000000..a11adbf
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/strub_renm1.adb
@@ -0,0 +1,32 @@
+-- { dg-do compile }
+-- { dg-options "-fstrub=relaxed -fdump-ipa-strub" }
+
+procedure Strub_Renm1 is
+ V : Integer := 0;
+ pragma Machine_Attribute (V, "strub");
+
+ procedure P (X : Integer);
+ pragma Machine_Attribute (P, "strub", "at-calls");
+
+ function F return Integer;
+
+ procedure Q (X : Integer) renames P;
+ pragma Machine_Attribute (Q, "strub", "at-calls");
+
+ function G return Integer renames F;
+ pragma Machine_Attribute (G, "strub", "internal");
+
+ procedure P (X : Integer) is null;
+ function F return Integer is (0);
+
+begin
+ P (F);
+ Q (G);
+end Strub_Renm1;
+
+-- This is for P; Q is an alias.
+-- { dg-final { scan-ipa-dump-times "\[(\]strub \[(\]at-calls\[)\]\[)\]" 1 "strub" } }
+
+-- This is *not* for G, but for Strub_Renm1.
+-- { dg-final { scan-ipa-dump-times "\[(\]strub \[(\]wrapped\[)\]\[)\]" 1 "strub" } }
+-- { dg-final { scan-ipa-dump-times "\[(\]strub \[(\]wrapper\[)\]\[)\]" 1 "strub" } }
diff --git a/gcc/testsuite/gnat.dg/strub_renm2.adb b/gcc/testsuite/gnat.dg/strub_renm2.adb
new file mode 100644
index 0000000..c488c20
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/strub_renm2.adb
@@ -0,0 +1,32 @@
+-- { dg-do compile }
+-- { dg-options "-fstrub=strict -fdump-ipa-strub" }
+
+procedure Strub_Renm2 is
+ V : Integer := 0;
+ pragma Machine_Attribute (V, "strub");
+
+ procedure P (X : Integer);
+ pragma Machine_Attribute (P, "strub", "at-calls");
+
+ function F return Integer;
+
+ procedure Q (X : Integer) renames P;
+ pragma Machine_Attribute (Q, "strub", "at-calls");
+
+ type T is access function return Integer;
+
+ type TC is access function return Integer;
+ pragma Machine_Attribute (TC, "strub", "callable");
+
+ FCptr : constant TC := TC (T'(F'Access));
+
+ function G return Integer renames FCptr.all;
+ pragma Machine_Attribute (G, "strub", "callable");
+
+ procedure P (X : Integer) is null;
+ function F return Integer is (0);
+
+begin
+ P (F); -- { dg-error "calling non-.strub." }
+ Q (G); -- ok, G is callable.
+end Strub_Renm2;
diff --git a/gcc/testsuite/gnat.dg/strub_var.adb b/gcc/testsuite/gnat.dg/strub_var.adb
new file mode 100644
index 0000000..3d158de
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/strub_var.adb
@@ -0,0 +1,16 @@
+-- { dg-do compile }
+-- { dg-options "-fstrub=strict -fdump-ipa-strubm" }
+
+-- We don't read from the automatic variable, but being an automatic
+-- variable, its presence should be enough for the procedure to get
+-- strub enabled.
+
+with Strub_Attr;
+procedure Strub_Var is
+ X : Integer := 0;
+ pragma Machine_Attribute (X, "strub");
+begin
+ X := Strub_Attr.F (0);
+end Strub_Var;
+
+-- { dg-final { scan-ipa-dump-times "\[(\]strub \[(\]internal\[)\]\[)\]" 1 "strubm" } }
diff --git a/gcc/testsuite/gnat.dg/strub_var1.adb b/gcc/testsuite/gnat.dg/strub_var1.adb
new file mode 100644
index 0000000..6a504e0
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/strub_var1.adb
@@ -0,0 +1,20 @@
+-- { dg-do compile }
+
+with Strub_Attr;
+procedure Strub_Var1 is
+ type TA -- { dg-warning "does not apply to elements" }
+ is array (1..2) of Integer;
+ pragma Machine_Attribute (TA, "strub");
+
+ A : TA := (0, 0); -- { dg-warning "does not apply to elements" }
+
+ type TR is record -- { dg-warning "does not apply to fields" }
+ M, N : Integer;
+ end record;
+ pragma Machine_Attribute (TR, "strub");
+
+ R : TR := (0, 0);
+
+begin
+ A(2) := Strub_Attr.F (A(1));
+end Strub_Var1;
diff --git a/gcc/testsuite/lib/scanasm.exp b/gcc/testsuite/lib/scanasm.exp
index d30e361..a48794c 100644
--- a/gcc/testsuite/lib/scanasm.exp
+++ b/gcc/testsuite/lib/scanasm.exp
@@ -82,8 +82,8 @@ proc dg-scan { name positive testcase output_file orig_args } {
if { [string compare -length 14 $name scan-assembler] == 0 } {
# Remove LTO sections.
# ??? Somehow, .*? is still greedy.
- # regsub -all {(^|\n)[[:space:]]*\.section[[:space:]]*\.gnu\.lto_.*?\n(?=[[:space:]]*\.text\n)} $text {\1} text
- regsub -all {(^|\n)[[:space:]]*\.section[[:space:]]*\.gnu\.lto_(?:[^\n]*\n(?![[:space:]]*\.(section|text|data|bss)))*[^\n]*\n} $text {\1} text
+ # regsub -all {(^|\n)[[:space:]]*\.section[[:space:]]*"?\.gnu\.lto_.*?\n(?=[[:space:]]*\.text\n)} $text {\1} text
+ regsub -all {(^|\n)[[:space:]]*\.section[[:space:]]*"?\.gnu\.lto_(?:[^\n]*\n(?![[:space:]]*\.(section|text|data|bss)))*[^\n]*\n} $text {\1} text
}
set match [regexp -- $pattern $text]
@@ -503,7 +503,7 @@ proc scan-assembler-times { args } {
set fd [open $output_file r]
set text [read $fd]
close $fd
- regsub -all {(^|\n)[[:space:]]*\.section[[:space:]]*\.gnu\.lto_(?:[^\n]*\n(?![[:space:]]*\.(section|text|data|bss)))*[^\n]*\n} $text {\1} text
+ regsub -all {(^|\n)[[:space:]]*\.section[[:space:]]*"?\.gnu\.lto_(?:[^\n]*\n(?![[:space:]]*\.(section|text|data|bss)))*[^\n]*\n} $text {\1} text
set result_count [regexp -all -- $pattern $text]
if {$result_count == $times} {
@@ -565,7 +565,7 @@ proc scan-assembler-dem { args } {
set output [remote_exec host "$cxxfilt" "" "$output_file"]
set text [lindex $output 1]
- regsub -all {(^|\n)[[:space:]]*\.section[[:space:]]*\.gnu\.lto_(?:[^\n]*\n(?![[:space:]]*\.(section|text|data|bss)))*[^\n]*\n} $text {\1} text
+ regsub -all {(^|\n)[[:space:]]*\.section[[:space:]]*"?\.gnu\.lto_(?:[^\n]*\n(?![[:space:]]*\.(section|text|data|bss)))*[^\n]*\n} $text {\1} text
if [regexp -- $pattern $text] {
pass "$testcase scan-assembler-dem $pp_pattern"
@@ -622,7 +622,7 @@ proc scan-assembler-dem-not { args } {
set output [remote_exec host "$cxxfilt" "" "$output_file"]
set text [lindex $output 1]
- regsub -all {(^|\n)[[:space:]]*\.section[[:space:]]*\.gnu\.lto_(?:[^\n]*\n(?![[:space:]]*\.(section|text|data|bss)))*[^\n]*\n} $text {\1} text
+ regsub -all {(^|\n)[[:space:]]*\.section[[:space:]]*"?\.gnu\.lto_(?:[^\n]*\n(?![[:space:]]*\.(section|text|data|bss)))*[^\n]*\n} $text {\1} text
if ![regexp -- $pattern $text] {
pass "$testcase scan-assembler-dem-not $pp_pattern"
diff --git a/gcc/testsuite/lib/scanoffload.exp b/gcc/testsuite/lib/scanoffload.exp
index 8315820..c199798 100644
--- a/gcc/testsuite/lib/scanoffload.exp
+++ b/gcc/testsuite/lib/scanoffload.exp
@@ -38,6 +38,8 @@ proc scoff-adjust { args idx target } {
# Wrapper for scan procs.
# Argument 0 is the index of the argument to replace when calling
# argument 1 with the remaining arguments. Use end-1 or end or so.
+# If set, the 'global offload_target' specifies one specific offload target to
+# test, otherwise iterate over all 'global offload_targets'.
proc scoff { args } {
set idx [lindex $args 0]
set prc [lindex $args 1]
@@ -59,3 +61,22 @@ proc scoff { args } {
}
}
}
+
+# Wrapper so that only for a specific offload target (first argument) we
+# execute a 'dg-final' command (remaining arguments).
+proc only_for_offload_target { args } {
+ set override_offload_target [lindex $args 0]
+ set cmd [lreplace $args 0 0]
+
+ global offload_target
+ if [info exists offload_target] {
+ set original_offload_target $offload_target
+ }
+ set offload_target $override_offload_target
+ eval $cmd
+ if [info exists original_offload_target] {
+ set offload_target $original_offload_target
+ } else {
+ unset offload_target
+ }
+}
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index 87b2ae5..3fcce6b 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -1823,55 +1823,6 @@ proc check_linker_plugin_available { } {
} "-flto -fuse-linker-plugin"]
}
-# Return 1 if the we can build a vector example with proper -march flags
-# and the current target can execute it, 0 otherwise. Cache the result.
-
-proc check_effective_target_riscv_vector_hw { } {
-
- return [check_runtime riscv_vector_hw32 {
- int main (void)
- {
- asm ("vsetivli zero,8,e16,m1,ta,ma");
- asm ("vadd.vv v8,v8,v16" : : : "v8");
- return 0;
- }
- } ""] || [check_runtime riscv_vector_hw64 {
- int main (void)
- {
- asm ("vsetivli zero,8,e16,m1,ta,ma");
- asm ("vadd.vv v8,v8,v16" : : : "v8");
- return 0;
- }
- } ""]
-}
-
-# Return 1 if the we can build a Zvfh vector example with proper -march flags
-# and the current target can execute it, 0 otherwise. Cache the result.
-
-proc check_effective_target_riscv_zvfh_hw { } {
- if ![check_effective_target_riscv_vector_hw] then {
- return 0
- }
-
- return [check_runtime riscv_zvfh_hw32 {
- int main (void)
- {
- asm ("vsetivli zero,8,e16,m1,ta,ma");
- asm ("vfadd.vv v8,v8,v16" : : : "v8");
- return 0;
- }
- } "-march=rv32gcv_zvfh -mabi=ilp32d"]
- || [check_runtime riscv_zvfh_hw64 {
- int main (void)
- {
- asm ("vsetivli zero,8,e16,m1,ta,ma");
- asm ("vfadd.vv v8,v8,v16" : : : "v8");
- return 0;
- }
- } "-march=rv64gcv_zvfh -mabi=lp64d"]
-}
-
-
# Return 1 if the target is RV32, 0 otherwise. Cache the result.
proc check_effective_target_rv32 { } {
@@ -4435,6 +4386,30 @@ proc aarch64_sve_bits { } {
}]
}
+# Return 1 if this is an AArch64 target that generates instructions for SME.
+proc check_effective_target_aarch64_sme { } {
+ if { ![istarget aarch64*-*-*] } {
+ return 0
+ }
+ return [check_no_compiler_messages aarch64_sme assembly {
+ #if !defined (__ARM_FEATURE_SME)
+ #error FOO
+ #endif
+ }]
+}
+
+# Return 1 if this is an AArch64 target that generates instructions for SME.
+proc check_effective_target_aarch64_sme2 { } {
+ if { ![istarget aarch64*-*-*] } {
+ return 0
+ }
+ return [check_no_compiler_messages aarch64_sme2 assembly {
+ #if !defined (__ARM_FEATURE_SME2)
+ #error FOO
+ #endif
+ }]
+}
+
# Return 1 if this is a compiler supporting ARC atomic operations
proc check_effective_target_arc_atomic { } {
return [check_no_compiler_messages arc_atomic assembly {
@@ -10082,6 +10057,18 @@ proc check_effective_target_c99_runtime { } {
}]
}
+# Return 1 if the target supports DWARF CFI directives.
+
+proc check_effective_target_cfi { } {
+ return [check_no_compiler_messages cfi assembly {
+ #ifdef __GCC_HAVE_DWARF2_CFI_ASM
+ /* ok */
+ #else
+ #error unsupported
+ #endif
+ } ""]
+}
+
# Return 1 if the target provides the D runtime.
proc check_effective_target_d_runtime { } {
@@ -10212,6 +10199,7 @@ proc check_effective_target_avx512fp16 { } {
void foo (void)
{
asm volatile ("vmovw %edi, %xmm0");
+ asm volatile ("vfcmulcph %xmm1, %xmm2, %xmm3{%k1}");
}
} "-O2 -mavx512fp16" ]
}
@@ -11574,7 +11562,7 @@ proc check_vect_support_and_set_flags { } {
} elseif [istarget amdgcn-*-*] {
set dg-do-what-default run
} elseif [istarget riscv64-*-*] {
- if [check_effective_target_riscv_vector_hw] {
+ if [check_effective_target_riscv_v] {
lappend DEFAULT_VECTCFLAGS "--param" "riscv-vector-abi"
set dg-do-what-default run
} else {
@@ -11651,7 +11639,8 @@ proc check_effective_target_aarch64_tiny { } {
# various architecture extensions via the .arch_extension pseudo-op.
foreach { aarch64_ext } { "fp" "simd" "crypto" "crc" "lse" "dotprod" "sve"
- "i8mm" "f32mm" "f64mm" "bf16" "sb" "sve2" "ls64" } {
+ "i8mm" "f32mm" "f64mm" "bf16" "sb" "sve2" "ls64"
+ "sme" "sme-i16i64" "sme2" } {
eval [string map [list FUNC $aarch64_ext] {
proc check_effective_target_aarch64_asm_FUNC_ok { } {
if { [istarget aarch64*-*-*] } {
diff --git a/gcc/tree-cfg.cc b/gcc/tree-cfg.cc
index a30a2de..d784b91 100644
--- a/gcc/tree-cfg.cc
+++ b/gcc/tree-cfg.cc
@@ -4673,6 +4673,16 @@ verify_gimple_assign_single (gassign *stmt)
error ("%qs in gimple IL", code_name);
return true;
+ case WITH_SIZE_EXPR:
+ if (!is_gimple_val (TREE_OPERAND (rhs1, 1)))
+ {
+ error ("invalid %qs size argument in load", code_name);
+ debug_generic_stmt (lhs);
+ debug_generic_stmt (rhs1);
+ return true;
+ }
+ rhs1 = TREE_OPERAND (rhs1, 0);
+ /* Fallthru. */
case COMPONENT_REF:
case BIT_FIELD_REF:
case ARRAY_REF:
@@ -4810,12 +4820,6 @@ verify_gimple_assign_single (gassign *stmt)
}
return res;
- case WITH_SIZE_EXPR:
- error ("%qs RHS in assignment statement",
- get_tree_code_name (rhs_code));
- debug_generic_expr (rhs1);
- return true;
-
case OBJ_TYPE_REF:
/* FIXME. */
return res;
@@ -5790,6 +5794,7 @@ gimple_verify_flow_info (void)
{
gimple *stmt = gsi_stmt (gsi);
+ /* Do NOT disregard debug stmts after found_ctrl_stmt. */
if (found_ctrl_stmt)
{
error ("control flow in the middle of basic block %d",
@@ -6595,7 +6600,7 @@ gimple_duplicate_bb (basic_block bb, copy_bb_data *id)
if (!existed)
{
gcc_assert (MR_DEPENDENCE_CLIQUE (op) <= cfun->last_clique);
- newc = ++cfun->last_clique;
+ newc = get_new_clique (cfun);
}
MR_DEPENDENCE_CLIQUE (op) = newc;
}
diff --git a/gcc/tree-inline.cc b/gcc/tree-inline.cc
index 5984716..a4fc839 100644
--- a/gcc/tree-inline.cc
+++ b/gcc/tree-inline.cc
@@ -1002,7 +1002,7 @@ remap_dependence_clique (copy_body_data *id, unsigned short clique)
/* Clique 1 is reserved for local ones set by PTA. */
if (cfun->last_clique == 0)
cfun->last_clique = 1;
- newc = ++cfun->last_clique;
+ newc = get_new_clique (cfun);
}
return newc;
}
@@ -4098,17 +4098,16 @@ inline_forbidden_p (tree fndecl)
static bool
function_attribute_inlinable_p (const_tree fndecl)
{
- if (targetm.attribute_table)
+ for (auto scoped_attributes : targetm.attribute_table)
{
const_tree a;
for (a = DECL_ATTRIBUTES (fndecl); a; a = TREE_CHAIN (a))
{
const_tree name = get_attribute_name (a);
- int i;
- for (i = 0; targetm.attribute_table[i].name != NULL; i++)
- if (is_attribute_p (targetm.attribute_table[i].name, name))
+ for (const attribute_spec &attribute : scoped_attributes->attributes)
+ if (is_attribute_p (attribute.name, name))
return targetm.function_attribute_inlinable_p (fndecl);
}
}
diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
index 09e6ada..de2820b 100644
--- a/gcc/tree-pass.h
+++ b/gcc/tree-pass.h
@@ -510,8 +510,9 @@ extern gimple_opt_pass *make_pass_adjust_alignment (gcc::context *ctxt);
/* IPA Passes */
extern simple_ipa_opt_pass *make_pass_ipa_lower_emutls (gcc::context *ctxt);
-extern simple_ipa_opt_pass
- *make_pass_ipa_function_and_variable_visibility (gcc::context *ctxt);
+extern simple_ipa_opt_pass *make_pass_ipa_function_and_variable_visibility (gcc::context *ctxt);
+extern simple_ipa_opt_pass *make_pass_ipa_strub_mode (gcc::context *ctxt);
+extern simple_ipa_opt_pass *make_pass_ipa_strub (gcc::context *ctxt);
extern simple_ipa_opt_pass *make_pass_ipa_tree_profile (gcc::context *ctxt);
extern simple_ipa_opt_pass *make_pass_ipa_auto_profile (gcc::context *ctxt);
@@ -616,6 +617,8 @@ extern rtl_opt_pass *make_pass_gcse2 (gcc::context *ctxt);
extern rtl_opt_pass *make_pass_split_after_reload (gcc::context *ctxt);
extern rtl_opt_pass *make_pass_thread_prologue_and_epilogue (gcc::context
*ctxt);
+extern rtl_opt_pass *make_pass_late_thread_prologue_and_epilogue (gcc::context
+ *ctxt);
extern rtl_opt_pass *make_pass_zero_call_used_regs (gcc::context *ctxt);
extern rtl_opt_pass *make_pass_stack_adjustments (gcc::context *ctxt);
extern rtl_opt_pass *make_pass_sched_fusion (gcc::context *ctxt);
diff --git a/gcc/tree-scalar-evolution.cc b/gcc/tree-scalar-evolution.cc
index f61277c..385fc64 100644
--- a/gcc/tree-scalar-evolution.cc
+++ b/gcc/tree-scalar-evolution.cc
@@ -3739,7 +3739,6 @@ final_value_replacement_loop (class loop *loop)
split_loop_exit_edge (exit);
/* Set stmt insertion pointer. All stmts are inserted before this point. */
- gimple_stmt_iterator gsi = gsi_after_labels (exit->dest);
class loop *ex_loop
= superloop_at_depth (loop,
@@ -3841,11 +3840,17 @@ final_value_replacement_loop (class loop *loop)
print_gimple_stmt (dump_file, phi, 0);
fprintf (dump_file, " with expr: ");
print_generic_expr (dump_file, def);
+ fprintf (dump_file, "\n");
}
any = true;
def = unshare_expr (def);
remove_phi_node (&psi, false);
+ /* Propagate constants immediately, but leave an unused initialization
+ around to avoid invalidating the SCEV cache. */
+ if (CONSTANT_CLASS_P (def))
+ replace_uses_by (rslt, def);
+
/* Create the replacement statements. */
gimple_seq stmts;
def = force_gimple_operand (def, &stmts, false, NULL_TREE);
@@ -3874,10 +3879,11 @@ final_value_replacement_loop (class loop *loop)
gsi_next (&gsi2);
}
}
+ gimple_stmt_iterator gsi = gsi_after_labels (exit->dest);
gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
if (dump_file)
{
- fprintf (dump_file, "\n final stmt:\n ");
+ fprintf (dump_file, " final stmt:\n ");
print_gimple_stmt (dump_file, SSA_NAME_DEF_STMT (rslt), 0);
fprintf (dump_file, "\n");
}
diff --git a/gcc/tree-sra.cc b/gcc/tree-sra.cc
index 0349410..3bd0c7a 100644
--- a/gcc/tree-sra.cc
+++ b/gcc/tree-sra.cc
@@ -1268,18 +1268,27 @@ abnormal_edge_after_stmt_p (gimple *stmt, enum out_edge_check *oe_check)
}
/* Scan expression EXPR which is an argument of a call and create access
- structures for all accesses to candidates for scalarization. Return true if
- any access has been inserted. STMT must be the statement from which the
- expression is taken. */
+ structures for all accesses to candidates for scalarization. Return true
+ if any access has been inserted. STMT must be the statement from which the
+ expression is taken. CAN_BE_RETURNED must be true if call argument flags
+ do not rule out that the argument is directly returned. OE_CHECK is used
+ to remember result of a test for abnormal outgoing edges after this
+ statement. */
static bool
-build_access_from_call_arg (tree expr, gimple *stmt,
+build_access_from_call_arg (tree expr, gimple *stmt, bool can_be_returned,
enum out_edge_check *oe_check)
{
if (TREE_CODE (expr) == ADDR_EXPR)
{
tree base = get_base_address (TREE_OPERAND (expr, 0));
+ if (can_be_returned)
+ {
+ disqualify_base_of_expr (base, "Address possibly returned, "
+ "leading to an alis SRA may not know.");
+ return false;
+ }
if (abnormal_edge_after_stmt_p (stmt, oe_check))
{
disqualify_base_of_expr (base, "May lead to need to add statements "
@@ -1508,12 +1517,25 @@ scan_function (void)
case GIMPLE_CALL:
{
enum out_edge_check oe_check = SRA_OUTGOING_EDGES_UNCHECKED;
- for (i = 0; i < gimple_call_num_args (stmt); i++)
- ret |= build_access_from_call_arg (gimple_call_arg (stmt, i),
- stmt, &oe_check);
+ gcall *call = as_a <gcall *> (stmt);
+ for (i = 0; i < gimple_call_num_args (call); i++)
+ {
+ bool can_be_returned;
+ if (gimple_call_lhs (call))
+ {
+ int af = gimple_call_arg_flags (call, i);
+ can_be_returned = !(af & EAF_NOT_RETURNED_DIRECTLY);
+ }
+ else
+ can_be_returned = false;
+ ret |= build_access_from_call_arg (gimple_call_arg (call,
+ i),
+ stmt, can_be_returned,
+ &oe_check);
+ }
if (gimple_call_chain(stmt))
- ret |= build_access_from_call_arg (gimple_call_chain(stmt),
- stmt, &oe_check);
+ ret |= build_access_from_call_arg (gimple_call_chain(call),
+ stmt, false, &oe_check);
}
t = gimple_call_lhs (stmt);
diff --git a/gcc/tree-ssa-ccp.cc b/gcc/tree-ssa-ccp.cc
index 1a555ae..03ff88a 100644
--- a/gcc/tree-ssa-ccp.cc
+++ b/gcc/tree-ssa-ccp.cc
@@ -3073,7 +3073,9 @@ optimize_stack_restore (gimple_stmt_iterator i)
if (!callee
|| !fndecl_built_in_p (callee, BUILT_IN_NORMAL)
/* All regular builtins are ok, just obviously not alloca. */
- || ALLOCA_FUNCTION_CODE_P (DECL_FUNCTION_CODE (callee)))
+ || ALLOCA_FUNCTION_CODE_P (DECL_FUNCTION_CODE (callee))
+ /* Do not remove stack updates before strub leave. */
+ || fndecl_built_in_p (callee, BUILT_IN___STRUB_LEAVE))
return NULL_TREE;
if (fndecl_built_in_p (callee, BUILT_IN_STACK_RESTORE))
diff --git a/gcc/tree-ssa-dce.cc b/gcc/tree-ssa-dce.cc
index bbdf931..4e371b2 100644
--- a/gcc/tree-ssa-dce.cc
+++ b/gcc/tree-ssa-dce.cc
@@ -469,7 +469,8 @@ find_obviously_necessary_stmts (bool aggressive)
static bool
ref_may_be_aliased (tree ref)
{
- gcc_assert (TREE_CODE (ref) != WITH_SIZE_EXPR);
+ if (TREE_CODE (ref) == WITH_SIZE_EXPR)
+ ref = TREE_OPERAND (ref, 0);
while (handled_component_p (ref))
ref = TREE_OPERAND (ref, 0);
if ((TREE_CODE (ref) == MEM_REF || TREE_CODE (ref) == TARGET_MEM_REF)
diff --git a/gcc/tree-ssa-loop-ch.cc b/gcc/tree-ssa-loop-ch.cc
index 461416e..dd9ee40 100644
--- a/gcc/tree-ssa-loop-ch.cc
+++ b/gcc/tree-ssa-loop-ch.cc
@@ -40,6 +40,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-range-path.h"
#include "gimple-pretty-print.h"
#include "cfganal.h"
+#include "tree-ssa-loop-manip.h"
/* Return path query insteance for testing ranges of statements
in headers of LOOP contained in basic block BB.
@@ -1149,7 +1150,13 @@ ch_base::copy_headers (function *fun)
if (!loops_to_unloop.is_empty ())
{
bool irred_invalidated;
- unloop_loops (loops_to_unloop, loops_to_unloop_nunroll, NULL, &irred_invalidated);
+ auto_bitmap lc_invalidated;
+ auto_vec<edge> edges_to_remove;
+ unloop_loops (loops_to_unloop, loops_to_unloop_nunroll, edges_to_remove,
+ lc_invalidated, &irred_invalidated);
+ if (loops_state_satisfies_p (fun, LOOP_CLOSED_SSA)
+ && !bitmap_empty_p (lc_invalidated))
+ rewrite_into_loop_closed_ssa (NULL, 0);
changed = true;
}
free (bbs);
diff --git a/gcc/tree-ssa-loop-ivcanon.cc b/gcc/tree-ssa-loop-ivcanon.cc
index 5856f76..67f2318 100644
--- a/gcc/tree-ssa-loop-ivcanon.cc
+++ b/gcc/tree-ssa-loop-ivcanon.cc
@@ -667,6 +667,7 @@ static bitmap peeled_loops;
void
unloop_loops (vec<class loop *> &loops_to_unloop,
vec<int> &loops_to_unloop_nunroll,
+ vec<edge> &edges_to_remove,
bitmap loop_closed_ssa_invalidated,
bool *irred_invalidated)
{
@@ -1361,7 +1362,7 @@ canonicalize_induction_variables (void)
}
gcc_assert (!need_ssa_update_p (cfun));
- unloop_loops (loops_to_unloop, loops_to_unloop_nunroll,
+ unloop_loops (loops_to_unloop, loops_to_unloop_nunroll, edges_to_remove,
loop_closed_ssa_invalidated, &irred_invalidated);
loops_to_unloop.release ();
loops_to_unloop_nunroll.release ();
@@ -1511,9 +1512,8 @@ tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
{
unsigned i;
- unloop_loops (loops_to_unloop,
- loops_to_unloop_nunroll,
- loop_closed_ssa_invalidated,
+ unloop_loops (loops_to_unloop, loops_to_unloop_nunroll,
+ edges_to_remove, loop_closed_ssa_invalidated,
&irred_invalidated);
loops_to_unloop.release ();
loops_to_unloop_nunroll.release ();
diff --git a/gcc/tree-ssa-operands.cc b/gcc/tree-ssa-operands.cc
index 57e393ae..b0516a0 100644
--- a/gcc/tree-ssa-operands.cc
+++ b/gcc/tree-ssa-operands.cc
@@ -30,7 +30,6 @@ along with GCC; see the file COPYING3. If not see
#include "stmt.h"
#include "print-tree.h"
#include "dumpfile.h"
-#include "value-query.h"
/* This file contains the code required to manage the operands cache of the
@@ -1146,8 +1145,6 @@ update_stmt_operands (struct function *fn, gimple *stmt)
gcc_assert (gimple_modified_p (stmt));
operands_scanner (fn, stmt).build_ssa_operands ();
gimple_set_modified (stmt, false);
- // Inform the active range query an update has happened.
- get_range_query (fn)->update_stmt (stmt);
timevar_pop (TV_TREE_OPS);
}
diff --git a/gcc/tree-vect-loop.cc b/gcc/tree-vect-loop.cc
index 3df020d..dd584ab 100644
--- a/gcc/tree-vect-loop.cc
+++ b/gcc/tree-vect-loop.cc
@@ -8504,11 +8504,11 @@ vect_transform_reduction (loop_vec_info loop_vinfo,
gcc_assert (single_defuse_cycle
&& (reduc_index == 1 || reduc_index == 2));
vect_get_vec_defs (loop_vinfo, stmt_info, slp_node, ncopies,
- op.ops[0], &vec_oprnds0, truth_type_for (vectype_in),
+ op.ops[0], truth_type_for (vectype_in), &vec_oprnds0,
reduc_index == 1 ? NULL_TREE : op.ops[1],
- &vec_oprnds1, NULL_TREE,
+ NULL_TREE, &vec_oprnds1,
reduc_index == 2 ? NULL_TREE : op.ops[2],
- &vec_oprnds2, NULL_TREE);
+ NULL_TREE, &vec_oprnds2);
}
/* For single def-use cycles get one copy of the vectorized reduction
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index bf8c997..390c847 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -1267,10 +1267,10 @@ vect_get_vec_defs_for_operand (vec_info *vinfo, stmt_vec_info stmt_vinfo,
void
vect_get_vec_defs (vec_info *vinfo, stmt_vec_info stmt_info, slp_tree slp_node,
unsigned ncopies,
- tree op0, vec<tree> *vec_oprnds0, tree vectype0,
- tree op1, vec<tree> *vec_oprnds1, tree vectype1,
- tree op2, vec<tree> *vec_oprnds2, tree vectype2,
- tree op3, vec<tree> *vec_oprnds3, tree vectype3)
+ tree op0, tree vectype0, vec<tree> *vec_oprnds0,
+ tree op1, tree vectype1, vec<tree> *vec_oprnds1,
+ tree op2, tree vectype2, vec<tree> *vec_oprnds2,
+ tree op3, tree vectype3, vec<tree> *vec_oprnds3)
{
if (slp_node)
{
@@ -1309,10 +1309,10 @@ vect_get_vec_defs (vec_info *vinfo, stmt_vec_info stmt_info, slp_tree slp_node,
tree op3, vec<tree> *vec_oprnds3)
{
vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies,
- op0, vec_oprnds0, NULL_TREE,
- op1, vec_oprnds1, NULL_TREE,
- op2, vec_oprnds2, NULL_TREE,
- op3, vec_oprnds3, NULL_TREE);
+ op0, NULL_TREE, vec_oprnds0,
+ op1, NULL_TREE, vec_oprnds1,
+ op2, NULL_TREE, vec_oprnds2,
+ op3, NULL_TREE, vec_oprnds3);
}
/* Helper function called by vect_finish_replace_stmt and
@@ -2976,6 +2976,15 @@ vectorizable_bswap (vec_info *vinfo,
gcc_assert (ncopies >= 1);
+ if (TYPE_SIZE (vectype_in) != TYPE_SIZE (vectype))
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "mismatched vector sizes %T and %T\n",
+ vectype_in, vectype);
+ return false;
+ }
+
tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in);
if (! char_vectype)
return false;
@@ -5657,7 +5666,7 @@ vectorizable_conversion (vec_info *vinfo,
{
case NONE:
vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies,
- op0, &vec_oprnds0);
+ op0, vectype_in, &vec_oprnds0);
/* vec_dest is intermediate type operand when multi_step_cvt. */
if (multi_step_cvt)
{
@@ -5696,9 +5705,9 @@ vectorizable_conversion (vec_info *vinfo,
generate more than one vector stmt - i.e - we need to "unroll"
the vector stmt by a factor VF/nunits. */
vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies * ninputs,
- op0, &vec_oprnds0,
+ op0, vectype_in, &vec_oprnds0,
code == WIDEN_LSHIFT_EXPR ? NULL_TREE : op1,
- &vec_oprnds1);
+ vectype_in, &vec_oprnds1);
if (code == WIDEN_LSHIFT_EXPR)
{
int oprnds_size = vec_oprnds0.length ();
@@ -5753,7 +5762,7 @@ vectorizable_conversion (vec_info *vinfo,
generate more than one vector stmt - i.e - we need to "unroll"
the vector stmt by a factor VF/nunits. */
vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies * ninputs,
- op0, &vec_oprnds0);
+ op0, vectype_in, &vec_oprnds0);
/* Arguments are ready. Create the new vector stmts. */
if (cvt_type && modifier == NARROW_DST)
FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
@@ -12248,17 +12257,17 @@ vectorizable_condition (vec_info *vinfo,
/* Handle cond expr. */
if (masked)
vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies,
- cond_expr, &vec_oprnds0, comp_vectype,
- then_clause, &vec_oprnds2, vectype,
+ cond_expr, comp_vectype, &vec_oprnds0,
+ then_clause, vectype, &vec_oprnds2,
reduction_type != EXTRACT_LAST_REDUCTION
- ? else_clause : NULL, &vec_oprnds3, vectype);
+ ? else_clause : NULL, vectype, &vec_oprnds3);
else
vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies,
- cond_expr0, &vec_oprnds0, comp_vectype,
- cond_expr1, &vec_oprnds1, comp_vectype,
- then_clause, &vec_oprnds2, vectype,
+ cond_expr0, comp_vectype, &vec_oprnds0,
+ cond_expr1, comp_vectype, &vec_oprnds1,
+ then_clause, vectype, &vec_oprnds2,
reduction_type != EXTRACT_LAST_REDUCTION
- ? else_clause : NULL, &vec_oprnds3, vectype);
+ ? else_clause : NULL, vectype, &vec_oprnds3);
/* Arguments are ready. Create the new vector stmt. */
FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
@@ -12621,8 +12630,8 @@ vectorizable_comparison_1 (vec_info *vinfo, tree vectype,
mask = vect_create_destination_var (lhs, mask_type);
vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies,
- rhs1, &vec_oprnds0, vectype,
- rhs2, &vec_oprnds1, vectype);
+ rhs1, vectype, &vec_oprnds0,
+ rhs2, vectype, &vec_oprnds1);
if (swap_p)
std::swap (vec_oprnds0, vec_oprnds1);
diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h
index e4d7ab4..1810833 100644
--- a/gcc/tree-vectorizer.h
+++ b/gcc/tree-vectorizer.h
@@ -2263,10 +2263,10 @@ void vect_get_vec_defs (vec_info *, stmt_vec_info, slp_tree, unsigned,
tree = NULL, vec<tree> * = NULL,
tree = NULL, vec<tree> * = NULL);
void vect_get_vec_defs (vec_info *, stmt_vec_info, slp_tree, unsigned,
- tree, vec<tree> *, tree,
- tree = NULL, vec<tree> * = NULL, tree = NULL,
- tree = NULL, vec<tree> * = NULL, tree = NULL,
- tree = NULL, vec<tree> * = NULL, tree = NULL);
+ tree, tree, vec<tree> *,
+ tree = NULL, tree = NULL, vec<tree> * = NULL,
+ tree = NULL, tree = NULL, vec<tree> * = NULL,
+ tree = NULL, tree = NULL, vec<tree> * = NULL);
extern tree vect_init_vector (vec_info *, stmt_vec_info, tree, tree,
gimple_stmt_iterator *);
extern tree vect_get_slp_vect_def (slp_tree, unsigned);
diff --git a/gcc/tree.cc b/gcc/tree.cc
index e9f703e..10c6e1e 100644
--- a/gcc/tree.cc
+++ b/gcc/tree.cc
@@ -10274,6 +10274,8 @@ build_opaque_vector_type (tree innertype, poly_int64 nunits)
TYPE_NEXT_VARIANT (cand) = TYPE_NEXT_VARIANT (t);
TYPE_NEXT_VARIANT (t) = cand;
TYPE_MAIN_VARIANT (cand) = TYPE_MAIN_VARIANT (t);
+ /* Type variants have no alias set defined. */
+ TYPE_ALIAS_SET (cand) = -1;
return cand;
}
diff --git a/gcc/tsystem.h b/gcc/tsystem.h
index 081c733..c49ff57 100644
--- a/gcc/tsystem.h
+++ b/gcc/tsystem.h
@@ -50,6 +50,14 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
extern void *malloc (size_t);
#endif
+#ifndef calloc
+extern void *calloc(size_t, size_t);
+#endif
+
+#ifndef realloc
+extern void *realloc(void *, size_t);
+#endif
+
#ifndef free
extern void free (void *);
#endif
diff --git a/gcc/value-query.h b/gcc/value-query.h
index 429446b..0a6f18b 100644
--- a/gcc/value-query.h
+++ b/gcc/value-query.h
@@ -71,9 +71,6 @@ public:
virtual bool range_on_edge (vrange &r, edge, tree expr);
virtual bool range_of_stmt (vrange &r, gimple *, tree name = NULL);
- // When the IL in a stmt is changed, call this for better results.
- virtual void update_stmt (gimple *) { }
-
// Query if there is any relation between SSA1 and SSA2.
relation_kind query_relation (gimple *s, tree ssa1, tree ssa2,
bool get_range = true);
diff --git a/gcc/value-range.h b/gcc/value-range.h
index 330e6f7..33f204a 100644
--- a/gcc/value-range.h
+++ b/gcc/value-range.h
@@ -1550,4 +1550,15 @@ void frange_arithmetic (enum tree_code, tree, REAL_VALUE_TYPE &,
const REAL_VALUE_TYPE &, const REAL_VALUE_TYPE &,
const REAL_VALUE_TYPE &);
+// Return true if TYPE1 and TYPE2 are compatible range types.
+
+inline bool
+range_compatible_p (tree type1, tree type2)
+{
+ // types_compatible_p requires conversion in both directions to be useless.
+ // GIMPLE only requires a cast one way in order to be compatible.
+ // Ranges really only need the sign and precision to be the same.
+ return (TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
+ && TYPE_SIGN (type1) == TYPE_SIGN (type2));
+}
#endif // GCC_VALUE_RANGE_H
diff --git a/gcc/varasm.cc b/gcc/varasm.cc
index 6ae35ed..167aea8 100644
--- a/gcc/varasm.cc
+++ b/gcc/varasm.cc
@@ -2461,6 +2461,10 @@ contains_pointers_p (tree type)
it all the way to final. See PR 17982 for further discussion. */
static GTY(()) tree pending_assemble_externals;
+/* A similar list of pending libcall symbols. We only want to declare
+ symbols that are actually used in the final assembly. */
+static GTY(()) rtx pending_libcall_symbols;
+
#ifdef ASM_OUTPUT_EXTERNAL
/* Some targets delay some output to final using TARGET_ASM_FILE_END.
As a result, assemble_external can be called after the list of externals
@@ -2520,8 +2524,17 @@ process_pending_assemble_externals (void)
for (list = pending_assemble_externals; list; list = TREE_CHAIN (list))
assemble_external_real (TREE_VALUE (list));
+ for (rtx list = pending_libcall_symbols; list; list = XEXP (list, 1))
+ {
+ rtx symbol = XEXP (list, 0);
+ tree id = get_identifier (XSTR (symbol, 0));
+ if (TREE_SYMBOL_REFERENCED (id))
+ targetm.asm_out.external_libcall (symbol);
+ }
+
pending_assemble_externals = 0;
pending_assemble_externals_processed = true;
+ pending_libcall_symbols = NULL_RTX;
delete pending_assemble_externals_set;
#endif
}
@@ -2594,8 +2607,17 @@ assemble_external_libcall (rtx fun)
/* Declare library function name external when first used, if nec. */
if (! SYMBOL_REF_USED (fun))
{
+#ifdef ASM_OUTPUT_EXTERNAL
+ gcc_assert (!pending_assemble_externals_processed);
+#endif
SYMBOL_REF_USED (fun) = 1;
- targetm.asm_out.external_libcall (fun);
+ /* Make sure the libcall symbol is in the symtab so any
+ reference to it will mark its tree node as referenced, via
+ assemble_name_resolve. These are eventually emitted, if
+ used, in process_pending_assemble_externals. */
+ get_identifier (XSTR (fun, 0));
+ pending_libcall_symbols = gen_rtx_EXPR_LIST (VOIDmode, fun,
+ pending_libcall_symbols);
}
}
diff --git a/gcc/wide-int.cc b/gcc/wide-int.cc
index 5426766..51c91db 100644
--- a/gcc/wide-int.cc
+++ b/gcc/wide-int.cc
@@ -1477,10 +1477,10 @@ wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
if (UNLIKELY (prec > WIDE_INT_MAX_INL_PRECISION))
{
unsigned HOST_HALF_WIDE_INT *buf
- = XALLOCAVEC (unsigned HOST_HALF_WIDE_INT, 4 * 4 * blocks_needed);
+ = XALLOCAVEC (unsigned HOST_HALF_WIDE_INT, 4 * half_blocks_needed);
u = buf;
- v = u + 4 * blocks_needed;
- r = v + 4 * blocks_needed;
+ v = u + half_blocks_needed;
+ r = v + half_blocks_needed;
}
/* We do unsigned mul and then correct it. */
@@ -1675,8 +1675,9 @@ wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
Delight by Warren, which itself is a small modification of Knuth's
algorithm. M is the number of significant elements of U however
there needs to be at least one extra element of B_DIVIDEND
- allocated, N is the number of elements of B_DIVISOR. */
-static void
+ allocated, N is the number of elements of B_DIVISOR.
+ Return new value for N. */
+static int
divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
unsigned HOST_HALF_WIDE_INT *b_remainder,
unsigned HOST_HALF_WIDE_INT *b_dividend,
@@ -1707,7 +1708,7 @@ divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
* (unsigned HOST_WIDE_INT)b_divisor[0]));
}
b_remainder[0] = k;
- return;
+ return 1;
}
s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */
@@ -1770,6 +1771,10 @@ divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
b_dividend[j+n] += k;
}
}
+ /* If N > M, the main loop was skipped, quotient will be 0 and
+ we can't copy more than M half-limbs into the remainder, as they
+ aren't present in b_dividend (which has only M significant elements). */
+ n = MIN (n, m);
if (s)
for (i = 0; i < n; i++)
b_remainder[i] = (b_dividend[i] >> s)
@@ -1777,6 +1782,7 @@ divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
else
for (i = 0; i < n; i++)
b_remainder[i] = b_dividend[i];
+ return n;
}
@@ -1943,14 +1949,14 @@ wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
{
unsigned HOST_HALF_WIDE_INT *buf
= XALLOCAVEC (unsigned HOST_HALF_WIDE_INT,
- 12 * dividend_blocks_needed
- + 4 * divisor_blocks_needed + 1);
+ 3 * dividend_blocks_needed + 1
+ + divisor_blocks_needed);
b_quotient = buf;
- b_remainder = b_quotient + 4 * dividend_blocks_needed;
- b_dividend = b_remainder + 4 * dividend_blocks_needed;
- b_divisor = b_dividend + 4 * dividend_blocks_needed + 1;
+ b_remainder = b_quotient + dividend_blocks_needed;
+ b_dividend = b_remainder + dividend_blocks_needed;
+ b_divisor = b_dividend + dividend_blocks_needed + 1;
memset (b_quotient, 0,
- 4 * dividend_blocks_needed * sizeof (HOST_HALF_WIDE_INT));
+ dividend_blocks_needed * sizeof (HOST_HALF_WIDE_INT));
}
wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
dividend_blocks_needed, dividend_prec, UNSIGNED);
@@ -1969,7 +1975,7 @@ wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
if (b_quotient == b_quotient_buf)
memset (b_quotient_buf, 0, sizeof (b_quotient_buf));
- divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
+ n = divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
unsigned int quotient_len = 0;
if (quotient)
@@ -2673,6 +2679,9 @@ wide_int_cc_tests ()
wi::shifted_mask (0, 128, false, 128));
ASSERT_EQ (wi::mask (128, true, 128),
wi::shifted_mask (0, 128, true, 128));
+ ASSERT_EQ (wi::multiple_of_p (from_int <widest_int> (1),
+ from_int <widest_int> (-128), UNSIGNED),
+ false);
}
} // namespace selftest